diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e5276e71726f81285ba0ffef867e917618..42b0539cffaf7f9e11b5985d4c7b5ce0c0715c6a 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -62,6 +62,7 @@ ata *);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
+	struct file *(*open)(struct dentry *,struct file *,const struct cred *);
 
 locking rules:
 	all may block
@@ -89,6 +90,7 @@ listxattr:	no
 removexattr:	yes
 truncate_range:	yes
 fiemap:		no
+open:		no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7161dc391fa94da40bab4c86a373019f84af59f1
--- /dev/null
+++ b/Documentation/filesystems/overlayfs.txt
@@ -0,0 +1,199 @@
+Written by: Neil Brown <neilb@suse.de>
+
+Overlay Filesystem
+==================
+
+This document describes a prototype for a new approach to providing
+overlay-filesystem functionality in Linux (sometimes referred to as
+union-filesystems).  An overlay-filesystem tries to present a
+filesystem which is the result of overlaying one filesystem on top
+of the other.
+
+The result will inevitably fail to look exactly like a normal
+filesystem for various technical reasons.  The expectation is that
+many use cases will be able to ignore these differences.
+
+This approach is 'hybrid' because the objects that appear in the
+filesystem do not all appear to belong to that filesystem.  In many
+cases an object accessed in the union will be indistinguishable
+from accessing the corresponding object from the original filesystem.
+This is most obvious from the 'st_dev' field returned by stat(2).
+
+While directories will report an st_dev from the overlay-filesystem,
+all non-directory objects will report an st_dev from the lower or
+upper filesystem that is providing the object.  Similarly st_ino will
+only be unique when combined with st_dev, and both of these can change
+over the lifetime of a non-directory object.  Many applications and
+tools ignore these values and will not be affected.
+
+Upper and Lower
+---------------
+
+An overlay filesystem combines two filesystems - an 'upper' filesystem
+and a 'lower' filesystem.  When a name exists in both filesystems, the
+object in the 'upper' filesystem is visible while the object in the
+'lower' filesystem is either hidden or, in the case of directories,
+merged with the 'upper' object.
+
+It would be more correct to refer to an upper and lower 'directory
+tree' rather than 'filesystem' as it is quite possible for both
+directory trees to be in the same filesystem and there is no
+requirement that the root of a filesystem be given for either upper or
+lower.
+
+The lower filesystem can be any filesystem supported by Linux and does
+not need to be writable.  The lower filesystem can even be another
+overlayfs.  The upper filesystem will normally be writable and if it
+is it must support the creation of trusted.* extended attributes, and
+must provide valid d_type in readdir responses, at least for symbolic
+links - so NFS is not suitable.
+
+A read-only overlay of two read-only filesystems may use any
+filesystem type.
+
+Directories
+-----------
+
+Overlaying mainly involves directories.  If a given name appears in both
+upper and lower filesystems and refers to a non-directory in either,
+then the lower object is hidden - the name refers only to the upper
+object.
+
+Where both upper and lower objects are directories, a merged directory
+is formed.
+
+At mount time, the two directories given as mount options are combined
+into a merged directory:
+
+  mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay
+
+Then whenever a lookup is requested in such a merged directory, the
+lookup is performed in each actual directory and the combined result
+is cached in the dentry belonging to the overlay filesystem.  If both
+actual lookups find directories, both are stored and a merged
+directory is created, otherwise only one is stored: the upper if it
+exists, else the lower.
+
+Only the lists of names from directories are merged.  Other content
+such as metadata and extended attributes are reported for the upper
+directory only.  These attributes of the lower directory are hidden.
+
+whiteouts and opaque directories
+--------------------------------
+
+In order to support rm and rmdir without changing the lower
+filesystem, an overlay filesystem needs to record in the upper filesystem
+that files have been removed.  This is done using whiteouts and opaque
+directories (non-directories are always opaque).
+
+The overlay filesystem uses extended attributes with a
+"trusted.overlay."  prefix to record these details.
+
+A whiteout is created as a symbolic link with target
+"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y".
+When a whiteout is found in the upper level of a merged directory, any
+matching name in the lower level is ignored, and the whiteout itself
+is also hidden.
+
+A directory is made opaque by setting the xattr "trusted.overlay.opaque"
+to "y".  Where the upper filesystem contains an opaque directory, any
+directory in the lower filesystem with the same name is ignored.
+
+readdir
+-------
+
+When a 'readdir' request is made on a merged directory, the upper and
+lower directories are each read and the name lists merged in the
+obvious way (upper is read first, then lower - entries that already
+exist are not re-added).  This merged name list is cached in the
+'struct file' and so remains as long as the file is kept open.  If the
+directory is opened and read by two processes at the same time, they
+will each have separate caches.  A seekdir to the start of the
+directory (offset 0) followed by a readdir will cause the cache to be
+discarded and rebuilt.
+
+This means that changes to the merged directory do not appear while a
+directory is being read.  This is unlikely to be noticed by many
+programs.
+
+seek offsets are assigned sequentially when the directories are read.
+Thus if
+  - read part of a directory
+  - remember an offset, and close the directory
+  - re-open the directory some time later
+  - seek to the remembered offset
+
+there may be little correlation between the old and new locations in
+the list of filenames, particularly if anything has changed in the
+directory.
+
+Readdir on directories that are not merged is simply handled by the
+underlying directory (upper or lower).
+
+
+Non-directories
+---------------
+
+Objects that are not directories (files, symlinks, device-special
+files etc.) are presented either from the upper or lower filesystem as
+appropriate.  When a file in the lower filesystem is accessed in a way
+that requires write-access, such as opening for write access, changing
+some metadata etc., the file is first copied from the lower filesystem
+to the upper filesystem (copy_up).  Note that creating a hard-link
+also requires copy_up, though of course creation of a symlink does
+not.
+
+The copy_up may turn out to be unnecessary, for example if the file is
+opened for read-write but the data is not modified.
+
+The copy_up process first makes sure that the containing directory
+exists in the upper filesystem - creating it and any parents as
+necessary.  It then creates the object with the same metadata (owner,
+mode, mtime, symlink-target etc.) and then if the object is a file, the
+data is copied from the lower to the upper filesystem.  Finally any
+extended attributes are copied up.
+
+Once the copy_up is complete, the overlay filesystem simply
+provides direct access to the newly created file in the upper
+filesystem - future operations on the file are barely noticed by the
+overlay filesystem (though an operation on the name of the file such as
+rename or unlink will of course be noticed and handled).
+
+
+Non-standard behavior
+---------------------
+
+The copy_up operation essentially creates a new, identical file and
+moves it over to the old name.  The new file may be on a different
+filesystem, so both st_dev and st_ino of the file may change.
+
+Any open files referring to this inode will access the old data and
+metadata.  Similarly any file locks obtained before copy_up will not
+apply to the copied up file.
+
+Once a file is opened with O_RDONLY, fchmod(2), fchown(2), futimesat(2)
+and fsetxattr(2) will fail with EROFS.
+
+If a file with multiple hard links is copied up, then this will
+"break" the link.  Changes will not be propagated to other names
+referring to the same inode.
+
+Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
+object in overlayfs will not contain valid absolute paths, only
+relative paths leading up to the filesystem's root.  This will be
+fixed in the future.
+
+Some operations are not atomic, for example a crash during copy_up or
+rename will leave the filesystem in an inconsistent state.  This will
+be addressed in the future.
+
+Changes to underlying filesystems
+---------------------------------
+
+Offline changes, when the overlay is not mounted, are allowed to either
+the upper or the lower trees.
+
+Changes to the underlying filesystems while part of a mounted overlay
+filesystem are not allowed.  If the underlying filesystem is changed,
+the behavior of the overlay is undefined, though it will not result in
+a crash or deadlock.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d0492028082c0ecda1a0931cc5100765624a80a..f06c91f7a86a3f3bdf204c70213ca76e007fc297 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -364,6 +364,8 @@ struct inode_operations {
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
+	struct file *(*open) (struct dentry *, struct file *,
+			      const struct cred *);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -475,6 +477,12 @@ otherwise noted.
   truncate_range: a method provided by the underlying filesystem to truncate a
   	range of blocks , i.e. punch a hole somewhere in a file.
 
+  open: this is an alternative to f_op->open(), the difference is that this
+	method may return any open file, not necessarily originating from the
+	same filesystem as the one i_op->open() was called on.  It may be useful
+	for stacking filesystems which want to allow native I/O directly on
+	underlying files.
+
 
 The Address Space Object
 ========================
diff --git a/Kbuild b/Kbuild
index b8b708ad6dc3815eb0d23bfea2c972d03b9477c0..bab221335bff02580f4ea972255f1b7e9350ad11 100644
--- a/Kbuild
+++ b/Kbuild
@@ -35,6 +35,9 @@ kernel/bounds.s: kernel/bounds.c FORCE
 	$(Q)mkdir -p $(dir $@)
 	$(call if_changed_dep,cc_s_c)
 
+$(info "*****************")
+$(info "* Starting Kbuild")
+
 $(obj)/$(bounds-file): kernel/bounds.s Kbuild
 	$(Q)mkdir -p $(dir $@)
 	$(call cmd,bounds)
diff --git a/Kconfig b/Kconfig
index c13f48d65898487105f0193667648382c90d0eda..582bdc53369026fb2de578fe89cd52b900d035e0 100644
--- a/Kconfig
+++ b/Kconfig
@@ -9,3 +9,12 @@ config SRCARCH
 	option env="SRCARCH"
 
 source "arch/$SRCARCH/Kconfig"
+
+#IGNORE_BCM_KF_EXCEPTION
+source "Kconfig.bcm_kf"
+
+source "Kconfig.bcm"
+
+config BCM_IN_KERNEL
+	bool
+	default y
diff --git a/Kconfig.bcm b/Kconfig.bcm
new file mode 100644
index 0000000000000000000000000000000000000000..973dbbe554bb6121c9040ea1d9ff41ca317a64ac
--- /dev/null
+++ b/Kconfig.bcm
@@ -0,0 +1,1249 @@
+if (BCM_KF_MISC_MAKEFILE)
+
+config BRCM_IKOS
+	bool "IKOS"
+
+config BCM_CHIP_NUMBER
+	int "numerical value of chipid"
+
+config BCM_KERNEL_CUSTOM
+	bool "Build kernel with Broadcom custom changes"
+	default y
+	help
+	   This should always be selected for Broadcom
+	   Internal builds
+
+choice
+	prompt "Broadcom Commengine board type"
+	default BCM96362
+	depends on (BCM_KF_MIPS_BCM963XX || BCM_KF_ARM_BCM963XX)
+	help
+	  Select different Broadcom ADSL board
+
+config BCM963148
+	bool "63148"
+	depends on BCM_KF_ARM_BCM963XX
+	depends on ARM
+	select EMBEDDED
+	select MACH_BCM963148
+
+config BCM963138
+	bool "63138"
+	depends on BCM_KF_ARM_BCM963XX
+	depends on ARM
+	select EMBEDDED
+	select MACH_BCM963138
+
+config BCM960333
+	bool "960333"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+
+config BCM96318
+	bool "96318 DSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+	
+config BCM96368
+	bool "96368 VDSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM96816
+	bool "96816 GPON board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select SYS_SUPPORTS_SMP
+	select DMA_NONCOHERENT
+	select NR_CPUS_DEFAULT_2
+
+config BCM96818
+	bool "96818 GPON board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select SYS_SUPPORTS_SMP
+	select DMA_NONCOHERENT
+	select NR_CPUS_DEFAULT_2
+	
+config BCM96362
+	bool "96362 ADSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM96328
+	bool "96328 ADSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM963268
+	bool "963268 VDSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM96828
+	bool "96828 EPON board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+	select GENERIC_GPIO
+
+config BCM96838
+	bool "96838 GPON/EPON board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM963381
+	bool "963381 VDSL board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+config BCM96848
+	bool "96848 GPON/EPON board"
+	depends on BCM_KF_MIPS_BCM963XX
+	depends on MIPS_BCM963XX
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_2
+
+endchoice
+
+config MACH_BCM963148
+	bool "BCM963148 board"
+	select EMBEDDED
+	select PLAT_B15_CORE
+	select PLAT_BCM63148
+	default y
+
+config MACH_BCM963138
+	bool "BCM963138 board"
+	select EMBEDDED
+	select PLAT_CA9_MPCORE
+	select PLAT_BCM63138
+	default y
+
+config BOARD_ZRELADDR
+	hex
+	default 0x00008000
+	depends on BCM_KF_ARM_BCM963XX
+	depends on ARM
+	help
+	  Must be consistent with the definition of "textaddr-y" in
+	  arch/arm/Makefile;
+	  BOARD_ZRELADDR == virt_to_phys(TEXTADDR)
+
+config BOARD_PARAMS_PHYS
+	hex
+	depends on BCM_KF_ARM_BCM963XX
+	depends on ARM
+	default 0x00000000
+	help
+	  Board_PARAMS_PHYS must be within 4MB of ZRELADDR
+
+config DEBUG_UART_ADDR
+	hex
+	default 0xfffe8600 if (BCM963138) || (BCM963148)
+	depends on EARLY_PRINTK
+	depends on BCM_KF_ARM_BCM963XX
+	depends on ARM
+	help
+	  Physical address of the UART used in early kernel debugging.
+
+config BCM63148_SIM
+	bool "63148 SIM"
+	default n
+	depends on BCM963148
+	help
+	  BCM63148 for simulation
+
+config BCM63138_SIM
+	bool "63138 SIM"
+	default n
+	depends on BCM963138
+	help
+	  BCM63138 for simulation
+
+#
+# Endianness selection.  Sufficiently obscure so many users don't know what to
+# answer, so we try hard to limit the available choices.  Also the use of a
+# choice statement should be more obvious to the user.
+#
+choice
+	prompt "Endianess selection"
+	depends on ARM
+	help
+	  Some MIPS machines can be configured for either little or big endian
+	  byte order. These modes require different kernels and a different
+	  Linux distribution.  In general there is one preferred byteorder for a
+	  particular system but some systems are just as commonly used in the
+	  one or the other endianness.
+
+config CPU_BIG_ENDIAN
+	bool "Big endian"
+	depends on ARM
+
+config CPU_LITTLE_ENDIAN
+	bool "Little endian"
+	depends on ARM
+	help
+
+endchoice
+
+config BCM_SCHED_RT_PERIOD
+	int "Period over which RT task cpu usage is measured (in us)"
+	range 0 1000000
+
+config BCM_SCHED_RT_RUNTIME
+	int "Portion of the period that RT tasks are allowed to run (in us)"
+	range 0 1000000
+
+config BCM_SCHED_RT_SHARE
+	bool "Allow RT threads to take time from other cores"
+
+config BCM_BOARD
+	bool "Support for Broadcom Board"
+	select CRC32
+
+config BCM_CHIPINFO
+	tristate "Support for Broadcom chipinfo"
+
+config BCM_OTP
+	tristate "Secure boot support for Broadcom otp"
+
+config BCM_SERIAL
+	tristate "Support for Serial Port"
+	select SERIAL_CORE
+
+config BCM_SERIAL_CONSOLE
+	bool "Console on BCM63XX serial port"
+	depends on BCM_SERIAL=y
+	select SERIAL_CORE_CONSOLE
+
+config BCM_EXT_TIMER
+	bool "Support for external timer"
+	default n
+
+config BCM_WATCHDOG_TIMER
+	bool "Support for watchdog timer"
+	default n
+
+config L2TP
+	tristate "Support for L2TP"
+
+config ACCEL_PPTP
+	tristate "Support for ACCEL_PPTP"
+
+config BCM_SPU
+	tristate "Support for IPSec SPU"
+
+config BCM_SPU_TEST
+	bool "Support for IPSec SPU Test code"
+	depends on BCM_SPU
+
+config BCM_PKTFLOW
+	tristate "Support for Broadcom Packet Flow Cache"
+
+config BCM_FHW
+	tristate "Support for HW Accelerator"
+
+config BCM_PKTCMF
+	tristate "Support for Packet CMF"
+
+config BCM_PKTRUNNER
+	tristate "Support for Packet runner"
+	depends on BCM_KF_MIPS_BCM9685XX
+
+config BCM_PKTRUNNER_GSO
+	bool "Runner GSO Support "
+
+config BCM_PKTRUNNER_CSUM_OFFLOAD
+	bool "Runner CHECKSUM OFFLOAD Support "
+
+config BCM_PKTRUNNER_MCAST_DNAT
+	bool "Runner MCAST_DNAT Support "
+
+config BCM_LTE
+	tristate "BCM LTE WAN support"
+	depends on BCM963138
+
+config BCM_LTE_IMPL
+	int "Implementation index for Broadcom LTE"
+	depends on BCM_LTE
+
+config BCM_LTE_PCI
+	tristate "LTE on PCI"
+	depends on (BCM_LTE && BCM_PCI)
+
+config BCM_LTE_PCI_MODEM_BOOT
+	bool "Modem Boot device node"
+	depends on BCM_LTE_PCI
+
+config BCM_LTE_USB
+	tristate "LTE on USB"
+	depends on (BCM_LTE && BCM_USB)
+
+config BCM_FBOND
+	tristate "Support for Broadcom Packet Flow Bonding"
+
+config BCM_SYSPERF
+	tristate "Support for Broadcom Sys Perf driver"
+
+config BCM_SYSPERF_IMPL
+	int "Sysperf char driver"
+	depends on BCM_SYSPERF
+	
+config BCM_INGQOS
+	tristate "Support for Ingress QoS"
+
+config BCM_BPM
+	tristate "Support for Buffer Pool Manager"
+
+config BCM_BPM_BUF_MEM_PRCNT
+	int "Buffer Memory as % of Total Memory"
+	range 1 100
+	default 15
+	depends on BCM_BPM
+
+config BCM_FAP
+	tristate "Support for Forward Assist Processor (FAP)"
+	depends on MIPS
+	depends on BCM_KF_FAP
+
+config BCM_FAP_GSO
+	bool "Support GSO in FAP"
+	depends on MIPS
+	depends on BCM_KF_FAP && BCM_FAP
+
+config BCM_FAP_GSO_LOOPBACK
+	bool "FAP GSO LOOPBACK Support "
+	depends on MIPS
+	depends on BCM_KF_FAP && BCM_FAP_GSO
+
+config BCM_FAP_LAYER2
+	bool "FAP Layer 2 Bridge"
+	depends on MIPS
+	depends on BCM_KF_FAP && BCM_FAP
+
+config BCM_FAP_IPV6
+	bool "Support for IPV6 in FAP"
+	depends on BCM_FAP && BCM_PKTFLOW
+	depends on BCM_KF_FAP
+	depends on MIPS
+
+config BCM_PKTDMA
+	tristate "Support for Packet DMA"
+	depends on (MIPS || BCM963138)
+
+config BCM_IUDMA
+	bool "Support for Iudma"
+	depends on MIPS
+
+config BCM_RDPA_BRIDGE
+	tristate "Support for Runner bridge"
+	depends on MIPS
+	depends on BCM_RDPA
+
+config BCM_ENET
+	tristate "Support for Ethernet"
+
+config BCM_DEF_NR_RX_DMA_CHANNELS
+	int "Number of RX DMA Channels"
+	range 1 4
+
+config BCM_DEF_NR_TX_DMA_CHANNELS
+	int "Number of TX DMA Channels"
+	range 1 4
+
+config BCM_PKTDMA_RX_SPLITTING
+	bool "PktDma Iudma Rx Splitting"
+	depends on BCM_PKTDMA && BCM_FAP
+
+config BCM_PKTDMA_TX_SPLITTING
+	bool "PktDma Iudma Tx Splitting"
+	depends on BCM_PKTDMA && BCM_FAP
+
+config BCM_GMAC
+	bool "Support for GMAC"
+	depends on MIPS
+
+config EPON_SDK
+	tristate "Support for EPON"
+	depends on MIPS
+
+config EPON_SDK_VOICE_OAM
+	tristate "Support for EPON Voice OAM"
+	depends on MIPS
+
+config GPON_SFU
+	tristate "Support for GPON"
+	depends on MIPS
+
+config EPON_SFU
+	tristate "Support for EPON"
+	depends on MIPS
+
+config EPON_SBU
+	tristate "Support for EPON"
+	depends on MIPS
+
+config EPON_HGU
+	tristate "Support for EPON"
+	depends on MIPS
+
+config EPON_UNI_UNI_ENABLED
+	tristate "Support for EPON SFU/SBU UNI to UNI Forwarding"
+	depends on MIPS
+
+config EPON_DS_DLF_FORWARD
+	tristate "Support for EPON Downstream DLF Forward"
+	depends on MIPS
+
+config BCM_GPON
+	tristate "Support for GPON"
+	depends on BCM96838 || BCM96848
+	depends on MIPS
+
+config BCM_OPTICALDET
+	tristate "Support for optical wan detection"
+	depends on BCM96838
+
+config BCM_OPTICALDET_IMPL
+	int "Implementation index for BRCM optical wan detection"
+	depends on BCM_OPTICALDET
+
+config BCM_SFP
+	tristate "Support for SFP"
+
+config BCM_SFP_IMPL
+	int "Implementation index for external SFP module"
+	depends on BCM_SFP
+
+config BCM_I2C_BUS_IMPL
+	int "Implementation index for I2C Bus module"
+	depends on BCM_I2C_BUS
+
+config BCM_I2C_CHIP_IMPL
+	int "Implementation index for I2C Bus module"
+	depends on BCM_I2C_CHIP
+
+config BCM_I2S_IMPL
+	int "Implementation index for I2S module"
+	depends on BCM_I2S
+
+config BCM_LASER
+	tristate "Support for LASER"
+	depends on MIPS
+
+config BCM_GPON_802_1Q_ENABLED
+	bool "802_1Q mode enabled"
+	depends on MIPS
+
+config BCM_GPON_AE_AUTO_SWITCH
+	bool "Enable GPON-ActiveE Auto Switch"
+	depends on MIPS
+
+config BCM_MAX_GEM_PORTS
+	int "Number of Gem Ports"
+	range 32 256 if BCM96838 || BCM96848
+	range 32 128 if !BCM96838 && !BCM96848
+	default 32
+
+config BCM_MoCA
+	tristate "Support for MoCA"
+
+config BCM_6802_MoCA
+	bool "Support for 6802 MoCA"	 
+	depends on BCM_MoCA
+	help
+		Select 'M' to include support for Broadcom MoCA Solution.
+		No Support For Static Build Model.
+
+config BCM_HS_UART
+	tristate "Support for High Speed UART"
+		
+config BCM_DEFAULT_CONSOLE_LOGLEVEL
+	int "Default console printk loglevel"
+	depends on BCM_KF_CONSOLE_LOGLEVEL
+
+config BCM_TSTAMP
+	bool
+
+config BCM_LOG
+	tristate "Support for BCM LOG"
+
+config BCM_COLORIZE_PRINTS
+	bool "Color code various prints"
+
+config BCM_ASSERTS
+	bool "Compile in assert code"
+
+config BCM_FATAL_ASSERTS
+		bool "Generate a fatal error when assert fails"
+
+config BCM_I2C_BUS
+	tristate "Support for I2C Bus Drivers"
+
+config BCM_I2C_CHIP
+	tristate "Support for I2C Chip Drivers"
+
+config BCM_I2S
+	tristate "Support for I2S Drivers"
+
+config BCM_VLAN
+	tristate "Support for BCM VLAN"
+
+config BCM_EPON
+	tristate "Support for EPON LUE"
+	depends on MIPS
+
+config BCM_EPON_STACK
+	tristate "Support for EPON STACK"
+	help
+		Include it as m to enable EPON stack
+
+config BCM_USB
+	tristate "Support for USB"
+	depends on MIPS
+
+config BCM_USBNET_ACCELERATION
+	bool "Support for USBNET/LTE ACCELERATION in FCACHE"
+	depends on BCM_PKTFLOW
+
+config BCM_SATA_TEST
+	tristate "Support for SATA compliance tests"
+
+config BCM_M2M_DMA
+	bool "Support for M2M DMA"
+	depends on BCM_KF_M2M_DMA
+	depends on BCM963138
+
+config BCM_RECVFILE
+	bool "Support for recvfile"
+	depends on BCM_KF_RECVFILE
+
+config BCM_ISDN
+	tristate "Support for ISDN"
+	depends on MIPS
+
+config BCM_WLAN
+	tristate "Support for Wireless"
+
+config BCM_WAPI
+	bool "Support for Wireless WAPI"
+	depends on BCM_WLAN
+
+config BCM_WLAN_WLVISUALIZATION
+	bool "Enable wlan Visualization"
+	default  n
+	depends on BCM_WLAN
+
+config BCM_WLALTBLD
+	string "Wireless Alternate Build"
+	depends on BCM_WLAN
+
+config BCM_WLAN_IMPL
+	int "Wireless Implementation Selection"
+	depends on BCM_WLAN
+
+config BCM_PCI
+	bool "Support for PCI"
+	select PCI
+	select PCI_DOMAINS
+
+# NOTE: the select PCI_DOMAINS line will cause problems with old kernel, but removing it
+#	will cause PCI_DOMAINS to be removed from the .config file for some unknown reason...
+
+config BCM_WLAN_USBAP
+	bool "Support for Wireless USBAP"
+
+config BCM_XTMCFG
+	tristate "Support for XTM"
+
+config BCM_XTMRT
+	tristate "Support for XTM"
+
+config BCM_ADSL
+	tristate "Support for ADSL"
+
+config BCM_DSL_GINP_RTX
+	tristate "Support for DSL_GINP_RTX"
+
+config BCM_EXT_BONDING
+	tristate "Support for EXT_BONDING"
+	depends on MIPS
+
+config BCM_DSL_GFAST
+	tristate "Support for DSL_GFAST"
+	depends on BCM963138
+
+config BCM_ENDPOINT
+	tristate "Support for VOICE"
+
+config BCM_PCMSHIM
+	tristate "Support for PCM DMA SHIM"
+
+config BCM_DECT
+	tristate "Support for DECT"
+
+config BCM_DECTSHIM
+	tristate "Support for DECTSHIM"
+
+config BCM_BCMPROF
+	tristate "Support for profiling"
+	depends on MIPS
+
+config BCM_PWRMNGT
+	tristate "Support for Power Management"
+	depends on m
+	
+config BCM_ETH_PWRSAVE
+	bool "Support for Ethernet Auto Power Down and Sleep"
+
+config BCM_ETH_DEEP_GREEN_MODE
+	bool "Support for Ethernet Deep Green Mode"
+
+config BCM_ETH_HWAPD_PWRSAVE
+	bool "Support for Ethernet HW Auto Power Down for external PHYs"
+
+config BCM_HOSTMIPS_PWRSAVE
+	bool "Support for PWRMNGT MIPS clock divider"
+	depends on MIPS
+
+config BCM_HOSTMIPS_PWRSAVE_TIMERS
+	bool "Hostmips Power Save Timers"
+	depends on MIPS
+
+config BCM_DDR_SELF_REFRESH_PWRSAVE
+	bool "Support for DRAM Self Refresh mode"
+	depends on MIPS
+
+config BCM_AVS_PWRSAVE
+	bool "Support for Automatic Voltage Scaling"
+	depends on MIPS
+
+config BCM_1V2REG_AUTO_SHUTDOWN
+	bool "Support for Automatically Shutting down Internal 1.2V Regulator"
+	depends on MIPS
+
+config BCM_1V2REG_ALWAYS_SHUTDOWN
+	bool "Support for Overriding Automatically Shutting down Internal 1.2V Regulator"
+	depends on MIPS
+
+config BCM_1V2REG_NEVER_SHUTDOWN
+	bool "Support for Overriding Automatically Shutting down Internal 1.2V Regulator"
+	depends on MIPS
+
+config BCM_CPLD1
+	bool "Support for CPLD standby timer"
+	depends on MIPS
+
+config BCM_BCMDSP
+	tristate "Support for DSP application"
+	depends on MIPS
+
+config BCM_PROCFS
+	tristate "Support for PROCFS"
+
+config BCM_TRNG
+	tristate "BCM HW Random Number Generator support"
+#	---help---
+#	  This driver provides kernel-side support for the Random Number
+#	  Generator hardware found on bcm.
+#
+#	  To compile this driver as a module, choose M here: the
+#	  module will be called intel-rng.
+#
+#	  If unsure, say Y.
+
+config BCM_ARL
+	tristate "Support for ARL Table Management"
+	depends on MIPS
+
+config BCM_TMS
+	tristate "TMS support (802.3ah, 802.1ag, Y.1731)"
+
+config BCM_PMC
+	bool "PMC"
+	depends on BCM96838 || BCM963138 || BCM963148 || BCM963381 || BCM96848
+
+config BCM_PLC_BOOT
+	tristate "PLC boot support"
+	depends on MIPS
+
+config BCM_IEEE1905
+	tristate "IEEE1905 support"
+
+config BCM_BMU
+	bool "Battery Management Unit"
+
+config BCM_DPI
+	tristate "Support for Deep Packet Inspection"
+
+config BCM_BOARD_IMPL
+	int "Implementation index for Board"
+	depends on BCM_BOARD
+
+config BCM_CHIPINFO_IMPL
+	int "Implementation index for Chipinfo module"
+	depends on BCM_CHIPINFO
+
+config BCM_OTP_IMPL
+	int "Implementation index for otp module"
+	depends on BCM_OTP
+
+config BCM_SERIAL_IMPL
+	int "Implementation index for Serial"
+	depends on BCM_SERIAL
+
+config BCM_EXT_TIMER_IMPL
+	int "Implementation index for external timer"
+	depends on BCM_EXT_TIMER
+
+config BCM_SPU_IMPL
+	int "Implementation index for IPSec SPU"
+	depends on BCM_SPU
+
+config BCM_TRNG_IMPL
+	int "Implementation index for TRNG "
+	depends on BCM_TRNG
+
+config BCM_PKTFLOW_IMPL
+	int "Implementation index for Broadcom Flow Cache"
+	depends on BCM_PKTFLOW
+
+config BCM_PKTCMF_IMPL
+	int "Implementation index for Packet CMF"
+	depends on BCM_PKTCMF
+
+config BCM_PKTRUNNER_IMPL
+	int "Implementation index for Broadcom packet runner"
+	depends on BCM_PKTRUNNER
+
+config BCM_INGQOS_IMPL
+	int "Implementation index for Ingress QoS"
+	depends on BCM_INGQOS
+
+config BCM_BPM_IMPL
+	int "Implementation index for BPM"
+	depends on BCM_BPM
+
+config BCM_FAP_IMPL
+	int "Implementation index for FAP"
+	depends on BCM_FAP
+	depends on MIPS
+
+config BCM_PKTDMA_IMPL
+	int "Implementation index for Packet DMA"
+	depends on BCM_PKTDMA
+
+config BCM_RDPA_BRIDGE_IMPL
+	int "Implementation index for Runner bridge"
+	depends on BCM_RDPA_BRIDGE
+	depends on MIPS
+
+config BCM_FBOND_IMPL
+	int "Implementation index for Broadcom Flow Bonding"
+	depends on BCM_FBOND
+
+config BCM_ENET_IMPL
+	int "Implementation index for Ethernet"
+	depends on BCM_ENET
+
+config BCM_GPON_IMPL
+	int "Implementation index for GPON"
+	depends on BCM_GPON
+
+config BCM_LASER_IMPL
+	int "Implementation index for LASER"
+	depends on BCM_LASER
+		
+config BCM_MoCA_IMPL
+	int "Implementation index for MoCA"
+	depends on BCM_MoCA
+
+config BCM_HS_UART_IMPL
+	int "Implementation index for High Speed UART"
+	depends on BCM_HS_UART
+	
+config BCM_LOG_IMPL
+	int "Implementation index for BCM LOG"
+	depends on BCM_LOG
+
+config BCM_I2C_BUS_IMPL
+	int "Implementation index for I2C Bus"
+	depends on BCM_I2C_BUS
+
+config BCM_I2C_CHIP_IMPL
+	int "Implementation index for I2C Chip"
+	depends on BCM_I2C_CHIP
+
+config BCM_VLAN_IMPL
+	int "Implementation index for BCM VLAN"
+	depends on BCM_VLAN
+
+config BCM_EPON_IMPL
+	int "Implementation index for BCM EPON"
+	depends on BCM_EPON
+
+config BCM_USB_IMPL
+	int "Implementation index for USB"
+	depends on BCM_USB
+
+config BCM_ISDN_IMPL
+	int "Implementation index for ISDN"
+	depends on BCM_ISDN
+
+config BCM_XTMCFG_IMPL
+	int "Implementation index for XTMCFG"
+	depends on BCM_XTMCFG
+
+config BCM_XTMRT_IMPL
+	int "Implementation index for XTMRT"
+	depends on BCM_XTMRT
+
+config BCM_ADSL_IMPL
+	int "Implementation index for ADSL"
+	depends on BCM_ADSL
+
+config BCM_EXT_BONDING_IMPL
+	int "Implementation index for EXT_BONDING"
+	depends on BCM_EXT_BONDING
+
+config BCM_ENDPOINT_IMPL
+	int "Implementation index for VOICE"
+	depends on BCM_ENDPOINT
+
+config BCM_DECT_IMPL
+	int "Implementation index for DECT"
+	depends on BCM_DECT
+
+config BCM_DECTSHIM_IMPL
+	int "Implementation index for DECTSHIM"
+	depends on BCM_DECTSHIM
+
+config BCM_PCMSHIM_IMPL
+	int "Implementation index for PCM DMA SHIM"
+	depends on BCM_PCMSHIM
+
+config BCM_BCMPROF_IMPL
+	int "Implementation index for DSP APPLICATION"
+	depends on BCM_BCMPROF
+
+config BCM_BCMDSP_IMPL
+	int "Implementation index for PROFILING"
+	depends on BCM_BCMDSP
+
+config BCM_PROCFS_IMPL
+	int "Implementation index for PROCFS"
+	depends on BCM_PROCFS
+
+config BCM_PWRMNGT_IMPL
+	int "Implementation index for PWRMNGT"
+	depends on BCM_PWRMNGT
+
+config BCM_ARL_IMPL
+	int "Implementation index for ARL"
+	depends on BCM_ARL
+
+config BCM_PMC_IMPL
+	int "Implementation index for PMC"
+	depends on BCM_PMC
+
+config BCM_TMS_IMPL
+	int "Implementation index for TMS"
+	depends on BCM_TMS
+
+config BCM_DPI_IMPL
+	int "Implementation index for Deep Packet Inspection"
+	depends on BCM_DPI
+
+config BCM_PORTS_ON_INT_EXT_SW
+	bool "Ports on both Internal and External Switch"
+	default n
+	help
+	  This option enables the feature where ports can be on Internal switch and External switch.
+
+config BCM_SWITCH_PORT_TRUNK_SUPPORT
+	bool "MAC based switch port trunking support"
+	default n
+	help
+	  This option enables the feature where MAC based port trunking can be used on external switch.
+
+config BCM_VLAN_ROUTED_WAN_USES_ROOT_DEV_MAC
+	bool "Assign Same MAC address to Routed WAN Interface as root"
+	default n
+	help
+	  This option enables the feature where Routed VLANCTL WAN Interfaces are assigned the same MAC as root device.
+
+config BCM_SPDSVC_SUPPORT
+	bool "Speed Service Support"
+	default n
+	help
+	  This option enables the Broadcom Speed Service Feature Support.
+
+config ROOT_FLASHFS
+	string "flash partition"
+	help
+	  This is the root file system partition on flash memory
+
+config BRCM_KTOOLS
+	bool "BRCM_KTOOLS"
+	default n
+	help
+		Enables the build of BRCM MIPS Kernel Tools Support for PMON, BOUNCE
+
+
+config BUZZZ
+	bool "BUZZZ"
+	depends on BRCM_KTOOLS && BCM_KF_BUZZZ
+	default n
+	help
+	  Enables the support for MIPS BUZZZ tool suite
+
+config BUZZZ_KEVT
+	bool "BUZZZ_KEVT"
+	depends on BRCM_KTOOLS && BCM_KF_BUZZZ
+	default n
+	help
+	  Enables the support for MIPS BUZZZ Kernel Event tracing
+		
+config PMON
+	bool "PMON"
+	depends on BRCM_KTOOLS
+	default n
+	help
+	  Enables the support for MIPS Performance Monitoring Counters
+
+config BRCM_BOUNCE
+	bool "BOUNCE"
+	depends on BRCM_KTOOLS
+	default n
+	help
+	  This option enables the support for -finstrument-functions.
+
+config BRCM_BOUNCE_EXIT
+	bool "BOUNCE_EXIT"
+	depends on BRCM_BOUNCE
+	default n
+	help
+	  This option enables the instrumentation of kernel function call exit.
+
+config BRCM_BOUNCE_DATAPATH
+	bool "BOUNCE_DATAPATH"
+	depends on BRCM_BOUNCE
+	default n
+	help
+	  This option enables the brute force instrumentation of network data path.
+
+config BCM_EXT_SWITCH
+	bool "External Switch present"
+	default n
+	help
+	  This option enables the external switch support.
+
+config BCM_EXT_SWITCH_TYPE
+	int "External Switch Type/Number"
+	default 0
+
+config BCM_JUMBO_FRAME
+	bool "Jumbo Frame Support (for Enet/Xtm/GPON only)"
+	default n
+
+config BCM_MAX_MTU_SIZE
+	int "Support for Maximum Payload Size"
+	depends on (BCM96838 || BCM96848) && BCM_JUMBO_FRAME
+	range 64 9200
+
+config BCM_USER_DEFINED_DEFAULT_MTU
+	bool "User Defined Default Linux Device Interface MTU"
+	depends on BCM_JUMBO_FRAME
+	default n
+
+config BCM_USER_DEFINED_DEFAULT_MTU_SIZE
+	int "User Defined Default Linux Device Interface MTU size"
+	depends on BCM_USER_DEFINED_DEFAULT_MTU
+
+choice
+	prompt "Restrict memory used (testing)"
+	depends on BCM_KF_MIPS_BCM963XX && MIPS_BCM963XX
+
+config BRCM_MEMORY_RESTRICTION_OFF
+	bool "Use all available"
+
+config BRCM_MEMORY_RESTRICTION_16M
+	bool "Use 16M"
+
+config BRCM_MEMORY_RESTRICTION_32M
+	bool "Use 32M"
+
+config BRCM_MEMORY_RESTRICTION_64M
+	bool "Use 64M"
+
+endchoice
+
+config AUXFS_JFFS2
+	bool "JFFS2"
+	default n
+	help
+	  Say Y here to enable support for an auxiliary file system on flash.
+
+config BRCM_OLT_FPGA_RESTORE
+	bool "BRCM_OLT_FPGA_RESTORE"
+	default n
+	help
+	  Enables /proc/brcm/olt_fpga_restore
+
+config PCI_DOMAINS
+	bool
+	depends on MIPS
+	default y
+
+config BCM_DCACHE_SHARED
+	bool "Share Dcache between TPs"
+
+config BCM_CPU_ARCH_NAME
+	string "Name of CPU Architecture"
+
+config BCM_PRINTK_INT_ENABLED
+	bool "printk with interrupt enabled"
+
+config BCM_RDP
+	bool 'Support for Runner Data Path'
+	default y
+
+config BCM_BDMF
+	tristate 'Support for Broadcom Device Management Framework'
+	help
+		Say 'M' to include support for BDMF.
+		Static compile not supported.
+
+config BCM_RDPA
+	tristate 'Support for Runner Data Path API Driver'
+	depends on BCM_BDMF
+	select BCM_RDP
+	help
+		Say 'M' to include support for RDPA.
+		Static compile not supported.
+
+config BCM_SIM_CARD
+	tristate 'Support for Sim Card Driver'
+	help
+		Say 'M' to include support for Sim Card.
+		Static compile not supported.
+
+config BCM_PMD
+	tristate 'Support for PMD'
+	depends on BCM96838 || BCM96848
+	help
+		Say 'M' to include support for PMD.
+		Static compile not supported.
+
+config BCM_RDPA_GPL
+	tristate 'Support for Runner Data Path API GPL Driver'
+	depends on BCM_RDPA
+
+config BCM_GPON_STACK
+	tristate 'Support for GPON Stack Driver'
+	depends on BCM96838 || BCM96848
+	help
+		Say 'M' to include support for GPON Stack driver.
+		Static compile not supported.
+
+		
+config BCM_RDPA_MW
+	tristate 'Support for Runner Data Path API MW Driver'
+	depends on BCM_RDPA
+
+config BCM_RDPA_DRV
+	tristate 'Support for Runner Command Driver'
+	depends on BCM_RDPA
+
+config BCM_WIFI_FORWARDING_DRV
+	tristate 'Support for Wifi forwarding driver'
+	depends on BCM_WLAN
+	help
+		Say 'M' to include support for Wifi forwarding driver.
+		Static compile not supported.
+
+config BCM_WFD_CHAIN_SUPPORT
+	bool 'Support for Wifi forwarding driver packet chaining'
+	depends on BCM_WIFI_FORWARDING_DRV
+	help
+		Say 'M' to include support for Wifi forwarding driver Tx packet chaining.
+		Static compile not supported.
+
+config BCM_DHD_RUNNER
+	depends on BCM_WLAN
+	bool 'Support for DHD acceleration using Runner'
+	help
+		Say 'M' to include support for DHD acceleration using Runner
+		Static compile not supported.
+
+config BCM_WFD_RX_ACCELERATION
+	bool 'Support for Wifi forwarding driver Rx Acceleration'
+	depends on BCM_WIFI_FORWARDING_DRV
+	help
+		Say 'M' to include support for Wifi forwarding driver Rx Acceleration.
+		Static compile not supported.
+
+config BCM_RDP_IMPL
+	int "Implementation index for RDP"
+	depends on BCM_RDP
+
+config BCM_BDMF_IMPL
+	int "Implementation index for BDMF"
+	depends on BCM_BDMF
+
+config BCM_RDPA_IMPL
+	int "Implementation index for RDPA"
+	depends on BCM_RDPA
+	
+config BCM_PMD_IMPL
+	int "Implementation index for PMD"
+	depends on BCM_PMD
+
+config BCM_SIM_CARD_IMPL
+	int "Implementation index for Sim Card"
+	depends on BCM_SIM_CARD
+
+config BCM_RDPA_GPL_IMPL
+	int "Implementation index for RDPA GPL"
+	depends on BCM_RDPA_GPL
+
+config BCM_WIFI_FORWARDING_DRV_IMPL
+	int "Implementation index for Wifi Forwarding Driver"
+	depends on BCM_WIFI_FORWARDING_DRV
+
+config BCM_RDPA_MW_IMPL
+	int "Implementation index for RDPA MW"
+	depends on BCM_RDPA_MW
+
+config BCM_RDPA_DRV_IMPL
+	int "Implementation index for RDPA DRV"
+	depends on BCM_RDPA_DRV
+
+config BCM_SATA_TEST_IMPL
+	int "Implementation index for SATA test module"
+	depends on BCM_SATA_TEST
+
+
+config BCM_EPON_STACK_IMPL
+	int "Implementation index for EPON STACK"
+	depends on BCM_EPON_STACK
+
+config EPON_CLOCK_TRANSPORT
+	bool "Epon clock transport support"
+	depends on BCM_EPON_STACK
+
+# Time Synchronization: Feature
+config BCM_TIME_SYNC
+	tristate 'Support for Time Synchronization'
+	depends on BCM96838 || BCM96848
+	help
+		Say 'M' to include support for Time Synchronization driver.
+		Static compile not supported.
+
+# Time Synchronization: Implementation
+config BCM_TIME_SYNC_IMPL
+	int "Implementation index for Time Synchronization"
+	depends on BCM_TIME_SYNC
+
+# GPON ToDD
+config BCM_GPON_TODD
+	bool "Support GPON ToDD"
+	depends on BCM_TIME_SYNC
+
+# PTP 1588
+config BCM_PTP_1588
+	bool "Support PTP 1588"
+	depends on BCM_TIME_SYNC
+
+# Sync Clock: 8KHz
+config BCM_TIME_SYNC_8KHZ
+	bool "Support Sync Clock: 8KHz"
+	depends on BCM_TIME_SYNC
+
+# Sync Signal: 1PPS
+config BCM_TIME_SYNC_1PPS
+	bool "Support Sync Signal: 1PPS"
+	depends on BCM_TIME_SYNC
+
+# Sync Signal: PON Unstable
+config BCM_TIME_SYNC_PON_UNSTABLE
+	bool "Support Sync Signal: PON Unstable"
+	depends on BCM_TIME_SYNC
+
+config BCM_BRIDGE_MAC_FDB_LIMIT
+	bool "Support MAC limit in kernel"
+
+config BCM_ZONE_ACP
+	bool "ACP Support"
+	depends on BCM963138
+
+config BCM_ACP_MEM_SIZE
+	int "Reserved Memory Size for ACP Purpose, unit in MB"
+	default 16
+	depends on BCM_ZONE_ACP
+
+config MTD_BCM_SPI_NAND
+	tristate "Broadcom 63xx SPI NAND MTD support"
+	depends on BCM_KF_MTD_BCMNAND
+	help
+	  Broadcom 963xx SPI NAND MTD support
+
+config BCM_DSL_TX_RX_IUDMA
+	bool "DSL Tx Rx Iudma"
+	default n
+
+config BCM_ANDROID
+	bool "Enable Android"
+	default n
+	depends on BCM_KF_ANDROID
+
+config BCM_OCF
+	tristate "Linux OCF module"
+
+config BCM_OCF_IMPL
+	int "Linux OCF implementation"
+	depends on BCM_OCF
+
+config BCM_BLUETOOTH_USB
+	tristate "Enable BRCM Bluetooth USB Driver"
+	default n
+
+config BCM_BLUETOOTH_USB_IMPL
+	int "Implementation index for BRCM BLUETOOTH USB dongle"
+	depends on BCM_BLUETOOTH_USB
+
+config BCM_NFC_I2C
+	tristate "Enable BRCM NFC I2C Driver"
+	default n
+
+config BCM_NFC_I2C_IMPL
+	int "Implementation index for BRCM NFC I2C"
+	depends on BCM_NFC_I2C
+
+endif
diff --git a/Kconfig.bcm_kf b/Kconfig.bcm_kf
new file mode 100644
index 0000000000000000000000000000000000000000..f80fc2fc0e2a9ae83de6de06c1a3c3fd6c4bb55e
--- /dev/null
+++ b/Kconfig.bcm_kf
@@ -0,0 +1,489 @@
+#
+# Automatically Generated Kconfig File.  DO NOT MODIFY
+#
+
+
+config BCM_KF_MISC_MAKEFILE
+	bool
+	default y
+
+config BCM_KF_MIPS_BCM963XX
+	bool
+	default y
+
+config BCM_KF_MIPS_BCM9685XX
+	bool
+	default y
+
+config BCM_KF_ARM_BCM963XX
+	bool
+	default y
+
+config BCM_KF_LINKER_WORKAROUND
+	bool
+	default y
+
+config BCM_KF_KERN_WARNING
+	bool
+	default y
+
+config BCM_KF_FIXADDR_TOP
+	bool
+	default y
+
+config BCM_KF_NBUFF
+	bool
+	default y
+
+config BCM_KF_BLOG
+	bool
+	default y
+
+config BCM_KF_VLANCTL_BIND
+	bool
+	default y
+
+config BCM_KF_WANDEV
+	bool
+	default y
+
+config BCM_KF_FAP
+	bool
+	default y
+
+config BCM_KF_CSUM_UNALIGNED
+	bool
+	default y
+
+config BCM_KF_THREAD_SIZE_FIX
+	bool
+	default y
+
+config BCM_KF_TSTAMP
+	bool
+	default y
+
+config BCM_KF_UNALIGNED_EXCEPTION
+	bool
+	default y
+
+config BCM_KF_MIPS_IOPORT_BASE
+	bool
+	default y
+
+config BCM_KF_SHOW_RAW_BACKTRACE
+	bool
+	default y
+
+config BCM_KF_DCACHE_SHARED
+	bool
+	default y
+
+config BCM_KF_CPU_DATA_CPUID
+	bool
+	default y
+
+config BCM_KF_PCI_FIXUP
+	bool
+	default y
+
+config BCM_KF_SYSRQ_AUX_CHAR
+	bool
+	default y
+
+config BCM_KF_CHAR_SYSRQ
+	bool
+	default y
+
+config BCM_KF_MTD_BCMNAND
+	bool
+	default y
+
+config BCM_KF_MTD_BCM963XX
+	bool
+	default y
+
+config BCM_KF_MTD_OOB_AUTO
+	bool
+	default y
+
+config BCM_KF_MTD_IOCTL_FIX
+	bool
+	default y
+
+config BCM_KF_PPP
+	bool
+	default n
+
+config BCM_KF_PROC_BCM
+	bool
+	default y
+
+config BCM_KF_IKOS
+	bool
+	default y
+
+config BCM_KF_DSP
+	bool
+	default y
+
+config BCM_KF_LOG
+	bool
+	default y
+
+config BCM_KF_TRACE_CUSTOM
+	bool
+	default y
+
+config BCM_KF_CGROUP
+	bool
+	default y
+
+config BCM_KF_HARDIRQ_CYCLES
+	bool
+	default n
+
+config BCM_KF_CONSOLE_LOGLEVEL
+	bool
+	default y
+
+config BCM_KF_SHOW_HEAP_STACK
+	bool
+	default y
+
+config BCM_KF_BYPASS_SMP_WARNING
+	bool
+	default y
+
+config BCM_KF_OOM_REBOOT
+	bool
+	default y
+
+config BCM_KF_VMSCAN_OPT
+	bool
+	default y
+
+config BCM_KF_CPP_SUPPORT
+	bool
+	default y
+
+config BCM_KF_SKB_DEFINES
+	bool
+	default y
+
+config BCM_KF_VLAN
+	bool
+	default y
+
+config BCM_KF_IGMP
+	bool
+	default y
+
+config BCM_KF_IGMP_RATE_LIMIT
+	bool
+	default y
+
+config BCM_KF_MLD
+	bool
+	default y
+
+config BCM_KF_NETDEV_PATH
+	bool
+	default y
+
+config BCM_KF_MISC_IOCTLS
+	bool
+	default y
+
+config BCM_KF_MISALIGN_MQS
+	bool
+	default n
+
+config BCM_KF_NETFILTER
+	bool
+	default y
+
+config BCM_KF_ENET_SWITCH
+	bool
+	default y
+
+config BCM_KF_NAND
+	bool
+	default y
+
+config BCM_KF_FAIL_CONFIG_ON_EOF
+	bool
+	default y
+
+config BCM_KF_RUNNER
+	bool
+	default y
+
+config BCM_KF_WL
+	bool
+	default y
+
+config BCM_KF_LINKWATCH_WQ
+	bool
+	default y
+
+config BCM_KF_IP
+	bool
+	default y
+
+config BCM_KF_SPU
+	bool
+	default y
+
+config BCM_KF_80211
+	bool
+	default y
+
+config BCM_KF_SCHEDAUDIT
+	bool
+	default n
+
+config BCM_KF_BALOO
+	bool
+	default y
+
+config BCM_KF_BUZZZ
+	bool
+	default y
+
+config BCM_KF_BOUNCE
+	bool
+	default y
+
+config BCM_KF_ATM_BACKEND
+	bool
+	default y
+
+config BCM_KF_EXTRA_DEBUG
+	bool
+	default y
+
+config BCM_KF_SPI
+	bool
+	default y
+
+config BCM_KF_MUTEX_FIX
+	bool
+	default y
+
+config BCM_KF_POWER_SAVE
+	bool
+	default y
+
+config BCM_KF_DSP_EXCEPT
+	bool
+	default y
+
+config BCM_KF_UNI_UNI
+	bool
+	default y
+
+config BCM_KF_MODULE_OWNER
+	bool
+	default y
+
+config BCM_KF_RCU_CONSTANT_BUG
+	bool
+	default y
+
+config BCM_KF_JFFS
+	bool
+	default y
+
+config BCM_KF_PROC_DEFAULT
+	bool
+	default y
+
+config BCM_KF_ASSERT
+	bool
+	default y
+
+config BCM_KF_EXTSTATS
+	bool
+	default y
+
+config BCM_KF_USB_STORAGE
+	bool
+	default y
+
+config BCM_KF_USBNET
+	bool
+	default y
+
+config BCM_KF_FAP_GSO_LOOPBACK
+	bool
+	default y
+
+config BCM_KF_PROTO_IPSEC
+	bool
+	default y
+
+config BCM_KF_PROTO_ESP
+	bool
+	default y
+
+config BCM_KF_PRINTK_INT_ENABLED
+	bool
+	default y
+
+config BCM_KF_LIMITED_IFINDEX
+	bool
+	default y
+
+config BCM_KF_PMON
+	bool
+	default y
+
+config BCM_KF_BRIDGE_PORT_ISOLATION
+	bool
+	default y
+
+config BCM_KF_BRIDGE_STP
+	bool
+	default y
+
+config BCM_KF_STP_LOOP
+	bool
+	default y
+
+config BCM_KF_BRIDGE_STATIC_FDB
+	bool
+	default y
+
+config BCM_KF_BRIDGE_DYNAMIC_FDB
+	bool
+	default y
+
+config BCM_KF_BRIDGE_MAC_FDB_LIMIT
+	bool
+	default y
+
+config BCM_KF_IEEE1905
+	bool
+	default y
+
+config BCM_KF_FBOND
+	bool
+	default y
+
+config BCM_KF_MCAST_RP_FILTER
+	bool
+	default y
+
+config BCM_KF_XT_MATCH_LAYER7
+	bool
+	default y
+
+config BCM_KF_XT_TARGET_DC
+	bool
+	default y
+
+config BCM_KF_VLAN_AGGREGATION
+	bool
+	default y
+
+config BCM_KF_ETHTOOL
+	bool
+	default y
+
+config BCM_KF_DEBUGGING_DISABLED_FIX
+	bool
+	default y
+
+config BCM_KF_CPU_DOWN_PREEMPT_ON
+	bool
+	default y
+
+config BCM_KF_IPV6RD_SECURITY
+	bool
+	default y
+
+config BCM_KF_DPI
+	bool
+	default y
+
+config BCM_KF_ARM_ERRATA_798181
+	bool
+	default y
+
+config BCM_KF_ARM_PLD
+	bool
+	default y
+
+config BCM_KF_MISC_3_4_CVE_PORTS
+	bool
+	default y
+
+config BCM_KF_MISC_BACKPORTS
+	bool
+	default y
+
+config BCM_KF_SCHED_RT
+	bool
+	default y
+
+config BCM_KF_SCHED_RT_SHARE
+	bool
+	default y
+
+config BCM_KF_MODINST_DIR
+	bool
+	default y
+
+config BCM_KF_WLAN
+	bool
+	default y
+
+config BCM_KF_ONDEMAND
+	bool
+	default y
+
+config BCM_KF_EMMC
+	bool
+	default y
+
+config BCM_KF_USB_HOSTS
+	bool
+	default y
+
+config BCM_KF_SPDSVC
+	bool
+	default y
+
+config BCM_KF_ANDROID
+	bool
+	default y
+
+config BCM_KF_OCF
+	bool
+	default y
+
+config BCM_KF_LEDS
+	bool
+	default y
+
+config BCM_KF_BRIDGE_COUNTERS
+	bool
+	default y
+
+config BCM_KF_MHI
+	bool
+	default y
+
+config BCM_KF_PHONET
+	bool
+	default y
+
+config BCM_KF_M2M_DMA
+	bool
+	default y
+
+config BCM_KF_RECVFILE
+	bool
+	default y
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 148dc98226053171eaed088e08e6f492075adea9..c8467e458ee203c25a7b756fa5c32cd80456ee08 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5007,6 +5007,13 @@ F:	drivers/scsi/osd/
 F:	include/scsi/osd_*
 F:	fs/exofs/
 
+OVERLAYFS FILESYSTEM
+M:	Miklos Szeredi <miklos@szeredi.hu>
+L:	linux-fsdevel@vger.kernel.org
+S:	Supported
+F:	fs/overlayfs/*
+F:	Documentation/filesystems/overlayfs.txt
+
 P54 WIRELESS DRIVER
 M:	Christian Lamparter <chunkeey@googlemail.com>
 L:	linux-wireless@vger.kernel.org
diff --git a/Makefile b/Makefile
index 22345c016221d8c5902ffe4385273dd5d2375640..ea74f9ab3d569f7fd4d8f7ca4e5e229c54d1f603 100644
--- a/Makefile
+++ b/Makefile
@@ -245,7 +245,19 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 
 HOSTCC       = gcc
 HOSTCXX      = g++
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+BCM_FATAL_CC_WARNING_FLAGS := -Werror -Wfatal-errors
+
+# BCM_KBUILD_CFLAGS is used when building the Linux kernel (not bcmdrivers)
+BCM_KBUILD_CFLAGS := -g $(BCM_FATAL_CC_WARNING_FLAGS)
+
+# lauterbach setting
+#HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -fomit-frame-pointer
+
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 HOSTCXXFLAGS = -O2
 
 # Decide whether to build built-in, modular, or both.
@@ -513,9 +525,45 @@ libs-y		:= lib/
 core-y		:= usr/
 endif # KBUILD_EXTMOD
 
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+# Read in config
+-include include/config/auto.conf
+export HPATH 	:= $(TOPDIR)/include
+ifeq ($(strip $(CONFIG_BRCM_IKOS)),)
+brcmdrivers-y	:= $(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD)/ $(BRCMDRIVERS_DIR)/ $(INC_UTILS_PATH)/  $(INC_FLASH_PATH)/
+brcmdrivers-y	+= $(INC_SPI_PATH)/
+# Other chip specific compilations
+brcmdrivers-$(CONFIG_BCM96838) += $(SHARED_DIR)/opensource/rdp/ $(SHARED_DIR)/opensource/drv/ 
+brcmdrivers-$(CONFIG_BCM96848) += $(SHARED_DIR)/opensource/rdp/ $(SHARED_DIR)/opensource/drv/ 
+brcmdrivers-$(CONFIG_BCM963138) += $(SHARED_DIR)/opensource/rdp/ $(SHARED_DIR)/opensource/drv/
+brcmdrivers-$(CONFIG_BCM963148) += $(SHARED_DIR)/opensource/rdp/ $(SHARED_DIR)/opensource/drv/
+brcmdrivers-y += $(SHARED_DIR)/opensource/drivers/
+else
+brcmdrivers-y	:= $(BRCMDRIVERS_DIR)/ $(INC_UTILS_PATH)/
+endif
+
+ifneq ($(CONFIG_BCM_PMC),)
+brcmdrivers-$(CONFIG_BCM_PMC) += $(SHARED_DIR)/opensource/pmc/impl$(CONFIG_BCM_PMC_IMPL)/
+endif
+
+
+$(info * brcmdrivers-y = $(brcmdrivers-y))
+$(info * INC_BRCMBOARDPARMS_PATH = $(INC_BRCMBOARDPARMS_PATH))
+$(info * BRCM_BOARD = $(BRCM_BOARD))
+$(info * BRCMDRIVERS_DIR = $(BRCMDRIVERS_DIR))
+$(info * INC_SPI_PATH = $(INC_SPI_PATH))
+$(info * INC_FLASH_PATH = $(INC_FLASH_PATH))
+BRCMDRIVERS	:= $(brcmdrivers-y)
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 ifeq ($(dot-config),1)
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+	#just to pacify testfpatch
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 # Read in config
 -include include/config/auto.conf
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 ifeq ($(KBUILD_EXTMOD),)
 # Read in dependencies to all Kconfig* files, make sure to run
@@ -530,7 +578,11 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
 # if auto.conf.cmd is missing then we are probably in a cleaned tree so
 # we execute the config step to be sure to catch updated Kconfig files
 include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+	$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig ARCH=$(ARCH)
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 	$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 else
 # external modules needs include/generated/autoconf.h and include/config/auto.conf
 # but do not care if they are up-to-date. Use auto.conf to trigger the test
@@ -556,13 +608,111 @@ endif # $(dot-config)
 # command line.
 # This allow a user to issue only 'make' to build a kernel including modules
 # Defaults to vmlinux, but the arch makefile usually adds further targets
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+all: bcm_vmlinux
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 all: vmlinux
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= -Os
 else
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+ifndef BRCM_KERNEL_DEBUG
+BRCM_KERNEL_DEBUG := 0
+endif 
+ifeq ($(shell let "X = $(BRCM_KERNEL_DEBUG) & 1"; echo $$X),1)
+$(info "Setting optimization to debug levels (-O1, -g)")
+KBUILD_CFLAGS	+= -O1 -g 
+KBUILD_AFLAGS	+= -gdwarf-2
+KBUILD_CFLAGS += -Wno-uninitialized
+
+# Kernel does not compile with -O0. Set optimization to -O1 and disable all other optimizations
+# except for the ones which result in errors when compiling kernel
+KBUILD_CFLAGS += -fno-branch-count-reg
+KBUILD_CFLAGS += -fno-combine-stack-adjustments 
+KBUILD_CFLAGS += -fno-common
+KBUILD_CFLAGS += -fno-compare-elim
+KBUILD_CFLAGS += -fno-cprop-registers 
+KBUILD_CFLAGS += -fno-dce 
+KBUILD_CFLAGS += -fno-defer-pop 
+KBUILD_CFLAGS += -fno-delayed-branch
+KBUILD_CFLAGS += -fno-delete-null-pointer-checks 
+KBUILD_CFLAGS += -fno-dse 
+KBUILD_CFLAGS += -fno-early-inlining
+#KBUILD_CFLAGS += -fno-forward-propagate 
+KBUILD_CFLAGS += -fno-gcse-lm 
+KBUILD_CFLAGS += -fno-guess-branch-probability
+KBUILD_CFLAGS += -fno-if-conversion 
+KBUILD_CFLAGS += -fno-if-conversion2
+KBUILD_CFLAGS += -fno-inline-functions-called-once 
+KBUILD_CFLAGS += -fno-ipa-profile 
+KBUILD_CFLAGS += -fno-ipa-pure-const
+KBUILD_CFLAGS += -fno-ipa-reference 
+KBUILD_CFLAGS += -fno-ivopts
+KBUILD_CFLAGS += -fno-jump-tables 
+KBUILD_CFLAGS += -fno-math-errno
+KBUILD_CFLAGS += -fno-merge-constants 
+KBUILD_CFLAGS += -fno-move-loop-invariants
+KBUILD_CFLAGS += -fno-omit-frame-pointer
+KBUILD_CFLAGS += -fno-peephole
+KBUILD_CFLAGS += -fno-prefetch-loop-arrays
+KBUILD_CFLAGS += -fno-rename-registers
+#KBUILD_CFLAGS += -fno-rtti
+KBUILD_CFLAGS += -fno-sched-critical-path-heuristic 
+KBUILD_CFLAGS += -fno-sched-dep-count-heuristic 
+KBUILD_CFLAGS += -fno-sched-group-heuristic 
+KBUILD_CFLAGS += -fno-sched-interblock
+KBUILD_CFLAGS += -fno-sched-last-insn-heuristic 
+KBUILD_CFLAGS += -fno-sched-rank-heuristic
+KBUILD_CFLAGS += -fno-sched-spec
+KBUILD_CFLAGS += -fno-sched-spec-insn-heuristic 
+KBUILD_CFLAGS += -fno-sched-stalled-insns-dep 
+KBUILD_CFLAGS += -fno-short-enums 
+KBUILD_CFLAGS += -fno-signed-zeros
+KBUILD_CFLAGS += -fno-split-ivs-in-unroller 
+KBUILD_CFLAGS += -fno-split-wide-types
+#KBUILD_CFLAGS += -fno-no-threadsafe-statics 
+KBUILD_CFLAGS += -fno-toplevel-reorder
+KBUILD_CFLAGS += -fno-trapping-math 
+KBUILD_CFLAGS += -fno-tree-bit-ccp
+#KBUILD_CFLAGS += -fno-tree-ccp
+KBUILD_CFLAGS += -fno-tree-ch 
+KBUILD_CFLAGS += -fno-tree-copy-prop
+KBUILD_CFLAGS += -fno-tree-copyrename 
+KBUILD_CFLAGS += -fno-tree-cselim 
+KBUILD_CFLAGS += -fno-tree-dce
+KBUILD_CFLAGS += -fno-tree-dominator-opts 
+KBUILD_CFLAGS += -fno-tree-dse
+KBUILD_CFLAGS += -fno-tree-forwprop 
+KBUILD_CFLAGS += -fno-tree-fre
+KBUILD_CFLAGS += -fno-tree-loop-if-convert
+KBUILD_CFLAGS += -fno-tree-loop-im
+KBUILD_CFLAGS += -fno-tree-loop-ivcanon 
+KBUILD_CFLAGS += -fno-tree-loop-optimize
+KBUILD_CFLAGS += -fno-tree-phiprop
+KBUILD_CFLAGS += -fno-tree-pta
+KBUILD_CFLAGS += -fno-tree-reassoc
+KBUILD_CFLAGS += -fno-tree-scev-cprop 
+KBUILD_CFLAGS += -fno-tree-sink 
+KBUILD_CFLAGS += -fno-tree-slp-vectorize
+KBUILD_CFLAGS += -fno-tree-sra
+KBUILD_CFLAGS += -fno-tree-ter
+KBUILD_CFLAGS += -fno-tree-vect-loop-version
+KBUILD_CFLAGS += -fno-unit-at-a-time
+KBUILD_CFLAGS += -fno-var-tracking
+KBUILD_CFLAGS += -fno-var-tracking-assignments
+KBUILD_CFLAGS += -fno-web 
+
+CONFIG_FRAME_WARN = 0
+else
 KBUILD_CFLAGS	+= -O2
 endif
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+KBUILD_CFLAGS	+= -O2
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+endif
 
 include $(srctree)/arch/$(SRCARCH)/Makefile
 
@@ -654,7 +804,11 @@ ifneq ($(KAFLAGS),)
         KBUILD_AFLAGS += $(KAFLAGS)
 endif
 ifneq ($(KCFLAGS),)
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+        # do not print scary warning messages when we set KCFLAGS
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
         $(call warn-assign,CFLAGS)
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
         KBUILD_CFLAGS += $(KCFLAGS)
 endif
 
@@ -664,6 +818,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+LDFLAGS_vmlinux += -Map vmlinux.map
+endif #BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux	+= $(call ld-option, -X,)
 endif
@@ -686,6 +844,10 @@ export	INSTALL_PATH ?= /boot
 # makefile but the argument can be passed to make if needed.
 #
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+INSTALL_MOD_PATH := $(PROFILE_DIR)/modules
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 MODLIB	= $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
 export MODLIB
 
@@ -710,6 +872,29 @@ export mod_strip_cmd
 ifeq ($(KBUILD_EXTMOD),)
 core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+		     $(brcmdrivers-y) $(brcmdrivers-m) \
+		     $(net-y) $(net-m) $(libs-y) $(libs-m)))
+
+vmlinux-dirs-1	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+		     $(core-y) $(core-m) $(drivers-y) $(drivers-m)))
+
+brcmdriver-dirs	:= $(patsubst %/,%,$(filter %/, \
+		     $(brcmdrivers-y) $(brcmdrivers-m)))
+
+vmlinux-dirs-2	:= $(patsubst %/,%,$(filter %/, \
+		     $(net-y) $(net-m) $(libs-y) $(libs-m)))
+
+vmlinux-alldirs	:= $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
+		     $(init-n) $(init-) \
+		     $(core-n) $(core-) $(drivers-n) $(drivers-) \
+		     $(brcmdrivers-n) $(brcmdrivers-) \
+		     $(net-n)  $(net-)  $(libs-n)    $(libs-))))
+
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
 		     $(net-y) $(net-m) $(libs-y) $(libs-m)))
@@ -718,10 +903,14 @@ vmlinux-alldirs	:= $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
 		     $(init-n) $(init-) \
 		     $(core-n) $(core-) $(drivers-n) $(drivers-) \
 		     $(net-n)  $(net-)  $(libs-n)    $(libs-))))
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 init-y		:= $(patsubst %/, %/built-in.o, $(init-y))
 core-y		:= $(patsubst %/, %/built-in.o, $(core-y))
 drivers-y	:= $(patsubst %/, %/built-in.o, $(drivers-y))
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+brcmdrivers-y   := $(patsubst %/, %/built-in.o, $(brcmdrivers-y))
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 net-y		:= $(patsubst %/, %/built-in.o, $(net-y))
 libs-y1		:= $(patsubst %/, %/lib.a, $(libs-y))
 libs-y2		:= $(patsubst %/, %/built-in.o, $(libs-y))
@@ -755,7 +944,14 @@ libs-y		:= $(libs-y1) $(libs-y2)
 # System.map is generated to document addresses of all kernel symbols
 
 vmlinux-init := $(head-y) $(init-y)
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(brcmdrivers-y) $(net-y)
+$(info *******************************)
+$(info * vmlinux-main: $(vmlinux-main))
+$(info * brcmdrivers-y: $(brcmdrivers-y))
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 vmlinux-all  := $(vmlinux-init) $(vmlinux-main)
 vmlinux-lds  := arch/$(SRCARCH)/kernel/vmlinux.lds
 export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
@@ -907,6 +1103,19 @@ define rule_vmlinux-modpost
 endef
 
 # vmlinux image - including updated kernel symbols
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
+.PHONY: bcm_vmlinux
+
+bcm_vmlinux: vmlinux | prepare_bcm_driver
+
+# Ensure that prepare_bcm_driver is run before vmlinux starts.  prepare_bcm_driver
+# creates all of the bcmdriver symlinks.  Note that vmlinux performs actions on 
+# its normal prerequisites, so this must be added as order-only.
+vmlinux : | prepare_bcm_driver
+
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
 ifdef CONFIG_HEADERS_CHECK
 	$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
@@ -932,7 +1141,20 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+$(info * vmlinux-init: $(vmlinux-init))
+$(info * vmlinux-main: $(vmlinux-main))
+$(info * vmlinux-lds: $(vmlinux-lds))
+$(info *)
+$(info * vmlinux-dirs-1: $(vmlinux-dirs-1))
+$(info * brcmdriver-dirs: $(brcmdriver-dirs))
+$(info * vmlinux-dirs-2: $(vmlinux-dirs-2))
+$(info *)
+$(info $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs-1) $(brcmdriver-dirs) $(vmlinux-dirs-2));
+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs-1) $(brcmdriver-dirs) $(vmlinux-dirs-2);
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
 # Preset locale variables to speed up the build process. Limit locale
@@ -940,9 +1162,24 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
 # make menuconfig etc.
 # Error messages still appears in the original language
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+PHONY += $(vmlinux-dirs-1)
+$(vmlinux-dirs-1): prepare scripts
+	$(Q)$(MAKE) BCM_KBUILD_CMDLINE_FLAGS='$(BCM_KBUILD_CFLAGS)' $(build)=$@
+
+# temporary: force bcmdriver compile to be serialized using -j1
+PHONY += $(brcmdriver-dirs)
+$(brcmdriver-dirs): $(vmlinux-dirs-1) | prepare_bcm_driver
+	$(Q)$(MAKE) -j1 $(build)=$@
+
+PHONY += $(vmlinux-dirs-2)
+$(vmlinux-dirs-2): $(brcmdriver-dirs)
+	$(Q)$(MAKE) BCM_KBUILD_CMDLINE_FLAGS='$(BCM_KBUILD_CFLAGS)' $(build)=$@
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 PHONY += $(vmlinux-dirs)
 $(vmlinux-dirs): prepare scripts
 	$(Q)$(MAKE) $(build)=$@
+endif  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 # Store (new) KERNELRELASE string in include/config/kernel.release
 include/config/kernel.release: include/config/auto.conf FORCE
@@ -959,6 +1196,19 @@ include/config/kernel.release: include/config/auto.conf FORCE
 # Listed in dependency order
 PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+PHONY += prepare_bcm_driver
+$(brcmdrivers-y): | prepare_bcm_driver
+
+prepare_bcm_driver:
+	$(Q)$(MAKE) -C $(BRCMDRIVERS_DIR) symlinks
+
+version_info:
+	$(Q)$(MAKE) -C $(BRCMDRIVERS_DIR) version_info
+
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
+
 # prepare3 is used to check if we are building in a separate output directory,
 # and if so do:
 # 1) Check that make has not been executed in the kernel src $(srctree)
@@ -985,7 +1235,11 @@ prepare0: archprepare FORCE
 	$(Q)$(MAKE) $(build)=.
 
 # All the preparing..
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+prepare: prepare0 | prepare_bcm_driver
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 prepare: prepare0
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 # Generate some files
 # ---------------------------------------------------------------------------
@@ -1077,6 +1331,15 @@ headers_check: headers_install
 	$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/asm $(hdr-dst) HDRCHECK=1
 
 # ---------------------------------------------------------------------------
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+nvram_3k: all
+	@mv $(CURDIR)/vmlinux $(CURDIR)/vmlinux_secureboot
+	@mv $(CURDIR)/vmlinux.restore $(CURDIR)/vmlinux
+
+# ---------------------------------------------------------------------------
+endif #BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 # Modules
 
 ifdef CONFIG_MODULES
@@ -1092,18 +1355,35 @@ all: modules
 #	using awk while concatenating to the final file.
 
 PHONY += modules
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
+$(vmlinux-dirs-2) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin : | prepare_bcm_driver
+
+modules: $(vmlinux-dirs-2) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+	$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=%/modules.order) > $(objtree)/modules.order
+	@$(kecho) '  Building modules, stage 2.';
+	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 	$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 	@$(kecho) '  Building modules, stage 2.';
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 
 modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
 	$(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+%/modules.builtin: include/config/auto.conf | prepare_bcm_driver
+	$(Q)$(MAKE) $(modbuiltin)=$*
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 %/modules.builtin: include/config/auto.conf
 	$(Q)$(MAKE) $(modbuiltin)=$*
-
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 # Target to prepare building external modules
 PHONY += modules_prepare
@@ -1111,13 +1391,23 @@ modules_prepare: prepare scripts
 
 # Target to install modules
 PHONY += modules_install
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+modules_install: _modinst_
+$(info "1 install-dir is $(install-dir)")
+$(info "1 INSTALL_MOD_DIR is $(INSTALL_MOD_DIR)")
+$(info "1 MODLIB is $(MODLIB)")
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 modules_install: _modinst_ _modinst_post
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 PHONY += _modinst_
 _modinst_:
 	@rm -rf $(MODLIB)/kernel
 	@rm -f $(MODLIB)/source
 	@mkdir -p $(MODLIB)/kernel
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+	#just a comment, to pacify testfpatch
+else  # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 	@ln -s $(srctree) $(MODLIB)/source
 	@if [ ! $(objtree) -ef  $(MODLIB)/build ]; then \
 		rm -f $(MODLIB)/build ; \
@@ -1125,6 +1415,7 @@ _modinst_:
 	fi
 	@cp -f $(objtree)/modules.order $(MODLIB)/
 	@cp -f $(objtree)/modules.builtin $(MODLIB)/
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
 
 # This depmod is only for convenience to give the initial
@@ -1147,7 +1438,6 @@ modules modules_install: FORCE
 	@echo "Then build a kernel with module support enabled."
 	@echo
 	@exit 1
-
 endif # CONFIG_MODULES
 
 ###
@@ -1162,6 +1452,11 @@ CLEAN_DIRS  += $(MODVERDIR)
 CLEAN_FILES +=	vmlinux System.map \
                 .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+CLEAN_FILES += vmlinux.map
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
+
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
                   arch/*/include/generated
@@ -1372,6 +1667,12 @@ PHONY += modules_install
 modules_install: _emodinst_ _emodinst_post
 
 install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra)
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+$(info "2 install-dir is $(install-dir)")
+$(info "2 INSTALL_MOD_DIR is $(INSTALL_MOD_DIR)")
+$(info "2 MODLIB is $(MODLIB)")
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+
 PHONY += _emodinst_
 _emodinst_:
 	$(Q)mkdir -p $(MODLIB)/$(install-dir)
@@ -1384,7 +1685,12 @@ _emodinst_post: _emodinst_
 clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD))
 
 PHONY += $(clean-dirs) clean
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+$(clean-dirs): | prepare_bcm_driver 
+else # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 $(clean-dirs):
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 	$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
 
 clean:	rm-dirs := $(MODVERDIR)
@@ -1414,6 +1720,17 @@ clean: $(clean-dirs)
 		-o -name '*.symtypes' -o -name 'modules.order' \
 		-o -name modules.builtin -o -name '.tmp_*.o.*' \
 		-o -name '*.gcno' \) -type f -print | xargs rm -f
+ifdef BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+	@echo Cleaning bcmdrivers
+	@find . $(BRCMDRIVERS) $(RCS_FIND_IGNORE) \
+		\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+		-o -name '*.symtypes' -o -name 'modules.order' \
+		-o -name modules.builtin -o -name '.tmp_*.o.*' \
+		-o -name '*.gcno' \) -type f -print | xargs rm -f
+	@echo Cleaning bcmlinks
+	$(Q)$(MAKE) -C $(BRCMDRIVERS_DIR) cleanlinks
+endif # BCM_KF #defined(CONFIG_BCM_KF_MISC_MAKEFILE)
 
 # Generate tags for editors
 # ---------------------------------------------------------------------------
diff --git a/android/configs/README b/android/configs/README
new file mode 100644
index 0000000000000000000000000000000000000000..391dffa6f85f5643ae70ddc48e8c9f4c26465282
--- /dev/null
+++ b/android/configs/README
@@ -0,0 +1,13 @@
+The files in this directory are meant to be used as a base for an Android
+kernel config. All devices should have the options in android-base.cfg enabled.
+While not mandatory, the options in android-recommended.cfg enable advanced
+Android features.
+
+Assuming you already have a minimalist defconfig for your device, a possible
+way to enable these options would be:
+
+     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
+
+This will generate a .config that can then be used to save a new defconfig or
+compile a new kernel with Android features enabled.
+
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..57f59b71e1d54c80b990cfb202733bfd3f61927f
--- /dev/null
+++ b/android/configs/android-base.cfg
@@ -0,0 +1,220 @@
+# CONFIG_INET_LRO is not set
+CONFIG_MODULES=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SHMEM=y
+CONFIG_ASHMEM=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_DM_CRYPT=y
+CONFIG_EMBEDDED=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_FB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_NAT=y
+CONFIG_NO_HZ=y
+CONFIG_PACKET=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PPP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PREEMPT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_STAGING=y
+CONFIG_SWITCH=y
+CONFIG_SYNC=y
+CONFIG_SYSVIPC=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_XFRM_USER=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y
+CONFIG_CGROUP_MEM_RES_CTLR_KMEM=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+# CONFIG_BLK_CGROUP is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_MM_OWNER=y
+CONFIG_EVENTFD=y
+CONFIG_FREEZER=y
+# CONFIG_NET_CLS_CGROUP is not set
+# CONFIG_NETPRIO_CGROUP is not set
+# CONFIG_VHOST_NET is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_SIGNALFD=y
+CONFIG_EPOLL=y
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
+# CONFIG_YAFFS_DOES_ECC is not set
+CONFIG_YAFFS_YAFFS2=y
+CONFIG_YAFFS_AUTO_YAFFS2=y
+# CONFIG_YAFFS_DISABLE_TAGS_ECC is not set
+# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
+# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set
+# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set
+# CONFIG_YAFFS_DISABLE_BACKGROUND is not set
+CONFIG_YAFFS_XATTR=y
+# CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING is not set
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NANDSIM=m
+CONFIG_VETH=y
+CONFIG_INPUT=y
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+# CONFIG_RC_CORE is not set
+CONFIG_HID_SUPPORT=y
+# CONFIG_HIDRAW is not set
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_TRIGGERS is not set
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
diff --git a/android/configs/android-bcm-recommended.cfg b/android/configs/android-bcm-recommended.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..31a2a6648311c54788d60497bbfae71d4ad68be3
--- /dev/null
+++ b/android/configs/android-bcm-recommended.cfg
@@ -0,0 +1,240 @@
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# CONFIG_ARM_ERRATA_775420 is not set
+# CONFIG_FIQ_DEBUGGER is not set
+CONFIG_HW_PERF_EVENTS=y
+# CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_APM_EMULATION is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+# CONFIG_NET_ACTIVITY_STATS is not set
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_RPS=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_DEVMEM=y
+# CONFIG_DCC_TTY is not set
+CONFIG_TEST_POWER=y
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_MEDIA_CONTROLLER is not set
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+CONFIG_FB_VIRTUAL=y
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_LOGO is not set
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=y
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_SPI=y
+# CONFIG_SND_USB is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+# CONFIG_USB_FUSB300 is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_CI13XXX_PCI is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_LANGWELL is not set
+# CONFIG_USB_EG20T is not set
+CONFIG_USB_DUMMY_HCD=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_HCTOSYS_DEVICE is not set
+# CONFIG_RTC_DEBUG is not set
+# CONFIG_RTC_INTF_SYSFS is not set
+# CONFIG_RTC_INTF_PROC is not set
+# CONFIG_RTC_INTF_DEV is not set
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_ET131X is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_ECHO is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_RTS_PSTOR is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_DX_SEP is not set
+# CONFIG_IIO is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+# CONFIG_STAGING_MEDIA is not set
+# CONFIG_PERSISTENT_TRACER is not set
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
+# CONFIG_ANDROID_SWITCH is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_EXT4_USE_FOR_EXT23 is not set
+# CONFIG_EXT4_FS_XATTR is not set
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_DEBUG is not set
+# CONFIG_JBD2_DEBUG is not set
+# CONFIG_DEBUG_RODATA is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_DBGP is not set
+CONFIG_USB_G_HID=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_SENSORS_AK8975 is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_INPUT is not set
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..85054750e77d7529946d01b3048b6c1282694d9e
--- /dev/null
+++ b/android/configs/android-recommended.cfg
@@ -0,0 +1,117 @@
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_COMPACTION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_PM_WAKELOCKS_GC is not set
+# CONFIG_VT is not set
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_TIMED_GPIO=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_DM_UEVENT=y
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+CONFIG_FUSE_FS=y
+# CONFIG_GREENASIA_FF is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_ACRUX_FF is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_LOGITECH_DJ is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KEYRESET is not set
+# CONFIG_INPUT_MISC is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_ION is not set
+# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_JOYSTICK_XPAD_FF is not set
+# CONFIG_JOYSTICK_XPAD_LEDS is not set
+CONFIG_KSM=y
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGITECH_FF is not set
+CONFIG_MD=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MSDOS_FS=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_PM_DEBUG=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_POWER_SUPPLY=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_TRACER=y
+# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_SND is not set
+# CONFIG_SOUND is not set
+CONFIG_SUSPEND_TIME=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_TIMER_STATS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UHID=y
+CONFIG_UID_STAT=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_USBNET=y
+CONFIG_VFAT_FS=y
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 48549bc51e44980e10dbe722b64acf825f05e731..635ba249dc1bb6d867d47ffd8bbc4025cb528243 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -72,7 +72,6 @@ config GENERIC_CLOCKEVENTS_BROADCAST
 
 config KTIME_SCALAR
 	bool
-	default y
 
 config HAVE_TCM
 	bool
@@ -1010,8 +1009,43 @@ config ARCH_ZYNQ
 	select USE_OF
 	help
 	  Support for Xilinx Zynq ARM Cortex A9 Platform
+
+config ARCH_BCM63XX
+	bool "Broadcom 63XX Architecture"
+	depends on BCM_KF_ARM_BCM963XX
+	help
+	  Support for Broadcom 63XX series architecture
+
 endchoice
 
+config BUZZZ
+	depends on BCM_KF_ARM_BCM963XX
+	bool "Broadcom BUZZZ Tool Framework"
+	default n
+	help
+	  Enable kernel tools framework: event/performance/function call tracing
+
+config BUZZZ_FUNC
+	depends on BCM_KF_ARM_BCM963XX
+	bool "Broadcom BUZZZ Func Tool"
+	default n
+	help
+	  Enable Function tracing tool
+
+config BUZZZ_PMON
+	depends on BCM_KF_ARM_BCM963XX
+	bool "Broadcom BUZZZ PMON Tool"
+	default n
+	help
+	  Enable performance monitoring tool
+
+config BUZZZ_KEVT
+	depends on BCM_KF_ARM_BCM963XX
+	bool "Broadcom BUZZZ Kernel Event Tracing Tool"
+	default n
+	help
+	  Enable Kernel Event Tracing tool
+
 #
 # This is sorted alphabetically by mach-* pathname.  However, plat-*
 # Kconfigs may be included either alphabetically (according to the
@@ -1127,6 +1161,9 @@ source "arch/arm/plat-versatile/Kconfig"
 source "arch/arm/mach-vt8500/Kconfig"
 
 source "arch/arm/mach-w90x900/Kconfig"
+if BCM_KF_ARM_BCM963XX
+source "arch/arm/plat-bcm63xx/Kconfig"
+endif
 
 # Definitions to make life easier
 config ARCH_ACORN
@@ -1406,6 +1443,29 @@ config PL310_ERRATA_769419
 	  on systems with an outer cache, the store buffer is drained
 	  explicitly.
 
+config ARM_ERRATA_775420
+       bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
+       depends on CPU_V7
+       depends on BCM_KF_ANDROID && BCM_ANDROID
+       help
+	 This option enables the workaround for the 775420 Cortex-A9 (r2p2,
+	 r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
+	 operation aborts with MMU exception, it might cause the processor
+	 to deadlock. This workaround puts DSB before executing ISB if
+	 an abort may occur on cache maintenance.
+
+if BCM_KF_ARM_BCM963XX
+config ARM_ERRATA_798181
+	bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+	depends on CPU_V7 && SMP
+	help
+	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+	  adequately shooting down all use of the old entries. This
+	  option enables the Linux kernel workaround for this erratum
+	  which sends an IPI to the CPUs that are running the same ASID
+	  as the one being invalidated.
+endif
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1468,6 +1528,10 @@ config PCI_HOST_ITE8152
 
 source "drivers/pci/Kconfig"
 
+if BCM_KF_ARM_BCM963XX
+source "drivers/pci/pcie/Kconfig"
+endif
+
 source "drivers/pcmcia/Kconfig"
 
 endmenu
@@ -1627,6 +1691,7 @@ config HZ
 	default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
 	default AT91_TIMER_HZ if ARCH_AT91
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
+	default 1000 if ARCH_BCM63XX && BCM_KF_ARM_BCM963XX
 	default 100
 
 config THUMB2_KERNEL
@@ -1886,6 +1951,16 @@ config DEPRECATED_PARAM_STRUCT
 	  This was deprecated in 2001 and announced to live on for 5 years.
 	  Some old boot loaders still use this way.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+	bool "Force flush the console on restart"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  If the console is locked while the system is rebooted, the messages
+	  in the temporary logbuffer would not have propagated to all the
+	  console drivers. This option forces the console lock to be
+	  released if it failed to be acquired, which will cause all the
+	  pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
@@ -1898,6 +1973,23 @@ config USE_OF
 	help
 	  Include support for flattened device tree machine descriptions.
 
+config BUILD_ARM_APPENDED_DTB_IMAGE
+	bool "Build a concatenated zImage/dtb by default"
+	depends on OF
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Enabling this option will cause a concatenated zImage and list of
+	  DTBs to be built by default (instead of a standalone zImage.)
+	  The image will be built in arch/arm/boot/zImage-dtb
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM_APPENDED_DTB_IMAGE
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Space separated list of names of dtbs to append when
+	  building a concatenated zImage-dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
 config ZBOOT_ROM_TEXT
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85348a09d655afc220fc55f71810b07830ea8936..74f50d14c2e586d44b719af60e626a988bf8c757 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,6 +63,29 @@ config DEBUG_USER
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
+config DEBUG_RODATA
+	bool "Write protect kernel text section"
+	default n
+	depends on DEBUG_KERNEL && MMU
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	---help---
+	  Mark the kernel text section as write-protected in the pagetables,
+	  in order to catch accidental (and incorrect) writes to such const
+	  data. This will cause the size of the kernel, plus up to 4MB, to
+	  be mapped as pages instead of sections, which will increase TLB
+	  pressure.
+	  If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+	bool "Testcase for the DEBUG_RODATA feature"
+	depends on DEBUG_RODATA
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	---help---
+	  This option enables a testcase for the DEBUG_RODATA
+	  feature.
+	  If in doubt, say "N"
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 047a20780fc15a5b753c3ea80c2178efed29c18c..6bdb1d4dba3e7e0a9decb2c3163ca4cfbe64e8fa 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -116,6 +116,24 @@ endif
 KBUILD_CFLAGS	+=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
 KBUILD_AFLAGS	+=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+# Copied from bigisland's Makefile; most likely it also applies to BCM63138
+# ifeq ($(CONFIG_VFP),y)
+# ifeq ($(CONFIG_VFPv3),y)
+# # v3 vfp
+# KBUILD_CFLAGS	+=-mfpu=vfp3 -mfloat-abi=softfp
+# KBUILD_AFLAGS	+=-mfpu=vfp3 -mfloat-abi=softfp 
+# else
+# #non-v3 vfp
+# KBUILD_CFLAGS	+=-mfloat-abi=softfp
+# KBUILD_AFLAGS	+=-mfloat-abi=softfp
+# endif
+# else
+# #no vfp
+# KBUILD_CFLAGS	+=-mfloat-abi=soft
+# KBUILD_AFLAGS	+=-mfloat-abi=soft
+# endif
+endif # BCM_KF # CONFIG_BCM_KF_MISC_MAKEFILE
 CHECKFLAGS	+= -D__arm__
 
 #Default value
@@ -136,6 +154,9 @@ textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
 # by CONFIG_* macro name.
 machine-$(CONFIG_ARCH_AT91)		:= at91
 machine-$(CONFIG_ARCH_BCMRING)		:= bcmring
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+machine-$(CONFIG_ARCH_BCM63XX)		:= bcm963xx
+endif # BCM_KF
 machine-$(CONFIG_ARCH_CLPS711X)		:= clps711x
 machine-$(CONFIG_ARCH_CNS3XXX)		:= cns3xxx
 machine-$(CONFIG_ARCH_DAVINCI)		:= davinci
@@ -200,6 +221,9 @@ machine-$(CONFIG_ARCH_ZYNQ)		:= zynq
 
 # Platform directory name.  This list is sorted alphanumerically
 # by CONFIG_* macro name.
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+plat-$(CONFIG_ARCH_BCM63XX)	:= bcm63xx
+endif # BCM_KF
 plat-$(CONFIG_ARCH_MXC)		:= mxc
 plat-$(CONFIG_ARCH_OMAP)	:= omap
 plat-$(CONFIG_ARCH_S3C64XX)	:= samsung
@@ -261,11 +285,30 @@ drivers-$(CONFIG_OPROFILE)      += arch/arm/oprofile/
 libs-y				:= arch/arm/lib/ $(libs-y)
 
 # Default target when executing plain make
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+ifeq ($(CONFIG_XIP_KERNEL),y)
+KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb
+else
+KBUILD_IMAGE := zImage
+endif
+else # BCM_ANDROID
 ifeq ($(CONFIG_XIP_KERNEL),y)
 KBUILD_IMAGE := xipImage
 else
 KBUILD_IMAGE := zImage
 endif
+endif # BCM_ANDROID
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifeq ($(CONFIG_XIP_KERNEL),y)
+KBUILD_IMAGE := xipImage
+else
+KBUILD_IMAGE := zImage
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 all:	$(KBUILD_IMAGE)
 
@@ -283,10 +326,29 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+%.dtb: scripts
+else
 %.dtb:
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+%.dtb:
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+dtbs: scripts
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
+zImage-dtb: vmlinux scripts
+else
+dtbs:
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 dtbs:
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index c877087d2000cf054be2c086f3de957555f46066..929eeeceddf29e2af03a9ffa562eabe99c8431c9 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -27,6 +27,17 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/,$(DTB_LIST))
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
@@ -55,15 +66,39 @@ $(obj)/zImage:	$(obj)/compressed/vmlinux FORCE
 	$(call if_changed,objcopy)
 	@echo '  Kernel: $@ is ready'
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+$(obj)/zImage-dtb:	$(obj)/zImage $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+	@echo '  Kernel: $@ is ready'
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+
 endif
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+targets += $(DTB_LIST)
+else
+targets += $(dtb-y)
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 targets += $(dtb-y)
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 # Rule to build device tree blobs
 $(obj)/%.dtb: $(src)/dts/%.dts FORCE
 	$(call if_changed_dep,dtc)
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+$(obj)/dtbs: $(DTB_OBJS)
+else
+$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 $(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 clean-files := *.dtb
 
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index bb267562e7ed9a763218c83eaeb2c5c8645debb9..8c1999be893d09cc2b3689d6124adff0c1b1bdb0 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -126,8 +126,19 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -fno-builtin -I$(obj)
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+asflags-y :=
+else
+asflags-y := -Wa,-march=all
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 asflags-y := -Wa,-march=all
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ARM_BCM963XX)
+EXTRA_CFLAGS += -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+endif # BCM_KF # defined(CONFIG_BCM_KF_ARM_BCM963XX)
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
 		awk 'END{print $$3}')
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index dc7e8ce8e6bed9ad10a696de4c8bbecdb909fdad..b5118dc5970e302b1b30088a87baa2cf14dacb02 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -10,6 +10,9 @@
  */
 #include <linux/linkage.h>
 
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	.arch	armv7-a
+#endif
 /*
  * Debugging stuff
  *
@@ -648,6 +651,9 @@ __armv7_mmu_cache_on:
 		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
 #endif
 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
+#endif
 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
 		orr	r0, r0, #0x003c		@ write buffer
 #ifdef CONFIG_MMU
@@ -766,14 +772,37 @@ proc_types:
 @		b	__arm6_mmu_cache_off
 @		b	__armv3_mmu_cache_flush
 
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_B15_CORE)
+		.word	0x41000000		@ old ARM ID
+		.word	0xff00f000
+#else
+		.word	0x00000000		@ old ARM ID
+		.word	0x0000f000
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_B15_CORE) */
+		mov	pc, lr
+ THUMB(		nop				)
+		mov	pc, lr
+ THUMB(		nop				)
+		mov	pc, lr
+ THUMB(		nop				)
+#else
+#if !defined(CONFIG_CPU_V7)
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_B15_CORE)
+		.word	0x41000000		@ old ARM ID
+		.word	0xff00f000
+#else
 		.word	0x00000000		@ old ARM ID
 		.word	0x0000f000
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_B15_CORE) */
 		mov	pc, lr
  THUMB(		nop				)
 		mov	pc, lr
  THUMB(		nop				)
 		mov	pc, lr
  THUMB(		nop				)
+#endif
+#endif /* ANDROID */
 
 		.word	0x41007000		@ ARM7/710
 		.word	0xfff8fe00
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 283fa1d804f4d34208e544f5c3064a72457afc52..8aee6a1de845379d9c2a41b4afe6bbebc1466251 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -40,3 +40,59 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
 	bool
+
+config FIQ_GLUE
+	bool
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select FIQ
+
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select FIQ
+	select FIQ_GLUE
+	default n
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel commandline will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock.  This will
+	  cause extra interrupts, but it makes the serial debugger usable
+	  on some MSM radio builds that ignore the uart clock request in power
+	  collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 215816f1775f5a7a38ed4d5ea5d225aabbad337d..f99c175f1c4f320966b48335e3e387fb2a5bb56f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -15,3 +15,9 @@ obj-$(CONFIG_ARCH_IXP2000)	+= uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)	+= uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_FIQ_GLUE)		+= fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER)	+= fiq_debugger.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 0000000000000000000000000000000000000000..46e09ae8a8aacffd42f1a47bbd2b26af863cbed0
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,113 @@
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+		.text
+
+		.global fiq_glue_end
+
+		/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+		/* store pc, cpsr from previous mode */
+		mrs	r12, spsr
+		sub	r11, lr, #4
+		subs	r10, #1
+		bne	nested_fiq
+
+		stmfd	sp!, {r11-r12, lr}
+
+		/* store r8-r14 from previous mode */
+		sub	sp, sp, #(7 * 4)
+		stmia	sp, {r8-r14}^
+		nop
+
+		/* store r0-r7 from previous mode */
+		stmfd	sp!, {r0-r7}
+
+		/* setup func(data,regs) arguments */
+		mov	r0, r9
+		mov	r1, sp
+		mov	r3, r8
+
+		mov	r7, sp
+
+		/* Get sp and lr from non-user modes */
+		and	r4, r12, #MODE_MASK
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode
+
+		mov	r7, sp
+		orr	r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+		msr	cpsr_c, r4
+		str	sp, [r7, #(4 * 13)]
+		str	lr, [r7, #(4 * 14)]
+		mrs	r5, spsr
+		str	r5, [r7, #(4 * 17)]
+
+		cmp	r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		/* use fiq stack if we reenter this mode */
+		subne	sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+		msr	cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		mov	r2, sp
+		sub	sp, r7, #12
+		stmfd	sp!, {r2, ip, lr}
+		/* call func(data,regs) */
+		blx	r3
+		ldmfd	sp, {r2, ip, lr}
+		mov	sp, r2
+
+		/* restore/discard saved state */
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode_exit
+
+		msr	cpsr_c, r4
+		ldr	sp, [r7, #(4 * 13)]
+		ldr	lr, [r7, #(4 * 14)]
+		msr	spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+		msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+		ldmfd	sp!, {r0-r7}
+		add	sp, sp, #(7 * 4)
+		ldmfd	sp!, {r11-r12, lr}
+exit_fiq:
+		msr	spsr_cxsf, r12
+		add	r10, #1
+		movs	pc, r11
+
+nested_fiq:
+		orr	r12, r12, #(PSR_F_BIT)
+		b	exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+		mrs		r3, cpsr
+		msr		cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+		movs		r8, r0
+		mov		r9, r1
+		mov		sp, r2
+		moveq		r10, #0
+		movne		r10, #1
+		msr		cpsr_c, r3
+		bx		lr
+
+#endif
diff --git a/arch/arm/configs/bcm963138_defconfig b/arch/arm/configs/bcm963138_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..1b3bd6487bbf217b33a07820fb1c15a504e87f1b
--- /dev/null
+++ b/arch/arm/configs/bcm963138_defconfig
@@ -0,0 +1,1141 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm 3.4.11 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_NEED_MACH_MEMORY_H=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE="arm-unknown-linux-uclibcgnueabi-"
+CONFIG_LOCALVERSION="brcmarm"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_CHECKPOINT_RESTORE is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="initramfs-base-files.txt"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+CONFIG_SHMEM=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C24XX is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_ARCH_BCM5301X is not set
+CONFIG_ARCH_BCM63XX=y
+# CONFIG_ARCH_KONA is not set
+
+#
+# System MMU
+#
+CONFIG_BOARD_ZRELADDR=0x00008000
+CONFIG_BOARD_PARAMS_PHYS=0x00000000
+CONFIG_DEBUG_UART_ADDR=0x8001E000
+CONFIG_HZ=100
+CONFIG_MACH_BCM963138=y
+# CONFIG_PLAT_MPCORE is not set
+# CONFIG_CACHE_L310 is not set
+CONFIG_PLAT_SMP=y
+# CONFIG_ARCH_HAS_HEAD_FIXUP is not set
+CONFIG_PLAT_CA9_MPCORE=y
+CONFIG_PLAT_BCM63138=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_LPAE is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
+CONFIG_CACHE_PL310=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_ARM_NR_BANKS=8
+CONFIG_CPU_HAS_PMU=y
+CONFIG_MULTI_IRQ_HANDLER=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_742230 is not set
+# CONFIG_ARM_ERRATA_742231 is not set
+# CONFIG_PL310_ERRATA_588369 is not set
+# CONFIG_ARM_ERRATA_720789 is not set
+# CONFIG_PL310_ERRATA_727915 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_751472 is not set
+# CONFIG_PL310_ERRATA_753970 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+# CONFIG_ARM_ERRATA_754327 is not set
+# CONFIG_ARM_ERRATA_764369 is not set
+# CONFIG_PL310_ERRATA_769419 is not set
+CONFIG_ARM_GIC=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+CONFIG_ARM_CPU_TOPOLOGY=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_SCHED_SMT is not set
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_LOCAL_TIMERS is not set
+CONFIG_ARCH_NR_GPIO=0
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT__LL is not set
+# CONFIG_PREEMPT_RTB is not set
+# CONFIG_PREEMPT_RT_FULL is not set
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyAMA0,115200 earlyprintk debug"
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+# CONFIG_CMDLINE_EXTEND is not set
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+CONFIG_FPE_FASTFPE=y
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ARM_CPU_SUSPEND is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_BLOG is not set
+# CONFIG_BLOG_IPV6 is not set
+# CONFIG_BLOG_MCAST is not set
+# CONFIG_BLOG_FEATURE is not set
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+CONFIG_BQL=y
+CONFIG_HAVE_BPF_JIT=y
+# CONFIG_BPF_JIT is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Runner
+#
+# CONFIG_BRCM_DRIVER_RUNNER is not set
+# CONFIG_BRCM_RUNNER_GPON is not set
+# CONFIG_BRCM_RUNNER_RG is not set
+# CONFIG_BRCM_RUNNER_BR is not set
+CONFIG_BRCM_RUNNER_BR_IMPL=1
+CONFIG_BRCM_RUNNER_RG_IMPL=1
+# CONFIG_BRCM_GMP is not set
+CONFIG_BRCM_GMP_IMPL=1
+# CONFIG_BRCM_GMP_GPL is not set
+CONFIG_BRCM_GMP_GPL_IMPL=1
+# CONFIG_BRCM_GMP_MW is not set
+CONFIG_BRCM_GMP_MW_IMPL=1
+# CONFIG_BCM_GMP_OS_SHIM is not set
+CONFIG_BCM_GMP_OS_SHIM_IMPL=1
+# CONFIG_BCM_GMP_SHELL is not set
+CONFIG_BCM_GMP_SHELL_IMPL=1
+# CONFIG_BCM_GMP_LOGGER is not set
+CONFIG_BCM_GMP_LOGGER_IMPL=1
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_DEBUG_DRIVER=y
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+# CONFIG_DMA_SHARED_BUFFER is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=1
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_RBD is not set
+
+#
+# Misc devices
+#
+# CONFIG_ATMEL_PWM is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+
+#
+# Altera FPGA firmware download module
+#
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_MII is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_ETHERNET=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_DM9000 is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_PHYLIB is not set
+CONFIG_PPP=y
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_FILTER is not set
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPPOE=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_EXYNOS_VIDEO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_ARCH_HAS_XHCI is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_MACH_CLKDEV=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers (EXPERIMENTAL)
+#
+
+#
+# Rpmsg drivers (EXPERIMENTAL)
+#
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_PM_DEVFREQ is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_FSNOTIFY is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_XATTR is not set
+CONFIG_SQUASHFS_ZLIB=y
+# CONFIG_SQUASHFS_LZO is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+CONFIG_SQUASHFS_EMBEDDED=y
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_BCM_SCHEDAUDIT is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_NONE=y
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_SEMIHOSTING is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_USER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_NO_GENERIC_PCI_IOPORT_MAP is not set
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IO=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+# CONFIG_AVERAGE is not set
+# CONFIG_CORDIC is not set
diff --git a/arch/arm/configs/bcm963138_sim_defconfig b/arch/arm/configs/bcm963138_sim_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..703a9a7b312daf396f2568fa340c6815a7db4485
--- /dev/null
+++ b/arch/arm/configs/bcm963138_sim_defconfig
@@ -0,0 +1,978 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm 3.4.11 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_NEED_MACH_MEMORY_H=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION="brcmarm"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_FHANDLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_CHECKPOINT_RESTORE is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+CONFIG_SHMEM=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C24XX is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_ARCH_BCM5301X is not set
+CONFIG_ARCH_BCM63XX=y
+# CONFIG_ARCH_KONA is not set
+
+#
+# System MMU
+#
+CONFIG_BOARD_ZRELADDR=0x00008000
+CONFIG_BOARD_PARAMS_PHYS=0x00000000
+CONFIG_DEBUG_UART_ADDR=0x80019000
+CONFIG_HZ=100
+CONFIG_MACH_BCM963138=y
+CONFIG_BCM63138_SIM=y
+# CONFIG_PLAT_MPCORE is not set
+# CONFIG_CACHE_L310 is not set
+CONFIG_PLAT_SMP=y
+# CONFIG_ARCH_HAS_HEAD_FIXUP is not set
+CONFIG_PLAT_CA9_MPCORE=y
+CONFIG_PLAT_BCM63138=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_LPAE is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
+CONFIG_CACHE_PL310=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_ARM_NR_BANKS=8
+CONFIG_CPU_HAS_PMU=y
+CONFIG_MULTI_IRQ_HANDLER=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_742230 is not set
+# CONFIG_ARM_ERRATA_742231 is not set
+# CONFIG_PL310_ERRATA_588369 is not set
+# CONFIG_ARM_ERRATA_720789 is not set
+# CONFIG_PL310_ERRATA_727915 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_751472 is not set
+# CONFIG_PL310_ERRATA_753970 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+# CONFIG_ARM_ERRATA_754327 is not set
+# CONFIG_ARM_ERRATA_764369 is not set
+# CONFIG_PL310_ERRATA_769419 is not set
+CONFIG_ARM_GIC=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+CONFIG_ARM_CPU_TOPOLOGY=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_SCHED_SMT is not set
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_LOCAL_TIMERS is not set
+CONFIG_ARCH_NR_GPIO=0
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT__LL is not set
+# CONFIG_PREEMPT_RTB is not set
+# CONFIG_PREEMPT_RT_FULL is not set
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyAMA0,115200n8 debug earlyprintk mem=32M bootmemheap initrd=0x00c00000,524288 nosmp"
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+# CONFIG_CMDLINE_EXTEND is not set
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+CONFIG_FPE_FASTFPE=y
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ARM_CPU_SUSPEND is not set
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+# CONFIG_DMA_SHARED_BUFFER is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=1
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=2048
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_ATMEL_PWM is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+
+#
+# Altera FPGA firmware download module
+#
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_EXYNOS_VIDEO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_ARCH_HAS_XHCI is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_HAVE_MACH_CLKDEV=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers (EXPERIMENTAL)
+#
+
+#
+# Rpmsg drivers (EXPERIMENTAL)
+#
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_PM_DEVFREQ is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_FSNOTIFY is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_BCM_SCHEDAUDIT is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_NONE=y
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_SEMIHOSTING is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_NO_GENERIC_PCI_IOPORT_MAP is not set
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IO=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+# CONFIG_AVERAGE is not set
+# CONFIG_CORDIC is not set
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 05112380dc5398dd47cbd9fb6adf564f96a4c94c..2d0e078a6a607b6fec3f093e7966fd93cb32e118 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -13,6 +13,10 @@
 #define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#include <mach/barriers.h>
+#else /* !defined(CONFIG_BCM_KF_ARM_BCM963XX) */
+
 #if __LINUX_ARM_ARCH__ >= 7
 #define isb() __asm__ __volatile__ ("isb" : : : "memory")
 #define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
@@ -59,6 +63,7 @@
 #define smp_rmb()	dmb()
 #define smp_wmb()	dmb()
 #endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 42dec04f6170d1871b8056bb35e5e02c923a7b70..28ed329d903b206fff4e287e010febef6f60b59a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -97,6 +97,9 @@
 struct cpu_cache_fns {
 	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	void (*flush_kern_louis)(void);
+#endif
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
@@ -119,6 +122,9 @@ extern struct cpu_cache_fns cpu_cache;
 
 #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
+#endif
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
@@ -139,6 +145,9 @@ extern struct cpu_cache_fns cpu_cache;
 
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+extern void __cpuc_flush_kern_louis(void);
+#endif
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -157,6 +166,17 @@ extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#ifdef CONFIG_CPU_CACHE_V7
+#define __cpuc_flush_line(_addr)	\
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c14, 1" : : "r" (_addr));
+#define __cpuc_clean_line(_addr)	\
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 1" : : "r" (_addr));
+#define __cpuc_inv_line(_addr)		\
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c6, 1" : : "r" (_addr));
+#endif
+#endif
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
@@ -202,8 +222,18 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 static inline void __flush_icache_all(void)
 {
 	__flush_icache_preferred();
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	dsb();
+#endif
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis()		__cpuc_flush_kern_louis()
+#endif
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -339,7 +369,11 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 		 * set_pte_at() called from vmap_pte_range() does not
 		 * have a DSB after cleaning the cache line.
 		 */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+		dsb(ishst);
+#else
 		dsb();
+#endif
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1f81c4cfb43de4d1f08dbcec9f0cf7..2a133d63d4c5cc9e6cfde12cf63db768af370e2d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -9,6 +9,9 @@
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
 #define CPUID_MPIDR	5
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define CPUID_REVIDR	6
+#endif
 
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b729c31c8dbf33c9fa4334edfc07d..80453906923a81bb1c0d6d194b03a73aeb0fe225 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -117,6 +117,10 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 
 extern int dma_supported(struct device *, u64);
 extern int dma_set_mask(struct device *, u64);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+extern void ___dma_page_cpu_to_dev_flush(struct page *, unsigned long,
+	size_t, enum dma_data_direction);
+#endif
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 7e30874377e67bede33d1f4a047bfc4f1de65fc0..3439ecbb283245619b7c334e1a8c6c8d606eb374 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,9 @@
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis)
+#endif
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484dd7a039a2c5c9d060d6bba008c0ef..e17ee12695797268b6628f5d2e43a075cf8b05f7 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,15 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563dd3ebbc6be0f53448e107a05249cca859..2dc120489069062d0f20af0beec609790393d7d5 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -62,6 +62,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
 #define MT_DEVICE_WC		3
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+#define MT_DEVICE_NONSECURED	15
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 /*
  * types 4 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
@@ -268,6 +271,9 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_cached(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+#define ioremap_nonsecured(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_NONSECURED)
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 #define iounmap				__arm_iounmap
 
 /*
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f3654bf54ebcc9e1c9617663c706d81bcfd97..8700663f78925e90f2e208fac446b16deca415eb 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,9 @@ struct map_desc {
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
 #define MT_MEMORY_SO		14
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+#define MT_MEMORY_NONSECURED	16
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e49d748d12b1ac17fb6519e8d151b2..8f6e18b2d97c383768a354fa20e816885d80c528 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,14 +5,24 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	atomic64_t	id;
+#else
 	unsigned int id;
+#endif
 	raw_spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
+#else
 #define ASID(mm)	((mm)->context.id & 255)
+#endif
 
 /* init_mm.context.id_lock should be initialized. */
 #define INIT_MM_CONTEXT(name)                                                 \
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c0a9949c30cc919adcf5e08fcf500..e1b7de336fe0ae845eccc97e14ff63a1b703895b 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,12 +18,30 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
+#endif
 #include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else  /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
+#else
 /*
  * On ARMv6, we have the following structure in the Context ID:
  *
@@ -68,9 +86,55 @@ static inline void check_context(struct mm_struct *mm)
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181) */
 #else
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		mm->context.switch_pending = 1;
+	else
+		cpu_switch_mm(mm->pgd, mm);
+}
+
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	struct mm_struct *mm = current->mm;
+
+	if (mm && mm->context.switch_pending) {
+		/*
+		 * Preemption must be disabled during cpu_switch_mm() as we
+		 * have some stateful cache flush implementations. Check
+		 * switch_pending again in case we were preempted and the
+		 * switch to this mm was already done.
+		 */
+		preempt_disable();
+		if (mm->context.switch_pending) {
+			mm->context.switch_pending = 0;
+			cpu_switch_mm(mm->pgd, mm);
+		}
+		preempt_enable_no_resched();
+	}
+}
+
+#endif	/* CONFIG_MMU */
+#else
 static inline void check_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_MMU
@@ -78,6 +142,7 @@ static inline void check_context(struct mm_struct *mm)
 		__check_kvm_seq(mm);
 #endif
 }
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181) */
 
 #define init_new_context(tsk,mm)	0
 
@@ -112,19 +177,37 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	/*
+	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+	 * so check for possible thread migration and invalidate the I-cache
+	 * if we're new to this CPU.
+	 */
+	if (cache_ops_need_broadcast() &&
+	    !cpumask_empty(mm_cpumask(next)) &&
+	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
+		__flush_icache_all();
+
+#else
 #ifdef CONFIG_SMP
 	/* check for possible thread migration */
 	if (!cpumask_empty(mm_cpumask(next)) &&
 	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
 		__flush_icache_all();
+#endif
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		check_and_switch_context(next, tsk);
+		erratum_a15_798181();
+#else
 #ifdef CONFIG_SMP
 		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
 		*crt_mm = next;
 #endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
+#endif
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a1bde6f1b3fb510f404ea41ba3206..8f25549059dff448320eb86442cdeb8579271208 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,6 +35,13 @@ struct outer_cache_fns {
 #endif
 	void (*set_debug)(unsigned long);
 	void (*resume)(void);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	void (*spin_lock_irqsave)(void);
+	void (*spin_unlock_irqrestore)(void);
+	void (*sync_no_lock)(void);
+	void (*flush_line_no_lock)(unsigned long);
+	void (*inv_line_no_lock)(unsigned long);
+#endif
 };
 
 #ifdef CONFIG_OUTER_CACHE
@@ -81,6 +88,32 @@ static inline void outer_resume(void)
 		outer_cache.resume();
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static inline void outer_spin_lock_irqsave(void)
+{
+	outer_cache.spin_lock_irqsave();
+}
+
+static inline void outer_spin_unlock_irqrestore(void)
+{
+	outer_cache.spin_unlock_irqrestore();
+}
+
+static inline void outer_sync_no_lock(void)
+{
+	outer_cache.sync_no_lock();
+}
+
+static inline void outer_flush_line_no_lock(phys_addr_t addr)
+{
+	outer_cache.flush_line_no_lock(addr);
+}
+
+static inline void outer_inv_line_no_lock(phys_addr_t addr)
+{
+	outer_cache.inv_line_no_lock(addr);
+}
+#endif
 #else
 
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401e58d7b291fac12e7395683a9d61f..d8d754197837e6a885844a25a913dcd05f798f02 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,9 @@
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+#define PMD_NONSECURE		(_AT(pmdval_t, 1) << 3)
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 /*
  *   - section
  */
@@ -36,6 +39,9 @@
 #define PMD_SECT_S		(_AT(pmdval_t, 1) << 16)	/* v6 */
 #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 17)	/* v6 */
 #define PMD_SECT_SUPER		(_AT(pmdval_t, 1) << 18)	/* v6 */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+#define PMD_SECT_NS		(_AT(pmdval_t, 1) << 19)
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 #define PMD_SECT_AF		(_AT(pmdval_t, 0))
 
 #define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0))
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 23ebc0c82a3975ae5c455dd39598e93ab33922e7..48269a75d3e2d4a9b017022b937ce806b3f409d5 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -142,6 +142,27 @@ struct tag_acorn {
 struct tag_memclk {
 	__u32 fmemclk;
 };
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/* BCM63xx, for passing board param from boot loader */
+#define ATAG_BLPARM	0x41000601
+
+struct tag_blparm {
+	char	blparm[1];	/* this is the minimum size */
+};
+
+#define ATAG_RDPSIZE	0x41000602
+struct tag_rdpsize {
+	__u32 tm_size;
+	__u32 mc_size;
+};
+
+#define ATAG_DHDSIZE	0x41000603
+struct tag_dhdparm {
+	__u32 dhd_size[3];
+};
+
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
 struct tag {
 	struct tag_header hdr;
@@ -165,6 +186,15 @@ struct tag {
 		 * DC21285 specific
 		 */
 		struct tag_memclk	memclk;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+
+		/*
+		 * BCM63xx specific
+		 */
+		struct tag_blparm	blparm;
+		struct tag_rdpsize	rdpsize;
+		struct tag_dhdparm	dhdparm;
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 	} u;
 };
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c3c7b324a5027610133dc1f825d4b..13a2c3418c14053128917d8f75907394343edcdc 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -90,8 +90,11 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
 	: "cc");
-
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -107,7 +110,11 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	: "cc");
 
 	if (tmp == 0) {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+		smp_rmb();
+#else
 		smp_mb();
+#endif
 		return 1;
 	} else {
 		return 0;
@@ -116,7 +123,11 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 
 	__asm__ __volatile__(
 "	str	%1, [%0]\n"
@@ -150,7 +161,11 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	: "r" (&rw->lock), "r" (0x80000000)
 	: "cc");
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -166,7 +181,11 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	: "cc");
 
 	if (tmp == 0) {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+		smp_rmb();
+#else
 		smp_mb();
+#endif
 		return 1;
 	} else {
 		return 0;
@@ -175,7 +194,11 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 
 	__asm__ __volatile__(
 	"str	%1, [%0]\n"
@@ -216,14 +239,22 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	: "r" (&rw->lock)
 	: "cc");
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
@@ -251,7 +282,11 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	: "r" (&rw->lock)
 	: "cc");
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	smp_rmb();
+#else
 	smp_mb();
+#endif
 	return tmp2 == 0;
 }
 
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 85fe61e7320265e6932e8659d95c57c7712ad04d..9a5abca0775d22223e400641381a702cd0863382 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,21 @@
 #define TLB_V6_D_ASID	(1 << 17)
 #define TLB_V6_I_ASID	(1 << 18)
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define TLB_V6_BP	(1 << 19)
+#endif
+
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define TLB_V7_UIS_PAGE	(1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP	(1 << 23)
+#else
 #define TLB_V7_UIS_PAGE	(1 << 19)
 #define TLB_V7_UIS_FULL (1 << 20)
 #define TLB_V7_UIS_ASID (1 << 21)
+#endif
 
 #define TLB_BARRIER	(1 << 28)
 #define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
@@ -162,10 +173,18 @@
 # define v4wb_always_flags	(-1UL)
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
+			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
+			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
+			 TLB_V6_BP)
+#else
 #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
 			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
 			 TLB_V6_I_ASID | TLB_V6_D_ASID)
+#endif
 
 #ifdef CONFIG_CPU_TLB_V6
 # define v6wbi_possible_flags	v6wbi_tlb_flags
@@ -180,10 +199,19 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
+				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
+#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+				 TLB_V6_U_ASID | TLB_V6_BP)
+#else
 #define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+#endif
 
 #ifdef CONFIG_CPU_TLB_V7
 
@@ -333,6 +361,51 @@ extern struct cpu_tlb_fns cpu_tlb;
 #define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
 #define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+static inline void __local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
+	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
+	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(nsh);
+		isb();
+	}
+}
+
+static inline void __flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(ish);
+		isb();
+	}
+}
+#else
 static inline void local_flush_tlb_all(void)
 {
 	const int zero = 0;
@@ -352,7 +425,61 @@ static inline void local_flush_tlb_all(void)
 		isb();
 	}
 }
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int zero = 0;
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
+			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
+			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
+		}
+	}
+
+	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
+	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
+	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
 
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_mm(mm);
+	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_mm(mm);
+#ifdef CONFIG_ARM_ERRATA_720789
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
+#else
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
+#endif
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(ish);
+}
+#else
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	const int zero = 0;
@@ -384,7 +511,70 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_BARRIER))
 		dsb();
 }
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+
+static inline void
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
+		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
+		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
+		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+	}
+
+	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
+	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
+	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
 
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_page(vma, uaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_page(vma, uaddr);
+#ifdef CONFIG_ARM_ERRATA_720789
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
+#else
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
+#endif
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(ish);
+}
+#else
 static inline void
 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
@@ -418,7 +608,61 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_BARRIER))
 		dsb();
 }
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
+	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
+	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
+	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
+	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
+	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	kaddr &= PAGE_MASK;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_kernel_page(kaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(nsh);
+		isb();
+	}
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	kaddr &= PAGE_MASK;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_kernel_page(kaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(ish);
+		isb();
+	}
+}
+#else
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 {
 	const int zero = 0;
@@ -446,6 +690,42 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 		isb();
 	}
 }
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_V6_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	__local_flush_bp_all();
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
+static inline void __flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	__local_flush_bp_all();
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+}
+#endif
 
 /*
  *	flush_pmd_entry
@@ -468,7 +748,11 @@ static inline void flush_pmd_entry(void *pmd)
 	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 
 	if (tlb_flag(TLB_WB))
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		dsb(ishst);
+#else
 		dsb();
+#endif
 }
 
 static inline void clean_pmd_entry(void *pmd)
@@ -497,6 +781,9 @@ static inline void clean_pmd_entry(void *pmd)
 #define flush_tlb_kernel_page	local_flush_tlb_kernel_page
 #define flush_tlb_range		local_flush_tlb_range
 #define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#define flush_bp_all		local_flush_bp_all
+#endif
 #else
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -504,6 +791,28 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 extern void flush_tlb_kernel_page(unsigned long kaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+extern void flush_bp_all(void);
+#endif
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+		erratum_a15_798181_handler))
+		return erratum_a15_798181_handler();
+	return false;
+}
+#endif
 #endif
 
 /*
@@ -526,4 +835,31 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #endif /* CONFIG_MMU */
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#if defined(CONFIG_SMP) && !defined(CONFIG_MMU)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+
+static inline void local_flush_tlb_all(void)									{ }
+static inline void local_flush_tlb_mm(struct mm_struct *mm)							{ }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)			{ }
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)						{ }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ }
+static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)				{ }
+static inline void local_flush_bp_all(void)									{ }
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif	/* __ASSEMBLY__ */
+
+#endif
+#endif
+
 #endif
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555dd7c49176cd06d88354dd1b2faca67..5a8428e13d50fa28a465eae69a93f95f816f4877 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -58,9 +58,11 @@ void *return_address(unsigned int level)
 
 #else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
 
+#if !defined(CONFIG_BCM_KF_ARM_BCM963XX)
 #if defined(CONFIG_ARM_UNWIND)
 #warning "TODO: return_address should use unwind tables"
 #endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
 void *return_address(unsigned int level)
 {
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebfac782593f048c9cf81a5619f0b3d24900ca8c..d287d9ddcd24a58f97d7c952a50d957e20b9ba96 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -487,6 +487,10 @@ static void __init setup_processor(void)
 	elf_hwcap &= ~HWCAP_THUMB;
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	erratum_a15_798181_init();
+#endif
+
 	feat_v6_fixup();
 
 	cacheid_init();
@@ -742,6 +746,25 @@ static int __init parse_tag_cmdline(const struct tag *tag)
 
 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static int __init parse_tag_blparm(const struct tag *tag)
+{
+	/* Dummy handler: the ATAG_BLPARM tag is already consumed by
+	 * the bcm63xx machine setup code */
+	return 0;
+}
+
+__tagtable(ATAG_BLPARM, parse_tag_blparm);
+
+static int __init parse_tag_rdpsize(const struct tag *tag)
+{
+	/* Dummy handler: the ATAG_RDPSIZE tag is already consumed by
+	 * the bcm63xx machine setup code */
+	return 0;
+}
+
+__tagtable(ATAG_RDPSIZE, parse_tag_rdpsize);
+#endif /* (CONFIG_BCM_KF_ARM_BCM963XX) */
 /*
  * Scan the tag table for this tag, and call its parse function.
  * The tag table is built by the linker from all the __tagtable
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7babc3f98a1a4c973789afc632125dbd13196e06..627d20491fa9d36872e261d121a5167a2522d7e9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -157,7 +157,15 @@ int __cpu_disable(void)
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	/*
+	 * Caches are flushed to the Level of Unification Inner Shareable
+	 * to write-back dirty lines to unified caches shared by all CPUs.
+	 */
+	flush_cache_louis();
+#else
 	flush_cache_all();
+#endif
 	local_flush_tlb_all();
 
 	read_lock(&tasklist_lock);
@@ -205,9 +213,29 @@ void __ref cpu_die(void)
 	local_irq_disable();
 	mb();
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	/*
+	 * Flush the data out of the L1 cache for this CPU.  This must be
+	 * before the completion to ensure that data is safely written out
+	 * before platform_cpu_kill() gets called - which may disable
+	 * *this* CPU and power down its cache.
+	 */
+	flush_cache_louis();
+#endif
+
 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
 	complete(&cpu_died);
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	/*
+	 * Ensure that the cache lines associated with that completion are
+	 * written out.  This covers the case where _this_ CPU is doing the
+	 * powering down, to ensure that the completion is visible to the
+	 * CPU waiting for this one.
+	 */
+	flush_cache_louis();
+#endif
+
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
@@ -249,22 +277,47 @@ static void percpu_timer_setup(void);
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	unsigned int cpu;
+
+	/*
+	 * The identity mapping is uncached (strongly ordered), so
+	 * switch away from it before attempting any exclusive accesses.
+	 */
+	cpu_switch_mm(mm->pgd, mm);
+#if defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	local_flush_bp_all();
+#endif
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
+#else
 	unsigned int cpu = smp_processor_id();
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	cpu = smp_processor_id();
+#endif
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
+#if !defined(CONFIG_BCM_KF_ARM_BCM963XX)
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	cpu_init();
+#endif
 	printk("CPU%u: Booted secondary processor\n", cpu);
 
+#if !defined(CONFIG_BCM_KF_ARM_BCM963XX)
 	cpu_init();
+#endif
 	preempt_disable();
 	trace_hardirqs_off();
 
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23bf121f17479bba2b52460419481930..d685c781c07ef9c8fc98542bb26e8d2de7349af4 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,9 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#include <asm/mmu_context.h>
+#endif
 
 /**********************************************************************/
 
@@ -64,12 +67,98 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+static inline void ipi_flush_bp_all(void *ignored)
+{
+	local_flush_bp_all();
+}
+
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* specifically for the B15 used in BCM63148 */
+	if (midr == 0x420f00f3) {
+		if ((revidr & 0x210) == 0x210)
+			return;
+		if (revidr & 0x10)
+			erratum_a15_798181_handler = erratum_a15_798181_partial;
+		else
+			erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+		return;
+	}
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+	dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+	if (!erratum_a15_798181())
+		return;
+
+	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+	int this_cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	if (!erratum_a15_798181())
+		return;
+
+	this_cpu = get_cpu();
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
+	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+	put_cpu();
+}
+#endif
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		__flush_tlb_all();
+#else
 		local_flush_tlb_all();
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_a15_erratum();
+#endif
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -77,7 +166,14 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_ops_need_broadcast())
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		__flush_tlb_mm(mm);
+#else
 		local_flush_tlb_mm(mm);
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_mm_a15_erratum(mm);
+#endif
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -89,7 +185,14 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
 					&ta, 1);
 	} else
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		__flush_tlb_page(vma, uaddr);
+#else
 		local_flush_tlb_page(vma, uaddr);
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
+#endif
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -99,7 +202,14 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		ta.ta_start = kaddr;
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+		__flush_tlb_kernel_page(kaddr);
+#else
 		local_flush_tlb_kernel_page(kaddr);
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_a15_erratum();
+#endif
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -114,6 +224,9 @@ void flush_tlb_range(struct vm_area_struct *vma,
 					&ta, 1);
 	} else
 		local_flush_tlb_range(vma, start, end);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
+#endif
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -125,5 +238,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 	} else
 		local_flush_tlb_kernel_range(start, end);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+	broadcast_tlb_a15_erratum();
+#endif
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+void flush_bp_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_bp_all, NULL, 1);
+	else
+		__flush_bp_all();
+}
+#endif
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f1836583fec297ee484c554fdefffcd..1638ef4d14655dad762c29ebff44693ab55cc2c0 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -53,6 +53,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	ret = __cpu_suspend(arg, fn);
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+		local_flush_bp_all();
+#endif
 		local_flush_tlb_all();
 	}
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index a8ad1e34902e967376a52b5c9d07ab6af7edab4f..b2479c84c6da8e4b7b4bbdb9c3495f3282e00c37 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -388,6 +388,19 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 			instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
+		if (is_wide_instruction(instr)) {
+			unsigned int instr2;
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
+			instr <<= 16;
+			instr |= instr2;
+		}
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
+#else
 		get_user(instr, (u16 __user *)pc);
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
@@ -397,11 +410,15 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 		}
 	} else {
 		get_user(instr, (u32 __user *)pc);
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+die_sig:
+#endif
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32289e95a8c100cc01ef2bbb8d7101..328c3433bf2d78d39bdedc5397293059541ec49e 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -27,16 +27,23 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+#include <asm/assembler.h>
+#endif
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
+#endif
 1: TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_THUMB2_KERNEL
 2: TUSER(ldrb)	r2, [r0]
 3: TUSER(ldrb)	r3, [r0, #1]
@@ -44,16 +51,39 @@ ENTRY(__get_user_2)
 2: TUSER(ldrb)	r2, [r0], #1
 3: TUSER(ldrb)	r3, [r0]
 #endif
+#else /* BCM_ANDROID */
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
+#else
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
+#endif
+#endif /* BCM_ANDROID */
 #ifndef __ARMEB__
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	orr	r2, r2, r3, lsl #8
 #else
+	orr	r2, r2, rb, lsl #8
+#endif
+#else
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	orr	r2, r3, r2, lsl #8
+#else
+	orr	r2, rb, r2, lsl #8
+#endif
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
+#endif
 4: TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab83cd849c38db3a64c55fccf36a631a..45f1f92a57b7f655f96456d3976b846378253919 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,6 +14,7 @@
 
 	.text
 	.align	5
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	.word	0
 
 1:	subs	r2, r2, #4		@ 1 do we have enough
@@ -27,14 +28,24 @@
  * The pointer is now aligned and the length is adjusted.  Try doing the
  * memset again.
  */
+#endif
 
 ENTRY(memset)
 	ands	r3, r0, #3		@ 1 unaligned?
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	bne	1b			@ 1
+#else
+	mov	ip, r0			@ preserve r0 as return value
+	bne	6f			@ 1
+#endif
 /*
  * we know that the pointer in r0 is aligned to a word boundary.
  */
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	orr	r1, r1, r1, lsl #8
+#else
+1:	orr	r1, r1, r1, lsl #8
+#endif
 	orr	r1, r1, r1, lsl #16
 	mov	r3, r1
 	cmp	r2, #16
@@ -46,26 +57,52 @@ ENTRY(memset)
  * We need an extra register for this loop - save the return address and
  * use the LR
  */
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	str	lr, [sp, #-4]!
 	mov	ip, r1
+#else
+	stmfd	sp!, {r8, lr}
+	mov	r8, r1
+#endif
 	mov	lr, r1
 
 2:	subs	r2, r2, #64
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
 	stmgeia	r0!, {r1, r3, ip, lr}
 	stmgeia	r0!, {r1, r3, ip, lr}
 	stmgeia	r0!, {r1, r3, ip, lr}
+#else
+	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
+#endif
 	bgt	2b
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	ldmeqfd	sp!, {pc}		@ Now <64 bytes to go.
+#else
+	ldmeqfd	sp!, {r8, pc}		@ Now <64 bytes to go.
+#endif
 /*
  * No need to correct the count; we're only testing bits from now on
  */
 	tst	r2, #32
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmneia	r0!, {r1, r3, ip, lr}
 	stmneia	r0!, {r1, r3, ip, lr}
+#else
+	stmneia	ip!, {r1, r3, r8, lr}
+	stmneia	ip!, {r1, r3, r8, lr}
+#endif
 	tst	r2, #16
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmneia	r0!, {r1, r3, ip, lr}
 	ldr	lr, [sp], #4
+#else
+	stmneia	ip!, {r1, r3, r8, lr}
+	ldmfd	sp!, {r8, lr}
+#endif
 
 #else
 
@@ -74,18 +111,31 @@ ENTRY(memset)
  * whole cache lines at once.
  */
 
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmfd	sp!, {r4-r7, lr}
+#else
+	stmfd	sp!, {r4-r8, lr}
+#endif
 	mov	r4, r1
 	mov	r5, r1
 	mov	r6, r1
 	mov	r7, r1
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	mov	ip, r1
+#else
+	mov	r8, r1
+#endif
 	mov	lr, r1
 
 	cmp	r2, #96
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	tstgt	r0, #31
+#else
+	tstgt	ip, #31
+#endif
 	ble	3f
 
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	and	ip, r0, #31
 	rsb	ip, ip, #32
 	sub	r2, r2, ip
@@ -95,33 +145,90 @@ ENTRY(memset)
 	tst	ip, #(1 << 30)
 	mov	ip, r1
 	strne	r1, [r0], #4
+#else
+	and	r8, ip, #31
+	rsb	r8, r8, #32
+	sub	r2, r2, r8
+	movs	r8, r8, lsl #(32 - 4)
+	stmcsia	ip!, {r4, r5, r6, r7}
+	stmmiia	ip!, {r4, r5}
+	tst	r8, #(1 << 30)
+	mov	r8, r1
+	strne	r1, [ip], #4
+#endif
 
 3:	subs	r2, r2, #64
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmgeia	r0!, {r1, r3-r7, ip, lr}
 	stmgeia	r0!, {r1, r3-r7, ip, lr}
+#else
+	stmgeia	ip!, {r1, r3-r8, lr}
+	stmgeia	ip!, {r1, r3-r8, lr}
+#endif
 	bgt	3b
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	ldmeqfd	sp!, {r4-r7, pc}
+#else
+	ldmeqfd	sp!, {r4-r8, pc}
+#endif
 
 	tst	r2, #32
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmneia	r0!, {r1, r3-r7, ip, lr}
+#else
+	stmneia	ip!, {r1, r3-r8, lr}
+#endif
 	tst	r2, #16
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmneia	r0!, {r4-r7}
 	ldmfd	sp!, {r4-r7, lr}
+#else
+	stmneia	ip!, {r4-r7}
+	ldmfd	sp!, {r4-r8, lr}
+#endif
 
 #endif
 
 4:	tst	r2, #8
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	stmneia	r0!, {r1, r3}
+#else
+	stmneia	ip!, {r1, r3}
+#endif
 	tst	r2, #4
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	strne	r1, [r0], #4
+#else
+	strne	r1, [ip], #4
+#endif
 /*
  * When we get here, we've got less than 4 bytes to zero.  We
  * may have an unaligned pointer as well.
  */
 5:	tst	r2, #2
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	strneb	r1, [r0], #1
 	strneb	r1, [r0], #1
+#else
+	strneb	r1, [ip], #1
+	strneb	r1, [ip], #1
+#endif
 	tst	r2, #1
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	strneb	r1, [r0], #1
+#else
+	strneb	r1, [ip], #1
+#endif
 	mov	pc, lr
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+
+6:	subs	r2, r2, #4		@ 1 do we have enough
+	blt	5b			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r1, [ip], #1		@ 1
+	strleb	r1, [ip], #1		@ 1
+	strb	r1, [ip], #1		@ 1
+	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
+	b	1b
+#endif
 ENDPROC(memset)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589f3d98554d9aee47cf7b5c3c486fd..5db2faccc03d4ef1cf27c1ee38a7cf5a2d3a79a6 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -27,16 +27,25 @@
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+#include <asm/assembler.h>
+#endif
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
+#endif
 1: TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
+#endif
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +69,18 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
+#endif
 4: TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
+#endif
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)	r2, [r0]
 6: TUSER(str)	r3, [r0, #4]
diff --git a/arch/arm/mach-bcm963xx/Makefile b/arch/arm/mach-bcm963xx/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e06b78b65526a9706ceb8e664e2939bb226212c5
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/Makefile
@@ -0,0 +1,20 @@
+#
+# Board-level BSP
+# Preliminary
+#
+
+obj-y	+= irq.o
+obj-$(CONFIG_MACH_BCM963138) += board_963xx.o
+obj-$(CONFIG_MACH_BCM963148) += board_963xx.o
+
+SRCBASE         := $(TOPDIR)
+EXTRA_CFLAGS    += -I$(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD) -I$(SRCBASE)/include -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+#EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH) -DDBG
+EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH) 
+EXTRA_CFLAGS += -g
+EXTRA_CFLAGS += $(BRCM_WERROR_CFLAGS)
+
+ifneq ($(strip $(BUILD_SWMDK)),)
+EXTRA_CFLAGS += -DSUPPORT_SWMDK
+endif
+
diff --git a/arch/arm/mach-bcm963xx/Makefile.boot b/arch/arm/mach-bcm963xx/Makefile.boot
new file mode 100644
index 0000000000000000000000000000000000000000..2d22d3abd104f7efa68a7b64d2c2f33937ab1330
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/Makefile.boot
@@ -0,0 +1,8 @@
+#
+# SDRAM location for decompressor
+#
+zreladdr-y      := $(CONFIG_BOARD_ZRELADDR)
+#
+# Where boot monitor is expected to leave parameters
+#
+params_phys-y   := $(CONFIG_BOARD_PARAMS_PHYS)
diff --git a/arch/arm/mach-bcm963xx/board_963xx.c b/arch/arm/mach-bcm963xx/board_963xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..24a2ada130488f18a5ee1437864443012d312489
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/board_963xx.c
@@ -0,0 +1,696 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:GPL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+ * Generic board routine for Broadcom 963xx ARM boards
+ */
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <asm/clkdev.h>
+
+#include <mach/hardware.h>
+#include <mach/memory.h>
+#include <mach/smp.h>
+
+#include <plat/bsp.h>
+#if defined(CONFIG_BCM963138)
+#include <plat/ca9mpcore.h>
+#elif defined(CONFIG_BCM963148)
+#include <plat/b15core.h>
+#endif
+
+#include <bcm_map_part.h>
+#include <board.h>
+
+#define SO_MEMORY_SIZE_BYTES SECTION_SIZE
+
+#ifndef CONFIG_BRCM_IKOS
+#if defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)
+#define BCM963XX_RESERVE_MEM_ADSL
+#define BCM963XX_RESERVE_MEM_RDP
+#endif
+#endif
+
+#define BCM963XX_RESERVE_MEM_DHD
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP)
+#include <asm/mach/map.h>
+#include <linux/memblock.h>
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL)
+#include "softdsl/AdslCoreDefs.h"
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+unsigned long tm_size = 0, mc_size = 0;
+#endif
+
+#define DHD_RESERVE_MEM_NUM		3
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+unsigned long dhd_pool_size[DHD_RESERVE_MEM_NUM];
+#endif
+
+unsigned char g_blparms_buf[1024];
+unsigned long memsize = SZ_16M;
+bool is_rootfs_set = false;
+bool is_memory_reserved = false;
+
+#if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+static uint32_t so_memory_phys_addr=0;
+static uint32_t preSOmemScratch;
+static void *so_memory_virt_addr=&preSOmemScratch;
+#endif
+
+#define MB_ALIGNED(__val)	(((__val & ~SZ_1M) ? SZ_1M : 0) + (__val & SZ_1M))
+		
+
+/***************************************************************************
+ * C++ New and delete operator functions
+ ***************************************************************************/
+
+/* void *operator new(unsigned int sz) */
+void *_Znwj(unsigned int sz)
+{
+	return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* void *operator new[](unsigned int sz)*/
+void *_Znaj(unsigned int sz)
+{
+	return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* placement new operator */
+/* void *operator new (unsigned int size, void *ptr) */
+void *ZnwjPv(unsigned int size, void *ptr)
+{
+	return ptr;
+}
+
+/* void operator delete(void *m) */
+void _ZdlPv(void *m)
+{
+	kfree(m);
+}
+
+/* void operator delete[](void *m) */
+void _ZdaPv(void *m)
+{
+	kfree(m);
+}
+EXPORT_SYMBOL(_Znwj);
+EXPORT_SYMBOL(_Znaj);
+EXPORT_SYMBOL(ZnwjPv);
+EXPORT_SYMBOL(_ZdlPv);
+EXPORT_SYMBOL(_ZdaPv);
+
+unsigned long getMemorySize(void)
+{
+	return memsize;
+}
+
+/* Pointers to memory buffers allocated for the DSP module */
+void *dsp_core;
+void *dsp_init;
+EXPORT_SYMBOL(dsp_core);
+EXPORT_SYMBOL(dsp_init);
+
+/*
+*****************************************************************************
+** FUNCTION:   allocDspModBuffers
+**
+** PURPOSE:    Allocates buffers for the init and core sections of the DSP
+**             module. This module is special since it has to be allocated
+**             in the 0x800.. memory range which is not mapped by the TLB.
+**
+** PARAMETERS: None
+** RETURNS:    Nothing
+*****************************************************************************
+*/
+void __init allocDspModBuffers(void)
+{
+}
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL)
+/* Reserve memory for DSL */
+#define ADSL_SDRAM_RESERVE_SIZE		MB_ALIGNED(ADSL_SDRAM_IMAGE_SIZE)
+
+/***************************************************************************
+ * Function Name: kerSysGetDslPhyMemory
+ * Description  : Return the start address of the reserved DSL SDRAM. The
+ * 		  memory is reserved in the arch-dependent setup.c.
+ * Returns      : physical address of the reserved DSL SDRAM
+ ***************************************************************************/
+void *kerSysGetDslPhyMemory(void)
+{
+	if (is_memory_reserved == true) {
+		if (getMemorySize() <= SZ_256M)
+			return (void *)(getMemorySize() - ADSL_SDRAM_RESERVE_SIZE);
+		else
+			return (void *)(SZ_256M - ADSL_SDRAM_RESERVE_SIZE);
+	} else
+		return NULL;
+}
+
+EXPORT_SYMBOL(kerSysGetDslPhyMemory);
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+/* Reserve memory for RDPA */
+#define RDPA_RESERVE_MEM_NUM		2
+static struct {
+	char name[32];
+	uint32_t phys_addr;
+	uint32_t size;
+} rdpa_reserve_mem[RDPA_RESERVE_MEM_NUM];
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+/* Reserve memory for DHD */
+static struct {
+	char name[32];
+	uint32_t phys_addr;
+	uint32_t size;
+} dhd_reserve_mem[DHD_RESERVE_MEM_NUM];
+#endif
+
+
+
+
+#if defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD)
+int BcmMemReserveGetByName(char *name, void **addr, unsigned int *size)
+{
+	int i;
+
+	*addr = NULL;
+	*size = 0;
+
+	if (is_memory_reserved == false)
+		return -1;
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+	for (i = 0; i < RDPA_RESERVE_MEM_NUM; i++) {
+		if (strcmp(name, rdpa_reserve_mem[i].name) == 0) {
+			*addr = phys_to_virt(rdpa_reserve_mem[i].phys_addr);
+			*size = rdpa_reserve_mem[i].size;
+			return 0;
+		}
+	}
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+	for (i = 0; i < DHD_RESERVE_MEM_NUM; i++) {
+		if (strcmp(name, dhd_reserve_mem[i].name) == 0) {
+			*addr = phys_to_virt(dhd_reserve_mem[i].phys_addr);
+			*size = dhd_reserve_mem[i].size;
+			return 0;
+		}
+	}
+#endif
+	return -1;
+}
+EXPORT_SYMBOL(BcmMemReserveGetByName);
+#endif
+
+bool kerSysIsRootfsSet(void)
+{
+	return is_rootfs_set;
+}
+EXPORT_SYMBOL(kerSysIsRootfsSet);
+
+#ifdef CONFIG_BCM_B15_MEGA_BARRIER
+void BcmMegaBarrier(void) 
+{
+	__asm__("dsb");
+	writel_relaxed(so_memory_virt_addr, so_memory_virt_addr);
+	__asm__("dsb");
+}
+EXPORT_SYMBOL(BcmMegaBarrier);
+#endif /*CONFIG_BCM_B15_MEGA_BARRIER*/
+
+void __init board_map_io(void)
+{
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD) || defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	struct map_desc desc[RDPA_RESERVE_MEM_NUM+DHD_RESERVE_MEM_NUM+2];
+	int i = 0, j;
+#endif
+	/* Map SoC specific I/O */
+	soc_map_io();
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL)
+	/* create a noncacheable memory device mapping for DSL driver to
+	 * access the reserved memory */
+	desc[i].virtual = (unsigned long)phys_to_virt(
+			(unsigned long)kerSysGetDslPhyMemory());
+	desc[i].pfn = __phys_to_pfn((unsigned long)kerSysGetDslPhyMemory());
+	desc[i].length = ADSL_SDRAM_RESERVE_SIZE;
+	desc[i].type = MT_MEMORY_NONCACHED;
+	printk("creating a MT_MEMORY_NONCACHED device at physical address of "
+			"0x%08lx to virtual address at "
+			"0x%08lx with size of 0x%lx byte for DSL\n",
+			(unsigned long)kerSysGetDslPhyMemory(),
+			desc[i].virtual, desc[i].length);
+	i++;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+	for (j = 0; j < RDPA_RESERVE_MEM_NUM; j++) {
+		desc[i].virtual = (unsigned long)phys_to_virt(
+				rdpa_reserve_mem[j].phys_addr);
+		desc[i].pfn = __phys_to_pfn(rdpa_reserve_mem[j].phys_addr);
+		desc[i].length = rdpa_reserve_mem[j].size;
+		desc[i].type = MT_MEMORY_NONCACHED;
+		printk("creating a MT_MEMORY_NONCACHED device at physical "
+				"address of 0x%08lx to virtual address at "
+				"0x%08lx with size of 0x%lx byte for RDPA "
+				"%s\n",
+				(unsigned long)rdpa_reserve_mem[j].phys_addr,
+				desc[i].virtual, desc[i].length,
+				rdpa_reserve_mem[j].name);
+		i++;
+	}
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+	for (j = 0; j < DHD_RESERVE_MEM_NUM; j++) {
+		if(dhd_reserve_mem[j].size != 0) {
+			desc[i].virtual = (unsigned long)phys_to_virt(
+					dhd_reserve_mem[j].phys_addr);
+			desc[i].pfn = __phys_to_pfn(dhd_reserve_mem[j].phys_addr);
+			desc[i].length = dhd_reserve_mem[j].size;
+			desc[i].type = MT_MEMORY_NONCACHED;
+			printk("creating a MT_MEMORY_NONCACHED device at physical "
+					"address of 0x%08lx to virtual address at "
+					"0x%08lx with size of 0x%lx byte for DHD "
+					"%s\n",
+					(unsigned long)dhd_reserve_mem[j].phys_addr,
+					desc[i].virtual, desc[i].length,
+					dhd_reserve_mem[j].name);
+			i++;
+		}
+	}
+#endif
+
+
+
+#if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	so_memory_virt_addr = (void*)phys_to_virt(so_memory_phys_addr);
+	desc[i].virtual = (unsigned long)so_memory_virt_addr;
+	desc[i].pfn = __phys_to_pfn(so_memory_phys_addr);
+	desc[i].length = SO_MEMORY_SIZE_BYTES;
+	desc[i].type = MT_MEMORY_SO;
+	printk(	"creating a MT_MEMORY_SO device at physical "
+		"address of 0x%08lx to virtual address at "
+		"0x%08lx with size of 0x%lx bytes.\n",
+		(unsigned long)so_memory_phys_addr,
+		(unsigned long)so_memory_virt_addr, SO_MEMORY_SIZE_BYTES);
+	i++;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD)  || defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	iotable_init(desc, i);
+#endif
+
+	if (getMemorySize() <= SZ_32M)
+		printk("WARNING! System is with 0x%0lx memory, might not "
+				"boot successfully.\n"
+				"\tcheck ATAG or CMDLINE\n", getMemorySize());
+
+	soc_init_clock();
+}
+
+void __init board_init_early(void)
+{
+	soc_init_early();
+}
+
+
+void __init board_init_irq(void)
+{
+	soc_init_irq();
+	
+	/* serial_setup(sih); */
+}
+
+void __init board_init_timer(void)
+{
+	soc_init_timer();
+}
+
+#ifdef CONFIG_BCM963138
+extern int pmc_cpu_neon_power_up(unsigned cpu);
+#endif
+
+static void __init bcm_setup(void)
+{
+#if !defined(CONFIG_BCM_KF_IKOS) || !defined(CONFIG_BRCM_IKOS)
+	kerSysEarlyFlashInit();
+	kerSysFlashInit();
+
+#ifdef CONFIG_BCM963138
+	if( pmc_cpu_neon_power_up(0) == 0 )
+#endif
+	{
+		/* Enable NEON using CP15 registers */
+		asm volatile ( "mrc p15, 0, r0, c1, c0, 2" );
+		asm volatile ( "orr r0, r0, #0x00f00000" );
+		asm volatile ( "mcr p15, 0, r0, c1, c0, 2" );
+		asm volatile ( "isb" );
+		asm volatile ( "mov r0, #0x40000000" );
+		asm volatile ( ".word 0xeee80a10" ); //msr fpexc, r0
+	}
+
+#endif
+}
+
+void __init board_init_machine(void)
+{
+	/*
+	 * Add common platform devices that do not have board dependent HW
+	 * configurations
+	 */
+	soc_add_devices();
+
+	bcm_setup();
+
+	return;
+}
+
+static void __init set_memsize_from_cmdline(char *cmdline)
+{
+	char *cmd_ptr, *end_ptr;
+
+	cmd_ptr = strstr(cmdline, "mem=");
+	if (cmd_ptr != NULL) {
+		cmd_ptr += 4;
+		memsize = (unsigned long)memparse(cmd_ptr, &end_ptr);
+	}
+}
+
+static void __init check_if_rootfs_is_set(char *cmdline)
+{
+	char *cmd_ptr;
+
+	cmd_ptr = strstr(cmdline, "root=");
+	if (cmd_ptr != NULL)
+		is_rootfs_set = true;
+}
+
+/* in ARM, there are two ways of passing in memory size.
+ * one is by setting it in ATAG_MEM, and the other one is by setting the
+ * size in CMDLINE.  The first appearance of mem=nn[KMG] in CMDLINE is the
+ * value that has the highest priority. And if there is no memory size set
+ * in CMDLINE, then it will use the value in ATAG_MEM.  If there is no ATAG
+ * given from boot loader, then a default ATAG with memory size set to 16MB
+ * will take effect.
+ * Assuming CONFIG_CMDLINE_EXTEND is set. The logic doesn't work if
+ * CONFIG_CMDLINE_FROM_BOOTLOADER is set. */
+static void __init board_fixup(struct tag *t, char **cmdline, struct meminfo *mi)
+{
+	soc_fixup();
+
+	/* obtaining info passing down from boot loader */
+	for (; t->hdr.size; t = tag_next(t)) {
+		if ((t->hdr.tag == ATAG_CORE) && (t->u.core.rootdev != 0xff))
+			is_rootfs_set = true;
+
+		if (t->hdr.tag == ATAG_MEM)
+			memsize = t->u.mem.size;
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+		if (t->hdr.tag == ATAG_RDPSIZE) {
+			tm_size = t->u.rdpsize.tm_size * SZ_1M;
+			mc_size = t->u.rdpsize.mc_size * SZ_1M;
+		}
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+		dhd_pool_size[0] = 0;
+		dhd_pool_size[1] = 0;
+		dhd_pool_size[2] = 0;
+		if (t->hdr.tag == ATAG_DHDSIZE) {
+			if(t->u.dhdparm.dhd_size[0] != 0xff)
+				dhd_pool_size[0] = t->u.dhdparm.dhd_size[0] * SZ_1M;
+			if(t->u.dhdparm.dhd_size[1] != 0xff)
+				dhd_pool_size[1] = t->u.dhdparm.dhd_size[1] * SZ_1M;
+			if(t->u.dhdparm.dhd_size[2] != 0xff)
+				dhd_pool_size[2] = t->u.dhdparm.dhd_size[2] * SZ_1M;
+		}
+#endif
+
+
+
+		if (t->hdr.tag == ATAG_BLPARM)
+			memcpy(g_blparms_buf, t->u.blparm.blparm, 1024);
+
+		if (t->hdr.tag == ATAG_CMDLINE) {
+			set_memsize_from_cmdline(t->u.cmdline.cmdline);
+			check_if_rootfs_is_set(t->u.cmdline.cmdline);
+		}
+		if ((t->hdr.tag == ATAG_INITRD2) || (t->hdr.tag == ATAG_INITRD))
+			is_rootfs_set = true;
+	}
+
+	set_memsize_from_cmdline(*cmdline);
+	check_if_rootfs_is_set(*cmdline);
+}
+
+static void __init board_reserve(void)
+{
+	/* used for reserve mem blocks */
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD) || defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	unsigned long mem_end = getMemorySize();
+	unsigned long rsrv_mem_required = SZ_8M;
+
+	/* both reserved memory for RDP and DSL have to be within first
+	 * 256MB */
+	if (mem_end > SZ_256M)
+		mem_end = SZ_256M;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+	/* Make sure the input values are larger than minimum required */
+	if (tm_size < TM_DEF_DDR_SIZE)
+		tm_size = TM_DEF_DDR_SIZE;
+
+	if (mc_size < TM_MC_DEF_DDR_SIZE)
+		mc_size = TM_MC_DEF_DDR_SIZE;
+
+	/* both TM and MC reserved memory size has to be multiple of 2MB */
+	if (tm_size & SZ_1M)
+		tm_size += SZ_1M;
+	if (mc_size & SZ_1M)
+		mc_size += SZ_1M;
+
+	rsrv_mem_required += tm_size + mc_size;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+        /* Make sure the input values are larger than minimum required */
+        rsrv_mem_required += dhd_pool_size[0];
+        rsrv_mem_required += dhd_pool_size[1];
+        rsrv_mem_required += dhd_pool_size[2];
+#endif
+
+
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL)
+	rsrv_mem_required += ADSL_SDRAM_RESERVE_SIZE;
+#endif
+
+#if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	rsrv_mem_required += SO_MEMORY_SIZE_BYTES;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD) || defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	/* check if those configured memory sizes are over what
+	 * system has */
+
+	if (getMemorySize() < rsrv_mem_required) {
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+		rsrv_mem_required -= (dhd_pool_size[0] + dhd_pool_size[1] + dhd_pool_size[2]);
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+		/* If RDP is enabled, try to use the default
+		 * TM and MC reserved memory size and try again */
+		rsrv_mem_required -= tm_size + mc_size;
+		tm_size = TM_DEF_DDR_SIZE;
+		mc_size = TM_MC_DEF_DDR_SIZE;
+		rsrv_mem_required += tm_size + mc_size;
+#endif
+
+		if (getMemorySize() < rsrv_mem_required)
+			return;
+	}
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL)
+	/* reserve memory for DSL.  We use memblock_remove + IO_MAP the removed
+	 * memory block to MT_MEMORY_NONCACHED here because ADSL driver code
+	 * will need to access the memory.  Another option is to use
+	 * memblock_reserve where the kernel still sees the memory, but I could
+	 * not find a function to make the reserved memory noncacheable. */
+	mem_end -= ADSL_SDRAM_RESERVE_SIZE;
+	memblock_remove(mem_end, ADSL_SDRAM_RESERVE_SIZE);
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_RDP)
+	mem_end -= tm_size;
+	/* TM reserved memory has to be 2MB-aligned */
+	if (mem_end & SZ_1M)
+		mem_end -= SZ_1M;
+	memblock_remove(mem_end, tm_size);
+	strcpy(rdpa_reserve_mem[0].name, TM_BASE_ADDR_STR);
+	rdpa_reserve_mem[0].phys_addr = (uint32_t)mem_end;
+	rdpa_reserve_mem[0].size = tm_size;
+
+	mem_end -= mc_size;
+	/* MC reserved memory has to be 2MB-aligned */
+	if (unlikely(mem_end & SZ_1M))
+		mem_end -= SZ_1M;
+	memblock_remove(mem_end, mc_size);
+	strcpy(rdpa_reserve_mem[1].name, TM_MC_BASE_ADDR_STR);
+	rdpa_reserve_mem[1].phys_addr = (uint32_t)mem_end;
+	rdpa_reserve_mem[1].size = mc_size;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_DHD)
+	/* DHD_OFFLOAD */
+	if(dhd_pool_size[0] != 0) {
+		mem_end -= dhd_pool_size[0];
+		/* DHD reserved memory has to be 2MB-aligned */
+		if (unlikely(mem_end & SZ_1M))
+			mem_end -= SZ_1M;
+		memblock_remove(mem_end, dhd_pool_size[0]);
+		strcpy(dhd_reserve_mem[0].name, "dhd0");
+		dhd_reserve_mem[0].phys_addr = (uint32_t)mem_end;
+		dhd_reserve_mem[0].size = dhd_pool_size[0];
+	}
+
+	if(dhd_pool_size[1] != 0) {
+		mem_end -= dhd_pool_size[1];
+		/* DHD reserved memory has to be 2MB-aligned */
+		if (unlikely(mem_end & SZ_1M))
+			mem_end -= SZ_1M;
+		memblock_remove(mem_end, dhd_pool_size[1]);
+		strcpy(dhd_reserve_mem[1].name, "dhd1");
+		dhd_reserve_mem[1].phys_addr = (uint32_t)mem_end;
+		dhd_reserve_mem[1].size = dhd_pool_size[1];
+	}
+
+	if(dhd_pool_size[2] != 0) {
+		mem_end -= dhd_pool_size[2];
+		/* DHD reserved memory has to be 2MB-aligned */
+		if (unlikely(mem_end & SZ_1M))
+			mem_end -= SZ_1M;
+		memblock_remove(mem_end, dhd_pool_size[2]);
+		strcpy(dhd_reserve_mem[2].name, "dhd2");
+		dhd_reserve_mem[2].phys_addr = (uint32_t)mem_end;
+		dhd_reserve_mem[2].size = dhd_pool_size[2];
+	}
+
+
+#endif
+
+#if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	mem_end -= SO_MEMORY_SIZE_BYTES;
+	memblock_remove(mem_end, SO_MEMORY_SIZE_BYTES);
+	so_memory_phys_addr = (uint32_t)mem_end;
+#endif
+
+#if defined(BCM963XX_RESERVE_MEM_ADSL) || defined(BCM963XX_RESERVE_MEM_RDP) || defined(BCM963XX_RESERVE_MEM_DHD) || defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	is_memory_reserved = true;
+#endif
+}
+
+static struct sys_timer board_timer = {
+	.init = board_init_timer,
+};
+
+static void board_restart(char mode, const char *cmd)
+{
+#ifndef CONFIG_BRCM_IKOS
+	kerSysMipsSoftReset();
+#endif
+}
+
+#if defined(CONFIG_BCM963138)
+MACHINE_START(BCM963138, "BCM963138")
+	/* Maintainer: Broadcom */
+	.fixup		= board_fixup,
+	.reserve	= board_reserve,
+	.map_io		= board_map_io,	
+	.init_early	= board_init_early,
+	.init_irq	= board_init_irq,
+	.timer		= &board_timer,
+	.init_machine	= board_init_machine,
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.handle_irq	= gic_handle_irq,
+#endif
+#ifdef CONFIG_ZONE_DMA
+	/* If enable CONFIG_ZONE_DMA, it will reserve the given size of
+	 * memory from SDRAM and use it exclusively for DMA purpose.
+	 * This ensures the device driver can allocate enough memory. */
+	.dma_zone_size	= SZ_16M,	/* must be multiple of 2MB */
+#endif
+	.restart	= board_restart,
+MACHINE_END
+#endif
+
+#if defined(CONFIG_BCM963148)
+MACHINE_START(BCM963148, "BCM963148")
+	/* Maintainer: Broadcom */
+	.fixup		= board_fixup,
+	.reserve	= board_reserve,
+	.map_io		= board_map_io,	
+	.init_early	= board_init_early,
+	.init_irq	= board_init_irq,
+	.timer		= &board_timer,
+	.init_machine	= board_init_machine,
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.handle_irq	= gic_handle_irq,
+#endif
+#ifdef CONFIG_ZONE_DMA
+	/* If enable CONFIG_ZONE_DMA, it will reserve the given size of
+	 * memory from SDRAM and use it exclusively for DMA purpose.
+	 * This ensures the device driver can allocate enough memory. */
+	.dma_zone_size	= SZ_16M,	/* must be multiple of 2MB and within 16MB for DSL PHY */
+#endif
+	.restart	= board_restart,
+MACHINE_END
+#endif
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/barriers.h b/arch/arm/mach-bcm963xx/include/mach/barriers.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ff384a84c2be2a7a18015b1159ce56ba7fd46bc
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/barriers.h
@@ -0,0 +1,94 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef _MACH_BCM963XX_BARRIERS_H
+#define _MACH_BCM963XX_BARRIERS_H
+
+#include <asm/outercache.h>
+
+#ifdef CONFIG_BCM_B15_MEGA_BARRIER
+void BcmMegaBarrier(void); /*Implemented in board_963xx.c*/
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+ /*WARNING: don't use isb()/dsb()/dmb() macros as a Write-Memory Barrier. Correctness is not guaranteed.
+  *If you need a write memory barrier, use the wmb()/smp_wmb() macros below.*/
+ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+ #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+ #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#else
+ #define isb() __asm__ __volatile__ ("isb" : : : "memory")
+ #define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+ #define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#endif
+
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+ #if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+  #define mb()		BcmMegaBarrier()
+  #define wmb()		BcmMegaBarrier()
+ #elif defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+  #define mb()		do { dsb(); outer_sync(); } while (0)
+  #define wmb()		do { dsb(st); outer_sync(); } while (0)
+ #else
+  #define mb()		do { dsb(); outer_sync(); } while (0)
+  #define wmb()		mb()
+ #endif
+ #define rmb()		dsb()
+#else
+ #include <asm/memory.h>
+ #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+ #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+ #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
+ #ifdef CONFIG_BCM_B15_MEGA_BARRIER
+  #define smp_mb()	BcmMegaBarrier()
+ #else
+  #define smp_mb()	barrier()
+ #endif
+ #define smp_rmb()	barrier()
+ #define smp_wmb()	smp_mb()
+#else /*CONFIG_SMP:*/
+ #if defined(CONFIG_BCM_B15_MEGA_BARRIER)
+  #define smp_mb()	BcmMegaBarrier()
+  #define smp_rmb()	dmb(ish)
+  #define smp_wmb()	BcmMegaBarrier()
+ #elif defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+  #define smp_mb()	dmb(ish)
+  #define smp_rmb()	smp_mb()
+  #define smp_wmb()	dmb(ishst)
+ #else
+  #define smp_mb()	dmb()
+  #define smp_rmb()	dmb()
+  #define smp_wmb()	dmb()
+ #endif
+#endif /*CONFIG_SMP*/
+#endif /*_MACH_BCM963XX_BARRIERS_H*/
+#endif /*(CONFIG_BCM_KF_ARM_BCM963XX)*/
diff --git a/arch/arm/mach-bcm963xx/include/mach/clkdev.h b/arch/arm/mach-bcm963xx/include/mach/clkdev.h
new file mode 100644
index 0000000000000000000000000000000000000000..e19f4ec72a4415106224493e43787dd73d6a1acd
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/clkdev.h
@@ -0,0 +1,59 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H	__FILE__
+
+#include <plat/clock.h>
+#include <asm/atomic.h>
+
+/* FIXME! the following is based on bcm5301x, and it will need to
+ * be modified based on the clk implementation */
+struct clk {
+	const struct clk_ops	*ops;
+	const char		*name;
+	atomic_t		ena_cnt;
+	atomic_t		use_cnt;
+	unsigned long		rate;
+	unsigned		gated :1;
+	unsigned		fixed :1;
+	unsigned		chan  :6;
+	void __iomem		*regs_base;
+	struct clk		*parent;
+	/* TBD: could it have multiple parents to select from ? */
+	enum {
+		CLK_XTAL, CLK_GATE, CLK_PLL, CLK_DIV, CLK_PHA, CLK_UART, CLK_DMAC
+	} type;
+};
+
+int __clk_get(struct clk *clk);
+void __clk_put(struct clk *clk);
+
+#endif /* __ASM_MACH_CLKDEV_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/debug-macro.S b/arch/arm/mach-bcm963xx/include/mach/debug-macro.S
new file mode 100644
index 0000000000000000000000000000000000000000..f755651301610cbb8ef6c0e44b0dc6ea73258700
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/debug-macro.S
@@ -0,0 +1,85 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/* FIXME!! this one needs to be adjusted */
+
+/*
+ * Macros used for EARLY_PRINTK, in low-level UART debug console
+ */
+#include <mach/hardware.h>
+//#include <bcm_map_part.h>
+
+#define DEBUG_UART_VA		IO_ADDRESS(CONFIG_DEBUG_UART_ADDR)
+
+	.macro addruart, rp, rv, tmp
+	ldr	\rv, =DEBUG_UART_VA	@ virtual
+	ldr	\rp, =CONFIG_DEBUG_UART_ADDR	@ physical
+	.endm
+
+/* FIXME!! try to use a defined value for the address below */
+#if CONFIG_DEBUG_UART_ADDR==0x80019000
+//#if CONFIG_DEBUG_UART_ADDR==ARM_UART_PHYS_BASE
+	#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+		#include <asm/hardware/debug-pl01x.S>
+	#else
+		#ifdef EARLY_PRINTK
+			/* FIXME! Print a compiling warning message saying */
+			/* that there is no device for early_printk */
+		#endif /* EARLY_PRINTK */
+	#endif /* CONFIG_PLAT_BCM63XX_AMBA_PL011 */
+#else
+/* using PERIPH Uart for debug.S */
+#define BCM63XX_UART_CTRL	0x00
+#define BCM63XX_UART_BDWD	0x04
+#define BCM63XX_UART_MCCTL	0x08
+#define BCM63XX_UART_EIPT	0x0C
+#define BCM63XX_UART_INT	0x10
+#define BCM63XX_UART_DATA	0x14
+
+#define BCM63XX_UART_TXFIFOEMP	0x0020
+#define BCM63XX_UART_TXFIFOTHOLD	0x0008
+
+	.macro senduart, rd, rx
+	/* byte access doesn't work, has to write word */
+	strb	\rd, [\rx, #BCM63XX_UART_DATA]
+	.endm
+
+	.macro waituart, rd, rx
+1001:	ldr	\rd, [\rx, #BCM63XX_UART_INT]
+	tst	\rd, #BCM63XX_UART_TXFIFOEMP
+	beq	1001b
+	.endm
+
+	.macro busyuart, rd, rx
+1002:	ldr	\rd, [\rx, #BCM63XX_UART_INT]
+	tst	\rd, #BCM63XX_UART_TXFIFOEMP
+	beq	1002b
+	.endm
+#endif /* CONFIG_DEBUG_UART_ADDR==0x80019000 */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/hardware.h b/arch/arm/mach-bcm963xx/include/mach/hardware.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c0d360d8b746588c255a9f3617f74615f835770
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/hardware.h
@@ -0,0 +1,61 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ARCH_HARDWARE_H
+#define __ARCH_HARDWARE_H
+
+#include <asm/sizes.h>
+
+/* macro to get at IO space when running virtually */
+#ifdef CONFIG_MMU
+/* in 63138, we have two memory space area defined for registers.  One starts
+ * from 0x8000 0000 to roughly 0x80800000. And the other one is for PERIPH.
+ * It starts from 0xfffc 0000 to 0xffff 0100. In addition, SPI boot is from
+ * 0xffd0 0000 to 0xfff0 0000. Therefore, we can define the following macro
+ * of address translation that combines the three different areas into one
+ * contiguous virtual address area. They will be mapped to 0xfc00 0000,
+ * where
+ *    0xfc00 0000 - 0xfc80 0000 -> 0x8000 0000 - 0x8080 0000
+ *    0xfcd0 0000 - 0xfcf0 0000 -> 0xffd0 0000 - 0xfff0 0000
+ *    0xfcfc 0000 - 0xfcff 0100 -> 0xfffc 0000 - 0xffff 0100 */
+
+#define IO_ADDRESS(x)		(((x) & 0x00ffffff) + 0xfc000000)
+
+#else
+#define IO_ADDRESS(x)		(x)
+#endif
+
+#define __io_address(n)		IOMEM(IO_ADDRESS(n))
+
+#ifdef CONFIG_PLAT_BCM63XX_ACP
+#define ACP_ADDRESS(x)		((x) | 0xe0000000)
+#endif
+
+#endif /* __ARCH_HARDWARE_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/io.h b/arch/arm/mach-bcm963xx/include/mach/io.h
new file mode 100644
index 0000000000000000000000000000000000000000..55d856ec47beeee3939329e567885e051675e1c2
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/io.h
@@ -0,0 +1,43 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H	__FILE__
+
+/*
+ * This file is required by arch/arm/include/asm/io.h
+ * and is only used to satisfy obscure compile-time dependencies.
+ */
+
+#define __io(a)		__typesafe_io(a)
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/irqs.h b/arch/arm/mach-bcm963xx/include/mach/irqs.h
new file mode 100644
index 0000000000000000000000000000000000000000..4482223680cd171b1452c05918ce4daaef5fa808
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/irqs.h
@@ -0,0 +1,36 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * This is the size of a static IRQ handlers array
+ */
+#ifndef	NR_IRQS
+#define	NR_IRQS	256
+#endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/memory.h b/arch/arm/mach-bcm963xx/include/mach/memory.h
new file mode 100644
index 0000000000000000000000000000000000000000..337527281369cb4daf83b9bf90319b965f2431df
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/memory.h
@@ -0,0 +1,47 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * Platform memory layout definitions
+ *
+ * Note: due to dependencies in common architecture code
+ * some mappings go in other header files.
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/*
+ * Main memory base address and size
+ * are defined from the board-level configuration file
+ */
+
+#define PLAT_PHYS_OFFSET	UL(0x00000000)
+
+#endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/smp.h b/arch/arm/mach-bcm963xx/include/mach/smp.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1259d2a29c56cce634f8f06465631cdbf8210ab
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/smp.h
@@ -0,0 +1,62 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * derived from arch/arm/mach-realview/include/mach/smp.h
+ *
+ * This file is required from common architecture code,
+ * in arch/arm/include/asm/smp.h
+ */
+
+#ifndef __ASM_ARCH_SMP_H
+#define __ASM_ARCH_SMP_H __FILE__
+
+#include <asm/hardware/gic.h>
+
+extern void platform_secondary_startup(void);
+
+/* Used in hotplug.c */
+#define hard_smp_processor_id()			\
+	({						\
+		unsigned int cpunum;			\
+		__asm__("mrc p15, 0, %0, c0, c0, 5"	\
+			: "=r" (cpunum));		\
+		cpunum &= 0x0F;				\
+	})
+
+/*
+ * We use IRQ1 as the IPI
+ */
+static inline void smp_cross_call(const struct cpumask *mask)
+{
+	gic_raise_softirq(mask, 1);
+}
+
+#endif /* __ASM_ARCH_SMP_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/system.h b/arch/arm/mach-bcm963xx/include/mach/system.h
new file mode 100644
index 0000000000000000000000000000000000000000..b611ef0ef6675794894544e658209b8c06b4537b
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/system.h
@@ -0,0 +1,36 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+#include <linux/io.h>
+
+#endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/timex.h b/arch/arm/mach-bcm963xx/include/mach/timex.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5a6303b9105dc4cc57c6b479f196a933bd3a484
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/timex.h
@@ -0,0 +1,44 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * This file exists to satisfy compile-time dependency from:
+ * arch/arm/include/asm/timex.h
+ * It must be a value known at compile time for <linux/jiffies.h>
+ * but its value is never used in the resulting code.
+ * If "get_cycles()" inline function in <asm/timex.h> is rewritten,
+ * then in combination with this constant it could be used to measure
+ * microsecond elapsed time using the global timer clock-source.
+ * -LR
+ */
+
+/* FIXME!! when knowing the real clock tick rate */
+#define CLOCK_TICK_RATE		(1000000)
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/uncompress.h b/arch/arm/mach-bcm963xx/include/mach/uncompress.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ea0180ee6ce119f89236c5f48a1bd1a303b3b2a
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/uncompress.h
@@ -0,0 +1,85 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <mach/hardware.h>
+#include <bcm_map_part.h>
+
+#define AMBA_UART_DR(base)	(*(volatile unsigned char *)((base) + 0x00))
+#define AMBA_UART_LCRH(base)	(*(volatile unsigned char *)((base) + 0x2c))
+#define AMBA_UART_CR(base)	(*(volatile unsigned char *)((base) + 0x30))
+#define AMBA_UART_FR(base)	(*(volatile unsigned char *)((base) + 0x18))
+
+#if defined(ARM_UART_PHYS_BASE) && (CONFIG_DEBUG_UART_ADDR==ARM_UART_PHYS_BASE)
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+	unsigned long base = CONFIG_DEBUG_UART_ADDR;
+
+	while (AMBA_UART_FR(base) & (1 << 5))
+		barrier();
+
+	AMBA_UART_DR(base) = c;
+}
+
+static inline void flush(void)
+{
+	unsigned long base = CONFIG_DEBUG_UART_ADDR;
+
+	while (AMBA_UART_FR(base) & (1 << 3))
+		barrier();
+}
+#elif CONFIG_DEBUG_UART_ADDR==UART0_PHYS_BASE
+
+#define PERIPH_UART_DATA(base)	(*(volatile unsigned char *)((base) + 0x14))
+#define PERIPH_UART_STS(base)	(*(volatile unsigned long *)((base) + 0x10))
+
+static inline void putc(int c)
+{
+#if 0	/* FIXME! TXFIFOTHOLD might not work */
+	while (!(PERIPH_UART_STS(UART0_PHYS_BASE) & TXFIFOTHOLD))
+		barrier();
+#endif
+	PERIPH_UART_DATA(UART0_PHYS_BASE) = (unsigned char)c;
+}
+
+static inline void flush(void)
+{
+	while (!(PERIPH_UART_STS(UART0_PHYS_BASE) & TXFIFOEMT))
+		barrier();
+}
+#endif
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/include/mach/vmalloc.h b/arch/arm/mach-bcm963xx/include/mach/vmalloc.h
new file mode 100644
index 0000000000000000000000000000000000000000..5cb7c9ad2b54820167c2836352f8194d2ef7de28
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/include/mach/vmalloc.h
@@ -0,0 +1,38 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * This file is included from architecture common code in:
+ * arch/arm/include/asm/pgtable.h
+ */
+
+#ifndef VMALLOC_END
+#define VMALLOC_END		(PAGE_OFFSET + 0x30000000)
+#endif
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/irq.c b/arch/arm/mach-bcm963xx/irq.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f5d1ca8b61f69e8d6a4b86a159a974cb6e9e77c
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/irq.c
@@ -0,0 +1,337 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+* <:copyright-BRCM:2013:DUAL/GPL:standard
+* 
+*    Copyright (c) 2013 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+* :>
+*/
+
+/*
+ * Interrupt control functions for Broadcom 963xx ARM boards
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <linux/bcm_assert.h>
+#include <boardparms.h>
+#include <board.h>
+
+#ifdef CONFIG_SMP
+    #define AFFINITY_OF(d) (*(d)->affinity)
+#else
+    #define AFFINITY_OF(d) ((void)(d), CPU_MASK_CPU0)
+#endif
+
+#define INTR_NAME_MAX_LENGTH 16
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(unsigned int, ipi_pending);
+#endif
+
+static DEFINE_SPINLOCK(brcm_irqlock);
+
+void disable_brcm_irqsave(struct irq_data *data, unsigned long stateSaveArray[])
+{
+#if 0
+	int cpu;
+	unsigned long flags;
+	unsigned int irq = data->irq;
+
+	/* test for valid interrupt */
+	if ((irq >= INTERNAL_ISR_TABLE_OFFSET) && (irq <= INTERRUPT_ID_LAST)) {
+		/* Disable this processor's interrupts and acquire spinlock */
+		spin_lock_irqsave(&brcm_irqlock, flags);
+
+		/* loop thru each processor */
+		for_each_cpu_mask(cpu, AFFINITY_OF(data)) {
+			/* save original interrupt's enable state */
+			stateSaveArray[cpu] = brcm_irq_ctrl[cpu]->IrqMask & (((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+
+			/* clear each cpu's selected interrupt enable */
+			brcm_irq_ctrl[cpu]->IrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+
+		}
+
+		/* release spinlock and enable this processor's interrupt */
+		spin_unlock_irqrestore(&brcm_irqlock, flags);
+	}
+#endif
+}
+
+
+void restore_brcm_irqsave(struct irq_data *data, unsigned long stateSaveArray[])
+{
+#if 0
+	int cpu;
+	unsigned long flags;
+
+	/* Disable this processor's interrupts and acquire spinlock */
+	spin_lock_irqsave(&brcm_irqlock, flags);
+
+	/* loop thru each processor */
+	for_each_cpu_mask(cpu, AFFINITY_OF(data)) {
+		/* restore cpu's original interrupt enable (off or on). */
+		brcm_irq_ctrl[cpu]->IrqMask |= stateSaveArray[cpu];
+	}
+
+	/* release spinlock and enable this processor's interrupt */
+	spin_unlock_irqrestore(&brcm_irqlock, flags);
+#endif
+}
+
+void enable_brcm_irq_noop(unsigned int irq)
+{
+}
+
+void enable_brcm_irq_irq(unsigned int irq)
+{
+	enable_irq(irq);
+}
+
+void disable_brcm_irq_irq(unsigned int irq)
+{
+	disable_irq(irq);
+}
+
+/* This is a wrapper to the standard Linux request_irq, which automatically
+ * sets IRQ flags and interrupt names.
+ * One major difference between the ARM and MIPS IRQ HAL wrappers is that
+ * we DO NOT support REARM_NO mode in ARM.  This means the IRQ is always
+ * automatically re-enabled when the ISR is done. */
+unsigned int BcmHalMapInterrupt(FN_HANDLER pfunc, unsigned int param, unsigned int irq)
+{
+	char devname[INTR_NAME_MAX_LENGTH];
+
+	sprintf(devname, "brcm_%d", irq);
+	return BcmHalMapInterruptEx(pfunc, param, irq, devname, INTR_REARM_YES,
+		INTR_AFFINITY_DEFAULT);
+}
+
+/* This is a wrapper to the standard Linux request_irq for the VOIP driver;
+ * in the ARM version it does exactly the same as the normal wrapper. */
+unsigned int BcmHalMapInterruptVoip(FN_HANDLER pfunc, unsigned int param, unsigned int irq)
+{
+	char devname[INTR_NAME_MAX_LENGTH];
+
+	sprintf(devname, "brcm_%d", irq);
+	return BcmHalMapInterruptEx(pfunc, param, irq, devname, INTR_REARM_YES,
+		INTR_AFFINITY_DEFAULT);
+}
+
+/** Broadcom wrapper to linux request_irq.  This version does more stuff.
+ *
+ * @param pfunc (IN) interrupt handler function
+ * @param param (IN) context/cookie that is passed to interrupt handler
+ * @param irq   (IN) interrupt number
+ * @param interruptName (IN) descriptive name for the interrupt.  15 chars
+ *                           or less.  This function will make a copy of
+ *                           the name.
+ * @param rearmMode (IN) INTR_REARM_MODE_ENUM, see bcm_intr.h; not used in ARM
+ * @param affinMode (IN) INTR_AFFINITY_MODE_ENUM, see bcm_intr.h
+ *
+ * @return 0 on success.
+ */
+unsigned int BcmHalMapInterruptEx(FN_HANDLER pfunc, unsigned int param,
+		unsigned int irq, const char *interruptName,
+		INTR_REARM_MODE_ENUM rearmMode,
+		INTR_AFFINITY_MODE_ENUM affinMode)
+{
+	char *devname;
+	unsigned long irqflags = IRQF_DISABLED;
+	unsigned int retval;
+	struct cpumask mask;
+	unsigned long flags;
+
+#if defined(CONFIG_BCM_KF_ASSERT)
+	BCM_ASSERT_R(interruptName != NULL, -1);
+	BCM_ASSERT_R(strlen(interruptName) < INTR_NAME_MAX_LENGTH, -1);
+#endif
+
+	if ((devname = kmalloc(INTR_NAME_MAX_LENGTH, GFP_ATOMIC)) == NULL) {
+		printk(KERN_ERR "kmalloc(%d, GFP_ATOMIC) failed for intr name\n",
+				INTR_NAME_MAX_LENGTH);
+		return -1;
+	}
+	sprintf( devname, "%s", interruptName );
+
+	if ((irq >= INTERRUPT_ID_TIMER) && (irq <= INTERRUPT_ID_TIMER_MAX))
+		irqflags |= IRQF_TIMER;
+
+#if !defined(CONFIG_BRCM_IKOS)
+	/* For external interrupt, check if it is shared */
+	if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_MAX) {
+		if (IsExtIntrShared(kerSysGetExtIntInfo(irq)))
+			irqflags |= IRQF_SHARED;
+	}
+#endif
+
+	retval = request_irq(irq, (void*)pfunc, irqflags, devname,
+			(void *)param);
+	if (retval != 0) {
+		printk(KERN_WARNING "request_irq failed for irq=%d (%s) "
+				"retval=%d\n", irq, devname, retval);
+		kfree(devname);
+		return retval;
+	}
+
+#ifdef CONFIG_SMP
+	/* for Timer interrupt, we always use CPU#0 to handle it */
+	if ((irq >= INTERRUPT_ID_TIMER) && (irq <= INTERRUPT_ID_TIMER_MAX)) {
+		cpus_clear(mask);
+		cpu_set(0, mask);
+		irq_set_affinity(irq, &mask);
+	}
+#endif
+
+	/* now deal with interrupt affinity requests */
+	if (affinMode != INTR_AFFINITY_DEFAULT) {
+		cpus_clear(mask);
+
+		if (affinMode == INTR_AFFINITY_TP1_ONLY ||
+				affinMode == INTR_AFFINITY_TP1_IF_POSSIBLE) {
+			if (cpu_online(1)) {
+				cpu_set(1, mask);
+				irq_set_affinity(irq, &mask);
+			} else {
+				/* TP1 is not on-line but caller insisted on it */
+				if (affinMode == INTR_AFFINITY_TP1_ONLY) {
+					printk(KERN_WARNING "cannot assign "
+							"intr %d to TP1, not "
+							"online\n", irq);
+					retval = request_irq(irq, NULL, 0,
+							NULL, NULL);
+					kfree(devname);
+					retval = -1;
+				}
+			}
+		} else {
+			/* INTR_AFFINITY_BOTH_IF_POSSIBLE */
+			cpu_set(0, mask);
+			if (cpu_online(1)) {
+				cpu_set(1, mask);
+				irq_set_affinity(irq, &mask);
+			}
+		}
+	}
+
+#if !defined(CONFIG_BRCM_IKOS)
+	if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_MAX)
+	{
+		int levelOrEdge, detectSense;
+		int ein = irq - INTERRUPT_ID_EXTERNAL_0;
+
+		if( IsExtIntrTypeActHigh(kerSysGetExtIntInfo(irq)) )
+			detectSense = 1;
+		else
+			detectSense = 0;
+
+		if( IsExtIntrTypeSenseLevel(kerSysGetExtIntInfo(irq)) )
+			levelOrEdge = 1;
+		else
+			levelOrEdge = 0;
+    
+		spin_lock_irqsave(&brcm_irqlock, flags);
+		PERF->ExtIrqCtrl |= (levelOrEdge << (EI_LEVEL_SHFT + ein)) 
+			| (detectSense << (EI_SENSE_SHFT + ein)) 
+			| (1 << (EI_CLEAR_SHFT + ein));
+		PERF->ExtIrqStatus |= (1 << (EI_MASK_SHFT + ein));
+		spin_unlock_irqrestore(&brcm_irqlock, flags);
+	}
+#endif
+
+	return retval;
+}
+EXPORT_SYMBOL(BcmHalMapInterruptEx);
+
+
+//***************************************************************************
+//  void  BcmHalGenerateSoftInterrupt
+//
+//   Triggers a software interrupt.
+//
+//***************************************************************************
+void BcmHalGenerateSoftInterrupt(unsigned int irq)
+{
+#if 0
+	unsigned long flags;
+
+	local_irq_save(flags);
+	set_c0_cause(0x1 << (CAUSEB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
+	local_irq_restore(flags);
+#endif
+}
+
+void BcmHalExternalIrqClear(unsigned int irq)
+{
+	// clear interrupt (write 1 then 0)
+	unsigned long flags;
+	spin_lock_irqsave(&brcm_irqlock, flags);
+	PERF->ExtIrqCtrl |=  (1 << (EI_CLEAR_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+	PERF->ExtIrqCtrl &= ~(1 << (EI_CLEAR_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+	spin_unlock_irqrestore(&brcm_irqlock, flags); 
+}
+
+void BcmHalExternalIrqMask(unsigned int irq)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&brcm_irqlock, flags);
+	PERF->ExtIrqStatus &= ~(1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+	spin_unlock_irqrestore(&brcm_irqlock, flags); 
+}
+
+void BcmHalExternalIrqUnmask(unsigned int irq)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&brcm_irqlock, flags);
+	PERF->ExtIrqStatus |= (1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+	spin_unlock_irqrestore(&brcm_irqlock, flags); 
+}
+
+
+EXPORT_SYMBOL(enable_brcm_irq_noop);
+EXPORT_SYMBOL(enable_brcm_irq_irq);
+EXPORT_SYMBOL(disable_brcm_irq_irq);
+EXPORT_SYMBOL(BcmHalMapInterrupt);
+EXPORT_SYMBOL(BcmHalMapInterruptVoip);
+EXPORT_SYMBOL(BcmHalGenerateSoftInterrupt);
+EXPORT_SYMBOL(BcmHalExternalIrqClear);
+EXPORT_SYMBOL(BcmHalExternalIrqMask);
+EXPORT_SYMBOL(BcmHalExternalIrqUnmask);
+
+EXPORT_SYMBOL(disable_brcm_irqsave);
+EXPORT_SYMBOL(restore_brcm_irqsave);
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mach-bcm963xx/prom.c b/arch/arm/mach-bcm963xx/prom.c
new file mode 100644
index 0000000000000000000000000000000000000000..61c8c30011c1215146cc49c55013c3c8b582764d
--- /dev/null
+++ b/arch/arm/mach-bcm963xx/prom.c
@@ -0,0 +1,266 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+// FIXME!! the following ifdef will be redesigned for ARM, at this point,
+// it is commented out for compilation purpose.  ARM has different way of
+// setting up boot param rather than using PROM library.
+/*
+ * prom.c: PROM library initialization code.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/blkdev.h>
+#include <asm/cpu.h>
+#if 0
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+#include <asm/time.h>
+#endif
+
+#include <bcm_map_part.h>
+#include <bcm_cpu.h>
+#include <board.h>
+#include <boardparms.h>
+
+// FIXME!! I am just putting this piece of code here.
+// It will be needed; however, as for now, we don't use it.
+extern int  do_syslog(int, char *, int);
+
+unsigned char g_blparms_buf[1024];
+
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+unsigned int main_tp_num;
+#endif
+
+#if 0
+static void __init create_cmdline(char *cmdline);
+#endif
+UINT32 __init calculateCpuSpeed(void);
+void __init retrieve_boot_loader_parameters(void);
+
+/* --------------------------------------------------------------------------
+    Name: prom_init
+ -------------------------------------------------------------------------- */
+
+void __init prom_init(void)
+{
+#if 0
+    int argc = fw_arg0;
+    u32 *argv = (u32 *)CKSEG0ADDR(fw_arg1);
+    int i;
+
+    kerSysEarlyFlashInit();
+
+    // too early in bootup sequence to acquire spinlock, not needed anyways
+    // only the kernel is running at this point
+    kerSysNvRamGetBoardIdLocked(promBoardIdStr);
+    printk( "%s prom init\n", promBoardIdStr );
+
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+    main_tp_num = ((read_c0_diag3() & CP0_CMT_TPID) == CP0_CMT_TPID) ? 1 : 0;
+    printk("Linux TP ID = %u \n", (unsigned int)main_tp_num);
+#endif
+
+    PERF->IrqControl[0].IrqMask=0;
+
+    arcs_cmdline[0] = '\0';
+
+    create_cmdline(arcs_cmdline);
+
+    strcat(arcs_cmdline, " ");
+
+    for (i = 1; i < argc; i++) {
+        strcat(arcs_cmdline, (char *)CKSEG0ADDR(argv[i]));
+        if (i < (argc - 1))
+            strcat(arcs_cmdline, " ");
+    }
+
+
+    /* Count register increments every other clock */
+    mips_hpt_frequency = calculateCpuSpeed() / 2;
+
+    retrieve_boot_loader_parameters();
+#endif
+}
+
+
+/* --------------------------------------------------------------------------
+    Name: prom_free_prom_memory
+Abstract: 
+ -------------------------------------------------------------------------- */
+void __init prom_free_prom_memory(void)
+{
+
+}
+
+#if defined(CONFIG_ROOT_NFS) && defined(SUPPORT_SWMDK)
+  /* We can't use gendefconfig to automatically fix this, so instead we will
+     raise an error here */
+  #error "Kernel cannot be configured for both SWITCHMDK and NFS."
+#endif
+
+#define HEXDIGIT(d) ((d >= '0' && d <= '9') ? (d - '0') : ((d | 0x20) - 'W'))
+#define HEXBYTE(b)  (HEXDIGIT((b)[0]) << 4) + HEXDIGIT((b)[1])
+
+#ifndef CONFIG_ROOT_NFS_DIR
+#define CONFIG_ROOT_NFS_DIR	"h:/"
+#endif
+
+#ifdef CONFIG_BLK_DEV_RAM_SIZE
+#define RAMDISK_SIZE		CONFIG_BLK_DEV_RAM_SIZE
+#else
+#define RAMDISK_SIZE		0x800000
+#endif
+
+/*
+ * This function reads in a line that looks something like this from NvRam:
+ *
+ * CFE bootline=bcmEnet(0,0)host:vmlinux e=192.169.0.100:ffffff00 h=192.169.0.1
+ *
+ * and returns, in the cmdline parameter, a command line based on the boot_type that CFE sets up.
+ *
+ * for boot from flash, it will use the definition in CONFIG_ROOT_FLASHFS
+ *
+ * for boot from NFS, it will look like below:
+ * CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/opt/targets/96345R/fs
+ * ip=192.168.0.100:192.168.0.1::255.255.255.0::eth0:off rw"
+ *
+ * for boot from tftp, it will look like below:
+ * CONFIG_CMDLINE="root=/dev/ram rw rd_start=0x81000000 rd_size=0x1800000"
+ */
+#if 0
+static void __init create_cmdline(char *cmdline)
+{
+	char boot_type = '\0', mask[16] = "";
+	char bootline[NVRAM_BOOTLINE_LEN] = "";
+	char *localip = NULL, *hostip = NULL, *p = bootline, *rdaddr = NULL;
+
+	/*
+	 * too early in bootup sequence to acquire spinlock, not needed anyways
+	 * only the kernel is running at this point
+	 */
+	kerSysNvRamGetBootlineLocked(bootline);
+
+	while (*p) {
+		if (p[0] == 'e' && p[1] == '=') {
+			/* Found local ip address */
+			p += 2;
+			localip = p;
+			while (*p && *p != ' ' && *p != ':')
+				p++;
+			if (*p == ':') {
+				/* Found network mask (eg FFFFFF00 */
+				*p++ = '\0';
+				sprintf(mask, "%u.%u.%u.%u", HEXBYTE(p),
+					HEXBYTE(p + 2),
+				HEXBYTE(p + 4), HEXBYTE(p + 6));
+				p += 4;
+			} else if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'h' && p[1] == '=') {
+			/* Found host ip address */
+			p += 2;
+			hostip = p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'r' && p[1] == '=') {
+			/* Found boot type */
+			p += 2;
+			boot_type = *p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'a' && p[1] == '=') {
+			p += 2;
+			rdaddr = p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else 
+			p++;
+	}
+
+	if (boot_type == 'h' && localip && hostip) {
+		/* Boot from NFS with proper IP addresses */
+		sprintf(cmdline, "root=/dev/nfs nfsroot=%s:" CONFIG_ROOT_NFS_DIR
+				" ip=%s:%s::%s::eth0:off rw",
+				hostip, localip, hostip, mask);
+	} else if (boot_type == 'c') {
+		/* boot from tftp */
+		sprintf(cmdline, "root=/dev/ram0 ro rd_start=%s rd_size=0x%x",
+				rdaddr, RAMDISK_SIZE << 10);
+	} else {
+		/* go with the default, boot from flash */
+#ifdef CONFIG_ROOT_FLASHFS
+		strcpy(cmdline, CONFIG_ROOT_FLASHFS);
+#endif
+	}
+}
+#endif
+
+/* Retrieve a buffer of parameters passed by the boot loader.  Functions in
+ * board.c can return requested parameter values to a calling Linux function.
+ */
+void __init retrieve_boot_loader_parameters(void)
+{
+#if 0
+    extern unsigned char _text;
+    unsigned long blparms_magic = *(unsigned long *) (&_text - 8);
+    unsigned long blparms_buf = *(unsigned long *) (&_text - 4);
+    unsigned char *src = (unsigned char *) blparms_buf;
+    unsigned char *dst = g_blparms_buf;
+
+    if( blparms_magic != BLPARMS_MAGIC )
+    {
+        /* Subtract four more bytes for NAND flash images. */
+        blparms_magic = *(unsigned long *) (&_text - 12);
+        blparms_buf = *(unsigned long *) (&_text - 8);
+        src = (unsigned char *) blparms_buf;
+    }
+
+    if( blparms_magic == BLPARMS_MAGIC )
+    {
+        do
+        {
+            *dst++ = *src++;
+        } while( (src[0] != '\0' || src[1] != '\0') &&
+          (unsigned long) (dst - g_blparms_buf) < sizeof(g_blparms_buf) - 2);
+    }
+
+    dst[0] = dst[1] = '\0';
+#endif
+}
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c8a7d8467bf0b40fff40eb01a6533a83e10a18f..dfcfe73da73cf2dff26f7ddfa0cc8f34b6ad69a8 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -881,11 +881,21 @@ config CACHE_XSC3L2
 	help
 	  This option enables the L2 cache on XScale3.
 
+if !BCM_KF_ARM_BCM963XX
 config ARM_L1_CACHE_SHIFT_6
 	bool
 	default y if CPU_V7
 	help
 	  Setting ARM L1 cache line size to 64 Bytes.
+endif
+
+if BCM_KF_ARM_BCM963XX
+config ARM_L1_CACHE_SHIFT_6
+	bool
+	default y if CPU_V7 && !ARCH_BCM63XX
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+endif
 
 config ARM_L1_CACHE_SHIFT
 	int
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index bca7e61928c7dbbde1e56737d8514cb221fd7857..5e90349145df60cd35bdffd81bcd3291c40baccc 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,6 +7,11 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
 				   mmap.o pgd.o mmu.o vmregion.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_DEBUG_RODATA)	+= rodata.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9107231aacc5ab6c7eaa9fc83e9d05d626084afb..dd91f3c4bfd17ccd49895699bd0a6007989bcfa5 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -756,6 +756,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	int isize = 4;
 	int thumb2_32b = 0;
 
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+	offset.un = 0;
+#endif /* CONFIG_BCM_KF_KERN_WARNING */
 	if (interrupts_enabled(regs))
 		local_irq_enable();
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 2a8e380501e81a2c0bcaf08c8d018f0c9f20050c..af868554c0b64b38585a0d8ecfe322970890aac0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -306,6 +306,37 @@ static void l2x0_unlock(u32 cache_id)
 	}
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static unsigned long gb_flags;
+
+static void l2x0_spin_lock_irqsave(void)
+{
+	raw_spin_lock_irqsave(&l2x0_lock, gb_flags);
+}
+
+static void l2x0_spin_unlock_irqrestore(void)
+{
+	raw_spin_unlock_irqrestore(&l2x0_lock, gb_flags);
+}
+
+static void l2x0_cache_sync_no_lock(void)
+{
+	cache_sync();
+}
+
+static void l2x0_flush_line_no_lock(unsigned long addr)
+{
+	debug_writel(0x03);
+	l2x0_flush_line(addr);
+	debug_writel(0x00);
+}
+
+static void l2x0_inv_line_no_lock(unsigned long addr)
+{
+	l2x0_inv_line(addr);
+}
+#endif
+
 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 {
 	u32 aux;
@@ -383,6 +414,13 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	outer_cache.spin_lock_irqsave = l2x0_spin_lock_irqsave;
+	outer_cache.spin_unlock_irqrestore = l2x0_spin_unlock_irqrestore;
+	outer_cache.sync_no_lock = l2x0_cache_sync_no_lock;
+	outer_cache.flush_line_no_lock = l2x0_flush_line_no_lock;
+	outer_cache.inv_line_no_lock = l2x0_inv_line_no_lock;
+#endif
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a33a4d4aa9e0f347e4b45d3959cc9a68fb..70fd07b95833217a72e907c2e59175f93314efe1 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -272,6 +272,13 @@ v6_dma_clean_range:
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	sub	r2, r1, r0
+	cmp	r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	bhi	v6_dma_flush_dcache_all
+#endif
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
 	ldrb	r2, [r0]		@ read for ownership
 	strb	r2, [r0]		@ write for ownership
@@ -294,6 +301,20 @@ ENTRY(v6_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+	mov	r0, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
+#else
+	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mov	pc, lr
+#endif
+
+#endif
 /*
  *	dma_map_area(start, size, dir)
  *	- start	- kernel virtual start address
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3da386d6c3620a07bc25176163c21a07e15..7d989d440ae56cef424fe89ccbb33c26c9b93cc6 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,10 +13,61 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#include <asm/errno.h>
+#endif
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+ * The secondary kernel init calls v7_flush_dcache_all before it enables
+ * the L1; however, the L1 comes out of reset in an undefined state, so
+ * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
+ * of cache lines with uninitialized data and uninitialized tags to get
+ * written out to memory, which does really unpleasant things to the main
+ * processor.  We fix this by performing an invalidate, rather than a
+ * clean + invalidate, before jumping into the kernel.
+ *
+ * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
+ * to be called for both secondary cores startup and primary core resume
+ * procedures.
+ */
+ENTRY(v7_invalidate_l1)
+       mov     r0, #0
+       mcr     p15, 2, r0, c0, c0, 0
+       mrc     p15, 1, r0, c0, c0, 0
+
+       ldr     r1, =0x7fff
+       and     r2, r1, r0, lsr #13
+
+       ldr     r1, =0x3ff
+
+       and     r3, r1, r0, lsr #3      @ NumWays - 1
+       add     r2, r2, #1              @ NumSets
+
+       and     r0, r0, #0x7
+       add     r0, r0, #4      @ SetShift
+
+       clz     r1, r3          @ WayShift
+       add     r4, r3, #1      @ NumWays
+1:     sub     r2, r2, #1      @ NumSets--
+       mov     r3, r4          @ Temp = NumWays
+2:     subs    r3, r3, #1      @ Temp--
+       mov     r5, r3, lsl r1
+       mov     r6, r2, lsl r0
+       orr     r5, r5, r6      @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+       mcr     p15, 0, r5, c7, c6, 2
+       bgt     2b
+       cmp     r2, #0
+       bgt     1b
+       dsb
+       isb
+       mov     pc, lr
+ENDPROC(v7_invalidate_l1)
+#endif
+
 /*
  *	v7_flush_icache_all()
  *
@@ -32,6 +83,36 @@ ENTRY(v7_flush_icache_all)
 	mov	pc, lr
 ENDPROC(v7_flush_icache_all)
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+ /*
+ *     v7_flush_dcache_louis()
+ *
+ *     Flush the D-cache up to the Level of Unification Inner Shareable
+ *
+ *     Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+
+ENTRY(v7_flush_dcache_louis)
+	dmb					@ ensure ordering with previous memory accesses
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
+	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
+	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
+	ALT_UP(moveq	pc, lr)			@ LoUU is zero, so nothing to do
+	ldreq	r1, =0x410fc090                 @ ID of ARM Cortex A9 r0p?
+	biceq	r2, r2, #0x0000000f             @ clear minor revision number
+	teqeq	r2, r1                          @ test for errata affected core and if so...
+	orreqs	r3, #(1 << 21)			@   fix LoUIS value (and set flags state to 'ne')
+#endif
+	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
+	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
+	moveq	pc, lr				@ return if level == 0
+	mov	r10, #0				@ r10 (starting level) = 0
+	b	flush_levels			@ start flushing cache levels
+ENDPROC(v7_flush_dcache_louis)
+#endif
+
 /*
  *	v7_flush_dcache_all()
  *
@@ -48,7 +129,11 @@ ENTRY(v7_flush_dcache_all)
 	mov	r3, r3, lsr #23			@ left align loc bit field
 	beq	finished			@ if loc is 0, then no need to clean
 	mov	r10, #0				@ start clean at cache level 0
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+flush_levels:
+#else
 loop1:
+#endif
 	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
 	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
 	and	r1, r1, #7			@ mask of the bits for current cache only
@@ -70,9 +155,17 @@ loop1:
 	clz	r5, r4				@ find bit position of way size increment
 	ldr	r7, =0x7fff
 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+loop1:
+#else
 loop2:
+#endif
 	mov	r9, r4				@ create working copy of max way size
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+loop2:
+#else
 loop3:
+#endif
  ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
  THUMB(	lsl	r6, r9, r5		)
  THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
@@ -81,13 +174,23 @@ loop3:
  THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	bge	loop2
+	subs	r7, r7, #1			@ decrement the index
+	bge	loop1
+#else
 	bge	loop3
 	subs	r7, r7, #1			@ decrement the index
 	bge	loop2
+#endif
 skip:
 	add	r10, r10, #2			@ increment cache number
 	cmp	r3, r10
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	bgt	flush_levels
+#else
 	bgt	loop1
+#endif
 finished:
 	mov	r10, #0				@ swith back to cache level 0
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
@@ -119,6 +222,26 @@ ENTRY(v7_flush_kern_cache_all)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+ /*
+ *     v7_flush_kern_cache_louis(void)
+ *
+ *     Flush the data cache up to Level of Unification Inner Shareable.
+ *     Invalidate the I-cache to the point of unification.
+ */
+ENTRY(v7_flush_kern_cache_louis)
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bl	v7_flush_dcache_louis
+	mov	r0, #0
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
+	mov	pc, lr
+ENDPROC(v7_flush_kern_cache_louis)
+#endif
+
 /*
  *	v7_flush_cache_all()
  *
@@ -189,7 +312,11 @@ ENTRY(v7_coherent_user_range)
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	1b
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	dsb	ishst
+#else
 	dsb
+#endif
 	icache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
@@ -202,10 +329,26 @@ ENTRY(v7_coherent_user_range)
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	dsb	ishst
+#else
 	dsb
+#endif
 	isb
 	mov	pc, lr
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, fail with -EFAULT.
+ */
+9001:
+#ifdef CONFIG_ARM_ERRATA_775420
+	dsb
+#endif
+	mov	r0, #-EFAULT
+	mov	pc, lr
+#else
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
  * isn't mapped, just try the next page.
@@ -215,6 +358,7 @@ ENTRY(v7_coherent_user_range)
 	mov	r12, r12, lsl #12
 	add	r12, r12, #4096
 	b	3b
+#endif
  UNWIND(.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
@@ -242,7 +386,11 @@ ENTRY(v7_flush_kern_dcache_area)
 	add	r0, r0, r2
 	cmp	r0, r1
 	blo	1b
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	issue_mega_barrier
+#else
 	dsb
+#endif
 	mov	pc, lr
 ENDPROC(v7_flush_kern_dcache_area)
 
@@ -297,7 +445,11 @@ v7_dma_clean_range:
 	add	r0, r0, r2
 	cmp	r0, r1
 	blo	1b
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	issue_mega_barrier
+#else
 	dsb
+#endif
 	mov	pc, lr
 ENDPROC(v7_dma_clean_range)
 
@@ -319,7 +471,11 @@ ENTRY(v7_dma_flush_range)
 	add	r0, r0, r2
 	cmp	r0, r1
 	blo	1b
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+	issue_mega_barrier
+#else
 	dsb
+#endif
 	mov	pc, lr
 ENDPROC(v7_dma_flush_range)
 
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d6064aa89276301940ff3980a15602d3..1b109a33faf836f6ee75e90306dab57f92b0de45 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,14 +14,101 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#include <asm/smp_plat.h>
+#include <asm/thread_notify.h>
+#endif
 #include <asm/tlbflush.h>
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#include <asm/proc-fns.h>
+
+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed by
+ * non 64-bit operations.
+ */
+#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
+
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+#else
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
+#endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#ifdef CONFIG_ARM_LPAE
+static void cpu_set_reserved_ttbr0(void)
+{
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	cpu_set_ttbr(0, __pa(swapper_pg_dir));
+	isb();
+}
+#else
+static void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
+#endif
+#else
 #ifdef CONFIG_ARM_LPAE
 #define cpu_set_asid(asid) {						\
 	unsigned long ttbl, ttbh;					\
@@ -36,7 +123,164 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #define cpu_set_asid(asid) \
 	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
 #endif
+#endif
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_ERRATA_798181)
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
+			       void *t)
+{
+	u32 contextidr;
+	pid_t pid;
+	struct thread_info *thread = t;
+
+	if (cmd != THREAD_NOTIFY_SWITCH)
+		return NOTIFY_DONE;
+
+	pid = task_pid_nr(thread->task) << ASID_BITS;
+	asm volatile(
+	"	mrc	p15, 0, %0, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
+	: "=r" (contextidr), "+r" (pid)
+	: "I" (~ASID_MASK));
+	isb();
+
+	return NOTIFY_OK;
+}
 
+static struct notifier_block contextidr_notifier_block = {
+	.notifier_call = contextidr_notifier,
+};
+
+static int __init contextidr_notifier_init(void)
+{
+	return thread_register_notifier(&contextidr_notifier_block);
+}
+arch_initcall(contextidr_notifier_init);
+#endif
+
+static void flush_context(unsigned int cpu)
+{
+	int i;
+	u64 asid;
+
+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	for_each_possible_cpu(i) {
+		if (i == cpu) {
+			asid = 0;
+		} else {
+			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+			/*
+			 * If this CPU has already been through a
+			 * rollover, but hasn't run another task in
+			 * the meantime, we must preserve its reserved
+			 * ASID, as this is the only trace we have of
+			 * the process it is still running.
+			 */
+			if (asid == 0)
+				asid = per_cpu(reserved_asids, i);
+			__set_bit(asid & ~ASID_MASK, asid_map);
+		}
+		per_cpu(reserved_asids, i) = asid;
+	}
+
+	/* Queue a TLB invalidate and flush the I-cache if necessary. */
+	cpumask_setall(&tlb_flush_pending);
+
+	if (icache_is_vivt_asid_tagged())
+		__flush_icache_all();
+}
+
+static int is_reserved_asid(u64 asid)
+{
+	int cpu;
+	for_each_possible_cpu(cpu)
+		if (per_cpu(reserved_asids, cpu) == asid)
+			return 1;
+	return 0;
+}
+
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+{
+	u64 asid = atomic64_read(&mm->context.id);
+	u64 generation = atomic64_read(&asid_generation);
+
+	if (asid != 0 && is_reserved_asid(asid)) {
+		/*
+		 * Our current ASID was active during a rollover, we can
+		 * continue to use it and this was just a false alarm.
+		 */
+		asid = generation | (asid & ~ASID_MASK);
+	} else {
+		/*
+		 * Allocate a free ASID. If we can't find one, take a
+		 * note of the currently active ASIDs and mark the TLBs
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
+		 */
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		if (asid == NUM_USER_ASIDS) {
+			generation = atomic64_add_return(ASID_FIRST_VERSION,
+							 &asid_generation);
+			flush_context(cpu);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+		}
+		__set_bit(asid, asid_map);
+		asid |= generation;
+		cpumask_clear(mm_cpumask(mm));
+	}
+
+	return asid;
+}
+
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+{
+	unsigned long flags;
+	unsigned int cpu = smp_processor_id();
+	u64 asid;
+
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+		goto switch_mm_fastpath;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	/* Check that our ASID belongs to the current generation. */
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
+
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+		local_flush_bp_all();
+		local_flush_tlb_all();
+		/* both 3.10.15 and 3.11.4 have the following line, but
+		 * 3.12.13 doesn't.  I will comment it out for now */
+		//erratum_a15_798181();
+	}
+
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+switch_mm_fastpath:
+	cpu_switch_mm(mm->pgd, mm);
+}
+#else
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
@@ -170,3 +414,4 @@ void __new_context(struct mm_struct *mm)
 	set_mm_context(mm, asid);
 	raw_spin_unlock(&cpu_asid_lock);
 }
+#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4aaaaba3384d000181e4dfb1c9ac476e60..c4670a8e9b49bd043fdf24bf71164d2d072b89aa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -79,7 +79,11 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	if (!mask)
 		return NULL;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if ((mask < 0xffffffffULL) && !(gfp & GFP_ACP))
+#else
 	if (mask < 0xffffffffULL)
+#endif
 		gfp |= GFP_DMA;
 
 	page = alloc_pages(gfp, order);
@@ -326,6 +330,13 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
 #endif	/* CONFIG_MMU */
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static void dmac_flush_area(const void * addr, size_t len, int dir)
+{
+	dmac_flush_range(addr, addr + len);
+}
+
+#endif
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot, const void *caller)
@@ -580,6 +591,28 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+void ___dma_page_cpu_to_dev_flush(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_flush_range(paddr, paddr + size);
+	}
+#endif
+
+	dma_cache_maint_page(page, off, size, dir, &dmac_flush_area);
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev_flush);
+
+#endif
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813bbffb560b15b44974ff3543f0b5457e026..25869bbc8b3dbe81987eea81788800e36a26eeea 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -225,6 +225,29 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	hole[ZONE_DMA] = 0;
 }
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+static void __init arm_adjust_acp_zone(unsigned long *size, unsigned long *hole)
+{
+	unsigned long total_size = size[ZONE_NORMAL];
+#ifndef CONFIG_ZONE_DMA
+	total_size += size[0];
+#endif
+
+	if (total_size <= (CONFIG_BCM_ACP_MEM_SIZE << (20 - PAGE_SHIFT)))
+		return;
+
+#ifdef CONFIG_ZONE_DMA
+	size[ZONE_NORMAL] -= CONFIG_BCM_ACP_MEM_SIZE << (20 - PAGE_SHIFT);
+#else
+	size[ZONE_NORMAL] = size[0] - (CONFIG_BCM_ACP_MEM_SIZE << (20 - PAGE_SHIFT));
+#endif
+	size[ZONE_ACP] = CONFIG_BCM_ACP_MEM_SIZE << (20 - PAGE_SHIFT);
+#ifndef CONFIG_ZONE_DMA
+	hole[ZONE_NORMAL] = hole[0];
+#endif
+	hole[ZONE_ACP] = 0;
+}
+#endif
 
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	unsigned long max_high)
@@ -281,6 +304,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 		arm_dma_limit = 0xffffffff;
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	arm_adjust_acp_zone(zone_size, zhole_size);
+#endif
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 75f9f9d67097fecddd21c0d0090c727e209f08ee..b4bdf62e2ffa9d109d0fcf44c4c114aa1c4348bd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -233,6 +233,15 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+	[MT_DEVICE_NONSECURED] = {
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				  L_PTE_SHARED,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S | PMD_SECT_NS,
+		.domain		= DOMAIN_IO,
+	},
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 #ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
@@ -288,6 +297,14 @@ static struct mem_type mem_types[] = {
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+	[MT_MEMORY_NONSECURED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_NS,
+		.domain    = DOMAIN_KERNEL,
+	},
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -462,6 +479,10 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+			mem_types[MT_MEMORY_NONSECURED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONSECURED].prot_pte |= L_PTE_SHARED;
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 		}
 	}
 
@@ -514,6 +535,10 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_PLAT_BCM63XX_ACP)
+	mem_types[MT_MEMORY_NONSECURED].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_NONSECURED].prot_pte |= kern_pgprot;
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX && CONFIG_PLAT_BCM63XX_ACP */
 
 	switch (cp->pmd) {
 	case PMD_SECT_WT:
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3ad86d3e1a2b9d9abd83a1bdd3ab42eba3a..c512b1dcd569d8aaa50c141ac0858daf431bac10 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -167,6 +167,12 @@
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#ifndef CONFIG_CPU_USE_DOMAINS
+	tstne	r1, #L_PTE_NONE
+	movne	r3, #0
+#endif
+#endif
 
 	str	r3, [r0]
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
@@ -299,6 +305,9 @@ ENTRY(\name\()_processor_functions)
 ENTRY(\name\()_cache_fns)
 	.long	\name\()_flush_icache_all
 	.long	\name\()_flush_kern_cache_all
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	.long   \name\()_flush_kern_cache_louis
+#endif
 	.long	\name\()_flush_user_cache_all
 	.long	\name\()_flush_user_cache_range
 	.long	\name\()_coherent_kern_range
@@ -323,3 +332,11 @@ ENTRY(\name\()_tlb_fns)
 	.endif
 	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
 .endm
+
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_B15_MEGA_BARRIER)
+.macro issue_mega_barrier
+	stmfd	sp!,{r3,lr}
+	blx	BcmMegaBarrier
+	ldmfd	sp!,{r3,lr}
+.endm
+#endif
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888c8c878e1e40984e97848151b530ca..5148535c5f08f870fd183913ae24beeaf63fbdd6 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,15 +49,21 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
 1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
 	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
+#endif
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+#if defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
+#endif
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c2e2b66f72b5cd08648085c28ffc5d8c2ee8359a..8024e22128a3f691e4dadce3124abf0112d74e4d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -57,7 +57,11 @@ ENTRY(cpu_v7_reset)
  THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
 	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
 	isb
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	bx	r0
+#else
 	mov	pc, r0
+#endif
 ENDPROC(cpu_v7_reset)
 	.popsection
 
@@ -75,6 +79,17 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	ALT_SMP(W(nop))			@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, r2
+	subs	r1, r1, r2
+	bhi	2b
+	dsb	ishst
+#else
 #ifndef TLB_CAN_READ_FROM_L1_CACHE
 	dcache_line_size r2, r3
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -82,6 +97,7 @@ ENTRY(cpu_v7_dcache_clean_area)
 	subs	r1, r1, r2
 	bhi	1b
 	dsb
+#endif
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
@@ -98,9 +114,17 @@ ENTRY(cpu_v7_do_suspend)
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	stmia	r0!, {r4 - r5}
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#ifdef CONFIG_MMU
+	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
+	mrc	p15, 0, r7, c2, c0, 1	@ TTB 1
+	mrc	p15, 0, r11, c2, c0, 2	@ TTB control register
+#endif
+#else
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 1	@ TTB 1
 	mrc	p15, 0, r11, c2, c0, 2	@ TTB control register
+#endif
 	mrc	p15, 0, r8, c1, c0, 0	@ Control register
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
@@ -110,13 +134,36 @@ ENDPROC(cpu_v7_do_suspend)
 
 ENTRY(cpu_v7_do_resume)
 	mov	ip, #0
+#if !defined(CONFIG_BCM_KF_ARM_BCM963XX)
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate TLBs
+#endif
 	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
 	mcr	p15, 0, ip, c13, c0, 1	@ set reserved context ID
 	ldmia	r0!, {r4 - r5}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mcr	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	ldmia	r0, {r6 - r11}
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+#ifdef CONFIG_MMU
+	mcr	p15, 0, ip, c8, c7, 0	@ invalidate TLBs
+	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
+#ifndef CONFIG_ARM_LPAE
+	ALT_SMP(orr	r1, r1, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r1, r1, #TTB_FLAGS_UP)
+#endif
+	mcr	p15, 0, r1, c2, c0, 0	@ TTB 0
+	mcr	p15, 0, r7, c2, c0, 1	@ TTB 1
+	mcr	p15, 0, r11, c2, c0, 2	@ TTB control register
+	ldr	r4, =PRRR		@ PRRR
+	ldr	r5, =NMRR		@ NMRR
+	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR
+	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR
+#endif	/* CONFIG_MMU */
+	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register
+	teq	r4, r9			@ Is it already set?
+	mcrne	p15, 0, r9, c1, c0, 1	@ No, so write it
+	mcr	p15, 0, r10, c1, c0, 2	@ Co-processor access control
+#else
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
 #ifndef CONFIG_ARM_LPAE
 	ALT_SMP(orr	r1, r1, #TTB_FLAGS_SMP)
@@ -133,6 +180,7 @@ ENTRY(cpu_v7_do_resume)
 	ldr	r5, =NMRR		@ NMRR
 	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR
 	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR
+#endif
 	isb
 	dsb
 	mov	r0, r8			@ control register
@@ -172,7 +220,11 @@ __v7_ca15mp_setup:
 __v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	bl      v7_flush_dcache_louis
+#else
 	bl	v7_flush_dcache_all
+#endif
 	ldmia	r12, {r0-r5, r7, r9, r11, lr}
 
 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
@@ -246,7 +298,9 @@ __v7_setup:
 
 3:	mov	r10, #0
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
+#if !defined(CONFIG_BCM_KF_ARM_BCM963XX)
 	dsb
+#endif
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	v7_ttb_setup r10, r4, r8, r5		@ TTBCR, TTBRx setup
@@ -255,6 +309,9 @@ __v7_setup:
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	dsb
+#endif
 #ifndef CONFIG_ARM_THUMBEE
 	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
 	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
@@ -364,6 +421,18 @@ __v7_ca15mp_proc_info:
 	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	/*
+	 * Broadcom Corporation Brahma-B15 processor.
+	 */
+	.type	__v7_b15mp_proc_info, #object
+__v7_b15mp_proc_info:
+	.long	0x420f00f0
+	.long	0xff0ffff0
+	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+	.size	__v7_b15mp_proc_info, . - __v7_b15mp_proc_info
+#endif
+
 	/*
 	 * Match any ARMv7 processor core.
 	 */
diff --git a/arch/arm/plat-bcm63xx/Kconfig b/arch/arm/plat-bcm63xx/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..ed704c66edf8323ff18dff7a3b7c3d7e5a3947a6
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/Kconfig
@@ -0,0 +1,162 @@
+if (BCM_KF_ARM_BCM963XX)
+# Broadcom platforms selection
+
+config PLAT_CA9_MPCORE
+	bool "ARM Cortex A9 MPCORE architecture support"
+	select CPU_V7
+	select ARM_GIC
+	select HAVE_CLK
+	select CLKDEV_LOOKUP
+	select HAVE_MACH_CLKDEV
+	select GENERIC_CLOCKEVENTS_BUILD
+	select GENERIC_CLOCKEVENTS
+	select NEED_MACH_MEMORY_H
+	select ARM_ERRATA_764369 if SMP && PLAT_BCM63138
+#	select CONSTRUCTORS
+	help
+	  Support for ARM A9 MPCORE subsystem
+
+config PLAT_CA9_SMP
+	bool "Enable SMP on ARM Cortex A9 MPCORE"
+	select HAVE_SMP
+	select SMP
+	select SMP_ON_UP
+	select HOTPLUG_CPU
+	select HAVE_ARM_SCU
+
+config PLAT_CA9_MPCORE_TIMER
+	bool "Enable ARM Cortex A9 MPcore Timer for Clock event"
+	select HAVE_ARM_TWD if PLAT_CA9_SMP
+	select LOCAL_TIMERS if PLAT_CA9_SMP
+	depends on PLAT_CA9_MPCORE
+
+config PLAT_B15_CORE
+	bool "Broadcom ARM based on Cortex A15 architecture support"
+	select CPU_V7
+	select ARM_GIC
+	select HAVE_CLK
+	select CLKDEV_LOOKUP
+	select HAVE_MACH_CLKDEV
+	select GENERIC_CLOCKEVENTS_BUILD
+	select GENERIC_CLOCKEVENTS
+	select NEED_MACH_MEMORY_H
+	help
+	  Support for Broadcom's ARMv7 processor based on ARM Cortex A15
+
+config PLAT_B15_SMP
+	bool "Enable SMP on Broadcom ARMv7 core"
+	select HAVE_SMP
+	select SMP
+	select SMP_ON_UP
+	select HOTPLUG_CPU
+	select HAVE_ARM_TWD
+	select HAVE_ARM_SCU
+	select LOCAL_TIMERS
+
+config PLAT_B15_MPCORE_TIMER
+	bool "Enable Broadcom B15 MPcore Timer for Clock event"
+	select HAVE_ARM_TWD if PLAT_B15_SMP
+	select LOCAL_TIMERS if PLAT_B15_SMP
+	depends on PLAT_B15_CORE
+
+config CACHE_L310
+	bool "PL310 Level-2 Cache Controller"
+	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
+	depends on PLAT_CA9_MPCORE
+
+config PLAT_BCM63XX_AMBA_PL011
+	bool "Enable AMBA PL011 Serial console"
+	select ARM_AMBA
+	select SERIAL_AMBA_PL011
+	select SERIAL_AMBA_PL011_CONSOLE
+	depends on PLAT_BCM63138
+
+config PLAT_BCM63XX_AMBA_PL081
+	bool "Enable AMBA PL081 DMAC"
+	select DMADEVICES
+	select ARM_AMBA
+	select AMBA_PL08X
+	depends on PLAT_BCM63138
+
+config PLAT_BCM63XX_EMMC
+	bool "Enable Broadcom EMMC support"
+	select MMC
+	select MMC_BLOCK
+	select MMC_BLOCK_MINORS
+	select MMC_SDHCI
+	select MMC_SDHCI_PLTFM
+	select MMC_SDHCI_IO_ACCESSORS
+	select MMC_SDHCI_BCM63xx
+	depends on PLAT_BCM63138
+
+config PLAT_BCM63XX_UART
+	bool "Enable Broadcom Serial console"
+
+config PLAT_BCM63XX_EXT_TIMER
+	bool "Enable Broadcom External Timer for Clockevent"
+	depends on BCM_EXT_TIMER
+
+config PLAT_BCM63XX_ACP
+	bool "Enable ARM ACP"
+	default n
+	select BCM_ZONE_ACP
+	depends on PLAT_BCM63138 && PLAT_CA9_SMP
+
+config ARM_BCM63XX_CPUFREQ
+	bool "Broadcom BCM63xx ARM SoCs CPUFreq"
+	select ARCH_HAS_CPUFREQ
+	select CPU_FREQ
+	select CPU_FREQ_TABLE
+	select CPU_FREQ_STAT
+	select CPU_FREQ_GOV_USERSPACE
+	default y
+	help
+	  This adds the CPUFreq driver for Broadcom BCM63xx ARM SoCs.
+
+	  If in doubt, say N.
+
+config PLAT_BCM63138
+	bool "Broadcom BCM63138 SoC support"
+# Based on ARM Cortex-A9 r4p1 and L310 r3p3
+#	select PLAT_CA9_SMP		# will be chosen by BRCM_SMP_EN
+	select ARM_ERRATA_754322
+	select CACHE_L2X0
+	select CACHE_PL310
+	select EARLY_PRINTK
+	select PLAT_BCM63XX_EXT_TIMER
+#	select PLAT_CA9_MPCORE_TIMER
+#	select PLAT_BCM63XX_AMBA_PL011
+	select PLAT_BCM63XX_UART
+#	select MIGHT_HAVE_PCI
+#	select HAVE_PWM			# kona has it, Do we?
+	select ZONE_DMA
+	select ARCH_SUPPORTS_MSI
+	select ARCH_HAS_BARRIERS
+	depends on PLAT_CA9_MPCORE
+
+config PLAT_BCM63148
+	bool "Broadcom BCM63148 SoC support"
+# Based on ARM Cortex-A15 r3p2
+	select ARM_L1_CACHE_SHIFT_6	# B15 L1 cache line size is 64 bytes
+#	select PLAT_B15_SMP		# will be chosen by BRCM_SMP_EN
+	select EARLY_PRINTK
+	select PLAT_BCM63XX_EXT_TIMER
+	select PLAT_BCM63XX_UART
+	select ARM_ERRATA_798181 if SMP
+	select ZONE_DMA
+	select ARCH_SUPPORTS_MSI
+	select BCM_B15_MEGA_BARRIER
+	select ARCH_HAS_BARRIERS
+	depends on PLAT_B15_CORE
+
+config BCM_PCIE_PLATFORM
+	tristate "PCI Express repower module"
+
+#Apply the mega-barrier prior to DMA operations to work around issue noted
+#in HW7445-1301
+config BCM_B15_MEGA_BARRIER
+	depends on PLAT_B15_CORE
+	bool
+
+endif # BCM_KF_ARM_BCM963XX
diff --git a/arch/arm/plat-bcm63xx/Makefile b/arch/arm/plat-bcm63xx/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..35a1fbbd5511c0028b7914588205ddacb25c1bea
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/Makefile
@@ -0,0 +1,29 @@
+# SHARED := ../../../../../../../src/shared
+
+AFLAGS_plat-head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+obj-y += clock.o
+# it seems ARMv7 share the same headsmp.S file, if so, we should rename the
+# current files
+obj-$(CONFIG_SMP) += platsmp.o plat-ca9mp-headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+
+obj-$(CONFIG_PLAT_CA9_MPCORE) += ca9mp_core.o
+obj-$(CONFIG_PLAT_CA9_MPCORE_TIMER) += ca9mp_timer.o
+obj-$(CONFIG_PLAT_B15_CORE) += b15_core.o
+obj-$(CONFIG_CACHE_L310) += cache-l310.o
+obj-$(CONFIG_PLAT_BCM63138) += bcm63138.o
+obj-$(CONFIG_PLAT_BCM63148) += bcm63148.o
+obj-$(CONFIG_PLAT_BCM63XX_EXT_TIMER) += bcm63xx_timer.o
+obj-$(CONFIG_ARM_BCM63XX_CPUFREQ) += bcm63xx_cpufreq.o
+obj-$(CONFIG_PLAT_BCM63XX_ACP) += bcm63xx_acp.o
+
+obj-$(CONFIG_BCM_PCI) += pci-bcm63xx.o pcie-bcm63xx.o
+obj-$(CONFIG_BCM_PCIE_PLATFORM) += bcm63xx_pcie.o
+obj-$(CONFIG_USB_XHCI_PLATFORM) += bcm63xx_usb.o 
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += bcm63xx_sata.o 
+obj-$(CONFIG_BUZZZ) += buzzz.o
+obj-$(CONFIG_PCI_MSI) += pcie-msi-bcm63xx.o
+obj-$(CONFIG_BCM_M2M_DMA) += bcm63xx_m2mdma.o
+
+ccflags-y       += -I$(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD) -I$(SRCBASE)/include -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/pmc
diff --git a/arch/arm/plat-bcm63xx/README b/arch/arm/plat-bcm63xx/README
new file mode 100644
index 0000000000000000000000000000000000000000..0b2964fae21f3ef04451561e000066df5d715757
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/README
@@ -0,0 +1,11 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+This directory provides support to selected ARM-based SoC from Broadcom.
+
+The code in this directory is pertinent to the contents of each SoC.
+One can choose to enable the SoC in SMP or uni-core mode with the flag.
+
+This directory also provides the binding information 
+(i.e. base address and IRQ#) for all the peripherals contained in the SoC.
+
+All board-level support is in "mach-bcm*".
+#endif
diff --git a/arch/arm/plat-bcm63xx/b15_core.c b/arch/arm/plat-bcm63xx/b15_core.c
new file mode 100644
index 0000000000000000000000000000000000000000..0966db0fb9c5814e55221386f849411f0359abf7
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/b15_core.c
@@ -0,0 +1,131 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+* Broadcom ARM based on Cortex A15 Platform base
+*/
+
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+#include <linux/ioport.h>
+#include <linux/cpumask.h>
+#include <linux/irq.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/gic.h>
+#include <mach/hardware.h>
+#include <plat/b15core.h>
+#include <bcm_map_part.h>
+
+void __iomem * scu_base_addr(void)
+{
+	return __io_address(B15_PHYS_BASE + B15_SCU_OFF);
+}
+
+void __init b15_fixup(void)
+{
+	/* in case if any fixup that has to be done in really early
+	 * stage of kernel booting */
+}
+
+/* map_io should be called the first, so we have the register base
+ * address for the core. */
+void __init b15_map_io(void)
+{
+	struct map_desc desc;
+
+#if 0
+	/* 
+	 * Cortex A9 Architecture Manual specifies this as a way to get
+	 * MPCORE PERIPHBASE address at run-time
+	 */
+	asm("mrc p15,4,%0,c15,c0,0 @ Read Configuration Base Address Register" 
+			: "=&r" (base_addr) : : "cc");
+
+	printk(KERN_INFO "CA9 MPCORE found at %p\n", (void *)base_addr); 
+#endif
+
+	/* Fix-map the entire PERIPHBASE 2*4K register block */
+	desc.virtual = IO_ADDRESS(B15_PHYS_BASE);
+	desc.pfn = __phys_to_pfn(B15_PHYS_BASE);
+	desc.length = SZ_64K;	// FIXME! 64K actually covers the whole 0x10000 area; use a smaller value once RDB is out
+	desc.type = MT_DEVICE;
+	iotable_init(&desc, 1);
+}
+
+void __init b15_init_gic(void)
+{
+	printk(KERN_INFO "Broadcom B15 CORE GIC init\n");
+	printk(KERN_INFO "DIST at %p, CPU_IF at %p\n",
+			(void *)IO_ADDRESS(B15_PHYS_BASE) + B15_GIC_DIST_OFF,
+			(void *)IO_ADDRESS(B15_PHYS_BASE) + B15_GIC_CPUIF_OFF);
+
+	// FIXME!! hardcoded value below for the interrupt line#, will need to define
+	// the interrupt line# in a header file for all different chips
+	gic_init(0, 16, (void *)IO_ADDRESS(B15_PHYS_BASE) + B15_GIC_DIST_OFF,
+			(void *)IO_ADDRESS(B15_PHYS_BASE) + B15_GIC_CPUIF_OFF);
+
+	//irq_set_handler(B15_IRQ_GLOBALTIMER, handle_percpu_irq);
+	/* try it.. handle_edge_irq, handle_percpu_irq, or handle_level_irq */
+}
+
+void __init b15_init_early(void)
+{
+	/* NOP */
+}
+
+void __cpuinit b15_power_up_cpu(int cpu_id)
+{
+	B15CTRL->cpu_ctrl.cpu1_pwr_zone_ctrl |= 0x400;
+	printk("%s:Power up CPU1\n", __func__);
+
+	B15CTRL->cpu_ctrl.reset_cfg &= 0xfffffffd;
+	printk("%s:release reset to CPU1\n", __func__);
+}
+
+/*
+ * For SMP - initialize GIC CPU interface for secondary cores
+ */
+void __cpuinit b15_cpu_init(void)
+{
+	/* Initialize the GIC CPU interface for the next processor */
+	gic_secondary_init(0);
+#if 0
+	gic_cpu_init(0, (void *)IO_ADDRESS(SCU_PHYS_BASE) + B15_GIC_CPUIF_OFF);
+#endif
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/bcm63138.c b/arch/arm/plat-bcm63xx/bcm63138.c
new file mode 100644
index 0000000000000000000000000000000000000000..90097a677de5f51c77f84ecb5f7f087afc3100f0
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63138.c
@@ -0,0 +1,680 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * BCM63138 SoC main platform file.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/stop_machine.h>
+#include <linux/bug.h>
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
+#endif
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+#include <linux/amba/bus.h>
+#include <linux/amba/pl08x.h>
+#include <asm/hardware/pl080.h>
+#include <linux/dmaengine.h>
+#endif
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
+#include <asm/clkdev.h>
+#include <asm/system_misc.h>
+#include <mach/hardware.h>
+#include <mach/smp.h>
+#include <plat/bsp.h>
+#include <plat/ca9mpcore.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <pmc_drv.h>
+#include <pmc_cpu_core.h>
+#if defined(CONFIG_BCM_EXT_TIMER) && defined(CONFIG_PLAT_BCM63XX_EXT_TIMER)
+#include <plat/bcm63xx_timer.h>
+#endif
+#ifdef CONFIG_PLAT_BCM63XX_ACP
+#include <mach/memory.h>
+#endif
+
+static struct clk ref_clk = {
+	.name = "refclk",
+	.rate = FREQ_MHZ(25),	/* run-time override */
+	.fixed = 1,
+	.type = CLK_XTAL,
+};
+
+extern unsigned long getMemorySize(void);
+
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+static struct clk uart_clk = {
+	.name = "uart",
+	.rate = FREQ_MHZ(50),
+	.fixed = 1,
+	.type = CLK_UART,
+};
+#endif
+
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+static struct clk pl081dmac_clk = {
+	.name = "pl081dmac",
+	.rate = FREQ_MHZ(50),
+	.fixed = 1,
+	.type = CLK_DMAC,
+};
+#endif
+
+static struct clk_lookup board_clk_lookups[] = {	
+	CLKDEV_INIT(NULL, "refclk", &ref_clk),
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+	/* Uart0 */
+	CLKDEV_INIT("uart0", NULL, &uart_clk),
+#endif
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+	CLKDEV_INIT("pl08xdmac.0", NULL, &pl081dmac_clk),
+#endif
+};
+
+#define IO_DESC(pa, sz) { \
+		.virtual = IO_ADDRESS(pa), \
+		.pfn = __phys_to_pfn(pa), \
+		.length = sz, \
+		.type = MT_DEVICE, \
+	}
+
+#define MEM_DESC(pa, sz) { \
+		.virtual = IO_ADDRESS(pa), \
+		.pfn = __phys_to_pfn(pa), \
+		.length = sz, \
+		.type = MT_MEMORY_NONCACHED, \
+	}
+#ifdef CONFIG_PLAT_BCM63XX_ACP
+#define ACP_MEM_DESC(pa, sz) { \
+		.virtual = ACP_ADDRESS(pa), \
+		.pfn = __phys_to_pfn(pa), \
+		.length = sz, \
+		.type = MT_MEMORY_NONSECURED, \
+	}
+#endif
+
+
+static struct map_desc bcm63138_io_desc[] __initdata = {
+	IO_DESC(USB_CTL_PHYS_BASE, SZ_4K),
+	IO_DESC(MEMC_PHYS_BASE, SZ_4K),
+	IO_DESC(DDRPHY_PHYS_BASE, SZ_4K),
+	IO_DESC(SAR_PHYS_BASE, SZ_16K),
+	IO_DESC(SATA_PHYS_BASE, SZ_16K),
+	IO_DESC(USBH_PHYS_BASE, SZ_8K),
+	IO_DESC(ERROR_PORT_PHYS_BASE, SZ_4K),
+	IO_DESC(AIP_PHYS_BASE, SZ_4K),
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+	IO_DESC(ARM_UART_PHYS_BASE, SZ_4K),
+#endif
+	IO_DESC(L2C_PHYS_BASE, SZ_4K),
+	IO_DESC(ARMCFG_PHYS_BASE, SZ_4K),
+	IO_DESC(DECT_PHYS_BASE, SZ_128K),
+	IO_DESC(PCIE_0_PHYS_BASE, SZ_64K),
+	IO_DESC(PCIE_1_PHYS_BASE, SZ_64K),
+	IO_DESC(SWITCH_PHYS_BASE, SZ_512K),
+	IO_DESC(APM_PHYS_BASE, SZ_128K),
+	IO_DESC(RDP_PHYS_BASE, SZ_1M),
+	IO_DESC(PMC_PHYS_BASE, SZ_512K),
+	IO_DESC(PROC_MON_PHYS_BASE, SZ_4K),
+	IO_DESC(DSLPHY_PHYS_BASE, SZ_1M),
+	IO_DESC(DSLLMEM_PHYS_BASE, SZ_1M),
+	IO_DESC(PERF_PHYS_BASE, SZ_32K),
+	IO_DESC(BOOTLUT_PHYS_BASE, SZ_4K),
+	IO_DESC(SPIFLASH_PHYS_BASE, SZ_128K),
+	IO_DESC(NANDFLASH_PHYS_BASE, SZ_128K),
+};
+
+/* any fixup that has to be performed in the early stage of
+ * kernel booting */
+void __init soc_fixup(void)
+{
+	ca9mp_fixup();
+}
+
+/*
+ * Map fix-mapped I/O that is needed before full MMU operation
+ */
+void __init soc_map_io(void)
+{
+#ifdef CONFIG_PLAT_BCM63XX_ACP
+	struct map_desc acp_desc;
+#endif
+	ca9mp_map_io();
+
+	iotable_init(bcm63138_io_desc, ARRAY_SIZE(bcm63138_io_desc));
+
+#ifdef CONFIG_PLAT_BCM63XX_ACP
+	acp_desc.virtual = ACP_ADDRESS(PLAT_PHYS_OFFSET);
+	acp_desc.pfn = __phys_to_pfn(PLAT_PHYS_OFFSET);
+	acp_desc.length = getMemorySize();
+	acp_desc.type = MT_MEMORY_NONSECURED;
+	iotable_init(&acp_desc, 1);
+#endif
+}
+
+#define ARM_PROC_CLK_POLICY_FREQ_ALL(x)		( \
+		(x << ARM_PROC_CLK_POLICY3_FREQ_SHIFT) | \
+		(x << ARM_PROC_CLK_POLICY2_FREQ_SHIFT) | \
+		(x << ARM_PROC_CLK_POLICY1_FREQ_SHIFT) | \
+		(x << ARM_PROC_CLK_POLICY0_FREQ_SHIFT))
+
+static inline unsigned long get_arm_core_clk(void)
+{
+	int ndiv, pdiv, mdiv;
+	uint32 policy;
+
+	policy = ARMCFG->proc_clk.policy_freq & ARM_PROC_CLK_POLICY_FREQ_MASK;
+	if (policy == ARM_PROC_CLK_POLICY_FREQ_ALL(ARM_PROC_CLK_POLICY_FREQ_CRYSTAL))
+		return FREQ_MHZ(50);
+	else if (policy == ARM_PROC_CLK_POLICY_FREQ_ALL(ARM_PROC_CLK_POLICY_FREQ_SYSCLK))
+		return FREQ_MHZ(200);
+	else if (policy == ARM_PROC_CLK_POLICY_FREQ_ALL(ARM_PROC_CLK_POLICY_FREQ_ARMPLL_FAST))
+		mdiv = 2;
+	else if (policy == ARM_PROC_CLK_POLICY_FREQ_ALL(ARM_PROC_CLK_POLICY_FREQ_ARMPLL_SLOW))
+		mdiv = ARMCFG->proc_clk.pllarmc & 0xff;
+	else
+		return 0;
+
+	pdiv = (ARMCFG->proc_clk.pllarma & ARM_PROC_CLK_PLLARMA_PDIV_MASK) >> ARM_PROC_CLK_PLLARMA_PDIV_SHIFT;
+	ndiv = (ARMCFG->proc_clk.pllarma & ARM_PROC_CLK_PLLARMA_NDIV_MASK) >> ARM_PROC_CLK_PLLARMA_NDIV_SHIFT;
+
+	return FREQ_MHZ(50) / pdiv * ndiv / mdiv;
+}
+
+#define BCM63138_MAX_CORE_FREQ	1500
+/* freq in unit of Hz */
+/* Note for 63138, even though we do have capability to support various value
+ * for the inputted clock speed, but we try to minimize the usage to use
+ * just 2 of the clock policies: 1) system (200MHz) and 2) ARM_FAST (a clock
+ * source of multiple of 25MHz and it has to be faster than 200 MHz).
+ * And in this case, ARM's AXI and Periph will be driven with a clock speed
+ * that's 1/2 of the ARM clock speed.  And APB is driven by a clock with speed
+ * that's 1/4 of the ARM clock speed. */
+static int core_set_freq(void *p)
+{
+	unsigned int mdiv = *(unsigned int *)p;
+
+	ARMCFG->proc_clk.pllarmc = (ARMCFG->proc_clk.pllarmc & ~0xff) | mdiv;
+
+	return 0;
+}
+
+int soc_set_arm_core_clock(struct clk *cur_clk, unsigned long freqHz)
+{
+	struct clk *axi_clk, *apb_clk;
+	const struct cpumask *cpus;
+	unsigned int mdiv;
+
+	mdiv = FREQ_MHZ(2000) / freqHz;
+
+	if (mdiv < 2 || mdiv > 10) {
+		printk("\tInvalid cpu frequency %ld Hz, the supported range is "
+			"between 200MHz to %d MHz and it will be "
+			"computed based on 2 GHz / given frequency\n",
+			freqHz, BCM63138_MAX_CORE_FREQ);
+		return -EINVAL;
+	}
+
+	/* tie up cores to change frequency */
+	cpus = cpumask_of(smp_processor_id());
+	/* interrupts disabled in stop_machine */
+	__stop_machine(core_set_freq, &mdiv, cpus);
+
+	cur_clk->rate = freqHz;
+
+	/* update the depending clocks */
+	axi_clk = clk_get_sys("cpu", "axi_pclk");
+	BUG_ON(IS_ERR_OR_NULL(axi_clk));
+	apb_clk = clk_get_sys("cpu", "apb_pclk");
+	BUG_ON(IS_ERR_OR_NULL(apb_clk));
+
+	axi_clk->rate = freqHz >> 1;
+	apb_clk->rate = freqHz >> 2;
+
+#ifdef CONFIG_PLAT_CA9_MPCORE_TIMER
+	ca9mp_timer_update_freq(axi_clk->rate);
+#endif
+
+	return 0;
+}
+
+static struct clk_ops arm_clk_ops = {
+	.enable = NULL,
+	.disable = NULL,
+	.round = NULL,
+	.setrate = &soc_set_arm_core_clock,
+	.status = NULL,
+};
+
+void __init soc_init_clock(void)
+{
+	unsigned long arm_periph_clk = get_arm_core_clk();
+
+	/* change policy to use ARMPLL_SLOW in case cfe isn't up-to-date */
+	unsigned pll = ARM_PROC_CLK_POLICY_FREQ_ALL(ARM_PROC_CLK_POLICY_FREQ_ARMPLL_SLOW);
+	unsigned policy = ARMCFG->proc_clk.policy_freq;
+	const int mdiv_en = 1 << 11, mdiv = 2000 / 1000;
+
+	if ((policy & ~ARM_PROC_CLK_POLICY_FREQ_MASK) != pll) {
+		ARMCFG->proc_clk.pllarmc = (ARMCFG->proc_clk.pllarmc & ~0xff) | mdiv_en | mdiv;
+		ARMCFG->proc_clk.policy_freq = (policy & ~ARM_PROC_CLK_POLICY_FREQ_MASK) | pll;
+
+		/* enable policy and wait for policy to be activated */
+		ARMCFG->proc_clk.policy_ctl |= ARM_PROC_CLK_POLICY_CTL_GO_AC|ARM_PROC_CLK_POLICY_CTL_GO;
+		while (ARMCFG->proc_clk.policy_ctl & ARM_PROC_CLK_POLICY_CTL_GO);
+	}
+
+	/* install clock source into the lookup table */
+	clkdev_add_table(board_clk_lookups,
+			ARRAY_SIZE(board_clk_lookups));
+
+	if (arm_periph_clk != 0) {
+		/* install the clock source for ARM PLL */
+		static struct clk arm_pclk = {
+			.name = "arm_pclk",
+			.fixed = 1,
+			.type = CLK_PLL,
+			.ops = &arm_clk_ops,
+		};
+		static struct clk axi_pclk = {
+			.name = "axi_pclk",
+			.fixed = 1,
+			.type = CLK_DIV,
+		};
+		static struct clk apb_pclk = {
+			.name = "apb_pclk",
+			.fixed = 1,
+			.type = CLK_DIV,
+		};
+		static struct clk_lookup arm_clk_lookups[] = {
+			/* ARM CPU clock */
+			CLKDEV_INIT("cpu", "arm_pclk", &arm_pclk),
+			/* Periph/Axi clock */
+			CLKDEV_INIT("cpu", "axi_pclk", &axi_pclk),
+			/* Bus clock */
+			CLKDEV_INIT("cpu", "apb_pclk", &apb_pclk),
+		};
+		arm_pclk.rate = arm_periph_clk;
+		axi_pclk.rate = arm_periph_clk >> 1;
+		apb_pclk.rate = arm_periph_clk >> 2;
+		clkdev_add_table(arm_clk_lookups,
+				ARRAY_SIZE(arm_clk_lookups));
+	} else {
+		/* need to insert a dummy apb_pclk */
+		static struct clk dummy_arm_pclk;
+		static struct clk dummy_axi_pclk;
+		static struct clk dummy_apb_pclk;
+		static struct clk_lookup arm_clk_lookups[] = {
+			/* ARM CPU clock */
+			CLKDEV_INIT("cpu", "arm_pclk", &dummy_arm_pclk),
+			/* Periph/Axi clock */
+			CLKDEV_INIT("cpu", "axi_pclk", &dummy_axi_pclk),
+			/* Bus clock */
+			CLKDEV_INIT("cpu", "apb_pclk", &dummy_apb_pclk),
+		};
+		clkdev_add_table(arm_clk_lookups,
+				ARRAY_SIZE(arm_clk_lookups));
+	}
+}
+
+#if 0
+static int soc_abort_handler(unsigned long addr, unsigned int fsr,
+		struct pt_regs *regs)
+{
+	/*
+	 * These happen for no good reason
+	 * possibly left over from CFE
+	 */
+	printk(KERN_WARNING "External imprecise Data abort at "
+			"addr=%#lx, fsr=%#x ignored.\n", addr, fsr);
+
+	/* Returning non-zero causes fault display and panic */
+	return 0;
+}
+#endif
+
+static void soc_aborts_enable(void)
+{
+#if 0
+	u32 x;
+
+	/* Install our hook */
+	hook_fault_code(16 + 6, soc_abort_handler, SIGBUS, 0,
+			"imprecise external abort");
+
+	/* Enable external aborts - clear "A" bit in CPSR */
+
+	/* Read CPSR */
+	asm( "mrs	%0,cpsr": "=&r" (x) : : );
+
+	x &= ~ PSR_A_BIT;
+
+	/* Update CPSR, affect bits 8-15 */
+	asm( "msr	cpsr_x,%0; nop; nop": : "r" (x) : "cc" );
+#endif
+}
+
+/*
+ * This SoC relies on MPCORE GIC interrupt controller
+ */
+void __init soc_init_irq(void)
+{
+	ca9mp_init_gic();
+	soc_aborts_enable();
+}
+
+#ifdef CONFIG_CACHE_L2X0
+/*
+ * SoC initialization that need to be done early,
+ * e.g. L2 cache, clock, I/O pin mux, power management
+ */
+static int  __init bcm63138_l2_cache_init(void)
+{
+	u32 auxctl_val = 0, auxctl_msk = ~0UL;
+
+	auxctl_val |= (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT);	/* 16-way cache */
+	auxctl_msk &= ~(1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT);
+	auxctl_val |= (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);	/* 32KB */
+	auxctl_msk &= ~(L2X0_AUX_CTRL_WAY_SIZE_MASK);
+
+	/*
+	 * Set bit 22 in the auxiliary control register. If this bit
+	 * is cleared, PL310 treats Normal Shared Non-cacheable
+	 * accesses as Cacheable no-allocate.
+	 */
+	auxctl_val |= (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT);
+
+	auxctl_val |= (1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT);	/* Allow non-secure access */
+	/* Disable data prefetch to avoid pulling data cache lines that are not needed
+	   Revisit this logic once we fully understand/address the cache invalidate issues in the A9 processor
+	auxctl_val |= (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT); */	/* Data prefetch */
+	auxctl_val |= (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT);	/* Instruction prefetch */
+	auxctl_val |= (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT);	/* Early BRESP */
+
+	/* Configure using default aux control value */
+	l2x0_init(__io_address(L2C_PHYS_BASE), auxctl_val, auxctl_msk);
+
+	return 0;
+}
+#endif
+
+void __init soc_init_early(void)
+{
+	ca9mp_init_early();
+
+	pmc_init();
+
+#ifdef CONFIG_CACHE_L2X0
+	/* cache initialization */
+	bcm63138_l2_cache_init();
+#endif
+	/* TODO: can we use the following for reserving DMA memory for ACP?
+	 * Will it maintain the same region of memory all the time? */
+	//init_consistent_dma_size(SZ_128M);
+}
+
+/*
+ * Initialize SoC timers
+ */
+void __init soc_init_timer(void)
+{
+	/* in BCM63138, we provide 2 ways to initialize timers.
+	 * One is based on PERIPH Timer, and the other is using
+	 * CA9MP's own GPTIMER */
+#if defined(CONFIG_BCM_EXT_TIMER) && defined(CONFIG_PLAT_BCM63XX_EXT_TIMER)
+	bcm63xx_timer_init();
+#endif
+
+#ifdef CONFIG_PLAT_CA9_MPCORE_TIMER
+	{
+		unsigned long axi_freq;
+		struct clk *axi_clk;
+
+		axi_clk = clk_get_sys("cpu", "axi_pclk");
+		BUG_ON(IS_ERR_OR_NULL(axi_clk));
+		axi_freq = clk_get_rate(axi_clk);
+		BUG_ON(!axi_freq);
+
+		/* Fire up the global MPCORE timer */
+		ca9mp_timer_init(axi_freq);
+	}
+#endif
+}
+
+/*
+ * Install all other SoC device drivers
+ * that are not automatically discoverable.
+ */
+ 
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+struct pl08x_channel_data pl081_dmac0_channeldata [] = 
+{
+	/* HS_UART HOFIFO Channel */
+	{
+		.bus_id          = PL081_DMA_CHAN_HS_UART_RX,
+		.min_signal      = 0,
+		.max_signal      = 0,
+		.muxval          = 0,
+		.circular_buffer = false,
+		.single          = false,
+		.periph_buses    = PL08X_AHB1,
+	},
+	
+	/* HS_UART HIFIFO Channel */
+	{
+		.bus_id          = PL081_DMA_CHAN_HS_UART_TX,
+		.min_signal      = 1,
+		.max_signal      = 1,
+		.muxval          = 0,
+		.circular_buffer = false,
+		.single          = false,
+		.periph_buses    = PL08X_AHB1,
+	}	
+};
+
+struct pl08x_channel_data pl081_dmac0_memcp_chdata [] = 
+{
+	{
+		.bus_id          = "DMA_MTOM",
+		.min_signal      = 2,
+		.max_signal      = 2,
+		.muxval          = 0,
+		.circular_buffer = false,
+		.single          = false,
+		.periph_buses    = PL08X_AHB1,
+	},
+};
+
+struct pl08x_platform_data pl081dmac0_pdata;
+static AMBA_AHB_DEVICE(pl081dmac0, "pl08xdmac.0", 0x00041081, PL081_DMA_PHYS_BASE, {INTERRUPT_ID_PL081}, &pl081dmac0_pdata);
+#endif
+ 
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+static AMBA_APB_DEVICE(uart0, "uart0", 0, ARM_UART_PHYS_BASE, { INTERRUPT_ID_UART2 }, NULL);
+static struct amba_device *amba_devs[] __initdata = {
+	&uart0_device,
+};
+#endif
+
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+/*
+ * Get PL081 peripheral DMA request signal number
+ * PL081 has 16 DMA request signals. This function
+ * returns the DMA request signal number associated
+ * with the specified dma channel
+ */
+static int get_signal(struct pl08x_dma_chan * dma_chan)
+{
+	int signal = -1;
+	
+	/* Just return min_signal as dma request lines are not muxed */
+	if( dma_chan && dma_chan->cd )
+		signal = dma_chan->cd->min_signal;
+
+	return signal;		
+}
+
+/*
+ * Release PL081 peripheral DMA request signal number
+ */
+static void put_signal(struct pl08x_dma_chan * dma_chan)
+{
+	/* Do nothing as dma request lines are not muxed */	
+}
+#endif
+
+#ifdef CONFIG_PLAT_BCM63XX_EMMC  /* Arasan emmc SD */
+static struct resource bcm63xx_emmc_resources[] = {
+	[0] = {
+				.start = EMMC_HOSTIF_PHYS_BASE,
+				.end = EMMC_HOSTIF_PHYS_BASE + SZ_256 - 1,  /* we only need this area */
+				/* the memory map actually makes SZ_4K available  */
+				.flags = IORESOURCE_MEM,
+			},
+	[1] =	{
+				.start = INTERRUPT_ID_EMMC,
+				.end = INTERRUPT_ID_EMMC,
+				.flags = IORESOURCE_IRQ,
+			},
+};
+
+static u64 bcm63xx_emmc_dmamask = 0xffffffffUL;
+
+struct platform_device bcm63xx_emmc_device = {
+	.name = "sdhci-bcm63xx",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bcm63xx_emmc_resources),
+	.resource = bcm63xx_emmc_resources,
+	.dev = {
+		.dma_mask = &bcm63xx_emmc_dmamask,
+		.coherent_dma_mask = 0xffffffffUL},
+};
+#endif
+
+void __init soc_add_devices(void)
+{
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL011
+	{
+		int i;
+
+		/* init uart (AMBA device) here */
+		for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
+			struct amba_device *d = amba_devs[i];
+			int ret;
+
+			ret = amba_device_register(d, &iomem_resource);
+			if (ret)
+				printk("%s:%d:amba device[%d] registered failed, err = %d",
+					   __func__, __LINE__, i, ret);
+		}
+	}
+#endif
+
+#ifdef CONFIG_PLAT_BCM63XX_AMBA_PL081
+	{
+		pl081dmac0_pdata.slave_channels = &pl081_dmac0_channeldata[0];
+		pl081dmac0_pdata.num_slave_channels = 2;
+// 		pl081dmac0_pdata.memcpy_channel = NULL;
+		pl081dmac0_pdata.get_signal = get_signal;
+		pl081dmac0_pdata.put_signal = put_signal;
+		pl081dmac0_pdata.lli_buses = PL08X_AHB1;
+		pl081dmac0_pdata.mem_buses = PL08X_AHB1;
+				
+		/* Register AMBA device */
+		amba_device_register(&pl081dmac0_device, &iomem_resource);			
+	}
+#endif
+
+#ifdef CONFIG_PLAT_BCM63XX_EMMC
+	{
+		/* Only register EMMC device if NAND i/f is NOT active */
+		if ( MISC->miscStrapBus & MISC_STRAP_BUS_SW_BOOT_SPI_SPINAND_EMMC_MASK ) {
+			platform_device_register(&bcm63xx_emmc_device);
+		}
+	}
+#endif
+}
+
+/*
+ * Wakeup secondary core
+ * This is SoC-specific code used by the platform SMP code.
+ */
+void plat_wake_secondary_cpu(unsigned cpu, void (*_sec_entry_va)(void))
+{
+	void __iomem *bootlut_base = __io_address(BOOTLUT_PHYS_BASE);
+	u32 val;
+
+	/* 1) convert the virtual starting address into physical, then
+	 * write it to boot look-up table. */
+	val = virt_to_phys(_sec_entry_va);
+	__raw_writel(val, bootlut_base + 0x20);
+
+	/* 2) power up the 2nd core here */
+	pmc_cpu_core_power_up(1);
+}
+
+/*
+ * Functions to allow enabling/disabling WAIT instruction
+ */
+static void bcm63xx_arm_pm_idle(void) {};
+
+void set_cpu_arm_wait(int enable)
+{
+	if (enable) {
+		arm_pm_idle = NULL; /* This forces using the default implementation */
+	}
+	else {
+		arm_pm_idle = bcm63xx_arm_pm_idle; /* This is a stub to skip the default implementation */
+	}
+	cpu_idle_wait();
+	printk("wait instruction: %sabled\n", enable ? "en" : "dis");
+}
+EXPORT_SYMBOL(set_cpu_arm_wait);
+
+int get_cpu_arm_wait(void)
+{
+   return (arm_pm_idle == NULL);
+}
+EXPORT_SYMBOL(get_cpu_arm_wait);
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/bcm63148.c b/arch/arm/plat-bcm63xx/bcm63148.c
new file mode 100644
index 0000000000000000000000000000000000000000..6e508049d9cf751f752d2cd7176287eef4588366
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63148.c
@@ -0,0 +1,529 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * BCM63148 SoC main platform file.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+#include <linux/stop_machine.h>
+#include <asm/mach/map.h>
+#include <asm/clkdev.h>
+#include <asm/system_misc.h>
+#include <mach/hardware.h>
+#include <mach/smp.h>
+#include <plat/bsp.h>
+#include <plat/b15core.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <pmc_drv.h>
+#include <BPCM.h>
+#include <pmc_cpu_core.h>
+#if defined(CONFIG_BCM_EXT_TIMER) && defined(CONFIG_PLAT_BCM63XX_EXT_TIMER)
+#include <plat/bcm63xx_timer.h>
+#endif
+
+#define BUS_RANGE_3_DEFAULT_ULIMIT  0x3ffffU
+#define BUS_RANGE_4_DEFAULT_ULIMIT 0x1bffffU
+
+static volatile uint32_t core_set_freq_done, core_set_freq_core_1_rdy;
+
+static struct clk ref_clk = {
+	.name = "refclk",
+	.rate = FREQ_MHZ(25),	/* run-time override */
+	.fixed = 1,
+	.type = CLK_XTAL,
+};
+
+static struct clk_lookup board_clk_lookups[] = {
+	{
+		.con_id = "refclk",
+		.clk = &ref_clk,
+	},
+};
+
+#define IO_DESC(pa, sz) { \
+		.virtual = IO_ADDRESS(pa), \
+		.pfn = __phys_to_pfn(pa), \
+		.length = sz, \
+		.type = MT_DEVICE, \
+	}
+
+#define MEM_DESC(pa, sz) { \
+		.virtual = IO_ADDRESS(pa), \
+		.pfn = __phys_to_pfn(pa), \
+		.length = sz, \
+		.type = MT_MEMORY_NONCACHED, \
+	}
+
+
+static struct map_desc bcm63148_io_desc[] __initdata = {
+	IO_DESC(USB_CTL_PHYS_BASE, SZ_4K),
+	IO_DESC(MEMC_PHYS_BASE, SZ_4K),
+	IO_DESC(DDRPHY_PHYS_BASE, SZ_4K),
+	IO_DESC(SAR_PHYS_BASE, SZ_16K),
+	IO_DESC(SATA_PHYS_BASE, SZ_16K),
+	IO_DESC(USBH_PHYS_BASE, SZ_8K),
+	IO_DESC(ERROR_PORT_PHYS_BASE, SZ_4K),
+//	IO_DESC(L2C_PHYS_BASE, SZ_4K),
+	IO_DESC(B15_CTRL_PHYS_BASE, SZ_16K),	// FIXME! once we know the real RDB
+	IO_DESC(DECT_PHYS_BASE, SZ_128K),
+	IO_DESC(PCIE_0_PHYS_BASE, SZ_64K),
+	IO_DESC(PCIE_1_PHYS_BASE, SZ_64K),
+	IO_DESC(SWITCH_PHYS_BASE, SZ_512K),
+	IO_DESC(APM_PHYS_BASE, SZ_128K),
+	IO_DESC(RDP_PHYS_BASE, SZ_1M),
+	IO_DESC(PMC_PHYS_BASE, SZ_512K),
+	IO_DESC(PROC_MON_PHYS_BASE, SZ_4K),
+	IO_DESC(DSLPHY_PHYS_BASE, SZ_1M),
+	IO_DESC(DSLLMEM_PHYS_BASE, SZ_1M),
+	IO_DESC(PERF_PHYS_BASE, SZ_16K),
+	IO_DESC(BOOTLUT_PHYS_BASE, SZ_4K),
+	IO_DESC(SPIFLASH_PHYS_BASE, SZ_128K),
+	IO_DESC(NANDFLASH_PHYS_BASE, SZ_128K),
+	/* FIXME!! more!! */
+};
+
+/* any fixup that has to be performed in the early stage of
+ * kernel booting */
+void __init soc_fixup(void)
+{
+	b15_fixup();
+}
+
+/*
+ * Map fix-mapped I/O that is needed before full MMU operation
+ */
+void __init soc_map_io(void)
+{
+	b15_map_io();
+
+	iotable_init(bcm63148_io_desc, ARRAY_SIZE(bcm63148_io_desc));
+}
+
+static int arm_wfi_allowed = 1; // administratively allowed
+
+/* power is significantly reduced by re-enabling interrupts
+ * and looping locally until a reschedule is needed.
+ * nops would help further but create droops/spikes.
+ */
+__attribute__ (( aligned(16),hot ))
+static void bcm63xx_arm_pm_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched());
+}
+
+// selective wfi enable/disable based on frequency
+static void arm_wfi_enable(unsigned freqHz)
+{
+	/* enable only if administratively allowed and under 1500MHz */
+	if (arm_wfi_allowed && freqHz < FREQ_MHZ(1500)) {
+		arm_pm_idle = 0;
+	} else {
+		arm_pm_idle = bcm63xx_arm_pm_idle;
+	}
+}
+
+/*
+ * clk<n> = Fvco / mdiv<n>
+ * where clk0 connects to B15,
+ *       clk1 connects to MCP
+ *       Fvco is 3GHz
+ * clk0 is further scaled by 2^clk_ratio
+ */
+
+/* assume multiplier of 60 with 50MHz reference clock */
+#define FOSC (60 * FREQ_MHZ(50u))
+
+static unsigned get_arm_core_clk(void)
+{
+	unsigned ratio = B15CTRL->cpu_ctrl.clock_cfg & 7;
+	const unsigned osc = FOSC;
+	PLL_CHCFG_REG ch01_cfg;
+
+	ReadBPCMRegister(PMB_ADDR_B15_PLL, PLLBPCMRegOffset(ch01_cfg), &ch01_cfg.Reg32);
+
+	return (osc / ch01_cfg.Bits.mdiv0) >> ratio;
+}
+
+/*
+ * CPU frequency can be changed via the B15 pll or clock-ratio
+ *
+ * Access to the pll is through bpcm so reads/writes are slow.
+ * Access to the clock-ratio is through a fast soc register.
+ *
+ * To change the frequency from:
+ *
+ * 1:1 to 1:n
+ * - stop all write traffic (i.e. stop all CPUs)
+ * - set safe-clock-mode (clock configuration register)
+ * - DSB
+ * - set clock-divisor (clock configuration register)
+ * - DSB
+ * - start stopped CPUs
+ *
+ * 1:n to 1:1
+ * - stop all write traffic (i.e. stop all CPUs)
+ * - clear clock-divisor (clock configuration register)
+ * - DSB
+ * - clear safe-clock-mode (clock configuration register)
+ * - DSB
+ * - start stopped CPUs
+ *
+ * The configuration changes should be done close together and
+ * as quickly as possible to limit the down time for other CPUS.
+ * [this makes changing the clock-ratio preferable to the pll]
+ */
+static int core_set_freq(unsigned shift) // scale CPU clock to FOSC/2 >> shift; shift==0 restores full speed; always returns 0
+{
+	unsigned ratio = B15CTRL->cpu_ctrl.clock_cfg;
+	const unsigned safe_mode = 16; // safe-clock-mode bit in clock_cfg
+
+	// only one core running, no idlers;
+	// enable/disable wfi for idlers
+	arm_wfi_enable(FOSC/2 >> shift);
+
+	if (shift != 0) { // slowing down: enter safe mode first, then apply divisor
+		//A barrier here to ensure there are no pending memory accesses
+		//when entering safe mode.
+		smp_wmb();
+		//Switching ARM DDR access over to UBUS temporarily. We need to make sure there's no
+		//MCP activity when we enter Safe mode.
+		B15CTRL->cpu_ctrl.bus_range[3].ulimit = (BUS_RANGE_3_DEFAULT_ULIMIT<<ULIMIT_SHIFT)|BUSNUM_UBUS;
+		B15CTRL->cpu_ctrl.bus_range[4].ulimit = (BUS_RANGE_4_DEFAULT_ULIMIT<<ULIMIT_SHIFT)|BUSNUM_UBUS;
+		//Read back to make sure the setting has taken effect before moving on.
+		(void)B15CTRL->cpu_ctrl.bus_range[3].ulimit;
+		(void)B15CTRL->cpu_ctrl.bus_range[4].ulimit;
+		dsb();
+		// set safe_clk_mode if < 1000MHz (2x 500MHz MCP)
+		ratio |= safe_mode;
+		B15CTRL->cpu_ctrl.clock_cfg = ratio; // set safe-mode
+		//UBUS fast-ack makes above write operation a posted write.
+		//Counter fast-ack by reading back the register. We want to
+		//be sure the clock_cfg change has taken effect before
+		//moving on.
+		B15CTRL->cpu_ctrl.clock_cfg;
+		dsb();
+
+		ratio = (ratio & ~7) | shift; // divisor lives in the low 3 bits
+		B15CTRL->cpu_ctrl.clock_cfg = ratio; // new divisor
+		//Counter fast-ack
+		B15CTRL->cpu_ctrl.clock_cfg;
+		dsb();
+		//Switching ARM DDR access back to MCP
+		B15CTRL->cpu_ctrl.bus_range[3].ulimit = (BUS_RANGE_3_DEFAULT_ULIMIT<<ULIMIT_SHIFT)|BUSNUM_MCP0;
+		B15CTRL->cpu_ctrl.bus_range[4].ulimit = (BUS_RANGE_4_DEFAULT_ULIMIT<<ULIMIT_SHIFT)|BUSNUM_MCP0;
+		//Read back to make sure the setting has taken effect before moving on.
+		(void)B15CTRL->cpu_ctrl.bus_range[3].ulimit;
+		(void)B15CTRL->cpu_ctrl.bus_range[4].ulimit;
+		dsb();
+	} else { // speeding up: step divisor down one notch at a time, then leave safe mode
+		shift = ratio & 7; // current divisor becomes the step count
+		while (shift--) {
+			// frequency doubling one step at a time
+			ratio = (ratio & ~7) | shift;
+			B15CTRL->cpu_ctrl.clock_cfg = ratio;
+			//Counter fast-ack
+			B15CTRL->cpu_ctrl.clock_cfg;
+			if (shift <= 1) {
+				// 50us spike mitigation at 750 & 1500MHz
+				// tmrctl = enable | microseconds | 50
+				PMC->ctrl.gpTmr0Ctl = (1 << 31) | (1 << 29) | 50;
+				while (PMC->ctrl.gpTmr0Ctl & (1 << 31)); // busy-wait for timer expiry
+			}
+		}
+
+		//A barrier here to ensure there are no pending memory accesses
+		//when exiting safe mode.
+		smp_wmb();
+		// clear safe_clk_mode if >= 1000MHz (2x 500MHz MCP)
+		B15CTRL->cpu_ctrl.clock_cfg = ratio & ~safe_mode; // clear safe-mode
+		//Counter fast-ack
+		B15CTRL->cpu_ctrl.clock_cfg;
+		dsb();
+	}
+
+	return 0;
+}
+
+static int core_set_freq_sync(void *p) { // stop_machine callback: core0 changes frequency, core1 parks; p points to the divisor shift
+	//Load variables used into cache. We don't want DDR accesses
+	//in the code sequence below.
+	(void)core_set_freq_core_1_rdy;
+	(void)core_set_freq_done;
+
+	if (smp_processor_id()==0) {
+		//Core0 is doing the frequency change. Wait until core1
+		//is ready for it. We have to make sure core1 is not
+		//doing any memory accesses while core0 is changing
+		//CPU frequency.
+		//Deliberately using cached variables for inter-core
+		//synchronization instead of atomic variables.
+		//Atomic variable primitives would generate a memory
+		//access because MegaBarriers are used.
+		while(!core_set_freq_core_1_rdy); // spin on cached flag set by core1
+		core_set_freq(*(unsigned*)p);
+		core_set_freq_done=1; // release core1
+	}
+	else {
+
+		core_set_freq_core_1_rdy=1; // signal core0 we are parked
+		//Wait until core0 is done changing frequency before moving on.
+		while(!core_set_freq_done);
+
+	}
+
+	return 0;
+}
+
+/* freq in unit of Hz */
+int soc_set_arm_core_clock(struct clk *cur_clk, unsigned long freqHz) // clk_ops.setrate hook; returns 0 or -EINVAL for unsupported rates
+{
+	unsigned shift;
+
+	// change frequency through cpu ratio register
+	// find power-of-2 divisor
+	for (shift = 0; shift <= 4; shift++) // only FOSC/2, /4, /8, /16, /32 are valid targets
+		/* default pll shift 2 */
+		if ((FOSC/2 >> shift) == freqHz)
+			break;
+	if (shift > 4) {
+		printk("Invalid cpu frequency %luMHz\n", freqHz / FREQ_MHZ(1));
+		return -EINVAL;
+	}
+
+	cur_clk->rate = freqHz;
+	smp_mb();
+
+	core_set_freq_done=0; // reset handshake flags before stopping the machine
+	core_set_freq_core_1_rdy=0;
+	__stop_machine(core_set_freq_sync, &shift, cpu_online_mask); // run sync'd change on all online CPUs
+
+	return 0;
+}
+
+static struct clk_ops arm_clk_ops = { // ARM core clock: only rate changes are supported
+	.enable = NULL,
+	.disable = NULL,
+	.round = NULL,
+	.setrate = soc_set_arm_core_clock,
+	.status = NULL,
+};
+
+void __init soc_init_clock(void) // register board clocks; install a real or dummy "cpu"/"arm_pclk" source depending on PLL readout
+{
+	unsigned arm_periph_clk;
+
+	pmc_init();
+
+	arm_periph_clk = get_arm_core_clk(); // 0 means the core clock could not be determined
+
+	/* install clock source into the lookup table */
+	clkdev_add_table(board_clk_lookups,
+			ARRAY_SIZE(board_clk_lookups));
+
+	if (arm_periph_clk != 0) {
+		/* install the clock source for ARM PLL */
+		static struct clk arm_pclk = {
+			.name = "arm_pclk",
+			.fixed = 1,
+			.type = CLK_PLL,
+			.ops = &arm_clk_ops,
+		};
+		static struct clk_lookup arm_clk_lookups[] = {
+			/* ARM CPU clock */
+			CLKDEV_INIT("cpu", "arm_pclk", &arm_pclk),
+		};
+
+		arm_pclk.rate = arm_periph_clk;
+		clkdev_add_table(arm_clk_lookups,
+				ARRAY_SIZE(arm_clk_lookups));
+	} else {
+		static struct clk dummy_arm_pclk; // zeroed placeholder so lookups still resolve
+		static struct clk_lookup arm_clk_lookups[] = {
+			/* ARM CPU clock */
+			CLKDEV_INIT("cpu", "arm_pclk", &dummy_arm_pclk),
+		};
+
+		clkdev_add_table(arm_clk_lookups,
+				ARRAY_SIZE(arm_clk_lookups));
+	}
+}
+
+#if 0
+static int soc_abort_handler(unsigned long addr, unsigned int fsr, // disabled fault hook: would swallow imprecise external aborts
+		struct pt_regs *regs)
+{
+	/*
+	 * These happen for no good reason
+	 * possibly left over from CFE
+	 */
+	printk(KERN_WARNING "External imprecise Data abort at "
+			"addr=%#lx, fsr=%#x ignored.\n", addr, fsr);
+
+	/* Returning non-zero causes fault display and panic */
+	return 0;
+}
+#endif
+
+static void soc_aborts_enable(void) // currently a no-op: the abort-hook installation below is compiled out
+{
+#if 0
+	u32 x;
+
+	/* Install our hook */
+	hook_fault_code(16 + 6, soc_abort_handler, SIGBUS, 0,
+			"imprecise external abort");
+
+	/* Enable external aborts - clear "A" bit in CPSR */
+
+	/* Read CPSR */
+	asm( "mrs	%0,cpsr": "=&r" (x) : : );
+
+	x &= ~ PSR_A_BIT;
+
+	/* Update CPSR, affect bits 8-15 */
+	asm( "msr	cpsr_x,%0; nop; nop": : "r" (x) : "cc" );
+#endif
+}
+
+/*
+ * This SoC relies on MPCORE GIC interrupt controller
+ */
+void __init soc_init_irq(void) // GIC setup plus (currently no-op) abort enabling
+{
+	b15_init_gic();
+	soc_aborts_enable();
+}
+
+void __init soc_init_early(void) // early platform init; DMA window sizing is still TODO
+{
+	b15_init_early();
+
+	/*
+	 * DMA memory
+	 *
+	 * The PCIe-to-AXI mapping (PAX) has a window of 128 MB aligned at 1MB
+	 * we should make the DMA-able DRAM at least this large.
+	 * Will need to use CONSISTENT_BASE and CONSISTENT_SIZE macros
+	 * to program the PAX inbound mapping registers.
+	 */
+	// FIXME!!!
+	//init_consistent_dma_size(SZ_128M);
+}
+
+/*
+ * Initialize SoC timers
+ */
+void __init soc_init_timer(void) // PERIPH timer only; B15 MPcore GTIMER path is compiled out (see FIXME)
+{
+	/* in BCM63148, we provide 2 ways to initialize timers.
+	 * One is based on PERIPH Timer, and the other is using
+	 * Cortex B15 MPcore own GTIMER */
+#if defined(CONFIG_BCM_EXT_TIMER) && defined(CONFIG_PLAT_BCM63XX_EXT_TIMER)
+	bcm63xx_timer_init();
+#endif
+
+	// FIXME!! the timer needs to be implemented!
+#if 0
+#ifdef CONFIG_PLAT_B15_MPCORE_TIMER
+#define GTIMER_CLK_FREQ		FREQ_MHZ(25)
+	b15_init_timer(GTIMER_CLK_FREQ);
+#endif
+#endif
+}
+
+/*
+ * Install all other SoC device drivers
+ * that are not automatically discoverable.
+ */
+
+void __init soc_add_devices(void) // no devices yet; only disables the read-ahead cache
+{
+	/* if there is soc specific device */
+
+	/* to ensure RAC is disabled, due to some known issues with RAC */
+	B15CTRL->cpu_ctrl.rac_cfg0 = 0;
+}
+
+/*
+ * Wakeup secondary core
+ * This is SoC-specific code used by the platform SMP code.
+ */
+void plat_wake_secondary_cpu(unsigned cpu, void (*_sec_entry_va)(void)) // NOTE(review): 'cpu' arg is ignored; core 1 is hard-coded below
+{
+	void __iomem *bootlut_base = __io_address(BOOTLUT_PHYS_BASE);
+	u32 val;
+
+	/* 1) convert the virtual starting address into physical, then
+	 * write it to boot look-up table. */
+	val = virt_to_phys(_sec_entry_va);
+	__raw_writel(val, bootlut_base + 0x20);
+
+	/* 2) power up the 2nd core here */
+	b15_power_up_cpu(1);
+}
+
+/*
+ * Functions to allow enabling/disabling WAIT instruction
+ */
+void set_cpu_arm_wait(int enable) // toggle WFI usage in idle; takes effect after waking current idlers
+{
+	arm_wfi_allowed = enable;
+	printk("wait instruction: %s\n", enable ? "enabled" : "disabled");
+	arm_wfi_enable(get_arm_core_clk());
+	cpu_idle_wait(); // wakeup idlers
+}
+EXPORT_SYMBOL(set_cpu_arm_wait);
+
+int get_cpu_arm_wait(void) // query current WFI-allowed state
+{
+	return arm_wfi_allowed;
+}
+EXPORT_SYMBOL(get_cpu_arm_wait);
+
+static int __init bcm963xx_idle_init(void) // arch_initcall: arm the idle WFI policy for the current core clock
+{
+	arm_wfi_enable(get_arm_core_clk());
+	return 0;
+}
+arch_initcall(bcm963xx_idle_init);
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_acp.c b/arch/arm/plat-bcm63xx/bcm63xx_acp.c
new file mode 100644
index 0000000000000000000000000000000000000000..9369d0ba578e643ac2ad6bbda72a126b66d76801
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_acp.c
@@ -0,0 +1,1258 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/mach/map.h>
+#include <mach/memory.h>
+#include <plat/bcm63xx_acp.h>
+#include <linux/mm.h>
+
+#include "bcm_map_part.h"
+#include "pmc_drv.h"
+#include "BPCM.h"
+
+typedef struct { // one UBUS address-remap window (shadow of a BPCM cfg entry)
+	uint32_t addr_in;	// window input base address
+	uint32_t addr_out;	// remapped output base address
+	uint8_t dst_pid;	// destination UBUS port id
+	uint8_t size_shift;	// window size encoded as a shift
+	uint8_t en;		// window enabled flag
+	struct proc_dir_entry *proc_dir; // procfs dir for this window's control files
+} ubus_cfg_t;
+
+typedef struct { // per-UBUS-port ACP configuration (software shadow of hardware state)
+	ubus_cfg_t ubus_cfg[BCM_UBUS_CFG_MAX];
+	int pmb_addr;		// BPCM address; 0 means port unsupported on this chip
+	uint32_t acp_ctrl;	// cached AIP ACP control word (cache/user attrs)
+	uint8_t en;		// ACP enabled for this port
+	uint8_t name[10];	// short port name for procfs entries (<= 9 chars + NUL)
+	struct proc_dir_entry *proc_dir;
+} acp_cfg_entry_t;
+
+acp_cfg_entry_t acp_cfg_tbl[BCM_UBUS_PID_MAX]; // indexed by UBUS port id
+struct proc_dir_entry *proc_acp_dir = NULL;    // /proc root dir for ACP controls
+
+static void acp_ctrl_set(uint8_t id, uint8_t enable) // write cached acp_ctrl to hardware, or 0 to disable
+{
+	if (enable)
+		ARMAIPCTRL->acp_ctrl[id] = acp_cfg_tbl[id].acp_ctrl;
+	else
+		ARMAIPCTRL->acp_ctrl[id] = 0;
+}
+
+static int ubus_cfg_entry_set(uint8_t ubus_pid, uint8_t entry_id, // program one BPCM UBUS cfg entry; returns 0 or BPCM error, -1 if port unsupported
+		uint32_t addr_in, uint32_t addr_out, uint8_t dst_pid,
+		uint8_t size_shift, uint8_t enable)
+{
+	BPCM_UBUS_CFG_REG ubus_cfg;
+	int ret;
+
+	if (acp_cfg_tbl[ubus_pid].pmb_addr == 0) // pmb_addr==0 marks unsupported ports
+		return -1;
+
+	ret = ReadBPCMRegister(acp_cfg_tbl[ubus_pid].pmb_addr, // read-modify-write both 32-bit halves of the entry
+			UBUSBPCMRegOffset(cfg[entry_id]),
+			&ubus_cfg.Regs32.word0);
+	if (ret)
+		return ret;
+
+	ret = ReadBPCMRegister(acp_cfg_tbl[ubus_pid].pmb_addr,
+			UBUSBPCMRegOffset(cfg[entry_id]) + 1,
+			&ubus_cfg.Regs32.word1);
+	if (ret)
+		return ret;
+
+	ubus_cfg.Bits.addr_in = addr_in >> 8; // hardware keeps the upper 24 address bits
+	ubus_cfg.Bits.addr_out = addr_out >> 8;
+	ubus_cfg.Bits.pid = dst_pid;
+	ubus_cfg.Bits.size = size_shift;
+	ubus_cfg.Bits.cmddta = 0;
+	ubus_cfg.Bits.en = enable;
+
+	ret = WriteBPCMRegister(acp_cfg_tbl[ubus_pid].pmb_addr,
+			UBUSBPCMRegOffset(cfg[entry_id]),
+			ubus_cfg.Regs32.word0);
+	if (ret)
+		return ret;
+
+	ret = WriteBPCMRegister(acp_cfg_tbl[ubus_pid].pmb_addr,
+			UBUSBPCMRegOffset(cfg[entry_id]) + 1,
+			ubus_cfg.Regs32.word1);
+	return ret;
+}
+
+int bcm63xx_acp_ubus_cfg_get_entry(uint8_t ubus_pid, uint8_t idx, // copy the cached cfg entry into *acp_ubus_cfg; 0 or -EINVAL
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg)
+{
+	if (acp_ubus_cfg == NULL)
+		return -EINVAL;
+
+	if (idx >= 4) // NOTE(review): literal 4 assumed == BCM_UBUS_CFG_MAX (array bound) — confirm
+		return -EINVAL;
+
+	acp_ubus_cfg->addr_in = acp_cfg_tbl[ubus_pid].ubus_cfg[idx].addr_in;
+	acp_ubus_cfg->addr_out = acp_cfg_tbl[ubus_pid].ubus_cfg[idx].addr_out;
+	acp_ubus_cfg->dst_pid = acp_cfg_tbl[ubus_pid].ubus_cfg[idx].dst_pid;
+	acp_ubus_cfg->size_shift = acp_cfg_tbl[ubus_pid].ubus_cfg[idx].size_shift;
+	acp_ubus_cfg->en = acp_cfg_tbl[ubus_pid].ubus_cfg[idx].en;
+
+	return 0;
+}
+
+int bcm63xx_acp_ubus_cfg_get_all(uint8_t ubus_pid, // fill acp_ubus_cfg[0..3] from the cache; caller provides a 4-entry array
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (bcm63xx_acp_ubus_cfg_get_entry(ubus_pid, i, &acp_ubus_cfg[i]) != 0)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+int bcm63xx_acp_ubus_cfg_set_entry(uint8_t ubus_pid, uint8_t idx, // program hardware, then update the cache only on success
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg)
+{
+	int ret = 0;
+
+	if (acp_ubus_cfg == NULL)
+		return -EINVAL;
+
+	if (idx >= 4) // NOTE(review): literal 4 assumed == BCM_UBUS_CFG_MAX — confirm
+		return -EINVAL;
+
+	ret = ubus_cfg_entry_set(ubus_pid, idx, acp_ubus_cfg->addr_in,
+			acp_ubus_cfg->addr_out, acp_ubus_cfg->dst_pid,
+			acp_ubus_cfg->size_shift, acp_ubus_cfg->en);
+	if (ret != 0)
+		return ret;
+
+	acp_cfg_tbl[ubus_pid].ubus_cfg[idx].addr_in = acp_ubus_cfg->addr_in;
+	acp_cfg_tbl[ubus_pid].ubus_cfg[idx].addr_out = acp_ubus_cfg->addr_out;
+	acp_cfg_tbl[ubus_pid].ubus_cfg[idx].dst_pid = acp_ubus_cfg->dst_pid;
+	acp_cfg_tbl[ubus_pid].ubus_cfg[idx].size_shift = acp_ubus_cfg->size_shift;
+	acp_cfg_tbl[ubus_pid].ubus_cfg[idx].en = acp_ubus_cfg->en;
+
+	return ret;
+}
+
+int bcm63xx_acp_ubus_cfg_set_all(uint8_t ubus_pid, bcm_acp_ubus_cfg_t *acp_ubus_cfg) // set all 4 entries; on any failure resets the whole port and returns -EINVAL
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (bcm63xx_acp_ubus_cfg_set_entry(ubus_pid, i, &acp_ubus_cfg[i]) != 0)
+			goto SET_UBUS_CFG_FAIL;
+	}
+	return 0;
+
+SET_UBUS_CFG_FAIL:
+	bcm63xx_acp_ubus_cfg_reset(ubus_pid);
+	return -EINVAL;
+}
+
+void bcm63xx_acp_ubus_cfg_reset(uint8_t ubus_pid) // zero all 4 entries in hardware and cache; BPCM errors are ignored (best effort)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		ubus_cfg_entry_set(ubus_pid, i, 0, 0, 0, 0, 0);
+		acp_cfg_tbl[ubus_pid].ubus_cfg[i].addr_in = 0;
+		acp_cfg_tbl[ubus_pid].ubus_cfg[i].addr_out = 0;
+		acp_cfg_tbl[ubus_pid].ubus_cfg[i].dst_pid = 0;
+		acp_cfg_tbl[ubus_pid].ubus_cfg[i].size_shift = 0;
+		acp_cfg_tbl[ubus_pid].ubus_cfg[i].en = 0;
+	}
+}
+
+int bcm63xx_acp_cache_ctrl_get(uint8_t ubus_pid, bcm_acp_cache_ctrl_t *cache_ctrl) // unpack cached acp_ctrl word into its four fields
+{
+	if (cache_ctrl == NULL)
+		return -EINVAL;
+
+	cache_ctrl->wcache = (acp_cfg_tbl[ubus_pid].acp_ctrl >> AIPACP_WCACHE_SHIFT) & 0xf;
+	cache_ctrl->rcache = (acp_cfg_tbl[ubus_pid].acp_ctrl >> AIPACP_RCACHE_SHIFT) & 0xf;
+	cache_ctrl->wuser = (acp_cfg_tbl[ubus_pid].acp_ctrl >> AIPACP_WUSER_SHIFT) & 0x1f;
+	cache_ctrl->ruser = (acp_cfg_tbl[ubus_pid].acp_ctrl >> AIPACP_RUSER_SHIFT) & 0x1f;
+	return 0;
+}
+
+int bcm63xx_acp_cache_ctrl_set(uint8_t ubus_pid, bcm_acp_cache_ctrl_t *cache_ctrl) // validate, pack, cache, and (if port enabled) write to hardware
+{
+	if (cache_ctrl == NULL)
+		return -EINVAL;
+
+	if ((cache_ctrl->ruser > 0x1f) || (cache_ctrl->wuser > 0x1f) || // user fields are 5 bits, cache fields 4 bits
+			(cache_ctrl->rcache > 0xf) || (cache_ctrl->wcache > 0xf))
+		return -EINVAL;
+
+	acp_cfg_tbl[ubus_pid].acp_ctrl = cache_ctrl->ruser << AIPACP_RUSER_SHIFT |
+			cache_ctrl->wuser << AIPACP_WUSER_SHIFT |
+			cache_ctrl->rcache << AIPACP_RCACHE_SHIFT |
+			cache_ctrl->wcache << AIPACP_WCACHE_SHIFT;
+
+	acp_ctrl_set(ubus_pid, acp_cfg_tbl[ubus_pid].en); // hardware only updated when the port is currently enabled
+	return 0;
+}
+
+int bcm63xx_acp_enable(uint8_t ubus_pid) // enable ACP for a port: write ctrl, program cached windows; on failure roll hardware back
+{
+	uint8_t i;
+	int ret = 0;
+
+	/* enable ACP ctrl */
+	acp_ctrl_set(ubus_pid, 1);
+
+	for (i = 0; i < 4; i++) {
+		ret |= ubus_cfg_entry_set(ubus_pid, i, // OR-accumulate errors; any nonzero triggers rollback
+				acp_cfg_tbl[ubus_pid].ubus_cfg[i].addr_in,
+				acp_cfg_tbl[ubus_pid].ubus_cfg[i].addr_out,
+				acp_cfg_tbl[ubus_pid].ubus_cfg[i].dst_pid,
+				acp_cfg_tbl[ubus_pid].ubus_cfg[i].size_shift,
+				acp_cfg_tbl[ubus_pid].ubus_cfg[i].en);
+	}
+	if (ret)
+		goto fail_reset_hw;
+
+	acp_cfg_tbl[ubus_pid].en = 1;
+
+	return 0;
+
+fail_reset_hw:
+	for (i = 0; i < 4; i++) // undo any partially-programmed windows
+		ubus_cfg_entry_set(ubus_pid, i, 0, 0, 0, 0, 0);
+	acp_ctrl_set(ubus_pid, 0);
+
+	return ret;
+}
+
+int bcm63xx_acp_disable(uint8_t ubus_pid) // clear all hardware windows and the ACP ctrl; cache entries are kept for re-enable
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		ubus_cfg_entry_set(ubus_pid, i, 0, 0, 0, 0, 0);
+	acp_ctrl_set(ubus_pid, 0);
+	acp_cfg_tbl[ubus_pid].en = 0;
+
+	return 0;
+}
+
+bool bcm63xx_acp_on(uint8_t ubus_pid) // true when ACP is currently enabled for this port
+{
+	return acp_cfg_tbl[ubus_pid].en != 0;
+}
+
+static void acp_cfg_tbl_init(int entry_use, uint32_t *addr_in, // build the per-port table: PMB addresses + names, then seed the first entry_use windows
+		uint32_t *addr_out, uint8_t *dst_pid, uint8_t *size_shift)
+{
+	uint8_t i, j;
+
+	memset(acp_cfg_tbl, 0x0, BCM_UBUS_PID_MAX * sizeof(acp_cfg_entry_t));
+
+	/* only initialize table for supported device */
+#ifdef CONFIG_BCM963138
+	acp_cfg_tbl[BCM_UBUS_PID_PCIE0].pmb_addr = UBUS_CFG_PMB_ADDR_PCIE0;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_PCIE0].name, "pcie0");
+
+	acp_cfg_tbl[BCM_UBUS_PID_ARMAXIACP].pmb_addr = UBUS_PMB_ADDR_ARM;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_ARMAXIACP].name, "arm");
+
+	acp_cfg_tbl[BCM_UBUS_PID_PERIPH].pmb_addr = UBUS_CFG_PMB_ADDR_PERIPH;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_PERIPH].name, "periph");
+
+	acp_cfg_tbl[BCM_UBUS_PID_USBD].pmb_addr = UBUS_CFG_PMB_ADDR_USBD;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_USBD].name, "usbd");
+
+	acp_cfg_tbl[BCM_UBUS_PID_USBH].pmb_addr = UBUS_CFG_PMB_ADDR_USBH;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_USBH].name, "usbh");
+
+	acp_cfg_tbl[BCM_UBUS_PID_SATA].pmb_addr = UBUS_CFG_PMB_ADDR_SATA;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_SATA].name, "sata");
+
+	acp_cfg_tbl[BCM_UBUS_PID_DECT].pmb_addr = UBUS_PMB_ADDR_DECT;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_DECT].name, "dect");
+
+	acp_cfg_tbl[BCM_UBUS_PID_APM].pmb_addr = UBUS_CFG_PMB_ADDR_APM;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_APM].name, "apm");
+
+#if 0
+	// FIXME! not sure which PMB_ADDR VDSL_PID uses.
+	acp_cfg_tbl[BCM_UBUS_PID_VDSL].pmb_addr = UBUS_PMB_ADDR_VDSL3_CORE;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_VDSL].name, "vdsl");
+#endif
+
+	acp_cfg_tbl[BCM_UBUS_PID_SAR].pmb_addr = UBUS_CFG0_PMB_ADDR_SAR;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_SAR].name, "sar0");
+
+	acp_cfg_tbl[BCM_UBUS_PID_RNR].pmb_addr = UBUS_CFG_PMB_ADDR_DBR;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_RNR].name, "rnr0");
+
+	acp_cfg_tbl[BCM_UBUS_PID_RNR_RABR].pmb_addr = UBUS_CFG_PMB_ADDR_RABR;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_RNR_RABR].name, "rnr1");
+
+	acp_cfg_tbl[BCM_UBUS_PID_SF2].pmb_addr = UBUS_CFG_PMB_ADDR_SWITCH;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_SF2].name, "sf2");
+
+	acp_cfg_tbl[BCM_UBUS_PID_PCIE1].pmb_addr = UBUS_CFG_PMB_ADDR_PCIE1;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_PCIE1].name, "pcie1");
+
+	acp_cfg_tbl[BCM_UBUS_PID_ARMAIPDAP].pmb_addr = UBUS_PMB_ADDR_DAP;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_ARMAIPDAP].name, "dap");
+
+	acp_cfg_tbl[BCM_UBUS_PID_SAR2].pmb_addr = UBUS_CFG1_PMB_ADDR_SAR;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_SAR2].name, "sar1");
+
+	acp_cfg_tbl[BCM_UBUS_PID_RNR_RBBR].pmb_addr = UBUS_CFG_PMB_ADDR_RBBR;
+	sprintf((char *)acp_cfg_tbl[BCM_UBUS_PID_RNR_RBBR].name, "rnr2");
+#endif
+
+	/* initialize software entry for the first ubus cfg entry */
+	for (i = 0; i < BCM_UBUS_PID_MAX; i++) {
+		if (acp_cfg_tbl[i].pmb_addr != 0) { // only seed ports that exist on this chip
+			acp_cfg_tbl[i].acp_ctrl = (0xf << AIPACP_WCACHE_SHIFT) |
+					(0xf << AIPACP_RCACHE_SHIFT) |
+					(0x1 << AIPACP_WUSER_SHIFT) | 
+					(0x1 << AIPACP_RUSER_SHIFT);
+			for (j = 0; j < entry_use; j++) { // caller guarantees entry_use <= BCM_UBUS_CFG_MAX and array args of that length
+				acp_cfg_tbl[i].ubus_cfg[j].addr_in = addr_in[j];
+				acp_cfg_tbl[i].ubus_cfg[j].addr_out = addr_out[j];
+				acp_cfg_tbl[i].ubus_cfg[j].dst_pid = dst_pid[j];
+				acp_cfg_tbl[i].ubus_cfg[j].size_shift = size_shift[j];
+				acp_cfg_tbl[i].ubus_cfg[j].en = 1;
+			}
+		}
+	}
+}
+
+/* the following are for the proc file control */
+static uint8_t get_ubus_pid_by_proc_dir(struct proc_dir_entry *proc_dir) // reverse-map a procfs dir (port- or window-level) to its UBUS pid
+{
+	uint8_t i, j;
+
+	for (i = 0; i < BCM_UBUS_PID_MAX; i++) {
+		if (acp_cfg_tbl[i].pmb_addr != 0) {
+			if (acp_cfg_tbl[i].proc_dir == proc_dir)
+				return i;
+			for (j = 0; j < BCM_UBUS_CFG_MAX; j++) {
+				if (acp_cfg_tbl[i].ubus_cfg[j].proc_dir
+						== proc_dir)
+					return i;
+			}
+		}
+	}
+	return BCM_UBUS_PID_INVALID; // no match
+}
+
+static inline uint8_t get_cfg_id_by_ubus_pid_proc_dir(uint8_t ubus_pid, // map a window-level procfs dir to its cfg index; BCM_UBUS_CFG_MAX on no match
+		struct proc_dir_entry *proc_dir)
+{
+	uint8_t i;
+
+	for (i = 0; i < BCM_UBUS_CFG_MAX; i++) {
+		if (acp_cfg_tbl[ubus_pid].ubus_cfg[i].proc_dir == proc_dir)
+			return i;
+	}
+	return BCM_UBUS_CFG_MAX;
+
+}
+
+static int read_proc_acp_en(char *page, char **start, off_t off, // procfs read: print the port's enable flag; data is the owning proc dir
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		return -EINVAL;
+
+	len = sprintf(page + len, "%d\n", acp_cfg_tbl[ubus_pid].en);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_acp_en(struct file *file, const char __user *buffer, // procfs write: 0 disables / nonzero enables ACP for the port; always consumes count
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	if (count >= sizeof(buf)) // input must fit the local buffer incl. NUL
+		goto WRITE_PROC_ACP_EN_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_ACP_EN_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_ACP_EN_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_ACP_EN_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_ACP_EN_EXIT;
+
+	if ((uint8_t)input_val == acp_cfg_tbl[ubus_pid].en) { // no state change requested
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	if (input_val == 0) {
+		ret = bcm63xx_acp_disable(ubus_pid);
+		if (ret == 0)
+			printk(KERN_WARNING "Done disabling ACP for %s\n",
+					acp_cfg_tbl[ubus_pid].name);
+	} else {
+		ret = bcm63xx_acp_enable(ubus_pid);
+		if (ret == 0)
+			printk(KERN_WARNING "Done enabling ACP for %s\n",
+					acp_cfg_tbl[ubus_pid].name);
+	}
+	if (ret)
+		printk(KERN_WARNING "Fail to configure\n");
+
+	return count;
+
+WRITE_PROC_ACP_EN_EXIT:
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_acp_ctrl(char *page, char **start, off_t off, // procfs read: field legend plus the cached acp_ctrl word in hex
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		return -EINVAL;
+
+	len += sprintf(page + len, "bit[3-0] = WCACHE, bit[7-4] = RCACHE, bit[12-8] = WUSER, bit[17-13] = RUSER\n");
+	len += sprintf(page + len, "0x%lx\n", (unsigned long)acp_cfg_tbl[ubus_pid].acp_ctrl);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_acp_ctrl(struct file *file, const char __user *buffer, // procfs write: parse a new acp_ctrl word, cache it, push to hw if port enabled
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid;
+	char buf[16];
+	unsigned long input_val;
+	int len;
+
+	if (count >= sizeof(buf)) // input must fit the local buffer incl. NUL
+		goto WRITE_PROC_ACP_CTRL_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_ACP_CTRL_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_ACP_CTRL_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_ACP_CTRL_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_ACP_CTRL_EXIT;
+
+	if ((uint32_t)input_val == acp_cfg_tbl[ubus_pid].acp_ctrl) { // unchanged value: nothing to do
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	acp_cfg_tbl[ubus_pid].acp_ctrl = (uint32_t)input_val;
+	acp_ctrl_set(ubus_pid, acp_cfg_tbl[ubus_pid].en); // hardware only touched when the port is enabled
+	printk(KERN_WARNING "Done setting ACP ctrl for %s\n",
+				acp_cfg_tbl[ubus_pid].name);
+
+	return count;
+
+WRITE_PROC_ACP_CTRL_EXIT: // renamed from copy-pasted WRITE_PROC_ACP_EN_EXIT for consistency with the other handlers
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_addr_in(char *page, char **start, off_t off, // procfs read: print the window's input base address
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (unlikely(ubus_pid == BCM_UBUS_PID_INVALID))
+		return -EINVAL;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		return -EINVAL;
+
+	len = sprintf(page + len, "%p\n", // addr printed as a pointer for hex formatting
+			(void *)acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_addr_in(struct file *file, const char __user *buffer, // procfs write: reprogram the window with a new input base; cache updated only on success
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	if (count >= sizeof(buf))
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		goto WRITE_PROC_ADDR_IN_EXIT;
+
+	if ((uint32_t)input_val == acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in) {
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	ret = ubus_cfg_entry_set(ubus_pid, cfg_id, (uint32_t)input_val, // other fields carried over from the cache
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en);
+	if (ret) {
+		printk(KERN_WARNING "Fail to configure\n");
+	} else {
+		printk(KERN_WARNING "Done setting the new value\n");
+		acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in = (uint32_t)input_val;
+
+	}
+	return count;
+
+WRITE_PROC_ADDR_IN_EXIT:
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_addr_out(char *page, char **start, off_t off, // procfs read: print the window's output (remapped) base address
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (unlikely(ubus_pid == BCM_UBUS_PID_INVALID))
+		return -EINVAL;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		return -EINVAL;
+
+	len = sprintf(page + len, "%p\n",
+			(void *)acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_addr_out(struct file *file, const char __user *buffer, // procfs write: reprogram the window with a new output base; cache updated only on success
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	if (count >= sizeof(buf))
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		goto WRITE_PROC_ADDR_OUT_EXIT;
+
+	if ((uint32_t)input_val == acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out) {
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	ret = ubus_cfg_entry_set(ubus_pid, cfg_id,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in,
+			(uint32_t)input_val,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en);
+	if (ret) {
+		printk(KERN_WARNING "Fail to configure\n");
+	} else {
+		printk(KERN_WARNING "Done setting the new value\n");
+		acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out = (uint32_t)input_val;
+
+	}
+	return count;
+
+WRITE_PROC_ADDR_OUT_EXIT:
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_dst_pid(char *page, char **start, off_t off, // procfs read: print the window's destination UBUS port id
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (unlikely(ubus_pid == BCM_UBUS_PID_INVALID))
+		return -EINVAL;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		return -EINVAL;
+
+	len = sprintf(page + len, "%u\n",
+			(unsigned)acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_dst_pid(struct file *file, const char __user *buffer, // procfs write: reprogram the window with a new destination pid; cache updated only on success
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	if (count >= sizeof(buf)) // input must fit the local buffer incl. NUL
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		goto WRITE_PROC_DST_PID_EXIT;
+
+	if (acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid ==
+			(uint8_t)input_val) { // unchanged value: nothing to do
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	ret = ubus_cfg_entry_set(ubus_pid, cfg_id,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out,
+			(uint8_t)input_val,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en);
+	if (ret) {
+		printk(KERN_WARNING "Fail to configure\n");
+	} else {
+		printk(KERN_WARNING "Done setting the new value\n");
+		acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid =
+			(uint8_t)input_val;
+
+	}
+	return count;
+
+WRITE_PROC_DST_PID_EXIT: // renamed from copy-pasted WRITE_PROC_SIZE_SHIFT_EXIT for consistency with the other handlers
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_size_shift(char *page, char **start, off_t off, // procfs read: print the window's size-shift value
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (unlikely(ubus_pid == BCM_UBUS_PID_INVALID))
+		return -EINVAL;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		return -EINVAL;
+
+	len = sprintf(page + len, "%u\n",
+			(unsigned)acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift);
+	*eof = 1;
+
+	return len;
+}
+
+static int write_proc_size_shift(struct file *file, const char __user *buffer, // procfs write: reprogram the window with a new size shift; cache updated only on success
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	if (count >= sizeof(buf))
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		goto WRITE_PROC_SIZE_SHIFT_EXIT;
+
+	if (acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift ==
+			(uint8_t)input_val) {
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	ret = ubus_cfg_entry_set(ubus_pid, cfg_id,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid,
+			(uint8_t)input_val,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en);
+	if (ret) {
+		printk(KERN_WARNING "Fail to configure\n");
+	} else {
+		printk(KERN_WARNING "Done setting the new value\n");
+		acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift =
+			(uint8_t)input_val;
+
+	}
+	return count;
+
+WRITE_PROC_SIZE_SHIFT_EXIT:
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+static int read_proc_cfg_en(char *page, char **start, off_t off, // procfs read: print the window's enable flag
+		int count, int *eof, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	int len = 0;
+
+	if (data == NULL)
+		return -EINVAL;
+	
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (unlikely(ubus_pid == BCM_UBUS_PID_INVALID))
+		return -EINVAL;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		return -EINVAL;
+
+	len = sprintf(page + len, "%u\n",
+			(unsigned)acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en);
+	*eof = 1;
+
+	return len;
+}
+
+/* /proc write handler for a cfg entry's "config_enable" file.
+ * Parses an unsigned number from userspace and, if it differs from the
+ * cached value, reprograms the UBUS cfg entry with the new enable flag.
+ * Always consumes 'count' so the writer is not retried on bad input. */
+static int write_proc_cfg_en(struct file *file, const char __user *buffer,
+                           unsigned long count, void *data)
+{
+	uint8_t ubus_pid, cfg_id;
+	char buf[16];
+	unsigned long input_val;
+	int len, ret;
+
+	/* reject oversized writes so the input always fits with a NUL */
+	if (count >= sizeof(buf))
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	len = min(count, (unsigned long)(sizeof(buf) - 1));
+
+	if (copy_from_user(buf, buffer, len))
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &input_val))
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	if (data == NULL)
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	ubus_pid = get_ubus_pid_by_proc_dir((struct proc_dir_entry *)data);
+	if (ubus_pid == BCM_UBUS_PID_INVALID)
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	cfg_id = get_cfg_id_by_ubus_pid_proc_dir(ubus_pid,
+			(struct proc_dir_entry *)data);
+	if (unlikely(cfg_id >= BCM_UBUS_CFG_MAX))
+		goto WRITE_PROC_ENABLE_EXIT;
+
+	/* no-op when the requested value matches the cached one.
+	 * NOTE(review): input_val is truncated to 8 bits below; values
+	 * > 255 silently wrap -- confirm whether they should be rejected. */
+	if (acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en == (uint8_t)input_val) {
+		printk(KERN_WARNING "Nothing has been done\n");
+		return count;
+	}
+
+	/* push the new enable flag to HW, keeping all other fields as-is */
+	ret = ubus_cfg_entry_set(ubus_pid, cfg_id,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_in,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].addr_out,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].dst_pid,
+			acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].size_shift,
+			(uint8_t)input_val);
+	if (ret) {
+		printk(KERN_WARNING "Fail to configure\n");
+	} else {
+		printk(KERN_WARNING "Done setting the new value\n");
+		/* update the cache only after HW accepted the change */
+		acp_cfg_tbl[ubus_pid].ubus_cfg[cfg_id].en = (uint8_t)input_val;
+
+	}
+	return count;
+
+WRITE_PROC_ENABLE_EXIT:
+	printk(KERN_WARNING "invalid input value\n");
+	return count;
+}
+
+/* Create the /proc tree for one UBUS port:
+ *   <name>/acp_enable, <name>/acp_ctrl, and per-config subdirectories
+ *   <name>/cfgN/{addr_in,addr_out,dst_pid,size_shift,config_enable}.
+ * Each file's ->data points at its parent proc dir so the read/write
+ * handlers can recover the port id and cfg index. */
+static void create_proc_dir_file(uint8_t ubus_pid)
+{
+	int i;
+	char buff[10];
+	acp_cfg_entry_t *cur_cfg = &acp_cfg_tbl[ubus_pid];
+	struct proc_dir_entry *res;
+
+	if (cur_cfg->proc_dir == NULL)
+		cur_cfg->proc_dir = proc_mkdir(cur_cfg->name, proc_acp_dir);
+
+	if (cur_cfg->proc_dir == NULL) {
+		printk(KERN_ERR "fail to create proc dir (%s)\n",
+				cur_cfg->name);
+		return;
+	}
+
+	/* proc file for acp_en */
+	res = create_proc_entry("acp_enable", S_IRUGO | S_IWUGO,
+			cur_cfg->proc_dir);
+
+	if (res) {
+		res->read_proc = read_proc_acp_en;
+		res->write_proc = write_proc_acp_en;
+		res->data = (void *)cur_cfg->proc_dir;
+	} else {
+		printk(KERN_ERR "fail to create proc file (%s)"
+				"->acp_enable\n", cur_cfg->name);
+	}
+
+	/* proc file for acp_ctrl */
+	res = create_proc_entry("acp_ctrl", S_IRUGO | S_IWUGO,
+			cur_cfg->proc_dir);
+
+	if (res) {
+		res->read_proc = read_proc_acp_ctrl;
+		res->write_proc = write_proc_acp_ctrl;
+		res->data = (void *)cur_cfg->proc_dir;
+	} else {
+		printk(KERN_ERR "fail to create proc file (%s)"
+				"->acp_ctrl\n", cur_cfg->name);
+	}
+
+	/* one "cfgN" subdirectory per UBUS cfg entry */
+	for (i = 0; i < BCM_UBUS_CFG_MAX; i++) {
+		if (cur_cfg->ubus_cfg[i].proc_dir == NULL) {
+			sprintf(buff, "cfg%d", i);
+			cur_cfg->ubus_cfg[i].proc_dir = proc_mkdir(buff,
+					cur_cfg->proc_dir);
+		}
+
+		/* supposedly shouldn't happen.
+		 * NOTE(review): if the dir already existed, buff was not
+		 * written this iteration and the error messages below print
+		 * stale/uninitialized text -- confirm intent. */
+		if (unlikely(cur_cfg->ubus_cfg[i].proc_dir == NULL)) {
+			printk(KERN_ERR "fail to create proc dir (%s)\n", buff);
+			return;
+		}
+
+		/* proc file for addr_in */
+		res = create_proc_entry("addr_in", S_IRUGO | S_IWUGO,
+				cur_cfg->ubus_cfg[i].proc_dir);
+
+		if (res) {
+			res->read_proc = read_proc_addr_in;
+			res->write_proc = write_proc_addr_in;
+			res->data = (void *)cur_cfg->ubus_cfg[i].proc_dir;
+		} else {
+			printk(KERN_ERR "fail to create proc file (%s)"
+					"->addr_in\n", buff);
+		}
+
+		/* proc file for addr_out */
+		res = create_proc_entry("addr_out", S_IRUGO | S_IWUGO,
+				cur_cfg->ubus_cfg[i].proc_dir);
+
+		if (res) {
+			res->read_proc = read_proc_addr_out;
+			res->write_proc = write_proc_addr_out;
+			res->data = (void *)cur_cfg->ubus_cfg[i].proc_dir;
+		} else {
+			printk(KERN_ERR "fail to create proc file (%s)"
+					"->addr_out\n", buff);
+		}
+
+		/* proc file for dst_pid */
+		res = create_proc_entry("dst_pid", S_IRUGO | S_IWUGO,
+				cur_cfg->ubus_cfg[i].proc_dir);
+
+		if (res) {
+			res->read_proc = read_proc_dst_pid;
+			res->write_proc = write_proc_dst_pid;
+			res->data = (void *)cur_cfg->ubus_cfg[i].proc_dir;
+		} else {
+			printk(KERN_ERR "fail to create proc file (%s)"
+					"->dst_pid\n", buff);
+		}
+
+		/* proc file for size_shift */
+		res = create_proc_entry("size_shift", S_IRUGO | S_IWUGO,
+				cur_cfg->ubus_cfg[i].proc_dir);
+
+		if (res) {
+			res->read_proc = read_proc_size_shift;
+			res->write_proc = write_proc_size_shift;
+			res->data = (void *)cur_cfg->ubus_cfg[i].proc_dir;
+		} else {
+			printk(KERN_ERR "fail to create proc file (%s)"
+					"->size_shift\n", buff);
+		}
+
+		/* proc file for enable */
+		res = create_proc_entry("config_enable", S_IRUGO | S_IWUGO,
+				cur_cfg->ubus_cfg[i].proc_dir);
+
+		if (res) {
+			res->read_proc = read_proc_cfg_en;
+			res->write_proc = write_proc_cfg_en;
+			res->data = (void *)cur_cfg->ubus_cfg[i].proc_dir;
+		} else {
+			printk(KERN_ERR "fail to create proc file (%s)"
+					"->en\n", buff);
+		}
+	}
+}
+
+/* Tear down the /proc tree created by create_proc_dir_file() for one
+ * UBUS port, removing leaf files before their parent directories. */
+static void remove_proc_dir_file(uint8_t ubus_pid)
+{
+	int i;
+	char buff[10];
+	acp_cfg_entry_t *cur_cfg = &acp_cfg_tbl[ubus_pid];
+
+	for (i = 0; i < BCM_UBUS_CFG_MAX; i++) {
+		remove_proc_entry("addr_in", cur_cfg->ubus_cfg[i].proc_dir);
+		remove_proc_entry("addr_out", cur_cfg->ubus_cfg[i].proc_dir);
+		remove_proc_entry("dst_pid", cur_cfg->ubus_cfg[i].proc_dir);
+		remove_proc_entry("size_shift", cur_cfg->ubus_cfg[i].proc_dir);
+		remove_proc_entry("config_enable", cur_cfg->ubus_cfg[i].proc_dir);
+		sprintf(buff, "cfg%d", i);
+		remove_proc_entry(buff, cur_cfg->proc_dir);
+	}
+	remove_proc_entry("acp_enable", cur_cfg->proc_dir);
+	remove_proc_entry("acp_ctrl", cur_cfg->proc_dir);
+	remove_proc_entry(cur_cfg->name, proc_acp_dir);
+}
+
+/* Create /proc/driver/acp and populate it for every UBUS port that has
+ * a PMB address configured (pmb_addr != 0 marks an active entry). */
+static void acp_proc_file_init(void)
+{
+	uint8_t i;
+
+	if (proc_acp_dir == NULL)
+		proc_acp_dir = proc_mkdir("driver/acp", NULL);
+
+	if (proc_acp_dir == NULL) {
+		printk(KERN_ERR "fail to create proc dir driver/acp\n");
+		return;
+	}
+
+	for (i = 0; i < BCM_UBUS_PID_MAX; i++) {
+		if (acp_cfg_tbl[i].pmb_addr != 0)
+			create_proc_dir_file(i);
+	}
+}
+
+/* Remove per-port /proc trees and /proc/driver/acp itself (inverse of
+ * acp_proc_file_init). */
+static void acp_proc_file_deinit(void)
+{
+	uint8_t i;
+	for (i = 0; i < BCM_UBUS_PID_MAX; i++) {
+		if (acp_cfg_tbl[i].pmb_addr != 0)
+			remove_proc_dir_file(i);
+	}
+
+	if (proc_acp_dir)
+		remove_proc_entry("driver/acp", NULL);
+	proc_acp_dir = NULL;
+}
+
+/* Zero the whole ACP configuration table on module teardown. */
+static void acp_cfg_tbl_deinit(void)
+{
+	memset(acp_cfg_tbl, 0x0, BCM_UBUS_PID_MAX * sizeof(acp_cfg_entry_t));
+}
+
+/* Convert an (addr_start, size) memory window into up to 4 UBUS range
+ * entries (addr_in/addr_out/dst_pid/size_shift), each covering a
+ * power-of-two region.  Returns the number of entries used, or -1 if
+ * the window cannot be described in 4 entries.
+ *
+ * size is a multiple of 1 MB.  HW limitation: for each entry, addr must
+ * be a multiple of the entry's size.  An invalid example: addr 0x800000
+ * with size 0x1600000 -- HW cannot process it. */
+static int ubus_cfg_convert(uint32_t addr_start, uint32_t size,
+		uint32_t *addr_in, uint32_t *addr_out,
+		uint8_t *dst_pid, uint8_t *size_shift)
+{
+	int used = 0, cur_bit_to_add;
+	uint32_t added_size = 0, extra_added = 0;
+
+	/* first method, go from the least significant bit set in the address
+	 * to add the size into the table.  Then go from remaining size to add
+	 * from the most significant bit */
+	do {
+		/* largest power of two that is aligned at the current address
+		 * and does not overshoot the remaining size */
+		cur_bit_to_add = ffs((addr_start + added_size)) - 1;
+		if ((0x1 << cur_bit_to_add) > (size - added_size))
+			cur_bit_to_add = fls((size - added_size)) - 1;
+		addr_in[used] = addr_start + added_size;
+		addr_out[used] = addr_start + added_size;
+		dst_pid[used] = BCM_UBUS_PID_ARMAXIACP;
+		size_shift[used] = cur_bit_to_add; 
+		added_size += 0x1 << cur_bit_to_add;
+		used++;
+	} while ((used < 4) && ((size - added_size) != 0));
+
+	if ((size - added_size) == 0)
+		return used;
+
+	/* second method, add the total and subtract those that should go DDR */
+	added_size = 0;
+	extra_added = 0;
+	used = 0;
+	do {
+		if (extra_added != 0) {
+			/* carve DDR-destined ranges back out of the
+			 * over-covered ACP region */
+			cur_bit_to_add = fls(extra_added) - 1;
+			if ((cur_bit_to_add >= 2) &&
+					((0x1 << cur_bit_to_add) & extra_added) &&
+					((0x1 << (cur_bit_to_add - 1)) & extra_added) &&
+					((0x1 << (cur_bit_to_add - 2)) & extra_added)) {
+				/* three consecutive top bits set: round up to
+				 * one larger block instead of three entries */
+				cur_bit_to_add++;
+				extra_added = 0;
+			} else {
+				extra_added -= 0x1 << cur_bit_to_add;
+			}
+			added_size -= 0x1 << cur_bit_to_add;
+			addr_in[used] = added_size;
+			addr_out[used] = added_size;
+			dst_pid[used] = BCM_UBUS_PID_DDR;
+			size_shift[used] = cur_bit_to_add; 
+			used++;
+		} else {
+			/* cover [0, addr_start+size) with ACP blocks from the
+			 * top bit down; over-cover (and remember the excess in
+			 * extra_added) when the next bit is also set */
+			cur_bit_to_add = fls((addr_start + size - added_size)) - 1;
+			if (0x1 << (cur_bit_to_add - 1) & (addr_start + size - added_size)) {
+				cur_bit_to_add++;
+				extra_added = (0x1 << cur_bit_to_add) + added_size - addr_start - size;
+			}
+			addr_in[used] = added_size;
+			addr_out[used] = added_size;
+			dst_pid[used] = BCM_UBUS_PID_ARMAXIACP;
+			size_shift[used] = cur_bit_to_add; 
+			added_size += 0x1 << cur_bit_to_add;
+			used++;
+		}
+	} while ((used < 4) && ((addr_start + size) != added_size));
+
+	if ((addr_start + size) != added_size) {
+		printk("BCM63XX ACP ERROR!: please define a new ACP_MEM_SIZE\n");
+		return -1;
+	}
+
+	/* route everything below addr_start back to DDR with any entries
+	 * that remain */
+	added_size = 0;
+	while ((used < 4) && (addr_start - added_size)) {
+		cur_bit_to_add = fls((addr_start - added_size)) - 1;
+		addr_in[used] = added_size;
+		addr_out[used] = added_size;
+		dst_pid[used] = BCM_UBUS_PID_DDR;
+		size_shift[used] = cur_bit_to_add; 
+		added_size += 0x1 << cur_bit_to_add;
+		used++;
+	}
+
+	if ((addr_start - added_size) == 0)
+		return used;
+
+	/* TODO: Maybe other way to fill the table entry? */
+
+	printk("BCM63XX ACP ERROR!: please define a new ACP_MEM_SIZE\n");
+	return -1;
+}
+
+/* Module init: derive UBUS range entries from the ZONE_ACP memory zone,
+ * program the cfg table, expose the /proc interface, and enable ACP for
+ * the runner/SAR ports.  Returns 0 on success, -EPERM if the zone
+ * cannot be described in 4 range entries. */
+int bcm63xx_acp_init(void)
+{
+	uint32_t addr_in[4], addr_out[4];
+	uint8_t size_shift[4], dst_pid[4];
+	int entry_use;
+	struct zone *acp_zone = &NODE_DATA(0)->node_zones[ZONE_ACP];
+
+	printk("BCM63XX ACP: zone_acp start at 0x%08lx of size %d MB\n",
+		(acp_zone->zone_start_pfn << PAGE_SHIFT),
+		CONFIG_BCM_ACP_MEM_SIZE);
+
+	/* four scratch entries, cleared before conversion */
+	memset(addr_in, 0, sizeof(uint32_t) << 2);
+	memset(addr_out, 0, sizeof(uint32_t) << 2);
+	memset(size_shift, 0, sizeof(uint8_t) << 2);
+	memset(dst_pid, 0, sizeof(uint8_t) << 2);
+	entry_use = ubus_cfg_convert(acp_zone->zone_start_pfn << PAGE_SHIFT,
+		CONFIG_BCM_ACP_MEM_SIZE * SZ_1M, addr_in, addr_out, dst_pid,
+		size_shift);
+
+	if (entry_use == -1)
+		return -EPERM;
+
+	acp_cfg_tbl_init(entry_use, addr_in, addr_out, dst_pid, size_shift);
+	acp_proc_file_init();
+
+	bcm63xx_acp_enable(BCM_UBUS_PID_RNR);
+	bcm63xx_acp_enable(BCM_UBUS_PID_RNR_RABR);
+	bcm63xx_acp_enable(BCM_UBUS_PID_RNR_RBBR);
+	bcm63xx_acp_enable(BCM_UBUS_PID_SAR);
+	bcm63xx_acp_enable(BCM_UBUS_PID_SAR2);
+	return 0;
+}
+
+/* Module exit: disable ACP on all ports enabled at init, then tear down
+ * the /proc interface and clear the cfg table. */
+void bcm63xx_acp_exit(void)
+{
+	bcm63xx_acp_disable(BCM_UBUS_PID_RNR);
+	bcm63xx_acp_disable(BCM_UBUS_PID_RNR_RABR);
+	bcm63xx_acp_disable(BCM_UBUS_PID_RNR_RBBR);
+	bcm63xx_acp_disable(BCM_UBUS_PID_SAR);
+	bcm63xx_acp_disable(BCM_UBUS_PID_SAR2);
+
+	acp_proc_file_deinit();
+	acp_cfg_tbl_deinit();
+}
+
+module_init(bcm63xx_acp_init);
+module_exit(bcm63xx_acp_exit);
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) */
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_cpufreq.c b/arch/arm/plat-bcm63xx/bcm63xx_cpufreq.c
new file mode 100644
index 0000000000000000000000000000000000000000..b417068dd92a9f874531fb1bc73cecc84abed158
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_cpufreq.c
@@ -0,0 +1,344 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/* CPU Frequency scaling support for BCM63xx ARM series */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <bcm_map_part.h>
+#include <asm/cpu.h>
+
+/* frequency in units of kHz */
+/* note: index column specified in initialization */
+/*       but may be used for other purposes */
+/* Default OPP table: integer divisors of the base PLL rate
+ * (2 GHz on 63138, 3 GHz on 63148); index holds the divisor. */
+struct cpufreq_frequency_table bcm63xx_freq_normal_table[] = {
+#if defined CONFIG_BCM963138
+/* support divisors of 2GHz */
+	{10,  200000},
+	{8,   250000},
+	{6,   333333},
+	{5,   400000},
+	{4,   500000},
+	{3,   666667},
+	{2,  1000000},
+#elif defined CONFIG_BCM963148
+/* support divisors of 3GHz */
+	{16,  187500},
+	{8,   375000},
+	{4,   750000},
+	{2,  1500000},
+#endif
+	{0, CPUFREQ_TABLE_END},
+};
+
+/* frequency is in the unit of kHz */
+/* note: index column specified in initialization */
+/*       but may be used for other purposes */
+/* Extended OPP table with additional (or lower) divisors; selectable at
+ * runtime through the "set_freq_table" sysfs attribute. */
+struct cpufreq_frequency_table bcm63xx_freq_extended_table[] = {
+#if defined CONFIG_BCM963138
+	{10,  200000},
+	{9,   222222},
+	{8,   250000},
+	{7,   285714},
+	{6,   333333},
+	{5,   400000},
+	{4,   500000},
+	{3,   666667},
+	{2,  1000000},
+#elif defined CONFIG_BCM963148
+	{32,   93750},
+//	{30,  100000},
+//	{24,  125000},
+//	{20,  150000},
+	{16,  187500},
+//	{15,  200000},
+//	{12,  250000},
+//	{10,  300000},
+	{8,   375000},
+//	{6,   500000},
+//	{5,   600000},
+	{4,   750000},
+//	{3,  1000000},
+	{2,  1500000},
+#endif
+	{0, CPUFREQ_TABLE_END},
+};
+
+/* currently active table; defaults to the normal one */
+struct cpufreq_frequency_table *bcm63xx_freq_table = bcm63xx_freq_normal_table;
+
+/* cpufreq ->verify hook: clamp the policy limits against the active
+ * frequency table. */
+int bcm63xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, bcm63xx_freq_table);
+}
+
+/* cpufreq ->get hook: read the current ARM clock rate (Hz) from the
+ * "arm_pclk" clock and return it in kHz.  All cores share this clock,
+ * so 'cpu' is not consulted. */
+unsigned int bcm63xx_cpufreq_getspeed(unsigned int cpu)
+{
+	struct clk *arm_clk;
+	unsigned long arm_freq;
+
+	arm_clk = clk_get_sys("cpu", "arm_pclk");
+	BUG_ON(IS_ERR_OR_NULL(arm_clk));
+	arm_freq = clk_get_rate(arm_clk);
+	BUG_ON(!arm_freq);
+
+	return (arm_freq / 1000);
+}
+
+/*
+ * loops_per_jiffy is not updated on SMP systems in cpufreq driver.
+ * Update the per-CPU loops_per_jiffy value on frequency transition.
+ * And don't forget to adjust the global one.
+ */
+static void adjust_jiffies(cpumask_var_t cpus, struct cpufreq_freqs *freqs)
+{
+#ifdef CONFIG_SMP
+	extern unsigned long loops_per_jiffy;
+	/* reference lpj/freq pair captured on the first transition; all
+	 * later values are scaled from it to avoid cumulative error */
+	static struct lpj_info {
+		unsigned long ref;
+		unsigned int  freq;
+	} global_lpj_ref;
+	unsigned cpu;
+
+	if (freqs->flags & CPUFREQ_CONST_LOOPS)
+		return;
+	if (freqs->old == freqs->new)
+		  return;
+	if (!global_lpj_ref.freq) {
+		global_lpj_ref.ref = loops_per_jiffy;
+		global_lpj_ref.freq = freqs->old;
+	}
+
+	loops_per_jiffy =
+		cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq, freqs->new);
+
+	/* all affected CPUs run at the same (new) rate */
+	for_each_cpu(cpu, cpus) {
+		per_cpu(cpu_data, cpu).loops_per_jiffy = loops_per_jiffy;
+	}
+#endif
+}
+
+/* cpufreq ->target hook: switch the shared ARM clock to the table entry
+ * matching target_freq, emitting PRE/POSTCHANGE notifications and
+ * rescaling loops_per_jiffy.  Returns 0 on success or the clk_set_rate
+ * error. */
+static int bcm63xx_cpufreq_target(struct cpufreq_policy *policy,
+		unsigned int target_freq,
+		unsigned int relation)
+{
+	struct cpufreq_freqs freqs;
+	unsigned int index, old_index;
+	struct clk *arm_clk;
+	int ret = 0;
+
+	freqs.old = policy->cur;
+
+	if (cpufreq_frequency_table_target(policy, bcm63xx_freq_table,
+				freqs.old, relation, &old_index))
+		return -EINVAL;
+
+	if (cpufreq_frequency_table_target(policy, bcm63xx_freq_table,
+				target_freq, relation, &index))
+		return -EINVAL;
+
+	/* already at the requested table entry */
+	if (index == old_index)
+		return 0;
+
+	freqs.new = bcm63xx_freq_table[index].frequency;
+	freqs.cpu = policy->cpu;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	arm_clk = clk_get_sys("cpu", "arm_pclk");
+	BUG_ON(IS_ERR_OR_NULL(arm_clk));
+
+	ret = clk_set_rate(arm_clk, freqs.new * 1000);
+	if (ret != 0)
+		freqs.new = freqs.old; /* report no change on failure */
+
+	adjust_jiffies(policy->cpus, &freqs);
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	
+	return ret;
+}
+
+/* sysfs store for "set_freq_table": accepts "normal" or "extended",
+ * switches the active frequency table, and re-applies the policy so the
+ * new limits take effect.  Returns 'count' on success, negative errno
+ * on bad input or policy update failure. */
+static ssize_t store_set_freq_table(struct cpufreq_policy *policy,
+		const char *buf, size_t count)
+{
+	/* fix: was 'unsigned int ret' -- negative errno from the cpufreq
+	 * helpers below was converted to a huge positive ssize_t, so
+	 * failures were reported to userspace as success */
+	int ret;
+	char str_freqtable[16];
+	struct cpufreq_policy new_policy;
+
+	ret = sscanf(buf, "%15s", str_freqtable);
+	if (ret != 1)
+		return -EINVAL;
+
+	if (!strnicmp(str_freqtable, "normal", 16)) {
+		if (bcm63xx_freq_table == bcm63xx_freq_normal_table)
+			return count; /* already active: nothing to do */
+		bcm63xx_freq_table = bcm63xx_freq_normal_table;
+	} else if (!strnicmp(str_freqtable, "extended", 16)) {
+		if (bcm63xx_freq_table == bcm63xx_freq_extended_table)
+			return count;
+		bcm63xx_freq_table = bcm63xx_freq_extended_table;
+	} else {
+		return -EINVAL;
+	}
+
+	/* update the current policy info */
+	ret = cpufreq_frequency_table_cpuinfo(policy, bcm63xx_freq_table);
+	if (ret)
+		return ret;
+	cpufreq_frequency_table_get_attr(bcm63xx_freq_table, policy->cpu);
+
+	/* to get the policy updated with the new freq_table */
+	ret = cpufreq_get_policy(&new_policy, policy->cpu);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_set_policy(policy, &new_policy);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* sysfs show for "set_freq_table": prints which table is active plus
+ * the list of selectable table names. */
+static ssize_t show_set_freq_table(struct cpufreq_policy *policy, char *buf)
+{
+	ssize_t i = 0;
+
+	if (bcm63xx_freq_table == bcm63xx_freq_normal_table)
+		i += sprintf(buf, "normal\n");
+	else if (bcm63xx_freq_table == bcm63xx_freq_extended_table)
+		i += sprintf(buf, "extended\n");
+	else
+		i += sprintf(buf, "error!\n"); /* pointer corrupted: should not happen */
+
+	i += sprintf(&buf[i], "available tables: normal, extended\n");
+	return i;
+}
+
+/* declares 'set_freq_table' attr bound to show/store above */
+cpufreq_freq_attr_rw(set_freq_table);
+
+/* Register the "set_freq_table" attribute under the policy's kobject.
+ * Returns 0 on success or the sysfs_create_file error. */
+static int bcm63xx_cpufreq_init_sysfs(struct cpufreq_policy *policy)
+{
+	/* creating the sysfs for changing freq table */
+	int ret = sysfs_create_file(&policy->kobj, &set_freq_table.attr);
+	if (ret)
+		printk("%s:fail to create sysfs for set_freq_table\n", __func__);
+
+	return ret;
+}
+
+/* cpufreq ->init hook: seed the policy from the current HW rate, set
+ * transition latency, mark all CPUs as sharing one clock, and pick a
+ * frequency table that can represent the current speed. */
+static int bcm63xx_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	int ret;
+	policy->cur = policy->min =
+		policy->max = bcm63xx_cpufreq_getspeed(policy->cpu);
+
+	/* set the transition latency value */
+#if defined CONFIG_BCM963138
+	// down 43..45us, up 82..87us
+	policy->cpuinfo.transition_latency = 40000; // ~40-90us
+#elif defined CONFIG_BCM963148
+	// down 25..75us, up 200..210us
+	policy->cpuinfo.transition_latency = 40000; // ~40-280us
+#endif
+
+	/*
+	 * In BCM63xx, all ARM CPUs are set to the same speed.
+	 * They all have the same clock source. */
+	if (num_online_cpus() == 1) {
+		cpumask_copy(policy->related_cpus, cpu_possible_mask);
+		cpumask_copy(policy->cpus, cpu_online_mask);
+	} else {
+		cpumask_setall(policy->cpus);
+	}
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, bcm63xx_freq_table);
+	if (ret != 0)
+		return ret;
+
+	/* current HW rate not representable by the normal table: try the
+	 * extended table before giving up */
+	if (policy->cur > policy->max) {
+		bcm63xx_freq_table = bcm63xx_freq_extended_table;
+		ret = cpufreq_frequency_table_cpuinfo(policy, bcm63xx_freq_table);
+		if (ret != 0) {
+			/* if unable to set up the extended cpufreq_table, then
+			 * we go back use the normal one, it should work */
+			bcm63xx_freq_table = bcm63xx_freq_normal_table;
+			ret = cpufreq_frequency_table_cpuinfo(policy, bcm63xx_freq_table);
+		}
+	}
+
+	cpufreq_frequency_table_get_attr(bcm63xx_freq_table, policy->cpu);
+
+	return ret;
+}
+
+// TODO! As for October 2013, we do not support PM yet.
+#ifdef CONFIG_PM
+/* PM suspend stub: nothing to save until PM support lands. */
+static int bcm63xx_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+
+/* PM resume stub: nothing to restore until PM support lands. */
+static int bcm63xx_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	return 0;
+}
+#endif
+
+/* sysfs attributes exported per-policy (scaling_available_frequencies) */
+static struct freq_attr *bcm63xx_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+/* cpufreq driver descriptor wiring the hooks defined above */
+static struct cpufreq_driver bcm63xx_cpufreq_driver = {
+	.flags		= CPUFREQ_STICKY,
+	.verify		= bcm63xx_cpufreq_verify_speed,
+	.target		= bcm63xx_cpufreq_target,
+	.get		= bcm63xx_cpufreq_getspeed,
+	.init		= bcm63xx_cpufreq_cpu_init,
+	.name		= "bcm63xx_cpufreq",
+	.attr		= bcm63xx_cpufreq_attr,
+	.init_sysfs	= bcm63xx_cpufreq_init_sysfs,
+#ifdef CONFIG_PM
+	.suspend	= bcm63xx_cpufreq_suspend,
+	.resume		= bcm63xx_cpufreq_resume,
+#endif
+};
+
+/* Register the driver late so clocks and cpufreq core are ready. */
+static int __init bcm63xx_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&bcm63xx_cpufreq_driver);
+}
+late_initcall(bcm63xx_cpufreq_init);
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_m2mdma.c b/arch/arm/plat-bcm63xx/bcm63xx_m2mdma.c
new file mode 100644
index 0000000000000000000000000000000000000000..c70eb590ec2e104009473a90efef7fda700b8ed1
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_m2mdma.c
@@ -0,0 +1,488 @@
+/*
+<:copyright-BRCM:2015:DUAL/GPL:standard
+
+   Copyright (c) 2015 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/bug.h>
+#include <linux/nbuff.h>
+#include "bcm_map_part.h"
+
+
+#define M2M_DMA_REG_BASE BCM_IO_ADDR(M2M_DMA_PHYS_BASE)
+
+/* Memory-mapped register layout of the 4-channel M2M DMA engine.
+ * Field order mirrors the HW register map -- do not reorder. */
+typedef struct {
+
+    uint32_t ch0_desc_status;
+    uint32_t ch1_desc_status;
+    uint32_t ch2_desc_status;
+    uint32_t ch3_desc_status;
+
+    /* per-channel descriptor slots: src, dest, id, config */
+    uint32_t ch0_src_addr;
+    uint32_t ch0_dest_addr;
+    uint32_t ch0_desc_id;
+    uint32_t ch0_dma_config;
+
+    uint32_t ch1_src_addr;
+    uint32_t ch1_dest_addr;
+    uint32_t ch1_desc_id;
+    uint32_t ch1_dma_config;
+
+    uint32_t ch2_src_addr;
+    uint32_t ch2_dest_addr;
+    uint32_t ch2_desc_id;
+    uint32_t ch2_dma_config;
+
+    uint32_t ch3_src_addr;
+    uint32_t ch3_dest_addr;
+    uint32_t ch3_desc_id;
+    uint32_t ch3_dma_config;
+
+    /* global control / status */
+    uint32_t int_clear;
+    uint32_t control;
+    uint32_t dma_status;
+    uint32_t ch_stop;
+    uint32_t desc_clear;
+
+    /* per-channel UBUS error debug registers */
+    uint32_t ch0_ubus_err_debug0;
+    uint32_t ch0_ubus_err_debug1;
+
+    uint32_t ch1_ubus_err_debug0;
+    uint32_t ch1_ubus_err_debug1;
+
+    uint32_t ch2_ubus_err_debug0;
+    uint32_t ch2_ubus_err_debug1;
+
+    uint32_t ch3_ubus_err_debug0;
+    uint32_t ch3_ubus_err_debug1;
+
+    /* per-channel stop-address snapshots */
+    uint32_t ch0_stop_src_addr;
+    uint32_t ch0_stop_dest_addr;
+    uint32_t ch0_stop_addr_msb;
+
+    uint32_t ch1_stop_src_addr;
+    uint32_t ch1_stop_dest_addr;
+    uint32_t ch1_stop_addr_msb;
+
+    uint32_t ch2_stop_src_addr;
+    uint32_t ch2_stop_dest_addr;
+    uint32_t ch2_stop_addr_msb;
+
+    uint32_t ch3_stop_src_addr;
+    uint32_t ch3_stop_dest_addr;
+    uint32_t ch3_stop_addr_msb;
+
+    uint32_t ch0_status_id_fifo;
+    uint32_t ch1_status_id_fifo;
+    uint32_t ch2_status_id_fifo;
+    uint32_t ch3_status_id_fifo;
+
+    uint32_t spare0;
+    uint32_t spare1;
+    uint32_t spare2;
+} bcm_m2m_dma_reg_t;
+
+#define M2M_DMA_REG ((volatile bcm_m2m_dma_reg_t * const) M2M_DMA_REG_BASE)
+
+
+#define MAX_ASYNC_DMA_CHNLS 4
+#define MAX_M2M_CHNL_QUEUE_DEPTH 8
+
+/* one HW descriptor slot (maps onto a channel's 4 descriptor regs) */
+typedef struct {
+    volatile uint32_t src_addr;
+    volatile uint32_t dest_addr;
+    volatile uint32_t desc_id;
+    volatile uint32_t dma_config;
+}m2m_dma_desc_t;
+
+
+/* per-channel bookkeeping: HW register pointers plus the SW-side
+ * descriptor id counter and cached free-slot count */
+typedef struct {
+    m2m_dma_desc_t *dma_desc;
+    volatile uint32_t *desc_status;
+    uint32_t enable_mask;
+    uint16_t desc_id;
+    uint8_t chnl_idx;
+    uint8_t avail_desc;
+}m2m_dma_chanl_t;
+
+/* driver-wide state: channels, their shared lock, and the round-robin
+ * cursor used to pick the next channel */
+typedef struct {
+    m2m_dma_chanl_t async_chnls[MAX_ASYNC_DMA_CHNLS];
+    spinlock_t  async_chnl_lock;
+    uint8_t cur_async_chnl_idx;
+}bcm_m2m_dma_t;
+
+static bcm_m2m_dma_t bcm_m2m_dma;
+
+
+#define M2M_UBUS_BURST_SIZE_128 0x100000  /*128 bytes*/
+#define M2M_DMA_LEN_MASK        0x0FFFFF
+
+/* DMA channels enable mask with 1 outstanding UBUS request */
+
+#define DMA_CHANL0_ENABLE_MASK 0x01
+#define DMA_CHANL1_ENABLE_MASK 0x02
+#define DMA_CHANL2_ENABLE_MASK 0x004
+#define DMA_CHANL3_ENABLE_MASK 0x008
+
+/* guards channel selection and desc_id allocation */
+#define M2M_ASYNC_LOCK()    spin_lock(&bcm_m2m_dma.async_chnl_lock)
+#define M2M_ASYNC_UNLOCK()  spin_unlock(&bcm_m2m_dma.async_chnl_lock)
+
+
+/*
+ * Find a DMA channel with a free descriptor slot, scanning round-robin
+ * from the channel after the last one used; returns NULL if none has
+ * room.  Caller must hold the async channel lock.
+ */
+static inline m2m_dma_chanl_t * get_free_dma_channel_async(void)
+{
+    uint8_t chnl = bcm_m2m_dma.cur_async_chnl_idx;
+    m2m_dma_chanl_t *m2m_dma_chnl;
+    int i;
+
+    /* here <= is needed to check the starting channel twice before returning NULL
+     * (the second visit sees the fresh HW count read on the first pass) */
+    for(i=0; i<=MAX_ASYNC_DMA_CHNLS; i++)
+    {
+        chnl = (chnl+1) % MAX_ASYNC_DMA_CHNLS;
+        m2m_dma_chnl = &bcm_m2m_dma.async_chnls[chnl];
+        if(m2m_dma_chnl->avail_desc)
+        {
+            //printk("channel=%d, avail_desc=%d\n",chnl, m2m_dma_chnl->avail_desc); 
+            m2m_dma_chnl->avail_desc--;
+            bcm_m2m_dma.cur_async_chnl_idx = chnl; 
+            return m2m_dma_chnl;
+        }
+
+        /*read num of free descriptors from HW and update the avail_desc*/
+        m2m_dma_chnl->avail_desc = *m2m_dma_chnl->desc_status & 0xFF;
+    }
+    return NULL;
+}
+
+/*
+ * Check whether a given transaction, and all transactions queued before
+ * it, have completed.  Returns 1 when done, 0 when still pending.
+ *
+ * id: bits 0-15  desc_id
+ * id: bits 16-17 channel number
+ */
+static inline int bcm_m2m_is_async_dma_done(uint32_t id)
+{
+    volatile uint32_t busy;
+    int i;
+    uint16_t cur_desc_id;
+    uint16_t desc_id;
+    uint8_t chnl = (id >> 16) & (MAX_ASYNC_DMA_CHNLS-1);
+
+    /* first check if M2M is idle */
+    busy = M2M_DMA_REG->dma_status & 0xf;
+    if(!busy)
+        return 1;
+
+
+    /* here given an id we need to find out if the corresponding transaction and
+     * all the transactions before it in other channels are completed
+     *
+     * each channel maintains its own desc_id, but since transactions are
+     * scheduled in round robin fashion among channels,
+     * for the channels before a given chnl we expect cur_desc_id >= desc_id
+     * and for the channels after a given chnl we expect cur_desc_id >= desc_id-1
+     *
+     * any holes will be caught by the M2M idle check
+     */
+
+    busy=0;
+    /* fix: iterate exactly MAX_ASYNC_DMA_CHNLS channels -- the previous
+     * '<=' bound indexed async_chnls[MAX_ASYNC_DMA_CHNLS], one element
+     * past the end of the array */
+    for(i=0; i<MAX_ASYNC_DMA_CHNLS; i++)
+    {
+        cur_desc_id = *bcm_m2m_dma.async_chnls[i].desc_status >> 16;
+
+        desc_id = id & 0xFFFF; /* 16 bits */
+        if(i > chnl)
+        {
+            desc_id--;
+        }
+
+        if(cur_desc_id < desc_id)
+        {
+            busy=1;
+            break;
+        }
+        else if((cur_desc_id + MAX_M2M_CHNL_QUEUE_DEPTH) >= desc_id)
+        {
+            /* NOTE(review): this branch fires whenever the previous test
+             * failed (cur_desc_id >= desc_id), so per-channel completion
+             * is never reported here and only the idle check above can
+             * return done.  Looks like an attempted 16-bit rollover
+             * guard -- confirm the intended wrap condition. */
+            busy=1;
+            break;
+        }
+    }
+
+    return (!busy);
+}
+
+#define MAX_WAIT_LOOP_COUNT 50000
+
+
+/* bcm_m2m_wait_for_complete:
+ *
+ * given a transaction this function checks if the corresponding DMA transaction
+ * and all the transactions before it are completed
+ *
+ * desc_id - DMA transaction to check
+ *
+ * returns non-zero value if DMA is complete, zero if DMA is still pending
+ */
+int bcm_m2m_wait_for_complete(uint32_t desc_id)
+{
+    int i = MAX_WAIT_LOOP_COUNT +1;
+
+    /* bounded poll: don't wait indefinitely */
+    while(--i && !bcm_m2m_is_async_dma_done(desc_id));
+
+    if(i == 0)
+    {
+        printk(KERN_WARNING"%s: M2M transaction %x has not yet completed\n", __func__, desc_id);
+    }
+
+    return i;
+}
+EXPORT_SYMBOL(bcm_m2m_wait_for_complete);
+
+/* Program one HW descriptor slot; writing dma_config last kicks off the
+ * transfer (id, addresses must be written before it). */
+static inline void queue_m2m_transfer(m2m_dma_desc_t *dma_desc, uint32_t phys_dest,
+        uint32_t phys_src, uint32_t desc_id, uint16_t len)
+{
+    dma_desc->src_addr = phys_src;
+    dma_desc->dest_addr = phys_dest;
+    dma_desc->desc_id = desc_id;
+    dma_desc->dma_config = M2M_UBUS_BURST_SIZE_128 | (len & M2M_DMA_LEN_MASK);
+}
+
+/* caller must ensure len is maximum of 16 bits only */
+/* caller must ensure src & dest are in contiguous physical memory */
+/* Core async copy: grab a free channel slot under the lock and queue
+ * the transfer; if no slot is free, either spin retrying or fall back
+ * to a CPU memcpy when the destination is cache-line aligned.  Returns
+ * (channel_idx << 16) | desc_id for later completion polling. */
+static inline uint32_t __bcm_m2m_dma_memcpy_async(void *dest, void *src, uint16_t len)
+{
+    m2m_dma_chanl_t *m2m_dma_chnl;
+    uint32_t phys_src;
+    uint32_t phys_dest;
+    uint32_t desc_id;
+
+    phys_src =  virt_to_phys(src);
+    phys_dest = virt_to_phys(dest);
+
+    M2M_ASYNC_LOCK();
+    
+    do{
+
+        m2m_dma_chnl = get_free_dma_channel_async();
+
+        if(m2m_dma_chnl)
+        {
+            desc_id = m2m_dma_chnl->desc_id++;
+
+            /* HW carries desc_id in the upper 16 bits of the id reg */
+            queue_m2m_transfer(m2m_dma_chnl->dma_desc, phys_dest, phys_src,
+                    desc_id<<16, len);
+
+            M2M_ASYNC_UNLOCK();
+        }
+        else
+        {
+            /* Instead of waiting fallback to memcpy if cache lines are
+             * not shared by dest. This check is needed to avoid corruption
+             * when both DMA & CPU try to use same cache line 
+             */
+            if(!(((uint32_t)dest & (L2_CACHE_LINE_SIZE - 1)) || (len % L2_CACHE_LINE_SIZE)))
+            {
+                /*get a channel pointer -needed just for a desc_id;
+                 * desc_id-1 refers to an already-queued transaction so
+                 * completion polling succeeds immediately */
+                m2m_dma_chnl = &bcm_m2m_dma.async_chnls[bcm_m2m_dma.cur_async_chnl_idx];
+                desc_id = m2m_dma_chnl->desc_id -1;
+
+                M2M_ASYNC_UNLOCK();
+        
+
+                memcpy(dest, src, len);
+                /*flush dest to make it look like DMA copy to caller */
+                dma_map_single(NULL, dest, len, DMA_TO_DEVICE);
+            }
+            /* else: keep the lock and retry until a slot frees up */
+        }
+    } while(!m2m_dma_chnl);
+
+    return ((m2m_dma_chnl->chnl_idx << 16) | desc_id );
+}
+
+/* bcm_m2m_dma_memcpy_async:
+ * use this function with cached memory
+ * here we flush src & invalidate dest before scheduling the transfer
+ *
+ *
+ * dest - virtual address of destination
+ * src  - virtual address of source
+ * len  - length of data to be copied
+ *
+ * this function expects src & dest to be in contiguous physical memory
+ *
+ * returns a transaction id for the DMA operation,
+ * copy is not complete on return, caller has to explicitly check if
+ * transaction is completed
+ */ 
+uint32_t bcm_m2m_dma_memcpy_async(void *dest, void *src, uint16_t len)
+{
+    /* TODO do we need to call dma_unmap_single for NULL device */
+    dma_map_single(NULL, src,  len, DMA_TO_DEVICE);
+    dma_map_single(NULL, dest, len, DMA_FROM_DEVICE);
+    
+    return __bcm_m2m_dma_memcpy_async(dest, src, len);
+}
+EXPORT_SYMBOL(bcm_m2m_dma_memcpy_async);
+
+
+/* bcm_m2m_dma_memcpy_async_no_flush:
+ * use this function with cached memory
+ * here there is no cache invalidate of dest, use this function
+ * when you are sure that dest is not in the cache
+ *
+ * dest - virtual address of destination
+ * src  - virtual address of source
+ * len  - length of data to be copied
+ *
+ * this function expects src & dest to be in contiguous physical memory
+ *
+ * returns a transaction id for the DMA operation,
+ * copy is not complete on return, caller has to explicitly check if
+ * transaction is completed
+ */ 
+uint32_t bcm_m2m_dma_memcpy_async_no_flush(void *dest, void *src, uint16_t len)
+{
+    /* TODO do we need to call dma_unmap_single for NULL device */
+    /* src is still flushed; only the dest invalidate is skipped */
+    dma_map_single(NULL, src,  len, DMA_TO_DEVICE);
+    
+    return __bcm_m2m_dma_memcpy_async(dest, src, len);
+}
+EXPORT_SYMBOL(bcm_m2m_dma_memcpy_async_no_flush);
+
+/* bcm_m2m_dma_memcpy_async_no_flush_inv:
+ * here there is no cache flush of src, and also no invalidate on dest;
+ * use this when you are sure that src is not dirty in cache & dest is
+ * not in cache
+ * dest - virtual address of destination
+ * src  - virtual address of source
+ * len  - length of data to be copied
+ *
+ * this function expects src & dest to be in contiguous physical memory
+ *
+ * returns a transaction id for the DMA operation,
+ * copy is not complete on return, caller has to explicitly check if
+ * transaction is completed
+ */
+uint32_t bcm_m2m_dma_memcpy_async_no_flush_inv(void *dest, void *src, uint16_t len)
+{
+    return __bcm_m2m_dma_memcpy_async(dest, src, len);
+}
+EXPORT_SYMBOL(bcm_m2m_dma_memcpy_async_no_flush_inv);
+
+
+/* bcm_m2m_dma_memcpy_async_uncached:
+ * use with uncached memory, caller has to pass physical addresses
+ *
+ * phys_dest - physical address of destination
+ * phys_src  - physical address of source
+ * len       - length of data to be copied
+ *
+ *
+ * returns a transaction id for the DMA operation,
+ * copy is not complete on return, caller has to explicitly check if
+ * transaction is completed
+ */
+uint32_t bcm_m2m_dma_memcpy_async_uncached(uint32_t phys_dest, uint32_t phys_src, uint16_t len)
+{
+    m2m_dma_chanl_t *m2m_dma_chnl;
+    uint32_t desc_id;
+
+    M2M_ASYNC_LOCK();  /* held until a transfer is queued on some channel */
+
+    do
+    {
+        m2m_dma_chnl = get_free_dma_channel_async();
+
+        if(m2m_dma_chnl)
+        {
+            desc_id = m2m_dma_chnl->desc_id++;
+
+            queue_m2m_transfer(m2m_dma_chnl->dma_desc, phys_dest, phys_src,
+                    desc_id<<16, len);
+
+            M2M_ASYNC_UNLOCK();
+        }
+
+    } while(!m2m_dma_chnl);  /* busy-waits (lock still held) for a free channel */
+
+    return ((m2m_dma_chnl->chnl_idx << 16) | desc_id );  /* id: channel in bits 31..16, desc id below */
+}
+EXPORT_SYMBOL(bcm_m2m_dma_memcpy_async_uncached);
+ 
+static __init int bcm_m2m_dma_init(void) /* set up the four M2M DMA channels */
+{
+    spin_lock_init(&bcm_m2m_dma.async_chnl_lock);  /* protects async channel state */
+
+    bcm_m2m_dma.async_chnls[0].dma_desc = (m2m_dma_desc_t *)&M2M_DMA_REG->ch0_src_addr;
+    bcm_m2m_dma.async_chnls[0].desc_status = &M2M_DMA_REG->ch0_desc_status;
+    bcm_m2m_dma.async_chnls[0].chnl_idx = 0;
+    bcm_m2m_dma.async_chnls[0].desc_id = 0;
+    bcm_m2m_dma.async_chnls[0].avail_desc = M2M_DMA_REG->ch0_desc_status & 0xFF;  /* low byte = available descriptors */
+    bcm_m2m_dma.async_chnls[0].enable_mask = DMA_CHANL0_ENABLE_MASK;
+
+    bcm_m2m_dma.async_chnls[1].dma_desc = (m2m_dma_desc_t *)&M2M_DMA_REG->ch1_src_addr;
+    bcm_m2m_dma.async_chnls[1].desc_status = &M2M_DMA_REG->ch1_desc_status;
+    bcm_m2m_dma.async_chnls[1].chnl_idx = 1;
+    bcm_m2m_dma.async_chnls[1].desc_id = 0;
+    bcm_m2m_dma.async_chnls[1].avail_desc = M2M_DMA_REG->ch1_desc_status & 0xFF;
+    bcm_m2m_dma.async_chnls[1].enable_mask = DMA_CHANL1_ENABLE_MASK;
+
+    bcm_m2m_dma.async_chnls[2].dma_desc = (m2m_dma_desc_t *)&M2M_DMA_REG->ch2_src_addr;
+    bcm_m2m_dma.async_chnls[2].desc_status = &M2M_DMA_REG->ch2_desc_status;
+    bcm_m2m_dma.async_chnls[2].chnl_idx = 2;
+    bcm_m2m_dma.async_chnls[2].desc_id = 0;
+    bcm_m2m_dma.async_chnls[2].avail_desc = M2M_DMA_REG->ch2_desc_status & 0xFF;
+    bcm_m2m_dma.async_chnls[2].enable_mask = DMA_CHANL2_ENABLE_MASK;
+
+    bcm_m2m_dma.async_chnls[3].dma_desc = (m2m_dma_desc_t *)&M2M_DMA_REG->ch3_src_addr;
+    bcm_m2m_dma.async_chnls[3].desc_status = &M2M_DMA_REG->ch3_desc_status;
+    bcm_m2m_dma.async_chnls[3].chnl_idx = 3;
+    bcm_m2m_dma.async_chnls[3].desc_id = 0;
+    bcm_m2m_dma.async_chnls[3].avail_desc = M2M_DMA_REG->ch3_desc_status & 0xFF;
+    bcm_m2m_dma.async_chnls[3].enable_mask = DMA_CHANL3_ENABLE_MASK;
+
+    bcm_m2m_dma.cur_async_chnl_idx=0;
+
+    /* enable all four DMA channels */
+    M2M_DMA_REG->control = DMA_CHANL0_ENABLE_MASK | DMA_CHANL1_ENABLE_MASK
+        | DMA_CHANL2_ENABLE_MASK | DMA_CHANL3_ENABLE_MASK;
+
+    printk(KERN_DEBUG "+++ Successfully registered M2M DMA\n");
+    return 0;
+}
+
+arch_initcall(bcm_m2m_dma_init);
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_pcie.c b/arch/arm/plat-bcm63xx/bcm63xx_pcie.c
new file mode 100644
index 0000000000000000000000000000000000000000..75dea18d74b2a3e3c71d9b70c5249d65fb9a1c43
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_pcie.c
@@ -0,0 +1,74 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifdef CONFIG_BRCM_PCIE_PLATFORM
+
+/* current linux kernel doesn't support pci bus rescan if we
+ * power-down then power-up pcie.
+ *
+ * work-around by saving pci configuration after initial scan and
+ * restoring it every time we repower pcie (implemented by module
+ * init routine)
+ *
+ * module exit function powers down pcie
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <bcm_map_part.h>
+#include <pmc_pcie.h>
+
+extern void bcm63xx_pcie_aloha(int hello);
+
+static __init int bcm_mod_init(void)
+{
+
+	/* first invocation: save pci configuration
+	 * subsequent: repower and restore configuration
+	 */
+	bcm63xx_pcie_aloha(1);
+
+	return 0;
+}
+
+static void __exit bcm_mod_exit(void)
+{
+	/* module unload: power down pcie */
+	bcm63xx_pcie_aloha(0);
+}
+
+module_init(bcm_mod_init);
+module_exit(bcm_mod_exit);
+
+MODULE_LICENSE("GPL");
+
+#endif
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_sata.c b/arch/arm/plat-bcm63xx/bcm63xx_sata.c
new file mode 100644
index 0000000000000000000000000000000000000000..fe5339e9a89187a2cf417cb49a9471f6425f2863
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_sata.c
@@ -0,0 +1,256 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ ****************************************************************************
+ * File Name  : bcm63xx_sata.c
+ *
+ * Description: This file contains the initialization and registration routines
+ * to enable sata controller on bcm63xxx boards.
+ *
+ *
+ ***************************************************************************/
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/bug.h>
+#include <linux/ahci_platform.h>
+
+#include <bcm_intr.h>
+#include <bcm_map_part.h>
+#include <pmc_sata.h>
+
+/* macros to read/write registers, with memory barriers to avoid reordering; note: BDEV_RD expands to two statements, so use it only as a full statement */
+#define BDEV_RD(x)      (*((volatile unsigned *)(x))); mb()
+#define BDEV_WR(x, y)   do { *((volatile unsigned *)(x)) = (y); mb(); } while (0)
+
+/*TODO move these reg definitions to map_part.h */
+
+#define SATA_HBA_BASE_ADDR          SATA_BASE
+
+#define SATA_TOP_CTRL               (SATA_HBA_BASE_ADDR+0x0040)
+#define SATA_PORT0_PCB              (SATA_HBA_BASE_ADDR+0x0100)
+#define SATA_AHCI_BASE              (SATA_HBA_BASE_ADDR+0x2000)
+#define SATA_AHCI_GHC               (SATA_HBA_BASE_ADDR+0x2000)
+#define SATA_AHCI_PORT0_S1          (SATA_HBA_BASE_ADDR+0x2100)
+
+#define SATA_AHCI_GHC_PHYS          (SATA_PHYS_BASE+0x2000)
+
+#define SATA_MEM_SIZE               0x00002000
+
+/* SATA_TOP_CTRL registers */
+#define SATA_TOP_CTRL_BUS_CTRL      (SATA_TOP_CTRL+0x04)
+
+/* SATA_PORT0_AHCI_S1 registers */
+#define SATA_PORT0_AHCI_S1_PXIS     (SATA_AHCI_PORT0_S1+0x10)
+#define SATA_PORT0_AHCI_S1_PXIE     (SATA_AHCI_PORT0_S1+0x14)
+#define SATA_PORT0_AHCI_S1_PXCMD    (SATA_AHCI_PORT0_S1+0x18)
+
+/* GHC regs */
+#define GHC_HBA_CAP                 (SATA_AHCI_GHC+0x00) /* host capabilities */
+#define GHC_GLOBAL_HBA_CONTROL      (SATA_AHCI_GHC+0x04) /* global host control */
+#define GHC_INTERRUPT_STATUS        (SATA_AHCI_GHC+0x08) /* interrupt status */
+#define GHC_PORTS_IMPLEMENTED       (SATA_AHCI_GHC+0x0c) /* bitmap of implemented ports */
+#define GHC_HOST_VERSION            (SATA_AHCI_GHC+0x10) /* AHCI spec. version compliancy */
+
+/* Phy reg */
+#define PORT0_SATA3_PCB_REG0        (SATA_PORT0_PCB+0x0200)
+#define PORT0_SATA3_PCB_REG1        (SATA_PORT0_PCB+0x0204)
+#define PORT0_SATA3_PCB_REG2        (SATA_PORT0_PCB+0x0208)
+#define PORT0_SATA3_PCB_REG3        (SATA_PORT0_PCB+0x020c)
+#define PORT0_SATA3_PCB_REG4        (SATA_PORT0_PCB+0x0210)
+#define PORT0_SATA3_PCB_REG5        (SATA_PORT0_PCB+0x0214)
+#define PORT0_SATA3_PCB_REG6        (SATA_PORT0_PCB+0x0218)
+#define PORT0_SATA3_PCB_REG7        (SATA_PORT0_PCB+0x021c)
+#define PORT0_SATA3_PCB_REG8        (SATA_PORT0_PCB+0x0220)
+#define PORT0_SATA3_PCB_BLOCK_ADDR  (SATA_PORT0_PCB+0x023C)
+
+#define PCB_REG(x) (uint32_t)(PORT0_SATA3_PCB_REG0 + x*4)
+
+#define SATA3_TXPMD_REG_BANK    0x01a0
+
+static void write_2_pcb_block(unsigned reg_addr, unsigned value, unsigned pcb_block)  /* write 'value' to 'reg_addr' within PCB bank 'pcb_block' */
+{
+    BDEV_WR(PORT0_SATA3_PCB_BLOCK_ADDR, pcb_block);  /* select PCB bank first */
+    BDEV_WR(reg_addr, value);
+}
+
+static unsigned read_from_pcb_block(unsigned reg_addr, unsigned pcb_block)  /* read 'reg_addr' from PCB bank 'pcb_block' */
+{
+    unsigned int value;
+    BDEV_WR(PORT0_SATA3_PCB_BLOCK_ADDR, pcb_block);  /* select PCB bank first */
+    value = BDEV_RD(reg_addr);
+    return value;
+}
+
+static __init void GetFreqLock( void )
+{
+    uint32_t regData;
+    int i = 10;  /* max polls for PLL lock */
+
+    printk("writing PORT0_SATA3_PCB_BLOCK_ADDR\n");
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG7, 0x873, 0x60);
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG6, 0xc000, 0x60);
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG1, 0x3089, 0x50);
+    udelay(100);
+    write_2_pcb_block(PORT0_SATA3_PCB_REG1, 0x3088, 0x50);
+    udelay(1000);
+    /* Done with PLL ratio change and re-tuning */
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG2, 0x3000, 0xE0);
+    write_2_pcb_block(PORT0_SATA3_PCB_REG6, 0x3000, 0xE0);
+
+    udelay(1000);
+    write_2_pcb_block(PORT0_SATA3_PCB_REG3, 0x32, 0x50);
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG4, 0xA, 0x50);
+
+    write_2_pcb_block(PORT0_SATA3_PCB_REG6, 0x64, 0x50);
+
+    udelay(1000);
+    BDEV_WR(PORT0_SATA3_PCB_BLOCK_ADDR, 0x00);  /* back to bank 0 */
+    wmb();
+
+    regData = BDEV_RD(PORT0_SATA3_PCB_REG1);
+
+    /* poll up to 10 times, 1ms apart, for the lock bit (bit 12) */
+    while (i && ((regData & 0x1000) == 0))
+    {
+        regData = BDEV_RD(PORT0_SATA3_PCB_REG1);
+        udelay(1000);
+        i--;
+    }
+    printk("INFO: PLL lock for port0 detected %0x...\n", regData); /* NOTE(review): printed even when the poll timed out (i == 0) */
+}
+
+static __init void sata_sim_init(void)
+{
+    BDEV_WR(GHC_GLOBAL_HBA_CONTROL, 0x80000001);  /* bit31|bit0 -- AHCI enable + HBA reset */
+    mdelay(1);
+    BDEV_WR(GHC_GLOBAL_HBA_CONTROL, 0x80000000);  /* release reset, keep AHCI enable */
+    mdelay(10);
+
+    BDEV_WR(SATA_PORT0_AHCI_S1_PXIS, 0x7fffffff); /* clear port interrupt status (W1C) */
+    BDEV_WR(GHC_INTERRUPT_STATUS, 0x7fffffff);    /* clear global interrupt status (W1C) */
+    BDEV_WR(SATA_PORT0_AHCI_S1_PXIE, 0x7fffffff); /* enable all port interrupts */
+
+    BDEV_WR(SATA_PORT0_AHCI_S1_PXCMD, 0x00000010);
+    /* setup endianness */
+    BDEV_WR(SATA_TOP_CTRL_BUS_CTRL, 0x00000000);
+}
+
+static void bcm_dev_release(struct device *dev)  /* platform_device .release hook */
+{
+    put_device(dev->parent);  /* NOTE(review): assumes a ref was taken on dev->parent elsewhere -- verify */
+}
+
+static struct resource bcm_ahci_resource[] = {
+    [0] = {
+        .start  = SATA_AHCI_GHC_PHYS,
+        .end    = SATA_AHCI_GHC_PHYS + SATA_MEM_SIZE - 1,
+        .flags  = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start  = INTERRUPT_ID_SATAC,
+        .end    = INTERRUPT_ID_SATAC,
+        .flags  = IORESOURCE_IRQ,
+    },
+};
+
+static u64 bcm_ahci_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device bcm_ahci_pdev = {
+    .name       = "strict-ahci",
+    .id         = 0,
+    .resource   = bcm_ahci_resource,
+    .num_resources      = ARRAY_SIZE(bcm_ahci_resource),
+    .dev                = {
+        .dma_mask               = &bcm_ahci_dmamask,
+        .coherent_dma_mask      = DMA_BIT_MASK(32),
+        .release                = bcm_dev_release,
+    },
+};
+
+static __init int bcm_add_sata(void)
+{
+    printk("++++ Powering up SATA block\n");
+
+    pmc_sata_power_up();
+    mdelay(1);
+
+    GetFreqLock();   /* bring up / lock the SATA3 PHY PLL */
+    mdelay(1);
+
+    sata_sim_init(); /* reset HBA, clear + enable interrupts */
+    mdelay(1);
+
+    /*enable SSC */
+    {
+        int rvalue;
+
+        rvalue = read_from_pcb_block(PCB_REG(1), SATA3_TXPMD_REG_BANK);
+        rvalue |= 0x3;  /* set the two low bits (SSC enable) */
+        write_2_pcb_block( PCB_REG(1), rvalue, SATA3_TXPMD_REG_BANK);
+    }
+
+
+    if(platform_device_register(&bcm_ahci_pdev))
+    {
+        printk(KERN_ERR "++++ Failed to add platform device for SATA \n");  /* NOTE(review): failure is logged but 0 is still returned */
+    }
+    return 0;
+}
+
+#if defined CONFIG_SATA_AHCI_MODULE
+static void bcm_mod_cleanup(void)
+{
+    platform_device_del(&bcm_ahci_pdev);
+    pmc_sata_power_down();
+    mdelay(1);
+}
+
+module_init(bcm_add_sata);
+module_exit(bcm_mod_cleanup);
+
+MODULE_LICENSE("GPL");
+#else
+arch_initcall(bcm_add_sata);
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_timer.c b/arch/arm/plat-bcm63xx/bcm63xx_timer.c
new file mode 100644
index 0000000000000000000000000000000000000000..060f98629beab0251d2adb3ca6d936c2b7c9c1f2
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_timer.c
@@ -0,0 +1,195 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * BCM63xx SoC timer implementation based on external PERIPH Timer
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <plat/bcm63xx_timer.h>
+#include <bcm_ext_timer.h>
+
+/*
+ * timer implementations for clocksource and clockevent
+ * We will use 2 PERIPH timers, one for clocksource and one for
+ * clockevent.
+ */
+int timer_cs_used = -1;		/* for clock source */
+int timer_ce_used = -1;		/* for clock event */
+#define PERIPH_TIMER_CLK_FREQ	50000	/* in KHz, value is 50MHz */
+/* the below timer value will convert into the larger timercount supported
+ * in PERIPH_TIMER */
+#define PERIPH_TIMER_PERIOD_MAX	(22 * 1000 * 1000) /* 22 sec, unit is usec */
+
+static notrace cycle_t bcm63xx_read_timer_count(struct clocksource *cs)
+{
+	int ret;
+	unsigned int count;
+	/* no clocksource timer has been allocated yet */
+	if (timer_cs_used == -1)
+		return 0;
+
+	ret = ext_timer_read_count(timer_cs_used, &count);
+	if (ret == 0)
+		return (cycle_t)count;
+	else
+		return 0;	/* read failed; report 0 rather than garbage */
+}
+
+static struct clocksource bcm63xx_clocksource = {
+	.name = "timer_cs",
+	.rating = 350,
+	.read = bcm63xx_read_timer_count,
+	.mask = CLOCKSOURCE_MASK(30), 
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+
+};
+
+static void __init periph_timer_clocksource_init(void)
+{
+	/* already set up? (idempotent guard) */
+	if (timer_cs_used != -1)
+		return;
+
+	timer_cs_used = ext_timer_alloc(-1, PERIPH_TIMER_PERIOD_MAX, NULL, 0);	/* -1: presumably "any free timer" -- confirm */
+
+	/* cannot allocate timer, just quit.  Shouldn't happen! */
+	if (timer_cs_used == -1)
+		return;
+
+	ext_timer_start(timer_cs_used);	/* free-running counter for the clocksource */
+
+	/* bcm63xx_clocksource->shift/mult will be computed by the following
+	 * register function */
+	clocksource_register_khz(&bcm63xx_clocksource, PERIPH_TIMER_CLK_FREQ);
+}
+
+void timer_set_mode(enum clock_event_mode mode,
+		struct clock_event_device *clk)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		if (timer_ce_used == -1)	/* fix: was timer_cs_used -- this function drives the clockevent timer */
+			return;
+		ext_timer_stop(timer_ce_used);
+
+		/* set up timer based on HZ given, unit is usec */
+		ext_timer_set_period(timer_ce_used, 1000000/HZ);
+
+		ext_timer_set_mode(timer_ce_used, EXT_TIMER_MODE_PERIODIC);
+
+		ext_timer_start(timer_ce_used);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* timer is set and enabled in 'set_next_event' hook */
+		break;
+	case CLOCK_EVT_MODE_RESUME:
+		ext_timer_start(timer_ce_used);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		ext_timer_stop(timer_ce_used);	/* fall through */
+	default:
+		break;
+	}
+}
+
+int timer_set_next_event(unsigned long cycle,
+		struct clock_event_device *unused)
+{
+	/* fix: was timer_cs_used -- this function programs the clockevent timer */
+	if (timer_ce_used == -1)
+		return -ENODEV;
+	/* stop the timer will clear the residual counter */
+	ext_timer_stop(timer_ce_used);
+
+	ext_timer_set_count(timer_ce_used, cycle);
+
+	ext_timer_set_mode(timer_ce_used, EXT_TIMER_MODE_ONESHOT);
+
+	ext_timer_start(timer_ce_used);
+
+	return 0;
+}
+
+const unsigned int cpu_0_mask = 0x1;
+
+static struct clock_event_device clockevent_timer = {
+	.name = "timer_ce",
+	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode = timer_set_mode,
+	.set_next_event = timer_set_next_event,
+	.rating = 200,
+};
+/* ext-timer callback: param carries the clock_event_device pointer */
+void clock_event_callback(unsigned int param)
+{
+	struct clock_event_device *evt = (struct clock_event_device *)param;	/* pointer squeezed through unsigned int (32-bit only) */
+	evt->event_handler(evt);
+}
+
+static void __init periph_timer_clockevent_init(void)
+{
+	/* already set up? (idempotent guard) */
+	if (timer_ce_used != -1)
+		return;
+
+	timer_ce_used = ext_timer_alloc_only(-1,
+			(ExtTimerHandler)&clock_event_callback,
+			(unsigned int)&clockevent_timer);
+
+	/* cannot allocate timer, just quit.  Shouldn't happen! */
+	if (timer_ce_used == -1)
+		return;
+
+	clockevent_timer.cpumask = cpumask_of(0);	/* events delivered on CPU 0 */
+
+	/* clockevents_config_and_register(dev, freq, min_delta, max_delta)
+	 * freq is in the unit of Hz
+	 * min_delta: minimum clock tick to program in oneshot mode
+	 * max_delta: maximum clock tick to program in oneshot mode */
+	clockevents_config_and_register(&clockevent_timer,
+			PERIPH_TIMER_CLK_FREQ * 1000, 0, 0x3fffffff);
+}
+
+void __init bcm63xx_timer_init(void)
+{
+	init_hw_timers();	/* bring up the PERIPH ext-timer block first */
+
+	periph_timer_clocksource_init();
+
+	periph_timer_clockevent_init();
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/bcm63xx_usb.c b/arch/arm/plat-bcm63xx/bcm63xx_usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..634ad817fc66faadf5cfb9a680fa288d05f00db9
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/bcm63xx_usb.c
@@ -0,0 +1,321 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard 
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ ****************************************************************************
+ * File Name  : bcm63xx_usb.c
+ *
+ * Description: This file contains the initialization and registration routines
+ * to enable USB controllers on bcm63xxx boards. 
+ *
+ *
+ ***************************************************************************/
+
+#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/bug.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <pmc_usb.h>
+
+#include <boardparms.h>
+
+extern void bcm_set_pinmux(unsigned int pin_num, unsigned int mux_num);
+
+
+#define CAP_TYPE_EHCI       0x00
+#define CAP_TYPE_OHCI       0x01
+#define CAP_TYPE_XHCI       0x02
+
+/*TODO double check the values for these 2 structures */
+static struct usb_ehci_pdata bcm_ehci_pdata = {
+    .caps_offset         = 0,
+    .has_tt              = 0,
+    .has_synopsys_hc_bug = 0,
+    .port_power_off      = 0,
+};
+
+static struct usb_ohci_pdata bcm_ohci_pdata = {};
+
+static struct platform_device *xhci_dev;
+static struct platform_device *ehci_dev;
+static struct platform_device *ohci_dev;
+
+
+static __init struct platform_device *bcm_add_usb_host(int type, int id,
+                        uint32_t mem_base, uint32_t mem_size, int irq,
+                        const char *devname, void *private_data)
+{
+    struct resource res[2];
+    struct platform_device *pdev;
+    /* 32-bit DMA mask shared by all USB host controllers */
+    static const u64 usb_dmamask = 0xffffffff;
+
+    memset(&res, 0, sizeof(res));
+    res[0].start = mem_base;
+    res[0].end   = mem_base + (mem_size -1);
+    res[0].flags = IORESOURCE_MEM;
+
+    res[1].flags = IORESOURCE_IRQ;
+    res[1].start = res[1].end = irq;
+
+    pdev = platform_device_alloc(devname, id);
+    if(!pdev)
+    {
+        printk(KERN_ERR "Error Failed to allocate platform device for devname=%s id=%d\n",
+                devname, id);
+        return 0;
+    }
+
+    platform_device_add_resources(pdev, res, 2);
+
+    pdev->dev.dma_mask = (u64 *)&usb_dmamask;
+    pdev->dev.coherent_dma_mask = 0xffffffff;
+
+    if(private_data)
+    {
+        pdev->dev.platform_data = private_data;
+    }
+
+    if(platform_device_add(pdev))
+    {
+        printk(KERN_ERR "Error Failed to add platform device for devname=%s id=%d\n", devname, id);
+        platform_device_put(pdev); /* fix: drop the ref from platform_device_alloc() -- was leaked */
+        return 0;
+    }
+
+    return pdev;
+}
+
+#if defined(CONFIG_BCM963138)
+static void bcm63138B0_manual_usb_ldo_start(void)
+{
+    USBH_CTRL->pll_ctl &= ~(1 << 30); /*pll_resetb=0*/
+    USBH_CTRL->utmi_ctl_1 = 0; 
+    USBH_CTRL->pll_ldo_ctl = 4; /*ldo_ctl=core_rdy */
+    USBH_CTRL->pll_ctl |= ( 1 << 31); /*pll_iddq=1*/
+    mdelay(10);
+    USBH_CTRL->pll_ctl &= ~( 1 << 31); /*pll_iddq=0*/
+    USBH_CTRL->pll_ldo_ctl |= 1; /*ldo_ctl.AFE_LDO_PWRDWNB=1*/
+    USBH_CTRL->pll_ldo_ctl |= 2; /*ldo_ctl.AFE_BG_PWRDWNB=1*/
+    mdelay(1);
+    USBH_CTRL->utmi_ctl_1 = 0x00020002;/* utmi_resetb &ref_clk_sel=0; */ 
+    USBH_CTRL->pll_ctl |= ( 1 << 30); /*pll_resetb=1*/
+    mdelay(10);
+}    
+
+
+#define XHCI_ECIRA_BASE USB_XHCI_BASE + 0xf90
+
+uint32_t xhci_ecira_read(uint32_t reg)
+{
+    volatile uint32_t *addr;
+    uint32_t value;
+
+    addr = (uint32_t *)(XHCI_ECIRA_BASE + 8);
+    *addr =reg;
+
+    addr = (uint32_t *)(XHCI_ECIRA_BASE + 0xc);
+    value = *addr; 
+
+    return value;
+}
+
+void xhci_ecira_write(uint32_t reg, uint32_t value)
+{
+
+    volatile uint32_t *addr;
+
+    addr = (uint32_t *)(XHCI_ECIRA_BASE + 8);
+    *addr =reg;
+
+    addr = (uint32_t *)(XHCI_ECIRA_BASE + 0xc);
+    *addr =value; 
+}
+
+static void bcm63138B0_usb3_erdy_nump_bypass(void)
+{
+    uint32_t value;
+
+    value = xhci_ecira_read(0xa20c);
+    value |= 0x10000;
+    xhci_ecira_write(0xa20c, value);
+}
+
+#endif
+
+#define MDIO_USB2   0
+#define MDIO_USB3   (1 << 31)
+
+static uint32_t usb_mdio_read(volatile uint32_t *mdio, uint32_t reg, int mode)
+{
+    uint32_t data;
+
+    data = (reg << 16) | mode;
+    mdio[0] = data;
+    data |= (1 << 24);
+    mdio[0] = data;    /* assert bit 24 (read strobe?) -- TODO confirm */
+    mdelay(1);
+    data &= ~(1 << 24);    /* NOTE(review): cleared bit is never written back, unlike the write path -- confirm intent */
+    mdelay(1);
+
+    return (mdio[1] & 0xffff);
+}
+
+static void usb_mdio_write(volatile uint32_t *mdio, uint32_t reg, uint32_t val, int mode)
+{
+    uint32_t data;
+    data = (reg << 16) | val | mode;
+    *mdio = data;
+    data |= (1 << 25);    /* assert bit 25 (write strobe?) -- TODO confirm */
+    *mdio = data;
+    mdelay(1);
+    data &= ~(1 << 25);
+    *mdio = data;    /* deassert the strobe */
+}
+
+static void usb2_eye_fix(void)
+{
+    /* Updating USB 2.0 PHY registers */
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x1f, 0x80a0, MDIO_USB2);
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x0a, 0xc6a0, MDIO_USB2);
+}
+
+static void usb3_ssc_enable(void)
+{
+    uint32 val;
+
+    /* Enable USB 3.0 TX spread spectrum */
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x1f, 0x8040, MDIO_USB3);
+    val = usb_mdio_read((void *)&USBH_CTRL->mdio, 0x01, MDIO_USB3) | 0x0f;
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x01, val, MDIO_USB3);
+
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x1f, 0x9040, MDIO_USB3);
+    val = usb_mdio_read((void *)&USBH_CTRL->mdio, 0x01, MDIO_USB3) | 0x0f;
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x01, val, MDIO_USB3);
+}
+
+static __init int bcm_add_usb_hosts(void)
+{
+    /* power up the USB blocks, tune the PHYs, then register XHCI/EHCI/OHCI */
+     short usb_gpio;
+
+     printk("++++ Powering up USB blocks\n");
+   
+    if(pmc_usb_power_up(PMC_USB_HOST_ALL))
+    {
+        printk(KERN_ERR "+++ Failed to Power Up USB Host\n");
+        return -1;
+    }
+    mdelay(1);
+
+    /*initialize XHCI settings*/
+#if defined(CONFIG_BCM963138)
+    bcm63138B0_manual_usb_ldo_start();
+    USBH_CTRL->usb_pm |= XHC_SOFT_RESETB;
+    USBH_CTRL->usb30_ctl1 &= ~PHY3_PLL_SEQ_START;
+#else
+    USBH_CTRL->usb30_ctl1 |= USB3_IOC;
+    USBH_CTRL->usb30_ctl1 |= XHC_SOFT_RESETB;
+#endif
+
+    USBH_CTRL->usb30_ctl1 |= PHY3_PLL_SEQ_START;
+
+#if defined(CONFIG_BCM963138)
+     bcm63138B0_usb3_erdy_nump_bypass();
+#endif
+
+    /*adjust the default AFE settings for better eye diagrams */
+     usb2_eye_fix();
+
+    /*enable SSC for usb3.0 */
+     usb3_ssc_enable();
+
+    /*initialize EHCI & OHCI settings*/
+    USBH_CTRL->bridge_ctl &= ~(EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP);
+    USBH_CTRL->setup |= (USBH_IOC);
+    USBH_CTRL->setup |= (USBH_IPP);
+    if(BpGetUsbPwrFlt0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) !=  BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IOC);  /* power-fault pin not active-low: clear IOC -- TODO confirm */
+       }
+    }
+    if(BpGetUsbPwrOn0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) != BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IPP);  /* power-on pin not active-low: clear IPP -- TODO confirm */
+       }
+    }
+
+    /* NOTE(review): a NULL return (add failure) is not treated as an error here */
+    xhci_dev = bcm_add_usb_host(CAP_TYPE_XHCI, 0, USB_XHCI_PHYS_BASE,
+        0x1000, INTERRUPT_ID_USB_XHCI, "xhci-hcd", NULL);
+    ehci_dev = bcm_add_usb_host(CAP_TYPE_EHCI, 0, USB_EHCI_PHYS_BASE,
+        0x100, INTERRUPT_ID_USB_EHCI, "ehci-platform", &bcm_ehci_pdata);
+    ohci_dev = bcm_add_usb_host(CAP_TYPE_OHCI, 0, USB_OHCI_PHYS_BASE,
+        0x100, INTERRUPT_ID_USB_OHCI, "ohci-platform", &bcm_ohci_pdata);
+
+    return 0;
+}
+
+#if defined CONFIG_USB_MODULE || defined CONFIG_USB_XHCI_HCD_MODULE
+static void bcm_mod_cleanup(void)
+{
+    // we want to just disable usb interrupts and power down usb
+    // we'll probably be restart later, re-add resources ok then?
+    platform_device_del(xhci_dev);
+    platform_device_del(ehci_dev);
+    platform_device_del(ohci_dev);
+    pmc_usb_power_down(PMC_USB_HOST_ALL);
+    mdelay(1);
+}
+
+module_init(bcm_add_usb_hosts);
+module_exit(bcm_mod_cleanup);
+
+MODULE_LICENSE("GPL");
+#else
+arch_initcall(bcm_add_usb_hosts);
+#endif
+
+#endif /* defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) */
diff --git a/arch/arm/plat-bcm63xx/ca9mp_cache.S b/arch/arm/plat-bcm63xx/ca9mp_cache.S
new file mode 100644
index 0000000000000000000000000000000000000000..aa1ba916bf1ac9ab4f96430f73167627d626cfff
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/ca9mp_cache.S
@@ -0,0 +1,136 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__CPUINIT
+
+/*
+ * v7_l1_cache_invalidate
+ *
+ * Invalidate contents of L1 cache without flushing its contents
+ * into outer cache and memory. This is needed when the contents
+ * of the cache are unpredictable after power-up.
+ *
+ * Walks every set/way of the level-1 data cache issuing DCISW,
+ * then invalidates the entire I-cache.
+ *
+ * corrupts r0-r6
+ */
+
+ENTRY(v7_l1_cache_invalidate)
+        mov     r0, #0
+        mcr     p15, 2, r0, c0, c0, 0	@ CSSELR: select level-1 data cache
+        mrc     p15, 1, r0, c0, c0, 0	@ read CCSIDR of the selected cache
+
+        ldr     r1, =0x7fff
+        and     r2, r1, r0, lsr #13	@ get max # of index size
+
+        ldr     r1, =0x3ff
+        and     r3, r1, r0, lsr #3	@ NumWays - 1
+        add     r2, r2, #1		@ NumSets
+
+        and     r0, r0, #0x7
+        add     r0, r0, #4		@ SetShift (log2 of the line length)
+
+        clz     r1, r3			@ WayShift
+        add     r4, r3, #1		@ NumWays
+1:      sub     r2, r2, #1		@ NumSets--
+        mov     r3, r4			@ Temp = NumWays
+2:      subs    r3, r3, #1		@ Temp--
+        mov     r5, r3, lsl r1
+        mov     r6, r2, lsl r0
+        orr     r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+        mcr     p15, 0, r5, c7, c6, 2	@ DCISW: invalidate this set/way
+        bgt     2b			@ flags still from "subs" (mcr preserves them)
+        cmp     r2, #0
+        bgt     1b
+        dsb
+        mov     r0,#0
+        mcr     p15,0,r0,c7,c5,0                /* ICIALLU: invalidate entire icache */
+        isb
+        mov     pc, lr
+ENDPROC(v7_l1_cache_invalidate)
+
+	__CPUINIT
+/*
+ * v7_all_dcache_invalidate
+ *
+ * Invalidate without flushing the contents of all cache levels
+ * accessible by the current processor core.
+ * This is useful when the contents of cache memory are undetermined
+ * at power-up.
+ *	Corrupted registers: r0-r7, r9-r11
+ *
+ * Based on cache-v7.S: v7_flush_dcache_all()
+ */
+
+ENTRY(v7_all_dcache_invalidate)
+	mrc	p15, 1, r0, c0, c0, 1	@ read clidr
+	ands	r3, r0, #0x7000000	@ extract loc from clidr
+	mov	r3, r3, lsr #23		@ left align loc bit field
+	beq	finished		@ if loc is 0, then no need to clean
+	mov	r10, #0			@ start clean at cache level 0
+loop1:
+	add	r2, r10, r10, lsr #1	@ work out 3x current cache level
+	mov	r1, r0, lsr r2		@ extract cache type bits from clidr
+	and	r1, r1, #7		@ mask of bits for current cache only
+	cmp	r1, #2			@ see what cache we have at this level
+	blt	skip			@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
+	isb				@ isb to sync the new cssr&csidr
+	mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
+	and	r2, r1, #7		@ extract the length of the cache lines
+	add	r2, r2, #4		@ add 4 (line length offset)
+	ldr	r4, =0x3ff
+	ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
+	clz	r5, r4			@ find bit pos of way size increment
+	ldr	r7, =0x7fff
+	ands	r7, r7, r1, lsr #13	@ extract max number of the index size
+loop2:
+	mov	r9, r4			@ create working copy of max way size
+loop3:
+ 	orr	r11, r10, r9, lsl r5	@ factor way and cache number into r11
+ 	orr	r11, r11, r7, lsl r2	@ factor index number into r11
+        mcr     p15, 0, r11, c7, c6, 2	@ DCISW: invalidate set/way (no writeback)
+	subs	r9, r9, #1		@ decrement the way
+	bge	loop3
+	subs	r7, r7, #1		@ decrement the index
+	bge	loop2
+skip:
+	add	r10, r10, #2		@ increment cache number
+	cmp	r3, r10
+	bgt	loop1
+finished:
+	mov	r10, #0			@ switch back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
+	dsb
+	isb
+	mov	pc, lr
+ENDPROC(v7_all_dcache_invalidate)
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/ca9mp_core.c b/arch/arm/plat-bcm63xx/ca9mp_core.c
new file mode 100644
index 0000000000000000000000000000000000000000..abbf6a922988267546785a41b5f4f02b8c6f939e
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/ca9mp_core.c
@@ -0,0 +1,122 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+* ARM Cortex A9 MPCORE Platform base
+*/
+
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+#include <linux/ioport.h>
+#include <linux/cpumask.h>
+#include <linux/irq.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/gic.h>
+#include <mach/hardware.h>
+#include <plat/ca9mpcore.h>
+#include <bcm_map_part.h>
+
+/* Return the virtual base of the Cortex-A9 MPCore private (SCU) registers */
+void __iomem * scu_base_addr(void)
+{
+	return __io_address(SCU_PHYS_BASE + CA9MP_SCU_OFF);
+}
+
+void __init ca9mp_fixup(void)
+{
+	/* Placeholder for any fixup that needs to be done for the
+	 * processor, such as cache invalidation.  Currently empty. */
+}
+
+/* map_io should be called first, so we have the register base
+ * address for the core before anything else touches it. */
+void __init ca9mp_map_io(void)
+{
+	struct map_desc desc;
+
+#if 0
+	/*
+	 * Cortex A9 Architecture Manual specifies this as a way to get
+	 * MPCORE PERHIPHBASE address at run-time
+	 */
+	asm("mrc p15,4,%0,c15,c0,0 @ Read Configuration Base Address Register" 
+			: "=&r" (base_addr) : : "cc");
+
+	printk(KERN_INFO "CA9 MPCORE found at %p\n", (void *)base_addr); 
+#endif
+
+	/* Fix-map the entire PERIPHBASE 2*4K register block */
+	desc.virtual = IO_ADDRESS(SCU_PHYS_BASE);
+	desc.pfn = __phys_to_pfn(SCU_PHYS_BASE);
+	desc.length = SZ_8K;
+	desc.type = MT_DEVICE;
+	iotable_init(&desc, 1);
+}
+
+/* Initialize the GIC distributor and boot-CPU interface */
+void __init ca9mp_init_gic(void)
+{
+	printk(KERN_INFO "Cortex A9 MPCORE GIC init\n");
+	printk(KERN_INFO "DIST at %p, CPU_IF at %p\n",
+			(void *)IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GIC_DIST_OFF,
+			(void *)IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GIC_CPUIF_OFF);
+
+	// FIXME!! hard-coded value (27) below for the interrupt line#; it
+	// should be defined in a header file for all the different chips
+	gic_init(0, 27, (void *)IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GIC_DIST_OFF,
+			(void *)IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GIC_CPUIF_OFF);
+
+	//irq_set_handler(CA9MP_IRQ_GLOBALTIMER, handle_percpu_irq);
+	/* try it.. handle_edge_irq, handle_percpu_irq, or handle_level_irq */
+}
+
+void __init ca9mp_init_early(void)
+{
+	/* NOP - nothing to set up this early on this platform */
+}
+
+/*
+ * For SMP - initialize GIC CPU interface for secondary cores.
+ * Called on each secondary CPU as it comes online.
+ */
+void __cpuinit ca9mp_cpu_init(void)
+{
+	/* Initialize the GIC CPU interface for the next processor */
+	gic_secondary_init(0);
+#if 0
+	gic_cpu_init(0, (void *)IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GIC_CPUIF_OFF);
+#endif
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/ca9mp_timer.c b/arch/arm/plat-bcm63xx/ca9mp_timer.c
new file mode 100644
index 0000000000000000000000000000000000000000..0fac1bc57451102d4e06b5b4ebd1c1fec2304fb6
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/ca9mp_timer.c
@@ -0,0 +1,317 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/sched_clock.h>
+#include <asm/localtimer.h>
+#include <asm/smp_twd.h>
+
+#include <plat/ca9mpcore.h>
+#include <bcm_map_part.h>
+
+/*
+ * The A9 MPCORE Global Timer is a continuously-running 64-bit timer,
+ * which is used both as a "clock source" and as a "clock event" -
+ * there is a banked per-cpu compare and reload registers that are
+ * used to generated either one-shot or periodic interrupts on the cpu
+ * that calls the mode_set function.
+ *
+ * NOTE: This code does not support dynamic change of the source clock
+ * frequency. The interrupt interval is only calculated once during
+ * initialization.
+ */
+
+/*
+ * Global Timer Registers
+ */
+#define	GTIMER_COUNT_LO		0x00	/* Lower 32 of 64 bits counter */
+#define	GTIMER_COUNT_HI		0x04	/* Higher 32 of 64 bits counter */
+#define	GTIMER_CTRL		0x08	/* Control (partially banked) */
+#define	GTIMER_CTRL_EN		(1<<0)	/* Timer enable bit */
+#define	GTIMER_CTRL_CMP_EN	(1<<1)	/* Comparator enable */
+#define	GTIMER_CTRL_IRQ_EN	(1<<2)	/* Interrupt enable */
+#define	GTIMER_CTRL_AUTO_EN	(1<<3)	/* Auto-increment enable */
+#define	GTIMER_INT_STAT		0x0C	/* Interrupt Status (banked) */
+#define	GTIMER_COMP_LO		0x10	/* Lower half comparator (banked) */
+#define	GTIMER_COMP_HI		0x14	/* Upper half comparator (banked) */
+#define	GTIMER_RELOAD		0x18	/* Auto-increment (banked) */
+
+#define	GTIMER_MIN_RANGE	30	/* Minimum wrap-around time in sec */
+
+#define GTIMER_VIRT_ADDR	(IO_ADDRESS(SCU_PHYS_BASE) + CA9MP_GTIMER_OFF)
+#define LTIMER_PHY_ADDR		(SCU_PHYS_BASE + CA9MP_LTIMER_OFF)
+
+/* Global variables */
+static u32 ticks_per_jiffy;	/* clockevent period for HZ at current rate */
+
+/* Read the free-running 64-bit global counter.
+ * @cs is unused; internal callers pass NULL. */
+static cycle_t gptimer_count_read(struct clocksource *cs)
+{
+	u32 count_hi, count_ho, count_lo;
+	u64 count;
+
+	/* Avoid unexpected rollover with double-read of upper half:
+	 * retry until the high word is stable around the low-word read */
+	do {
+		count_hi = readl_relaxed(GTIMER_VIRT_ADDR + GTIMER_COUNT_HI);
+		count_lo = readl_relaxed(GTIMER_VIRT_ADDR + GTIMER_COUNT_LO);
+		count_ho = readl_relaxed(GTIMER_VIRT_ADDR + GTIMER_COUNT_HI);
+	} while (count_hi != count_ho);
+
+	count = (u64)count_hi << 32 | count_lo;
+	return count;
+}
+
+/* Clocksource backed by the 64-bit continuous global timer counter */
+static struct clocksource clocksource_gptimer = {
+	.name		= "ca9mp_gtimer",
+	.rating		= 300,
+	.read		= gptimer_count_read,
+	.mask		= CLOCKSOURCE_MASK(64),
+//	.shift		= 20,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/* sched_clock backend: low 32 bits of the counter, see the matching
+ * setup_sched_clock(..., 32, rate) call in ca9mp_gtimer_init() */
+static notrace u32 brcm_sched_clock_read(void)
+{
+	return clocksource_gptimer.read(&clocksource_gptimer);
+}
+
+/*
+ * IRQ handler for the global timer
+ * This interrupt is banked per CPU so is handled identically
+ */
+static irqreturn_t gtimer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	/* In one-shot mode clear the enable bit so the comparator does
+	 * not fire again before the next set_next_event() */
+	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+		u32 ctrl = readl_relaxed(GTIMER_VIRT_ADDR + GTIMER_CTRL);
+		ctrl &= ~GTIMER_CTRL_EN;
+		writel_relaxed(ctrl, GTIMER_VIRT_ADDR + GTIMER_CTRL);
+	}
+	/* clear the interrupt */
+	writel_relaxed(1, GTIMER_VIRT_ADDR + GTIMER_INT_STAT);
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+/* clockevents set_mode hook for the global timer */
+static void gtimer_set_mode(enum clock_event_mode mode,
+		struct clock_event_device *evt)
+{
+	u32 ctrl = 0, period;
+	u64 count;
+
+	/* By default, when we enter this function, we can just stop
+	 * the timer completely, once a mode is selected, then we
+	 * can start the timer at that point. */
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		period = ticks_per_jiffy;
+		count = gptimer_count_read(NULL);
+		count += period;
+		/* ctrl is still 0 here: stop the timer while the comparator
+		 * and auto-increment registers are programmed */
+		writel_relaxed(ctrl, GTIMER_VIRT_ADDR + GTIMER_CTRL);
+		writel_relaxed(count & 0xffffffffUL, GTIMER_VIRT_ADDR + GTIMER_COMP_LO);
+		writel_relaxed(count >> 32, GTIMER_VIRT_ADDR + GTIMER_COMP_HI);
+		writel_relaxed(period, GTIMER_VIRT_ADDR + GTIMER_RELOAD);
+		ctrl = GTIMER_CTRL_EN | GTIMER_CTRL_CMP_EN |
+				GTIMER_CTRL_IRQ_EN | GTIMER_CTRL_AUTO_EN;
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		/* leave ctrl == 0: timer stays stopped */
+		break;
+	}
+	/* Apply the new mode */
+	writel_relaxed(ctrl, GTIMER_VIRT_ADDR + GTIMER_CTRL);
+}
+
+/* clockevents set_next_event hook: fire once, @next ticks from now */
+static int gtimer_set_next_event(unsigned long next,
+		struct clock_event_device *evt)
+{
+	u32 ctrl = readl_relaxed(GTIMER_VIRT_ADDR + GTIMER_CTRL);
+	u64 count = gptimer_count_read(NULL);
+
+	/* Disable comparison while the 64-bit comparator is updated */
+	ctrl &= ~GTIMER_CTRL_CMP_EN;
+	writel_relaxed(ctrl, GTIMER_VIRT_ADDR + GTIMER_CTRL);
+
+	count += next;
+
+	writel_relaxed(count & 0xffffffffUL, GTIMER_VIRT_ADDR + GTIMER_COMP_LO);
+	writel_relaxed(count >> 32, GTIMER_VIRT_ADDR + GTIMER_COMP_HI);
+
+	/* enable IRQ for the same cpu that loaded comparator */
+	ctrl |= GTIMER_CTRL_EN | GTIMER_CTRL_CMP_EN | GTIMER_CTRL_IRQ_EN;
+
+	writel_relaxed(ctrl, GTIMER_VIRT_ADDR + GTIMER_CTRL);
+
+	return 0;
+}
+
+/* Clockevent device backed by the banked global-timer comparator */
+static struct clock_event_device gtimer_clockevent = {
+	.name		= "ca9mp_gtimer",
+	.shift		= 20,
+	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= gtimer_set_mode,
+	.set_next_event	= gtimer_set_next_event,
+	.rating		= 300,
+};
+
+/* Two views of the same storage: the per-cpu pointer table handed to
+ * request_percpu_irq() and a plain event-device pointer */
+static union {
+	struct clock_event_device *evt;
+	struct clock_event_device __percpu **percpu_evt;
+} brcm_evt;
+
+
+/*
+ * Register the global timer as a clockevent device and hook up its
+ * banked per-cpu interrupt.
+ * @rate: source clock frequency in Hz.
+ */
+static void __init gtimer_clockevents_init(u32 rate)
+{
+	struct clock_event_device *evt = &gtimer_clockevent;
+	int res;
+
+	evt->irq = CA9MP_IRQ_GLOBALTIMER;
+	evt->cpumask = cpumask_of(0);
+
+#ifdef CONFIG_BCM63138_SIM
+	/* simulation: shorten the jiffy period 20x to speed runs up */
+	ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ) / 20;
+#else
+	ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+#endif
+
+	clockevents_calc_mult_shift(evt, rate, GTIMER_MIN_RANGE);
+
+	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
+	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+
+	/* Register the device to install handler before enabling IRQ */
+	clockevents_register_device(evt);
+
+	brcm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
+	if (!brcm_evt.percpu_evt) {
+		pr_err("alloc_percpu failed for %s\n", evt->name);
+		/* bail out rather than dereference a NULL per-cpu pointer */
+		return;
+	}
+	*__this_cpu_ptr(brcm_evt.percpu_evt) = evt;
+	res = request_percpu_irq(evt->irq, gtimer_interrupt, evt->name,
+			brcm_evt.percpu_evt);
+	if (!res) {
+		/* success is not an error - log at info level */
+		pr_info("request_percpu_irq succeeds for %s\n", evt->name);
+		enable_percpu_irq(evt->irq, 0);
+	} else
+		pr_err("request_percpu_irq fails! for %s\n", evt->name);
+}
+
+/* Recompute the clockevent mult/shift and delta limits for @rate;
+ * mirrors the calculation done in gtimer_clockevents_init() */
+static void inline gtimer_clockevents_updatefreq_hz(u32 rate)
+{
+	struct clock_event_device *evt = &gtimer_clockevent;
+
+	/* there is an API called clockevents_update_freq which does
+	 * almost identical task as what we do here */
+#ifdef CONFIG_BCM63138_SIM
+        ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ) / 20;
+#else
+        ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+#endif
+
+        clockevents_calc_mult_shift(evt, rate, GTIMER_MIN_RANGE);
+
+	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
+	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+}
+
+/*
+ * MPCORE Global Timer initialization function
+ */
+static void __init ca9mp_gtimer_init(unsigned long rate)
+{
+	u64 count;
+	int res;
+
+	printk(KERN_INFO "MPCORE Global Timer Clock %luHz\n", rate);
+
+	/* Register as system timer */
+	gtimer_clockevents_init(rate);
+
+	/* Self-test the timer is running: sample the counter here and
+	 * check below that it advanced while we registered */
+	count = gptimer_count_read(NULL);
+
+	/* Register as time source */
+	res = clocksource_register_hz(&clocksource_gptimer, rate);
+	if (res)
+		printk("%s:clocksource_register failed!\n", __func__);
+	/* sched_clock uses only the low 32 bits of the counter */
+	setup_sched_clock(brcm_sched_clock_read, 32, rate);
+
+	count = gptimer_count_read(NULL) - count;
+	if (count == 0)
+		printk(KERN_CRIT "MPCORE Global Timer Dead!!\n");
+}
+
+#ifdef CONFIG_HAVE_ARM_TWD
+/* Per-cpu local timer (TWD) inside the MPCore private register block */
+static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, LTIMER_PHY_ADDR,
+		CA9MP_IRQ_LOCALTIMER);
+
+static void __init ca9mp_twd_init(void)
+{
+	int err = twd_local_timer_register(&twd_local_timer);
+	if (err)
+		pr_err("twd_local_timer_register failed %d\n", err);
+}
+#else
+#define ca9mp_twd_init()	do {} while(0)
+#endif
+
+/* Propagate a source-clock frequency change to both the clockevent
+ * and the clocksource */
+void ca9mp_timer_update_freq(unsigned long rate)
+{
+	printk(KERN_INFO "MPCORE Global Timer Clock update to %luHz\n", rate);
+
+	gtimer_clockevents_updatefreq_hz(rate);
+
+	__clocksource_updatefreq_hz(&clocksource_gptimer, rate);
+}
+
+/* Platform entry point: bring up the global timer and the per-cpu
+ * local (TWD) timers at the given source clock rate */
+void __init ca9mp_timer_init(unsigned long rate)
+{
+	/* init global timer */
+	ca9mp_gtimer_init(rate);
+
+	/* init TWD / local timer */
+	ca9mp_twd_init();
+
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/cache-l310.c b/arch/arm/plat-bcm63xx/cache-l310.c
new file mode 100644
index 0000000000000000000000000000000000000000..982677629d153d7b3f56f501aab6d7071a8b5afa
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/cache-l310.c
@@ -0,0 +1,233 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * This L310 Cache controller code is provided with BCM5301x, it is pretty
+ * similar to what the official kernel has, besides: 1) this one removes
+ * some spinlock protections over certain atomic access and 2) register
+ * ISR fo L2 cache (which does nothing just print out the interrupt
+ * occurs.  Therefore, this code is kept here in case there is a performance
+ * improvement requirement, then we can try this code.
+ */
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-l2x0.h>	/* Old register offsets */
+
+#define CACHE_LINE_SIZE		32
+
+static void __iomem *l2x0_base;		/* virtual base of the controller */
+static DEFINE_SPINLOCK(l2x0_lock);	/* guards background (by-way) ops */
+static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
+int l2x0_irq = 32 ;			/* default; overridden in l310_init() */
+
+/* Spin until the background-operation bits in @reg clear */
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* wait for the operation to complete */
+	while (readl_relaxed(reg) & mask)
+		;
+}
+
+/*
+ * Atomic operations
+ *
+ * The following are atomic operations:
+ * . Clean Line by PA or by Set/Way.
+ * . Invalidate Line by PA.
+ * . Clean and Invalidate Line by PA or by Set/Way.
+ * . Cache Sync.
+ * These operations stall the slave ports until they are complete.
+ * When these registers are read, bit [0], the C flag,
+ * indicates that a background operation is in progress.
+ * When written, bit 0 must be zero.
+ */
+
+/* Drain the controller's buffers (cache sync) */
+static inline void atomic_cache_sync(void __iomem *base)
+{
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
+}
+
+/* Clean (write back) one line identified by physical address */
+static inline void atomic_clean_line(void __iomem *base, unsigned long addr)
+{
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+/* Invalidate (discard) one line identified by physical address */
+static inline void atomic_inv_line(void __iomem *base, unsigned long addr)
+{
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
+}
+
+/* Clean and invalidate one line identified by physical address */
+static inline void atomic_flush_line(void __iomem *base, unsigned long addr)
+{
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+
+/*
+ * Atomic operations do not require the use of the spinlock
+ */
+
+/* outer_cache.sync hook */
+static void l2x0_cache_sync(void)
+{
+	void __iomem *base = l2x0_base;
+	atomic_cache_sync(base);
+}
+
+/* outer_cache.inv_range hook: invalidate [start, end).
+ * Partial lines at the range edges may hold live dirty data belonging
+ * to adjacent buffers, so they are cleaned+invalidated and excluded
+ * from plain invalidation (mirrors mainline cache-l2x0.c). */
+static void l2x0_inv_range(unsigned long start, unsigned long end)
+{
+	void __iomem *base = l2x0_base;
+
+	/* Range edges could contain live dirty data */
+	if (start & (CACHE_LINE_SIZE - 1)) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		atomic_flush_line(base, start);
+		start += CACHE_LINE_SIZE;
+	}
+	if (end & (CACHE_LINE_SIZE - 1)) {
+		end &= ~(CACHE_LINE_SIZE - 1);
+		/* realigning end keeps the loop below from re-invalidating
+		 * the partial line we just cleaned */
+		atomic_flush_line(base, end);
+	}
+
+	while (start < end) {
+		atomic_inv_line(base, start);
+		start += CACHE_LINE_SIZE;
+	}
+	/* drain, matching the clean/flush paths */
+	atomic_cache_sync(base);
+}
+
+/* outer_cache.clean_range hook: write back [start, end) to memory */
+static void l2x0_clean_range(unsigned long start, unsigned long end)
+{
+	void __iomem *base = l2x0_base;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+
+	while (start < end) {
+		atomic_clean_line(base, start);
+		start += CACHE_LINE_SIZE;
+	}
+	atomic_cache_sync(base);
+}
+
+/* outer_cache.flush_range hook: clean and invalidate [start, end) */
+static void l2x0_flush_range(unsigned long start, unsigned long end)
+{
+	void __iomem *base = l2x0_base;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	while (start < end) {
+		atomic_flush_line(base, start);
+		start += CACHE_LINE_SIZE;
+	}
+	atomic_cache_sync(base);
+}
+
+/*
+ * Invalidate by way is non-atomic, background operation
+ * has to be protected with the spinlock.
+ */
+static inline void l2x0_inv_all(void)
+{
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
+
+	/* invalidate all ways, wait for completion, then drain */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
+	cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
+	atomic_cache_sync(base);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+/* L2C interrupt handler: log and acknowledge whatever is pending */
+static irqreturn_t l2x0_isr(int irq, void * cookie)
+{
+	u32 reg;
+
+	/* Read pending interrupts */
+	reg = readl_relaxed(l2x0_base + L2X0_RAW_INTR_STAT);
+	/* Acknowledge the interrupts */
+	writel_relaxed(reg, l2x0_base + L2X0_INTR_CLEAR);
+	printk(KERN_WARNING "L310: interrupt bits %#x\n", reg);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe and enable the L2C-310 outer cache controller.
+ * @base:     virtual base of the controller registers
+ * @aux_val:  bits to set in the auxiliary control register
+ * @aux_mask: bits to preserve in the auxiliary control register
+ * @irq:      interrupt line for the L2C event/error interrupts
+ */
+void __init l310_init(void __iomem *base, u32 aux_val, u32 aux_mask, int irq)
+{
+	__u32 aux;
+	__u32 cache_id;
+	int ways;
+
+	l2x0_base = base;
+	l2x0_irq = irq;
+
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+	aux &= aux_mask;
+	aux |= aux_val;
+
+	/* This module only supports the L310 */
+	BUG_ON((cache_id & L2X0_CACHE_ID_PART_MASK) != L2X0_CACHE_ID_PART_L310);
+
+	/* Determine the number of ways from aux control bit 16 */
+	if (aux & (1 << 16))
+		ways = 16;
+	else
+		ways = 8;
+
+	l2x0_way_mask = (1 << ways) - 1;
+
+	/*
+	 * Check if l2x0 controller is already enabled.
+	 * If you are booting from non-secure mode
+	 * accessing the below registers will fault.
+	 */
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+
+		/* l2x0 controller is disabled */
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+
+		l2x0_inv_all();
+
+		/* enable L2X0 */
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+	}
+
+ 	/* Enable interrupts */
+ 	WARN_ON(request_irq(l2x0_irq, l2x0_isr, 0, "L2C", NULL));
+ 	writel_relaxed(0x00ff, l2x0_base + L2X0_INTR_MASK);
+
+	/* Install the outer-cache maintenance hooks */
+	outer_cache.inv_range = l2x0_inv_range;
+	outer_cache.clean_range = l2x0_clean_range;
+	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
+
+	printk(KERN_INFO "L310: cache controller enabled %d ways, "
+			"CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+			ways, cache_id, aux);
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/clock.c b/arch/arm/plat-bcm63xx/clock.c
new file mode 100644
index 0000000000000000000000000000000000000000..0818e05aed7cb7c599cde180060b3577dc5a7e6c
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/clock.c
@@ -0,0 +1,182 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * Top-level clock management API
+ * see include/linux/clk.h for description.
+ * These routines are hardware-independent,
+ * and all hardware-specific code is invoked
+ * through the "ops" methods.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <mach/clkdev.h>
+
+/*
+ * Enable a clock (and, on the first enable, its parent chain).
+ * Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): the count is atomic, but a second caller can observe
+ * ena_cnt > 1 and return before the first caller has actually enabled
+ * the hardware -- confirm whether callers need serialization here.
+ */
+int clk_enable(struct clk *clk)
+{
+	int ret;
+
+	ret = atomic_inc_return(&clk->ena_cnt);
+	if (ret > 1)
+		return 0;
+
+	/* Continue if count was moved from 0 to 1 - reentrant */
+	if (clk->parent)
+		ret = clk_enable(clk->parent);
+	else
+		ret = 0;
+
+#if !defined(CONFIG_BCM63138_SIM) && !defined(CONFIG_BCM63148_SIM)
+	if (ret == 0) {
+		if (!clk->ops || !clk->ops->enable) {
+			/* no enable hook: a fixed-rate clk counts as enabled */
+			if (clk->rate)
+				ret = 0;
+			else
+				ret = -EIO;
+		} else
+			ret = clk->ops->enable(clk);
+	}
+#endif
+
+	/* roll the reference back on failure */
+	if (ret != 0)
+		atomic_dec(&clk->ena_cnt);
+
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+/*
+ * Release one enable reference; the hardware is disabled when the
+ * last reference is dropped.
+ *
+ * The parent reference taken in clk_enable() must be released even
+ * when this clk has no disable hook: the previous early return leaked
+ * the parent's enable count for ops-less clocks.
+ */
+void clk_disable(struct clk *clk)
+{
+	int ret;
+
+	ret = atomic_dec_return(&clk->ena_cnt);
+
+	/* Continue if this is the last client to disable - reentrant */
+	if (ret > 0)
+		return;
+	BUG_ON(ret < 0);
+
+#if !defined(CONFIG_BCM63138_SIM) && !defined(CONFIG_BCM63148_SIM)
+	if (clk->ops && clk->ops->disable)
+		clk->ops->disable(clk);
+
+	/* drop the parent reference taken on first enable */
+	if (clk->parent)
+		clk_disable(clk->parent);
+#endif
+
+	return;
+}
+EXPORT_SYMBOL(clk_disable);
+
+/* Return the clock's current rate in Hz, refreshing cached values
+ * from hardware where a status hook exists */
+unsigned long clk_get_rate(struct clk *clk)
+{
+	/* Recurse to update parent's frequency */
+	if (clk->parent)
+		clk_get_rate(clk->parent);
+	/* Read hardware registers if needed */
+	if (clk->ops && clk->ops->status)
+		clk->ops->status(clk);
+	return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/* Round @rate to a value the hardware supports; -EIO when the clk
+ * has no round hook (always 0 on simulation builds) */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+#if defined(CONFIG_BCM63138_SIM) || defined(CONFIG_BCM63148_SIM)
+	return 0;
+#else
+	long ret = -EIO;
+	if (clk->ops && clk->ops->round)
+		ret = clk->ops->round(clk, rate);
+	return ret;
+#endif
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+/* Change the clock to @rate.  Success (no-op) when already at @rate;
+ * -EIO when the clk has no setrate hook.  The hook is presumably
+ * responsible for updating clk->rate -- TODO confirm. */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+#if defined(CONFIG_BCM63138_SIM) || defined(CONFIG_BCM63148_SIM)
+	return 0;
+#else
+	int ret = -EIO;
+
+	if (rate == clk->rate)
+		return 0;
+
+	if (clk->ops && clk->ops->setrate)
+		ret = clk->ops->setrate(clk, rate);
+
+	return ret;
+#endif
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+/*
+ * clk_get(), clk_put() are implemented in drivers/clk/clkdev.c
+ * but it needs these two stub functions for platform-specific operations.
+ * Return 1 on success 0 on failure.
+ */
+int __clk_get(struct clk *clk)
+{
+#if !defined(CONFIG_BCM63138_SIM) && !defined(CONFIG_BCM63148_SIM)
+	int ret;
+
+	/* the first reference also takes one on the parent chain */
+	ret = atomic_inc_return(&clk->use_cnt);
+	if (ret > 1)
+		return 1;
+	if (clk->parent)
+		return __clk_get(clk->parent);
+#endif
+	return 1;
+}
+EXPORT_SYMBOL(__clk_get);
+
+/* Drop a use reference; the last put also releases the parent chain */
+void __clk_put(struct clk *clk)
+{
+#if !defined(CONFIG_BCM63138_SIM) && !defined(CONFIG_BCM63148_SIM)
+	int ret;
+
+	ret = atomic_dec_return(&clk->use_cnt);
+	if (ret > 0)
+		return;
+
+	BUG_ON(ret < 0);
+
+	if (clk->parent)
+		__clk_put(clk->parent);
+#endif
+}
+EXPORT_SYMBOL(__clk_put);
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/hotplug.c b/arch/arm/plat-bcm63xx/hotplug.c
new file mode 100644
index 0000000000000000000000000000000000000000..57d9efba29561479dc7ba521c25f4f5cdee2b621
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/hotplug.c
@@ -0,0 +1,129 @@
+/*
+ *  linux/arch/arm/mach-realview/hotplug.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+	unsigned int v;
+
+	flush_cache_all();	/* push dirty lines out before caching is turned off below */
+	asm volatile(
+	"	mcr	p15, 0, %1, c7, c5, 0\n"	/* invalidate instruction cache (ICIALLU) */
+	"	mcr	p15, 0, %1, c7, c10, 4\n"	/* data synchronization barrier (CP15 DSB) */
+	/*
+	 * Turn off coherency
+	 */
+	"	mrc	p15, 0, %0, c1, c0, 1\n"	/* auxiliary control register */
+	"	bic	%0, %0, #0x20\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	"	mrc	p15, 0, %0, c1, c0, 0\n"	/* system control register */
+	"	bic	%0, %0, %2\n"	/* clear CR_C: disable data caching */
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	  : "=&r" (v)
+	  : "r" (0), "Ir" (CR_C)
+	  : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile(	"mrc	p15, 0, %0, c1, c0, 0\n"	/* system control register */
+	"	orr	%0, %0, %1\n"	/* set CR_C: re-enable data caching */
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	"	mrc	p15, 0, %0, c1, c0, 1\n"	/* auxiliary control register */
+	"	orr	%0, %0, #0x20\n"	/* undo the coherency-disable done in cpu_enter_lowpower() */
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	  : "=&r" (v)
+	  : "Ir" (CR_C)
+	  : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+	/*
+	 * there is no power-control hardware on this platform, so all
+	 * we can do is put the core into WFI; this is safe as the calling
+	 * code will have already disabled interrupts
+	 */
+	for (;;) {
+		/*
+		 * here's the WFI (0xe320f003 is the ARMv7 WFI opcode)
+		 */
+		asm(".word	0xe320f003\n"
+		    :
+		    :
+		    : "memory", "cc");
+
+		if (pen_release == cpu_logical_map(cpu)) {
+			/*
+			 * OK, proper wakeup: our logical id was written
+			 * to pen_release, so we're done
+			 */
+			break;
+		}
+
+		/*
+		 * Getting here, means that we have come out of WFI without
+		 * having been woken up - this shouldn't happen
+		 *
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
+		 */
+		(*spurious)++;
+	}
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+	return 1;	/* no power-off hardware: nothing to do, report success */
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+	int spurious = 0;
+
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();	/* flush caches, leave the coherency domain */
+	platform_do_lowpower(cpu, &spurious);	/* spin in WFI until pen_release names this cpu */
+
+	/*
+	 * bring this CPU back into the world of cache
+	 * coherency, and then restore interrupts
+	 */
+	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shutdown (it is still too special
+	 * e.g. clock tick interrupts)
+	 */
+	return cpu == 0 ? -EPERM : 0;	/* any other CPU may be unplugged */
+}
diff --git a/arch/arm/plat-bcm63xx/include/plat/b15core.h b/arch/arm/plat-bcm63xx/include/plat/b15core.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9f8cc16153402052ad9e41a24d59578861d9a86
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/b15core.h
@@ -0,0 +1,85 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * Broadcom ARM based on Cortex A15 CORE
+ *
+ * Platform hardware information and internal API
+ */
+
+#ifndef	__PLAT_B15CORE_H
+#define	__PLAT_B15CORE_H
+
+#include <mach/hardware.h>
+
+/* B15 CORE internally-connected IRQs */
+#define	B15_IRQ_GLOBALTIMER	27
+#define	B15_IRQ_LOCALTIMER	29
+#define B15_IRQ_WDTIMER		30
+
+/* 
+ NOTE: B15 CORE physical base is obtained at run-time,
+ while its virtual base address is set at compile-time in memory.h
+*/
+
+/* B15 CORE register offsets */
+#define	B15_SCU_OFF		0x0000	/* Coherency controller */
+#define	B15_GTIMER_OFF		0x0200	/* Global timer */
+#define	B15_LTIMER_OFF		0x0600	/* Local (private) timers */
+#define	B15_GIC_DIST_OFF	0x1000	/* Interrupt distributor registers */
+#define	B15_GIC_CPUIF_OFF	0x2000	/* Interrupt controller CPU interface */
+
+/* FIXME! the following should be fixed once we verify whether B15 and CA9 share
+ * the same timer or not */
+#define CA9MP_IRQ_GLOBALTIMER	B15_IRQ_GLOBALTIMER
+#define CA9MP_IRQ_LOCALTIMER	B15_IRQ_LOCALTIMER
+#define CA9MP_GTIMER_OFF	B15_GTIMER_OFF
+#define CA9MP_LTIMER_OFF	B15_LTIMER_OFF
+
+#ifndef __ASSEMBLY__
+
+extern void __init b15_fixup(void);
+extern void __init b15_map_io(void);
+extern void __init b15_init_gic(void);
+extern void __init b15_init_early(void);
+
+/* FIXME! the following should be fixed once we verify whether B15 and CA9 share
+ * the same timer or not */
+//extern void __init ca9mp_timer_init(unsigned long rate);
+
+extern void __iomem * scu_base_addr(void);
+extern void __cpuinit b15_power_up_cpu(int cpu_id);
+extern void __cpuinit b15_cpu_init(void);
+extern void plat_wake_secondary_cpu(unsigned cpus, void (* _sec_entry_va)(void));
+
+#endif
+
+#endif /* __PLAT_B15CORE_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/include/plat/bcm63xx_acp.h b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_acp.h
new file mode 100644
index 0000000000000000000000000000000000000000..42a8fc55afb77dd0ddd563ca682b87a4af7e78b5
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_acp.h
@@ -0,0 +1,108 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef __PLAT_BCM63XX_ACP_H
+#define __PLAT_BCM63XX_ACP_H
+#define BCM_UBUS_CFG_MAX	4
+
+typedef enum {
+#ifdef CONFIG_BCM963138
+	BCM_UBUS_PID_PCIE0 = 0x0,
+	BCM_UBUS_PID_DDR = 0x1,
+	BCM_UBUS_PID_ARMAXIACP = 0x2,
+	BCM_UBUS_PID_PERIPH = 0x3,
+	BCM_UBUS_PID_USBD = 0x4,
+	BCM_UBUS_PID_USBH = 0x5,
+	BCM_UBUS_PID_SATA = 0x6,
+	BCM_UBUS_PID_DECT = 0x7,
+	BCM_UBUS_PID_APM = 0x8,
+	BCM_UBUS_PID_VDSL = 0x9,
+	BCM_UBUS_PID_SAR = 0xa,
+	BCM_UBUS_PID_RNR = 0xb,
+	BCM_UBUS_PID_RNR_RABR = 0xc,
+	BCM_UBUS_PID_SF2 = 0xe,
+	BCM_UBUS_PID_PMC = 0xf,
+	BCM_UBUS_PID_PCIE1 = 0x10,
+	BCM_UBUS_PID_ARMAIPDAP = 0x12,
+	BCM_UBUS_PID_SAR2 = 0x1a,
+	BCM_UBUS_PID_RNR_RBBR = 0x1c,
+	BCM_UBUS_PID_ERROR = 0x1f,
+#endif
+	BCM_UBUS_PID_MAX,
+} bcm_ubus_pid_t;
+#define BCM_UBUS_PID_INVALID	0xff
+
+typedef struct {
+	uint32_t addr_in;
+	uint32_t addr_out;
+	uint8_t dst_pid;
+	uint8_t size_shift;
+	uint8_t en;
+} bcm_acp_ubus_cfg_t;
+
+typedef struct {
+	/* L2 cache policy for write, recommend value is 0xf for
+	 * cacheable WBWA, or 0x0 to disable */
+	uint8_t wcache;	
+	/* L2 cache policy for read, recommend value is 0xf for
+	 * cacheable WBWA, or 0x0 to disable */
+	uint8_t rcache;
+	/* L1 cache policy for write, recommend value is 0x1 for
+	 * cache invalidation, 0x1f for WBWA, or 0x0 to disable */
+	uint8_t wuser;
+	/* L1 cache policy for read, recommend value is 0x1 for
+	 * cache invalidation, 0x1f for WBWA, or 0x0 to disable */
+	uint8_t ruser;
+} bcm_acp_cache_ctrl_t;
+
+/* enable / disable the ACP feature for a specific block */
+int bcm63xx_acp_enable(uint8_t ubus_pid);
+int bcm63xx_acp_disable(uint8_t ubus_pid);
+
+/* check if the ACP is enabled */
+bool bcm63xx_acp_on(uint8_t ubus_pid);
+
+/* UBUS configuration setting APIs */
+int bcm63xx_acp_ubus_cfg_get_entry(uint8_t ubus_pid, uint8_t idx,
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg);
+int bcm63xx_acp_ubus_cfg_get_all(uint8_t ubus_pid,
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg);
+int bcm63xx_acp_ubus_cfg_set_entry(uint8_t ubus_pid, uint8_t idx,
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg);
+int bcm63xx_acp_ubus_cfg_set_all(uint8_t ubus_pid,
+		bcm_acp_ubus_cfg_t *acp_ubus_cfg);
+void bcm63xx_acp_ubus_cfg_reset(uint8_t ubus_pid);
+
+/* ACP port control */
+int bcm63xx_acp_cache_ctrl_get(uint8_t ubus_pid, bcm_acp_cache_ctrl_t *cache_ctrl);
+int bcm63xx_acp_cache_ctrl_set(uint8_t ubus_pid, bcm_acp_cache_ctrl_t *cache_ctrl);
+
+#endif /* __PLAT_BCM63XX_ACP_H */
+#endif /* defined(CONFIG_BCM_KF_ARM_BCM963XX) */
diff --git a/arch/arm/plat-bcm63xx/include/plat/bcm63xx_pcie.h b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_pcie.h
new file mode 100644
index 0000000000000000000000000000000000000000..a0388290e3d50ac3172072613584466903a4c99d
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_pcie.h
@@ -0,0 +1,87 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef __BCM63XX_PCIE_H
+#define __BCM63XX_PCIE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <board.h>
+#include <pmc_pcie.h>
+#include <pmc_drv.h>
+#include <shared_utils.h>
+
+#if 0
+#define DPRINT(x...)                printk(x)
+#define TRACE()                     DPRINT("%s\n",__FUNCTION__)
+#define TRACE_READ(x...)            printk(x)
+#define TRACE_WRITE(x...)           printk(x)
+#else
+#undef  DPRINT
+#define DPRINT(x...)
+#define TRACE()
+#define TRACE_READ(x...)
+#define TRACE_WRITE(x...)
+#endif
+
+/*PCI-E */
+#define BCM_BUS_PCIE_ROOT           0
+#if defined(PCIEH) && defined(PCIEH_1)
+#define NUM_CORE                    2
+#else
+#define NUM_CORE                    1
+#endif
+
+
+/*
+ * Per port control structure
+ */
+struct bcm63xx_pcie_port {
+    unsigned char * __iomem regs;   /* mapped register base of this core */
+    struct resource *owin_res;      /* outbound memory window resource */
+    unsigned int irq;
+    struct hw_pci hw_pci;
+
+    bool enabled;                   // port enabled (NOTE(review): original comment duplicated "link-up" from the field below - confirm)
+    bool link;                      // link-up
+    bool saved;                     // pci-state saved
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __BCM63XX_PCIE_H */
diff --git a/arch/arm/plat-bcm63xx/include/plat/bcm63xx_timer.h b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_timer.h
new file mode 100644
index 0000000000000000000000000000000000000000..e23fb2d0e876c2265269af7a678c5e94fb0e72b0
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/bcm63xx_timer.h
@@ -0,0 +1,36 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ASM_PLAT_BCM63XX_TIMER_H
+#define __ASM_PLAT_BCM63XX_TIMER_H
+
+void __init bcm63xx_timer_init(void);
+
+#endif /* __ASM_PLAT_BCM63XX_TIMER_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/include/plat/bsp.h b/arch/arm/plat-bcm63xx/include/plat/bsp.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc01d6708a735532f7fe63e4d82d7513809e0e94
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/bsp.h
@@ -0,0 +1,51 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * Broadcom ARM BSP
+ * Internal API declarations
+ */
+
+
+#ifndef __PLAT_BSP_H
+#define __PLAT_BSP_H
+
+struct clk;
+
+void __init soc_fixup(void);
+void __init soc_map_io(void);
+void __init soc_init_clock(void);
+void __init soc_init_irq(void);
+void __init soc_init_early(void);
+void __init soc_add_devices(void);
+void __init soc_init_timer(void);
+
+#endif /* __PLAT_BSP_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/include/plat/ca9mpcore.h b/arch/arm/plat-bcm63xx/include/plat/ca9mpcore.h
new file mode 100644
index 0000000000000000000000000000000000000000..845f026f45a77a8a62020083e7accae5063a7c96
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/ca9mpcore.h
@@ -0,0 +1,77 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * ARM A9 MPCORE
+ *
+ * Platform hardware information and internal API
+ */
+
+#ifndef	__PLAT_CA9MPCORE_H
+#define	__PLAT_CA9MPCORE_H
+
+#include <mach/hardware.h>
+
+/* MPCORE internally-connected IRQs */
+#define	CA9MP_IRQ_GLOBALTIMER	27
+#define	CA9MP_IRQ_LOCALTIMER	29
+#define CA9MP_IRQ_WDTIMER	30
+
+/* 
+ NOTE: MPCORE physical base is obtained at run-time,
+ while its virtual base address is set at compile-time in memory.h
+*/
+
+/* MPCORE register offsets */
+#define	CA9MP_SCU_OFF		0x0000	/* Coherency controller */
+#define	CA9MP_GIC_CPUIF_OFF	0x0100	/* Interrupt controller CPU interface */
+#define	CA9MP_GTIMER_OFF	0x0200	/* Global timer */
+#define	CA9MP_LTIMER_OFF	0x0600	/* Local (private) timers */
+#define	CA9MP_GIC_DIST_OFF	0x1000	/* Interrupt distributor registers */
+
+#ifndef __ASSEMBLY__
+
+extern void __init ca9mp_fixup(void);
+extern void __init ca9mp_map_io(void);
+extern void __init ca9mp_init_gic(void);
+extern void __init ca9mp_init_early(void);
+extern void __iomem * scu_base_addr(void);
+extern void __cpuinit ca9mp_cpu_init(void);
+extern void plat_wake_secondary_cpu(unsigned cpus, void (* _sec_entry_va)(void));
+
+#ifdef CONFIG_PLAT_CA9_MPCORE_TIMER
+extern void __init ca9mp_timer_init(unsigned long rate);
+extern void ca9mp_timer_update_freq(unsigned long rate);
+#endif
+
+#endif
+
+#endif /* __PLAT_CA9MPCORE_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/include/plat/clock.h b/arch/arm/plat-bcm63xx/include/plat/clock.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebd76e8221b61b6549ddb5e378401261cc752eea
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/include/plat/clock.h
@@ -0,0 +1,51 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef __ASM_PLAT_CLOCK_H
+#define __ASM_PLAT_CLOCK_H	__FILE__
+
+#define FREQ_MHZ(x)	((x)*1000*1000)
+
+struct clk;
+
+/*
+ * Operations on clocks -
+ * See <linux/clk.h> for description
+ */
+struct clk_ops {
+	int	(* enable)(struct clk *);	/* turn the clock on */
+	void	(* disable)(struct clk *);	/* turn the clock off */
+	long	(* round)(struct clk *, unsigned long);	/* nearest supported rate */
+	int	(* setrate)(struct clk *, unsigned long);	/* program a new rate */
+	/* Update current rate and return running status */
+	int	(* status)(struct clk *);
+};
+
+#endif /* __ASM_PLAT_CLOCK_H */
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/pci-bcm63xx.c b/arch/arm/plat-bcm63xx/pci-bcm63xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..c5f7ba4a5456b12fd5969e0909aa5867190b54f7
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/pci-bcm63xx.c
@@ -0,0 +1,3 @@
+/*
+ * TODO:
+ */
diff --git a/arch/arm/plat-bcm63xx/pcie-bcm63xx.c b/arch/arm/plat-bcm63xx/pcie-bcm63xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..46d98becabc2d911cb407e6e404fb8affa077448
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/pcie-bcm63xx.c
@@ -0,0 +1,868 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <board.h>
+#include <pmc_pcie.h>
+#include <pmc_drv.h>
+#include <shared_utils.h>
+#include "plat/bcm63xx_pcie.h"
+
+
+extern unsigned long getMemorySize(void);
+static int bcm63xx_pcie_get_baraddrsize_index(void);
+static struct pci_bus *bcm63xx_pcie_scan_bus(int nr, struct pci_sys_data *sys);
+static int bcm63xx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+static int bcm63xx_pcie_setup(int nr, struct pci_sys_data *sys);
+static void bcm63xx_pcie_phy_mode_config(int index);
+static void bcm63xx_pcie_config_timeouts(struct bcm63xx_pcie_port *port);
+
+/* calculate size dynamically according to the RAM
+ * 0x01 ... 64KB
+ * 0x02 ... 128KB
+ * 0x03 ... 256KB ...
+ * 0x0d ... 256MB ...
+ * 0x14 ... 32GB 
+ */
+static int bcm63xx_pcie_get_baraddrsize_index(void)
+{
+	unsigned long memsize; /*in K units*/
+	int i = 0;
+	
+	memsize = ((getMemorySize()) >> 10);	/* bytes -> KB */
+	DPRINT("getMemorySize() = %lu\n", getMemorySize());
+
+	for ( i = 0; i < PCIE_MISC_RC_BAR_CONFIG_LO_SIZE_MAX; i++) {
+		if ((64 * (1 << i)) >= memsize) {	/* smallest 64KB*2^i window covering RAM */
+			break;
+		}
+	}
+	
+	DPRINT("PCIE_MISC.RC_BAR1_CONFIG_LO.size = 0x%x\n", i + 1);
+	return (i + 1);	/* NOTE(review): returns SIZE_MAX+1 if RAM exceeds the largest entry - confirm the field tolerates it */
+}
+
+static void bcm63xx_pcie_pcie_reset(int index, bool PowerOn)
+{
+#if defined(PCIE3_CORE)
+	u32 val = __raw_readl(MISC_BASE+offsetof(Misc,miscPCIECtrl));
+
+	TRACE();
+	if(PowerOn) {
+	  val &= ~(1<<index);	/* clear the core's bit - appears to hold the core in reset; confirm miscPCIECtrl polarity */
+	  __raw_writel(val, MISC_BASE+offsetof(Misc,miscPCIECtrl));
+	  mdelay(10);
+	  bcm63xx_pcie_phy_mode_config(index);	/* program serdes workarounds while held */
+	  mdelay(10);			  
+	  val |= (1<<index);	/* set the bit back - releases the core */
+	  __raw_writel(val, MISC_BASE+offsetof(Misc,miscPCIECtrl));
+	  mdelay(10);
+	} else {
+		val &= ~(1<<index);	/* power down: leave the core's bit cleared */
+		__raw_writel(val, MISC_BASE+offsetof(Misc,miscPCIECtrl));
+	}
+	/* this is a critical delay */
+	mdelay(500);
+#endif
+}
+
+/*
+ * PCIe outbound memory window resources
+ * one entry per port
+ */
+
+static struct resource bcm63xx_pcie_owin[NUM_CORE] = {
+	{
+	.name = "bcm63xx pcie0",
+	.start = PCIEH_0_MEM_BASE,
+	.end   = PCIEH_0_MEM_BASE+PCIEH_0_MEM_SIZE-1,
+	.flags = IORESOURCE_MEM,
+	},
+#if defined(PCIEH_1)	
+	{
+	.name = "bcm63xx pcie1",
+	.start = PCIEH_1_MEM_BASE,
+	.end   = PCIEH_1_MEM_BASE+PCIEH_1_MEM_SIZE-1,
+	.flags = IORESOURCE_MEM,
+	},
+#endif	
+};
+
+/*
+ * Per port control structure (one instance per PCIe core)
+ */
+struct bcm63xx_pcie_port bcm63xx_pcie_ports[NUM_CORE] = {
+	{
+	.regs = (unsigned char * __iomem)PCIE_0_BASE, /* this is mapped address */
+	.owin_res = & bcm63xx_pcie_owin[0],	/* outbound memory window */
+	.irq = INTERRUPT_ID_PCIE0,
+	.hw_pci = {
+		.domain 	= 0,	/* one PCI domain per core */
+		.swizzle 	= pci_std_swizzle,
+		.nr_controllers = 1,
+		.setup 		= bcm63xx_pcie_setup,
+		.scan 		= bcm63xx_pcie_scan_bus,
+		.map_irq 	= bcm63xx_pcie_map_irq,
+		},
+	.enabled = 0,
+	.link = 0,
+	},
+#if defined(PCIEH_1)	
+	{
+	.regs = (unsigned char * __iomem)PCIE_1_BASE,
+	.owin_res = & bcm63xx_pcie_owin[1],
+	.irq = INTERRUPT_ID_PCIE1,
+	.hw_pci = {
+		.domain 	= 1,
+		.swizzle 	= pci_std_swizzle,
+		.nr_controllers = 1,
+		.setup 		= bcm63xx_pcie_setup,
+		.scan 		= bcm63xx_pcie_scan_bus,
+		.map_irq 	= bcm63xx_pcie_map_irq,
+		},
+	.enabled = 0,
+	.link = 0,	
+	},
+#endif	
+};
+
+/* 
+  Function pcie_mdio_read (phyad, regad)
+
+   Parameters:
+     phyad ... MDIO PHY address (typically 0!)
+     regad ... Register address in range 0-0x1f
+
+   Description:
+     Perform PCIE MDIO read on specified PHY (typically 0), and Register.
+     Access is through an indirect command/status mechanism, and timeout
+     is possible. If command is not immediately complete, which would
+     be typically the case, one more attempt is made after a 1ms delay.
+
+   Return: 16-bit data item or 0xdead on MDIO timeout
+*/
+static uint16 bcm63xx_pcie_mdio_read (struct bcm63xx_pcie_port *port, uint16 phyad, uint16 regad) 
+{
+    unsigned char * __iomem regs = port->regs;
+    int timeout;
+    uint32 data;
+    uint16 retval;
+    volatile PcieBlk1000Regs *RcDLReg;
+
+    RcDLReg = (PcieBlk1000Regs*)(regs+PCIEH_BLK_1000_REGS);
+
+    /* Bit-20=1 to initiate READ, bits 19:16 is the phyad, bits 4:0 is the regad */
+    data = 0x100000;
+    data = data |((phyad & 0xf)<<16);
+    data = data |(regad & 0x1F);
+
+    RcDLReg->mdioAddr = data;
+    /* critical delay */
+    udelay(1000);
+
+    timeout = 2;
+    while (timeout-- > 0) {
+        data = RcDLReg->mdioRdData;
+        /* Bit-31=1 is DONE */
+        if (data & 0x80000000)
+            break;
+        /* not complete yet - poll once more after a 1ms delay */
+        udelay(1000);
+    }
+
+    if (!(data & 0x80000000)) {	/* DONE never set: timed out (old timeout==0 test was unreachable after post-decrement) */
+        retval = 0xdead;
+    }else 
+        /* Bits 15:0 is read data*/
+        retval = (data&0xffff);
+
+    return retval;
+}
+
+/* 
+ Function pcie_mdio_write (phyad, regad, wrdata)
+
+   Parameters:
+     phyad ... MDIO PHY address (typically 0!)
+     regad  ... Register address in range 0-0x1f
+     wrdata ... 16-bit write data
+
+   Description:
+     Perform PCIE MDIO write on specified PHY (typically 0), and Register.
+     Access is through an indirect command/status mechanism, and timeout
+     is possible. If command is not immediately complete, which would
+     be typically the case, one more attempt is made after a 1ms delay.
+
+   Return: 1 on success, 0 on timeout
+*/
+static int bcm63xx_pcie_mdio_write (struct bcm63xx_pcie_port *port, uint16 phyad, uint16 regad, uint16 wrdata)
+{
+    unsigned char * __iomem regs = port->regs;
+    int timeout;
+    uint32 data;
+    volatile PcieBlk1000Regs *RcDLReg;
+    
+    RcDLReg = (PcieBlk1000Regs*)(regs+PCIEH_BLK_1000_REGS);
+
+    /* bits 19:16 is the phyad, bits 4:0 is the regad */
+    data = ((phyad & 0xf) << 16);
+    data = data | (regad & 0x1F);
+
+    RcDLReg->mdioAddr = data;
+    udelay(1000);
+
+    /* Bit-31=1 to initiate the WRITE, bits 15:0 is the write data */
+    data = 0x80000000;
+    data = data | (wrdata & 0xFFFF);
+
+    RcDLReg->mdioWrData = data;
+    udelay(1000);
+
+    /* Bit-31=0 when DONE */
+    timeout = 2;
+    while (timeout-- > 0) {
+
+        data = RcDLReg->mdioWrData;
+
+        /* Bit-31 clears when the write is DONE */
+        if ((data & 0x80000000) == 0 )
+            break;
+
+        /* not complete yet - poll once more after a 1ms delay */
+        udelay(1000);
+    }
+
+    if (data & 0x80000000){	/* still busy: timed out (old timeout==0 test was unreachable after post-decrement) */
+        return 0;
+    } else 
+        return 1;
+}
+
+static void bcm63xx_pcie_phy_mode_config(int index)
+{
+	struct bcm63xx_pcie_port* port;	
+	port = &bcm63xx_pcie_ports[index];	
+
+#if defined(RCAL_1UM_VERT)
+	/*
+	 * Rcal Calibration Timers
+	 *   Block 0x1000, Register 1, bit 4(enable), and 3:0 (value)
+	 */
+	{
+		int val = 0;
+		uint16 data = 0; 
+		if(GetRCalSetting(RCAL_1UM_VERT, &val)== kPMC_NO_ERROR) {
+			printk("bcm63xx_pcie: setting resistor calibration value to 0x%x\n", val);
+			bcm63xx_pcie_mdio_write(port, 0, 0x1f , 0x1000); 
+			data = bcm63xx_pcie_mdio_read (port, 0, 1);
+			data = ((data & 0xffe0) | (val & 0xf) | (1 << 4)); /*enable*/   		
+			bcm63xx_pcie_mdio_write(port, 0, 1, data);
+		}
+	}
+#endif
+
+#if defined(PCIE3_CORE) 
+	//printk("chipid:0x%x , chiprev:0x%x \n", kerSysGetChipId(), (UtilGetChipRev()));
+	{
+		printk("bcm63xx_pcie: applying serdes parameters\n");
+		/*
+		 * VCO Calibration Timers
+		 * Workaround: 
+		 * Block 0x3000, Register 0xB = 0x40
+		 * Block 0x3000, Register 0xD = 7
+		 * Notes: 
+		 * -Fixed in 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 */ 
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x3000);
+		bcm63xx_pcie_mdio_read (port, 0, 0x1f);  /* just to exercise the read */
+		bcm63xx_pcie_mdio_write(port, 0, 0xB, 0x40);
+		bcm63xx_pcie_mdio_write(port, 0, 0xD, 7);      
+
+		/*	
+		 * Reference clock output level
+		 * Workaround:
+		 * Block 0x2200, Register 3 = 0xaba4
+		 * Note: 
+		 * -Fixed in 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x2200);
+		bcm63xx_pcie_mdio_write(port, 0, 3, 0xaba4);    
+
+		/* 
+		 * Tx Pre-emphasis
+		 * Workaround:
+		 * Block 0x4000, Register 0 = 0x1d20  // Gen1
+		 * Block 0x4000, Register 1 = 0x12cd  // Gen1
+		 * Block 0x4000, Register 3 = 0x0016  // Gen1, Gen2
+		 * Block 0x4000, Register 4 = 0x5920  // Gen2
+		 * Block 0x4000, Register 5 = 0x13cd  // Gen2
+		 * Notes: 
+		 * -Fixed in 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x4000);
+		bcm63xx_pcie_mdio_write(port, 0, 0, 0x1D20);    
+		bcm63xx_pcie_mdio_write(port, 0, 1, 0x12CD);
+		bcm63xx_pcie_mdio_write(port, 0, 3, 0x0016);
+		bcm63xx_pcie_mdio_write(port, 0, 4, 0x5920);
+		bcm63xx_pcie_mdio_write(port, 0, 5, 0x13CD);
+
+		/*
+		 * Rx Signal Detect
+		 * Workaround:
+		 * Block 0x6000, Register 5 = 0x2c0d 
+		 * Notes:
+		 * -Fixed in 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x6000);
+		bcm63xx_pcie_mdio_write(port, 0, 0x5, 0x2C0D);		
+
+		/*
+		 * Rx Jitter Tolerance
+		 * Workaround:
+		 * Block 0x7300, Register 3 = 0x190  // Gen1
+		 * Block 0x7300, Register 9 = 0x194  // Gen2
+		 * Notes:
+		 * -Gen1 setting 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 * -Gen2 setting only in latest SerDes RTL  / future tapeouts
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x7300);
+		bcm63xx_pcie_mdio_write(port, 0, 3, 0x190);
+		bcm63xx_pcie_mdio_write(port, 0, 9, 0x194);
+
+		/* 
+		 * Gen2 Rx Equalizer
+		 * Workaround:
+		 * Block 0x6000 Register 7 = 0xf0c8  // Gen2
+		 * Notes:
+		 * -New setting only in latest SerDes RTL / future tapeouts
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x6000);
+		bcm63xx_pcie_mdio_write(port, 0, 7, 0xf0c8);
+
+		/*
+		 * SSC Parameters
+		 * Workaround:
+		 * Block 0x1100, Register 0xA = 0xea3c  
+		 * Block 0x1100, Register 0xB = 0x04e7
+		 * Block 0x1100, Register 0xC = 0x0039 
+		 * Block 0x2200, Register 5 = 0x5044    // VCO parameters for fractional mode, -175ppm
+		 * Block 0x2200, Register 6 = 0xfef1    // VCO parameters for fractional mode, -175ppm
+		 * Block 0x2200, Register 7 = 0xe818    // VCO parameters for fractional mode, -175ppm
+		 * Notes:
+		 * -Only need to apply these fixes when enabling Spread Spectrum Clocking (SSC), which would likely be a flash option
+		 * -Block 0x1100 fixed in 63148A0, 63381B0, 63138B0 but ok to write anyway
+		 */
+
+		/*
+		 * EP Mode PLL Bandwidth and Peaking
+		 * Workaround:
+		 * Block 0x2100, Register 0 = 0x5174
+		 * Block 0x2100, Register 4 = 0x6023
+		 * Notes:
+		 * -Only needed for EP mode, but ok to write in RC mode too
+		 * -New setting only in latest SerDes RTL / future tapeouts
+		 */
+		bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x2100);
+		bcm63xx_pcie_mdio_write(port, 0, 0, 0x5174);
+		bcm63xx_pcie_mdio_write(port, 0, 4, 0x6023);
+	}
+#endif
+    return;
+}
+
+/*
+ * Map a pci_bus back to its owning bcm63xx port.
+ * The port pointer was stashed in sys->private_data by bcm63xx_pcie_setup().
+ */
+static struct bcm63xx_pcie_port *bcm63xx_pcie_bus2port(struct pci_bus *bus)
+{
+	struct pci_sys_data *sys = bus->sysdata;
+	return sys->private_data;
+}
+
+/*
+ * Select the target bus/device/function for the next config-space access
+ * by programming the external-config index register.  No-op unless the
+ * core uses the UBUS2 PCIe interface.
+ */
+static void bcm63xx_pcie_config_select(unsigned char * __iomem regs, u32 bus_no, u32 dev_no, u32 func_no)
+{
+	/* set target bus/device/function */
+#if defined(UBUS2_PCIE)
+  __raw_writel((bus_no<<PCIE_EXT_CFG_BUS_NUM_SHIFT)|(dev_no <<PCIE_EXT_CFG_DEV_NUM_SHIFT)|(func_no<<PCIE_EXT_CFG_FUNC_NUM_SHIFT),
+  			regs+PCIEH_PCIE_EXT_CFG_REGS+offsetof(PcieExtCfgRegs,index));
+
+#endif    
+}
+    
+/*
+ * Compute the dword-aligned register offset for a config-space access.
+ * The root complex's own registers start at offset 0; config space of
+ * downstream devices is reached through the PCIEH_DEV_OFFSET window.
+ * NOTE(review): the return type is declared "u32 __iomem" although the
+ * value is a plain offset added to regs by the callers, not an I/O
+ * pointer itself -- confirm intent.
+ */
+static u32 __iomem bcm63xx_pcie_config_offset_aligned(int bus_no, int where)
+{
+   if(bus_no == BCM_BUS_PCIE_ROOT ) {
+        /* root complex: registers begin at offset 0 */
+        return where&~3;
+    } else {
+        /* downstream device: offset within the device window */
+        return (PCIEH_DEV_OFFSET+where)&~3;
+    }	
+}
+
+/*
+ * return whether bus_no, dev_no is a valid configuration to access or not
+ *
+ * On any PCIE slot below is the bus assignment
+ *  0 - Root Complex
+ *  1 - PCIe device or Switch
+ *  2 - PCIe switch upstream port (if switch is present)
+ *  3..n - PCIe switch downstream port devices (if switch is present)
+ *
+ * Limit RC to access only configuration space access (dev#0)
+ * Otherwise Linux will make all 1-31 devices valid and try to enumerate them
+ *
+ * Note: Under RC only 1 PCIe switch is supported
+ */
+/*
+ * Decide whether a (bus, device) pair may be targeted by a config cycle.
+ * Returns non-zero when the access is allowed, 0 otherwise.
+ * Restricting most buses to device 0 stops Linux from enumerating the
+ * same device 32 times (devices 1..31 alias device 0 on these links).
+ */
+static int bcm63xx_pcie_valid_config(int bus_no, int dev_no)
+{
+	if (bus_no <= (BCM_BUS_PCIE_ROOT+1)) {
+		/* Root Complex bridge, first device or switch:
+		 * allow only configuration space of device 0 */
+		return (dev_no == 0);
+	}
+
+	if (bus_no == (BCM_BUS_PCIE_ROOT+2)) {
+		/* switch upstream port: allow access to all downstream ports */
+		return 1;
+	}
+
+	/* Switch downstream ports to devices: only device 0, otherwise
+	 * enumeration would loop over phantom copies of the same device.
+	 * (The old trailing "return 0;" after this exhaustive if/else
+	 * chain was unreachable and has been removed.) */
+	return (dev_no == 0);
+}
+
+/*
+ * PCIe config cycles are done by programming the PCIE_CONF_ADDR register
+ * and then reading the PCIE_CONF_DATA register. Need to make sure these
+ * transactions are atomic.
+ */
+
+/*
+ * pci_ops.read: read a config-space register of @size bytes at @where.
+ * Performs an aligned 32-bit read and extracts the requested sub-word.
+ * Returns a PCIBIOS_* status code; on failure *val is set to all-ones.
+ */
+static int bcm63xx_pciebios_read(struct pci_bus *bus, u32 devfn, int where,
+			int size, u32 *val)
+{
+	struct bcm63xx_pcie_port *port = bcm63xx_pcie_bus2port(bus);
+	unsigned char * __iomem regs = port->regs;
+	u32 __iomem offset;
+	int busno = bus->number;
+	int slot = PCI_SLOT(devfn);
+	int fn = PCI_FUNC(devfn);
+  u32 data;
+
+	TRACE();
+
+  TRACE_READ("R device (bus)%d/(slot)%d/func(%d) at %d size %d, val=0x%x\n", busno, slot, fn, where, size, *val);		
+	if (bcm63xx_pcie_valid_config(busno, PCI_SLOT(devfn)) == 0){
+		/* disallowed bus/dev combination: behave like an absent device */
+		*val = -1;
+		TRACE_READ("not valid config\n");
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+		
+	offset = (u32)regs + bcm63xx_pcie_config_offset_aligned(busno,where);
+	
+	/* reject misaligned accesses (2-byte at odd, 4-byte at non-dword) */
+	if (((size == 2) && (where & 1)) ||((size == 4) && (where & 3))) {
+		 BUG_ON(1);
+		 return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	bcm63xx_pcie_config_select(regs, busno,slot,fn);
+
+  data = __raw_readl(offset);
+
+  TRACE_READ("reading 0x%x @ 0x%x\n", data, offset);
+    
+  /* 0xdeaddead is treated as "no device responded" on this bus */
+  if (data == 0xdeaddead) {
+		*val = -1;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+  }
+  /* shift/mask the addressed byte or halfword out of the 32-bit read */
+  if (size == 1)
+     *val = (data >> ((where & 3) << 3)) & 0xff;
+  else if (size == 2)
+     *val = (data >> ((where & 3) << 3)) & 0xffff;
+  else
+     *val = data;
+
+  TRACE_READ("val= 0x%x\n", *val);        	
+  return PCIBIOS_SUCCESSFUL;
+}
+	
+
+/*
+ * pci_ops.write: write @size bytes of @val to config space at @where.
+ * Sub-dword writes are done as a read-modify-write of the aligned
+ * 32-bit register.  Returns a PCIBIOS_* status code.
+ */
+static int bcm63xx_pciebios_write(struct pci_bus *bus, u32 devfn,
+			int where, int size, u32 val)
+{
+	struct bcm63xx_pcie_port *port = bcm63xx_pcie_bus2port(bus);
+	unsigned char * __iomem regs = port->regs;
+	u32 __iomem offset;
+	int busno = bus->number;
+	int slot = PCI_SLOT(devfn);
+	int fn = PCI_FUNC(devfn);
+  u32 data;
+	
+	TRACE();
+
+  TRACE_WRITE("W device (bus)%d/(slot)%d/func(%d) at %d size %d, val=0x%x\n", busno, slot, fn, where, size, val);
+	if (bcm63xx_pcie_valid_config(busno, PCI_SLOT(devfn)) == 0)
+	{
+		TRACE_WRITE("not valid config\n");
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+		
+	bcm63xx_pcie_config_select(regs, busno,slot,fn);
+	offset = (u32)regs + bcm63xx_pcie_config_offset_aligned(busno,where);
+	
+	/* reject misaligned accesses (2-byte at odd, 4-byte at non-dword) */
+	if (((size == 2) && (where & 1)) ||((size == 4) && (where & 3))) {
+		 BUG_ON(1);
+		 return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+  /* read-modify-write: merge the new byte/halfword into the dword */
+  data = __raw_readl(offset);
+  DPRINT("reading 0x%x @ 0x%x\n", data, offset);
+  if (size == 1)
+		data = (data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3));
+	else if (size == 2)
+		data = (data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3));
+  else
+  	data = val;
+
+  TRACE_WRITE("writing 0x%x @ 0x%x\n", data, offset);
+	__raw_writel(data, offset);
+
+  return PCIBIOS_SUCCESSFUL;
+}
+
+
+/* Config-space accessors handed to the PCI core for this host bridge. */
+static struct pci_ops bcm63xx_pcie_ops = {
+	.read   = bcm63xx_pciebios_read,
+	.write  = bcm63xx_pciebios_write
+};
+
+/*
+ * Map pci_sys_data back to its port: the PCI domain number doubles as
+ * the index into the bcm63xx_pcie_ports[] array.
+ */
+static struct bcm63xx_pcie_port *bcm63xx_pcie_sysdata2port(struct pci_sys_data *sysdata)
+{
+	unsigned port;
+	TRACE();
+	port = sysdata->domain;
+	BUG_ON(port >= ARRAY_SIZE(bcm63xx_pcie_ports));
+	return & bcm63xx_pcie_ports[port];
+}
+
+/*
+ * Program UBUS/PCIe completion timeouts for a port.
+ * Intentionally a no-op for now: the register writes are kept under
+ * "#if 0" until validated timeout values are available (see comment
+ * inside).  Called after every port reset so enabling it later is a
+ * one-line change.
+ */
+static void bcm63xx_pcie_config_timeouts(struct bcm63xx_pcie_port *port)
+{
+#if 0
+        /* Currently disabled until good timeout values are available from design team */
+        unsigned char * __iomem regs = port->regs;
+
+        TRACE();
+
+        /*
+         * Program the timeouts
+         *   MISC_UBUS_TIMEOUT:                        0x0300_0000 (250 msec, 5ns increments, based on curent PCIE Clock)
+         *   RC_CFG_PCIE_DEVICE_STATUS_CONTROL_2:      0x0006      (210ms)
+         */
+        __raw_writel(0x03000000, regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,ubus_timeout));
+        __raw_writew(0x0006, regs+PCIEH_REGS+offsetof(PcieRegs,deviceControl2));
+#endif
+        return;
+}
+
+/*
+ * One-time hardware setup of a root-complex port: unmask legacy INTA-D,
+ * program the outgoing CPU->PCIe memory window and the incoming DDR
+ * BAR, advertise a PCI-PCI bridge class code, apply misc performance
+ * settings and (re)program completion timeouts.
+ */
+static void bcm63xx_hw_pcie_setup(struct bcm63xx_pcie_port *port)
+{
+	/* Fix: declare regs unconditionally.  It was previously declared
+	 * inside #if defined(UBUS2_PCIE) only, which broke the build when
+	 * PCIE3_CORE was defined without UBUS2_PCIE (regs is used in the
+	 * PCIE3_CORE block below as well). */
+	unsigned char * __iomem regs = port->regs;
+
+	TRACE();
+	(void)regs; /* avoid unused warning if neither section is compiled */
+
+#if defined(UBUS2_PCIE)
+	/* unmask legacy INTA-D interrupts at the L1 controller */
+	__raw_writel(PCIE_CPU_INTR1_PCIE_INTD_CPU_INTR | PCIE_CPU_INTR1_PCIE_INTC_CPU_INTR |PCIE_CPU_INTR1_PCIE_INTB_CPU_INTR |PCIE_CPU_INTR1_PCIE_INTA_CPU_INTR,
+				regs+PCIEH_CPU_INTR1_REGS+offsetof(PcieCpuL1Intr1Regs,maskClear));
+
+	/* setup outgoing mem resource window */
+	__raw_writel((port->owin_res->end & PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_LIMIT_MASK)
+    	 	|((port->owin_res->start >> PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_LIMIT_SHIFT) << PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_BASE_SHIFT),
+				regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,cpu_2_pcie_mem_win0_base_limit));
+
+	__raw_writel((port->owin_res->start & PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_ADDR_MASK),
+  			regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,cpu_2_pcie_mem_win0_lo));
+
+	/* setup incoming DDR memory BAR(1) */
+#if 0 //defined(CONFIG_CPU_LITTLE_ENDIAN)
+	__raw_writel(PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BYTE_ALIGN,
+				regs+PCIEH_RC_CFG_VENDOR_REGS+offsetof(PcieRcCfgVendorRegs,specificReg1));
+#endif
+	__raw_writel((DDR_UBUS_ADDRESS_BASE & PCIE_MISC_RC_BAR_CONFIG_LO_MATCH_ADDRESS_MASK)| bcm63xx_pcie_get_baraddrsize_index(),
+				regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,rc_bar1_config_lo));
+
+	__raw_writel(PCIE_MISC_UBUS_BAR_CONFIG_ACCESS_EN,
+				regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,ubus_bar1_config_remap));
+
+	/* setup class code as PCI-PCI bridge, preserving the revision id */
+	__raw_writel((__raw_readl(regs+PCIEH_BLK_428_REGS+offsetof(PcieBlk428Regs,idVal3))& PCIE_IP_BLK428_ID_VAL3_REVISION_ID_MASK) | (PCI_CLASS_BRIDGE_PCI << 8),
+  			regs+PCIEH_BLK_428_REGS+offsetof(PcieBlk428Regs,idVal3));
+
+	/* disable data bus error for enumeration */
+	__raw_writel(__raw_readl(regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,misc_ctrl))|PCIE_MISC_CTRL_CFG_READ_UR_MODE,
+				regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,misc_ctrl));  
+#endif
+
+#if defined(PCIE3_CORE)
+	/* Misc performance addition */
+	__raw_writel(__raw_readl(regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,misc_ctrl))
+								|PCIE_MISC_CTRL_MAX_BURST_SIZE_128B
+								|PCIE_MISC_CTRL_BURST_ALIGN
+								|PCIE_MISC_CTRL_PCIE_IN_WR_COMBINE
+								|PCIE_MISC_CTRL_PCIE_RCB_MPS_MODE
+								|PCIE_MISC_CTRL_PCIE_RCB_64B_MODE,
+							  regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,misc_ctrl));
+#endif
+
+	/* Program the UBUS completion timeout after reset */
+	bcm63xx_pcie_config_timeouts(port);
+}
+
+/*
+ * hw_pci.setup callback: claim the outgoing memory window, register it
+ * with the PCI core and perform the per-port hardware setup.
+ * Returns 1 so the core proceeds to scan this controller.
+ */
+static int bcm63xx_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+	struct bcm63xx_pcie_port *port = bcm63xx_pcie_sysdata2port(sys);
+
+	TRACE();
+	BUG_ON(request_resource(&iomem_resource, port->owin_res));
+
+ 	pci_add_resource_offset(&sys->resources, port->owin_res, sys->mem_offset); 
+	/* pcibios_init_hw will add resource offset */
+	sys->private_data = port;
+	bcm63xx_hw_pcie_setup(port);
+
+	return 1;
+}
+
+/* hw_pci.scan callback: enumerate the port's root bus with our pci_ops. */
+static struct pci_bus *
+bcm63xx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+	TRACE();	
+	return pci_scan_root_bus(NULL, sys->busnr, &bcm63xx_pcie_ops, sys, &sys->resources);
+}
+
+
+/*
+ * hw_pci.map_irq callback: every device behind a port shares that
+ * port's single legacy interrupt line.
+ */
+static int bcm63xx_pcie_map_irq(const struct pci_dev *dev, u8 slot,
+	u8 pin)
+{
+	struct bcm63xx_pcie_port *port = bcm63xx_pcie_bus2port(dev->bus);
+	TRACE();  
+	return port->irq;
+}
+
+#define BCM4360_D11AC_SROMLESS_ID	0x4360
+#define BCM4360_D11AC_ID	0x43a0
+#define BCM4360_D11AC2G_ID	0x43a1
+#define BCM4360_D11AC5G_ID	0x43a2
+#define BCM4352_D11AC_ID	0x43b1
+#define BCM4352_D11AC2G_ID	0x43b2
+#define BCM4352_D11AC5G_ID	0x43b3
+#define BCM43602_CHIP_ID	0xaa52
+#define BCM43602_D11AC_ID	0x43ba
+#define BCM43602_D11AC2G_ID	0x43bb
+#define BCM43602_D11AC5G_ID	0x43bc
+
+#define IS_DEV_AC3X3(d) (((d) == BCM4360_D11AC_ID) || \
+	                 ((d) == BCM4360_D11AC2G_ID) || \
+	                 ((d) == BCM4360_D11AC5G_ID) || \
+	                 ((d) == BCM4360_D11AC_SROMLESS_ID) || \
+	                 ((d) == BCM43602_D11AC_ID) || \
+	                 ((d) == BCM43602_D11AC2G_ID) || \
+	                 ((d) == BCM43602_D11AC5G_ID) || \
+	                 ((d) == BCM43602_CHIP_ID))
+
+#define IS_DEV_AC2X2(d) (((d) == BCM4352_D11AC_ID) ||	\
+	                 ((d) == BCM4352_D11AC2G_ID) || \
+	                 ((d) == BCM4352_D11AC5G_ID))
+
+/*
+ * Force pcie_mpss for Broadcom 11ac WLAN devices (4360/4352/43602
+ * families) so the MPS the wlan driver programs on the endpoint can be
+ * synced to the root complex by the PCI core.
+ */
+static void bcm63xx_pcie_fixup_mps(struct pci_dev *dev)
+{
+#if defined(PCIE3_CORE)	
+	if (dev->vendor == 0x14e4) {
+		if (IS_DEV_AC3X3(dev->device) || IS_DEV_AC2X2(dev->device)) {
+			/* set 4360 specific tunables
+			 * wlan driver will set mps but cannot populate to RC, 
+			 * fake/hijack it so linux sw can sync it up
+			 */
+			dev->pcie_mpss = 2;	/* 512-byte MPS capability encoding */
+		}
+	}
+#endif	
+}
+
+/*
+ * Final-stage PCI fixup applied to every device (PCI_ANY_ID below):
+ * select the "safe" bus-wide MPS policy, apply the WLAN MPS override,
+ * then re-run MPS configuration for the device's bus.
+ */
+static void bcm63xx_pcie_fixup_final(struct pci_dev *dev)
+{
+#if defined(PCIE3_CORE)
+	pcie_bus_config = PCIE_BUS_SAFE;
+
+	bcm63xx_pcie_fixup_mps(dev);
+
+	/* sync-up mps */
+	if (dev->bus && dev->bus->self) {
+		pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+	}
+#endif	
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_pcie_fixup_final);
+
+
+/*
+ * Poll the PHY link-up status bit for port @index, cache it in
+ * port->link and return it (non-zero when the link is up).
+ */
+static int bcm63xx_pcie_link_up(int index)
+{		
+	struct bcm63xx_pcie_port* port=&bcm63xx_pcie_ports[index];
+	TRACE();
+
+	port->link = __raw_readl(port->regs+PCIEH_BLK_1000_REGS+offsetof(PcieBlk1000Regs,dlStatus)) & PCIE_IP_BLK1000_DL_STATUS_PHYLINKUP_MASK;
+	if(port->link) {
+		printk("PCIE port %d link-up\n", index);
+	}
+		
+	return port->link;
+}
+
+/*
+ * Save the PCI configuration of every device in @domain so it can be
+ * re-applied by bcm63xx_pcie_domain_restore() after a power cycle.
+ */
+static void bcm63xx_pcie_domain_save(int domain)
+{
+	struct pci_dev *dev = NULL;	/* NULL, not 0, per kernel pointer style */
+
+	/* walk every PCI device; keep only those in the requested domain */
+	for_each_pci_dev(dev) {
+		if (pci_domain_nr(dev->bus) == domain) {
+			pci_save_state(dev);
+		}
+	}
+}
+
+/* restore pci configuration for all devices in domain */
+/*
+ * Restore previously saved PCI configuration for every device in
+ * @domain and re-enable the devices.  Devices without a saved state
+ * are reported and skipped.
+ */
+static void bcm63xx_pcie_domain_restore(int domain)
+{
+	struct pci_dev *dev = 0;
+
+	for_each_pci_dev(dev) {
+		if (pci_domain_nr(dev->bus) == domain) {
+			/* expected all to have state saved */
+			if (!dev->state_saved) {
+				printk("%s %x:%x %s\n", __func__,
+					dev->vendor, dev->device, "not saved");
+				continue;
+			}
+			pci_restore_state(dev);
+			/* pci_restore_state() clears state_saved; re-set it so
+			 * the saved config survives another power cycle */
+			dev->state_saved = TRUE; // mark state as still valid
+			pci_reenable_device(dev);
+		}
+	}
+}
+
+/* pcie reinit without pci_common_init */
+/*
+ * Power the PCIe cores down (@hello == 0) or back up (@hello != 0)
+ * without going through pci_common_init() again.
+ * On the first "hello" after boot the per-domain device config is
+ * saved; on later ones the port is re-powered, re-setup and the saved
+ * config restored.  Ports whose link does not come back are powered
+ * off again.
+ */
+void bcm63xx_pcie_aloha(int hello)
+{
+	int i;
+
+	if (!hello) {
+		/* goodbye: reset and power down every core */
+		for (i = 0; i < NUM_CORE; i++) {
+			bcm63xx_pcie_pcie_reset(i, FALSE);
+			pmc_pcie_power_down(i);
+		}
+		return;
+	}
+
+	/* pcie ports, domain 0/1 */
+	for (i = 0; i < NUM_CORE; i++) {
+		struct bcm63xx_pcie_port *port = &bcm63xx_pcie_ports[i];
+
+		/* skip ports with link down */
+		if (!port->enabled)
+			continue;
+
+		if (!port->saved) {
+			/* first time: port powered, link up */
+			port->saved = TRUE;
+
+			/* save pci configuration for domain devices */
+			bcm63xx_pcie_domain_save(i);
+		} else {
+			/* power port and check for link */
+			pmc_pcie_power_up(i);
+			bcm63xx_pcie_pcie_reset(i, TRUE);
+			port->enabled = bcm63xx_pcie_link_up(i);
+			if (!port->enabled) {
+				/* power off ports without link */
+				bcm63xx_pcie_pcie_reset(i, FALSE);
+				pmc_pcie_power_down(i);
+			} else {
+				/* redo setup (previously done during bus scan) */
+				bcm63xx_hw_pcie_setup(port);
+
+				/* restore pci configuration for domain devices */
+				bcm63xx_pcie_domain_restore(i);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(bcm63xx_pcie_aloha);
+
+/*
+ * Subsystem init: for each PCIe core that is enabled in board config,
+ * power it up, take it out of reset, and register it with the PCI core
+ * if the link trains.  Cores that are disabled or fail to link are
+ * powered back down to save energy.
+ */
+static int bcm63xx_pcie_init(void)
+{
+	int i;
+	bool shutdown;
+	TRACE();
+  
+	/* pcie ports, domain 1/2 */
+	for (i = 0; i < NUM_CORE; i++) {
+		shutdown = TRUE;
+		if (kerSysGetPciePortEnable(i)) {
+			pmc_pcie_power_up(i);
+			bcm63xx_pcie_pcie_reset(i, TRUE);
+			bcm63xx_pcie_ports[i].enabled =1;			
+			if(bcm63xx_pcie_link_up(i)) {
+				pci_common_init(&(bcm63xx_pcie_ports[i].hw_pci));
+				shutdown = FALSE;
+	  	}
+	  }
+		if(shutdown) {
+			/* power off ports without link */
+			printk("PCIE port %d power-down\n", i);
+			bcm63xx_pcie_pcie_reset(i, FALSE);
+			pmc_pcie_power_down(i);
+		}
+	}
+	return 0;
+}
+subsys_initcall(bcm63xx_pcie_init);
+#endif
diff --git a/arch/arm/plat-bcm63xx/pcie-msi-bcm63xx.c b/arch/arm/plat-bcm63xx/pcie-msi-bcm63xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..f52000bca871c9c7a23f94e5cf6c5c8ab324ea1b
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/pcie-msi-bcm63xx.c
@@ -0,0 +1,364 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/mach/irq.h>
+#include <asm/irq.h>
+#include <linux/msi.h>
+#include "plat/bcm63xx_pcie.h"
+#include <linux/workqueue.h>
+
+/*
+*    Static definitions
+*/
+#define MSI_MAP_SIZE                            PCIE_MSI_IDS_PER_DOMAIN
+#define PCIE_MISC_MSI_DATA_CONFIG_MATCH_MAGIC   0x0000BCA0
+
+/*
+*    Static structures
+*/
+struct msi_map_entry {
+    bool    used;               /* flag: in use or not */
+    u8      index;              /* index into the vector */
+    int     irq;                /* Virtual IRQ */
+};
+
+struct pcie_rc_msi_info {
+    /* flag: whether MSI is enabled for this domain */
+    bool    msi_enable;
+
+    /* MSI IRQ map entries */
+    struct  msi_map_entry     msi_map[MSI_MAP_SIZE];
+};
+
+
+/*
+*    Static function declarations
+*/
+
+/*
+*    Static variables
+*/
+/* pci msi local structures */
+static struct pcie_rc_msi_info    pciercmsiinfo[NUM_CORE] = {
+    {
+        .msi_enable = 0,
+    },
+#if defined(PCIEH_1)
+    {
+        .msi_enable = 0,
+    },
+#endif
+};
+
+/*
+*/
+static struct irq_chip bcm63xx_irq_chip_msi_pcie[NUM_CORE] = {
+    {
+        .name = "PCIe0-MSI",
+        .irq_mask = mask_msi_irq,
+        .irq_unmask = unmask_msi_irq,
+        .irq_enable = unmask_msi_irq,
+        .irq_disable = mask_msi_irq,
+    },
+#if defined(PCIEH_1)
+    {
+        .name = "PCIe1-MSI",
+        .irq_mask = mask_msi_irq,
+        .irq_unmask = unmask_msi_irq,
+        .irq_enable = unmask_msi_irq,
+        .irq_disable = mask_msi_irq,
+    },
+#endif
+};
+
+/*
+* Initializes all msi irq map entries
+*
+* return - None
+*/
+static void msi_map_init(u8 domain)
+{
+    struct msi_map_entry *entry = pciercmsiinfo[domain].msi_map;
+    struct msi_map_entry *end = entry + MSI_MAP_SIZE;
+    u8 idx = 0;
+
+    /* Mark every slot free.  'index' permanently records the slot's
+     * position in the vector; 'irq' is filled in by msi_map_get(). */
+    for (; entry != end; entry++, idx++) {
+        entry->used = false;
+        entry->index = idx;
+        entry->irq = 0;
+    }
+}
+
+/*
+* returns an unused msi irq map
+*
+* return - pointer to map entry on success else NULL
+*/
+/*
+ * Allocate the first unused MSI map slot for @domain.
+ * The virtual IRQ number is derived from the slot index plus a fixed
+ * per-domain base (INTERRUPT_ID_PCIE_MSI_FIRST + domain*MSI_MAP_SIZE).
+ * Returns the slot, or NULL when all MSI_MAP_SIZE slots are taken.
+ */
+static struct msi_map_entry *msi_map_get(u8 domain)
+{
+    struct msi_map_entry* msi_map = pciercmsiinfo[domain].msi_map;
+    struct msi_map_entry *retval = NULL;
+    int i;
+
+    for (i = 0; i < MSI_MAP_SIZE; i++) {
+        if (!msi_map[i].used) {
+            retval = msi_map + i;
+            retval->irq = INTERRUPT_ID_PCIE_MSI_FIRST + i + domain*MSI_MAP_SIZE;
+            retval->used = true;
+            break;
+        }
+    }
+
+    return retval;
+}
+
+/*
+* Release MSI Irq map
+*
+* return - None
+*/
+static void msi_map_release(struct msi_map_entry *entry)
+{
+    /* NULL-tolerant: callers may pass a failed msi_map_get() result. */
+    if (entry == NULL)
+        return;
+
+    /* return the slot to the pool; 'index' stays valid for reuse */
+    entry->used = false;
+    entry->irq = 0;
+}
+
+/*
+* ISR routine for MSI interrupt
+*
+*  - Clear MSI interrupt status
+*  - Call corresponding MSI virtual interrupt
+*
+* return - always returns IRQ_HANDLED
+*/
+/*
+ * ISR for the shared hardware MSI interrupt of one port.
+ * Reads and clears the L2 MSI status bits, then dispatches each set
+ * bit to its registered virtual IRQ via generic_handle_irq().
+ * NOTE(review): returns IRQ_HANDLED whenever the scan loop advanced at
+ * all (index != 0), even if no mapped vector was found -- confirm this
+ * is the intended semantics for the IRQF_SHARED line.
+ */
+static irqreturn_t bcm63xx_pcie_msi_isr(int irq, void *arg)
+{
+    int index;
+    u32 reg_val;
+    struct bcm63xx_pcie_port* port = (struct bcm63xx_pcie_port*)arg;
+    int domain = port->hw_pci.domain;
+    struct msi_map_entry* msi_map = pciercmsiinfo[domain].msi_map;
+
+
+    /* Get the MSI interrupt status */
+    reg_val =__raw_readl(port->regs+PCIEH_L2_INTR_CTRL_REGS+offsetof(PcieL2IntrControl,Intr2CpuStatus));
+    reg_val &= (PCIE_L2_INTR_CTRL_MSI_CPU_INTR_MASK);
+
+    /* clear the interrupts, as this is an edge triggered interrupt */
+    __raw_writel(reg_val, port->regs+PCIEH_L2_INTR_CTRL_REGS+offsetof(PcieL2IntrControl,Intr2CpuClear));
+
+    /* Process all the available MSI interrupts */
+    index = 0;
+
+    /* walk the status word bit by bit until all pending bits handled */
+    while (reg_val != 0x00000000) {
+        if ( reg_val & ( 1ul << (index+PCIE_L2_INTR_CTRL_MSI_CPU_INTR_SHIFT))) {
+            if (index < MSI_MAP_SIZE) {
+                if (msi_map[index].used)
+                    /* Call the corresponding virtual interrupt */
+                    generic_handle_irq(msi_map[index].irq);
+                else
+                    printk(KERN_INFO "unexpected MSI (1)\n");
+            } else {
+                /* that's weird who triggered this?*/
+                /* just clear it*/
+                printk(KERN_INFO "unexpected MSI (2)\n");
+            }
+            reg_val &= (~( 1ul << (index+PCIE_L2_INTR_CTRL_MSI_CPU_INTR_SHIFT)));
+        }
+        index++;
+    }
+
+    if (index) return IRQ_HANDLED;
+    else return IRQ_NONE;
+}
+
+/*
+* Enable MSI interrupt on the root complex
+*
+*  - Setup MSI isr, program MSI matching address and data pattern
+*  - Enable MSI interrupt vectors at L2
+*  - Enable L2 interrupts at L1 and disable INTA-D interrupts at L1
+*
+* return - true: on msi enable, false: failure to enable msi
+*/
+/*
+ * Enable MSI interrupt on the root complex
+ *
+ *  - Setup MSI isr, program MSI matching address and data pattern
+ *  - Enable MSI interrupt vectors at L2
+ *  - Enable L2 interrupts at L1 and disable INTA-D interrupts at L1
+ *
+ * Idempotent: subsequent calls for an already-enabled domain return
+ * true immediately.
+ *
+ * return - true: on msi enable, false: failure to enable msi
+ */
+static bool bcm63xx_pcie_enable_msi(struct bcm63xx_pcie_port *port)
+{
+    bool    retval    = false;
+    u8        domain = port->hw_pci.domain;
+    u32        reg_val;
+
+    /* Initialize only once */
+    if (pciercmsiinfo[domain].msi_enable) {
+        retval = true;
+        goto exit;
+    }
+
+    /* Initialize the local map structure */
+    msi_map_init(domain);
+
+    /* Register MSI interrupt with OS */
+    if (request_irq(port->irq, bcm63xx_pcie_msi_isr, IRQF_SHARED,
+                    bcm63xx_irq_chip_msi_pcie[domain].name, port)) {
+        pr_err("%s: Cannot register IRQ %u\n", __func__, port->irq);
+        goto exit;
+    }
+
+    printk(KERN_INFO "Using irq=%d for PCIE-MSI interrupts\r\n",port->irq);
+
+    /* Program the Root Complex Registers for matching address hi and low */
+    /* The address should be unique with in the down stream/up stream BAR mapping */
+    __raw_writel((PCIE_MISC_MSI_BAR_CONFIG_LO_MATCH_ADDR_MASK|PCIE_MISC_MSI_BAR_CONFIG_LO_ENABLE_MASK),
+                port->regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,msi_bar_config_lo));
+    __raw_writel(0,port->regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,msi_bar_config_hi));
+
+    /* Program the RC registers for matching data pattern: the low bits
+     * carry the vector index, the upper bits must match the magic */
+    reg_val = PCIE_MISC_MSI_DATA_CONFIG_MATCH_MASK;
+    reg_val &= ((~(MSI_MAP_SIZE-1))<<PCIE_MISC_MSI_DATA_CONFIG_MATCH_SHIFT);
+    reg_val |= PCIE_MISC_MSI_DATA_CONFIG_MATCH_MAGIC;
+    __raw_writel(reg_val, port->regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,msi_data_config));
+
+
+    /* Clear all MSI interrupts initially */
+    __raw_writel(PCIE_L2_INTR_CTRL_MSI_CPU_INTR_MASK, port->regs+PCIEH_L2_INTR_CTRL_REGS+offsetof(PcieL2IntrControl,Intr2CpuClear));
+
+
+    /* enable all available MSI vectors */
+    __raw_writel(PCIE_L2_INTR_CTRL_MSI_CPU_INTR_MASK, port->regs+PCIEH_L2_INTR_CTRL_REGS+offsetof(PcieL2IntrControl,Intr2CpuMask_clear));
+
+    /* Enable L2 Intr2 controller interrupt */
+    __raw_writel(PCIE_CPU_INTR1_PCIE_INTR_CPU_INTR, port->regs+PCIEH_CPU_INTR1_REGS+offsetof(PcieCpuL1Intr1Regs,maskClear));
+
+    set_irq_flags(port->irq, IRQF_VALID);
+
+    /* Set the flag to specify MSI is enabled */
+    pciercmsiinfo[domain].msi_enable = true;
+
+    retval = true;
+
+exit:
+
+    return retval;
+}
+
+
+/*
+* Clear the previous setup virtual MSI interrupt
+*
+* return - None
+*/
+/*
+ * Arch hook called by the MSI core when an MSI is torn down: free the
+ * irq descriptor and release the matching map slot.
+ */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+    int i, d;
+
+    /* find existence of msi irq in all domains */
+    for (d = 0; d < NUM_CORE; d++) {
+        if (pciercmsiinfo[d].msi_enable == true) {
+            for (i = 0; i < MSI_MAP_SIZE; i++) {
+                if ((pciercmsiinfo[d].msi_map[i].used) && (pciercmsiinfo[d].msi_map[i].irq == irq)) {
+                    /* Free the resources */
+                    irq_free_desc(irq);
+                    msi_map_release(pciercmsiinfo[d].msi_map + i);
+                    break;
+                }
+            }
+        }
+    }
+}
+
+/*
+* setup architecture specific initialization for msi interrupt
+* - enable msi in the root complex,
+* - get a virtual irq,
+* - setup msi address & data on the EP
+*
+* return 0: on success, <0: on failure
+*/
+/*
+ * setup architecture specific initialization for msi interrupt
+ * - enable msi in the root complex,
+ * - get a virtual irq,
+ * - setup msi address & data on the EP
+ *
+ * On any failure after a map slot was taken, the slot and irq
+ * descriptor are released in the exit path.
+ *
+ * return 0: on success, <0: on failure
+ */
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+
+    int retval = -EINVAL;
+    struct msi_msg msg;
+    struct msi_map_entry *map_entry = NULL;
+    struct bcm63xx_pcie_port *port = NULL;
+
+    /* recover the owning port from the device's bus sysdata */
+    port = (struct bcm63xx_pcie_port*)((struct pci_sys_data*)pdev->bus->sysdata)->private_data;
+
+    /* Enable MSI at RC */
+    if (!bcm63xx_pcie_enable_msi(port))
+        goto exit;
+
+    /*
+     * Get an unused IRQ map entry and set the irq descriptors
+     */
+    map_entry = msi_map_get(port->hw_pci.domain);
+    if (map_entry == NULL)
+        goto exit;
+
+    retval = irq_alloc_desc(map_entry->irq);
+    if (retval < 0)
+        goto exit;
+
+    irq_set_chip_and_handler(map_entry->irq,
+                            &bcm63xx_irq_chip_msi_pcie[port->hw_pci.domain],
+                            handle_simple_irq);
+
+    retval = irq_set_msi_desc(map_entry->irq, desc);
+    if (retval < 0)
+        goto exit;
+
+    set_irq_flags(map_entry->irq, IRQF_VALID);
+
+
+    /*
+     * Program the msi matching address and data pattern on the EP
+    */
+    /* Get the address from RC and mask the enable bit */
+    /* 32 bit address only */
+    msg.address_lo = __raw_readl(port->regs+PCIEH_MISC_REGS+offsetof(PcieMiscRegs,msi_bar_config_lo));
+    msg.address_lo &= PCIE_MISC_MSI_BAR_CONFIG_LO_MATCH_ADDR_MASK;
+    msg.address_hi = 0;
+    /* data = magic | slot index, matching the RC's msi_data_config */
+    msg.data = (PCIE_MISC_MSI_DATA_CONFIG_MATCH_MAGIC | map_entry->index);
+    write_msi_msg(map_entry->irq, &msg);
+
+    retval = 0;
+
+exit:
+    if (retval != 0) {
+        pr_err(" arch_setup_msi_irq returned error %d\r\n",retval);
+        if (map_entry) {
+            irq_free_desc(map_entry->irq);
+            msi_map_release(map_entry);
+        }
+    }
+
+    return retval;
+}
diff --git a/arch/arm/plat-bcm63xx/plat-ca9mp-headsmp.S b/arch/arm/plat-bcm63xx/plat-ca9mp-headsmp.S
new file mode 100644
index 0000000000000000000000000000000000000000..13cbd9871fd6b8b09792939b53f2df5bc7e49e52
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/plat-ca9mp-headsmp.S
@@ -0,0 +1,76 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__CPUINIT
+
+/*
+ * Broadcom specific entry point for secondary CPUs.
+ * This provides a "holding pen" into which all secondary cores are held
+ * until we're ready for them to initialise.
+ */
+ENTRY(platform_secondary_startup)
+	/*
+	 * Get hardware CPU id of ours
+	 */
+	mrc	p15, 0, r0, c0, c0, 5		@ read MPIDR
+	and	r0, r0, #15			@ keep low 4 bits = CPU id
+	adr	r4, 1f				@ runtime address of literal pool
+	ldmia	r4, {r5, r6}			@ r5 = link-time addr, r6 = &pen_release
+	sub	r4, r4, r5			@ phys/virt offset
+	add	r6, r6, r4			@ fixed-up &pen_release
+pen:	ldr	r7, [r6]			@ spin until pen_release == our id
+	cmp	r7, r0
+	bne	pen
+	nop
+
+	/* enable the cpu cycle counter on second core. adsl driver use that */
+	mrc	p15, 0, r1, c9, c12, 0
+	ldr	r2, =5
+	orr	r1, r1, r2
+	mcr	p15, 0, r1, c9, c12, 0
+	ldr	r1, =0x80000000
+	mcr	p15, 0, r1, c9, c12, 1
+
+	@ NOTE(review): bleq relies on the EQ condition left by the
+	@ "cmp r7, r0" in the pen loop above (true on release); confirm
+	@ the intervening mrc/mcr sequence cannot disturb the flags.
+	bleq	v7_invalidate_l1
+
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+	.align
+1:	.long	.
+	.long	pen_release
+ENDPROC(platform_secondary_startup)
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/plat-bcm63xx/platsmp.c b/arch/arm/plat-bcm63xx/platsmp.c
new file mode 100644
index 0000000000000000000000000000000000000000..504fc5c707463e0658e1522847c851b9fef2777a
--- /dev/null
+++ b/arch/arm/plat-bcm63xx/platsmp.c
@@ -0,0 +1,195 @@
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
+#include <asm/smp.h>
+#include <asm/smp_scu.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+
+#ifdef CONFIG_PLAT_CA9_MPCORE
+#include <plat/ca9mpcore.h>
+#endif
+#ifdef CONFIG_PLAT_B15_CORE
+#include <plat/b15core.h>
+#endif
+#include <plat/bsp.h>
+#include <mach/hardware.h>
+#include <mach/smp.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen".
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+static inline int get_core_count(void)
+{
+#ifdef CONFIG_PLAT_CA9_MPCORE
+	void __iomem *scu_base = scu_base_addr();
+	return (scu_base ? scu_get_core_count(scu_base) : 1);
+#endif
+#ifdef CONFIG_PLAT_B15_CORE
+	/* 1 + the PART[1:0] field of MIDR */
+	return ((read_cpuid_id() >> 4) & 3) + 1;
+#endif
+}
+
+/* write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whatever they'are taking part in coherency
+ * or not.  This is necessary for the hotplug code to work reliably.
+ */
+static void __cpuinit write_pen_release(int val)
+{
+	pen_release = val;
+	smp_wmb();
+	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+	outer_clean_range(__pa(&pen_release), __pa(&pen_release +1));
+}
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	trace_hardirqs_off();
+
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+#ifdef CONFIG_PLAT_CA9_MPCORE
+	ca9mp_cpu_init();
+#endif
+
+#ifdef CONFIG_PLAT_B15_CORE
+	b15_cpu_init();
+#endif
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	write_pen_release(-1);
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	raw_spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	write_pen_release(cpu_logical_map(cpu));
+
+	/*
+	 * Send the secondary CPU a soft interrupt, thereby causing
+	 * the boot monitor to read the system wide flags register,
+	 * and branch to the address found there.
+	 */
+	gic_raise_softirq(cpumask_of(cpu), 1);
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		smp_rmb();
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	raw_spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i, ncores = get_core_count();
+
+	/* sanity check */
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+
+	set_smp_cross_call(gic_raise_softirq);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	/*
+	 * Initialise the SCU and wake up the secondary core using
+	 * wakeup_secondary().
+	 */
+#ifdef CONFIG_PLAT_CA9_MPCORE
+	scu_enable(scu_base_addr());
+#endif
+	plat_wake_secondary_cpu(max_cpus, platform_secondary_startup);
+}
+
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index f9c9f33f8cbe2f651277793400d8287eec0073c9..aa75244cea2f2d32badd2d9cd76192860a98aa0f 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1169,3 +1169,5 @@ elite_ulk		MACH_ELITE_ULK		ELITE_ULK		3888
 pov2			MACH_POV2		POV2			3889
 ipod_touch_2g		MACH_IPOD_TOUCH_2G	IPOD_TOUCH_2G		3890
 da850_pqab		MACH_DA850_PQAB		DA850_PQAB		3891
+bcm963148		MACH_BCM963148		BCM963148		9993
+bcm963138		MACH_BCM963138		BCM963138		9994
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c98598141a2fbcdb762148ec17782982d06..97afcf6e57560e09a95bd2e4f48262574303d39e 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,10 +22,18 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_PREEMPT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	add	r11, r4, #1		@ increment it
 	str	r11, [r10, #TI_PREEMPT]
+#endif
+#else
+#ifdef CONFIG_PREEMPT_COUNT
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	add	r11, r4, #1		@ increment it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 #endif
 	enable_irq
  	ldr	r4, .LCvfp
@@ -35,11 +43,20 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
 	str	r11, [r10, #TI_PREEMPT]
+#endif
+#else
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 #endif
 	mov	pc, lr
 ENDPROC(vfp_null_entry)
@@ -53,11 +70,20 @@ ENDPROC(vfp_null_entry)
 
 	__INIT
 ENTRY(vfp_testing_entry)
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
 	str	r11, [r10, #TI_PREEMPT]
+#endif
+#else
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 #endif
 	ldr	r0, VFP_arch_address
 	str	r5, [r0]		@ known non-zero value
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 3a0efaad6090c4ff0f5413faa7501fbb1853c5ce..5cd5e62bf1c17402d143b714441a8586a6f761e3 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -167,11 +167,20 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
 	str	r11, [r10, #TI_PREEMPT]
+#endif
+#else
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 #endif
 	mov	pc, r9			@ we think we have handled things
 
@@ -191,11 +200,20 @@ look_for_VFP_exceptions:
 	@ not recognised by VFP
 
 	DBGSTR	"not VFP"
+#if !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 #ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
 	str	r11, [r10, #TI_PREEMPT]
+#endif
+#else
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 #endif
 	mov	pc, lr
 
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 5ce8029f558b17a28107d602e7e3d59712262093..0671f0d8499da81d8c43c36af38753e6a856f795 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -5,6 +5,7 @@ platforms += ar7
 platforms += ath79
 platforms += bcm47xx
 platforms += bcm63xx
+platforms += bcm963xx
 platforms += cavium-octeon
 platforms += cobalt
 platforms += dec
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f0bc185b0b09b021431a486f2080d316c2b4707a..98fed02bdba788ee771c95c389cc3bff222fe1e0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -34,6 +34,7 @@ menu "Machine selection"
 
 config ZONE_DMA
 	bool
+	default y if BCM_KF_MIPS_BCM963XX
 
 choice
 	prompt "System type"
@@ -114,12 +115,30 @@ config BCM63XX
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_LITTLE_ENDIAN if BCM_KF_MIPS_BCM963XX
 	select SYS_HAS_EARLY_PRINTK
 	select SWAP_IO_SPACE
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	 Support for BCM63XX based boards
 
+config MIPS_BCM963XX
+	bool "Broadcom 96xxx boards (kern)"
+	select CEVT_R4K
+	select CSRC_R4K
+	select IRQ_CPU
+	select DMA_NONCOHERENT
+	select SYS_SUPPORTS_32BIT_KERNEL
+	select SYS_HAS_CPU_MIPS32_R1
+	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select HW_HAS_PCI
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+ 	depends on BCM_KF_MIPS_BCM963XX
+	help
+	 "Support for BCM963XX boards"
+
+
 config MIPS_COBALT
 	bool "Cobalt Server"
 	select CEVT_R4K
@@ -878,7 +897,9 @@ config ARCH_MAY_HAVE_PC_FDC
 	bool
 
 config BOOT_RAW
-	bool
+	bool 
+	prompt "boot from raw image" if BCM_KF_MIPS_BCM963XX || BCM_KF_MIPS_BCM9685XX
+	default n
 
 config CEVT_BCM1480
 	bool
@@ -1159,6 +1180,7 @@ config BOOT_ELF32
 
 config MIPS_L1_CACHE_SHIFT
 	int
+	default "4" if BCM_KF_MIPS_BCM963XX && MIPS_BCM963XX
 	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
 	default "6" if MIPS_CPU_SCACHE
 	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
@@ -2270,12 +2292,23 @@ config SYS_SUPPORTS_1000HZ
 config SYS_SUPPORTS_1024HZ
 	bool
 
+if BCM_KF_MIPS_BCM963XX || BCM_KF_MIPS_BCM9685XX
+config SYS_SUPPORTS_ARBIT_HZ
+	bool
+	default y if !SYS_SUPPORTS_48HZ && !SYS_SUPPORTS_100HZ && \
+		     !SYS_SUPPORTS_128HZ && !SYS_SUPPORTS_250HZ && \
+		     !SYS_SUPPORTS_256HZ && !SYS_SUPPORTS_1000HZ && \
+		     !SYS_SUPPORTS_1024HZ
+endif
+
+if !BCM_KF_MIPS_BCM963XX && !BCM_KF_MIPS_BCM9685XX
 config SYS_SUPPORTS_ARBIT_HZ
 	bool
 	default y if !SYS_SUPPORTS_48HZ && !SYS_SUPPORTS_100HZ && \
 		     !SYS_SUPPORTS_128HZ && !SYS_SUPPORTS_250HZ && \
 		     !SYS_SUPPORTS_256HZ && !SYS_SUPPORTS_1000HZ && \
 		     !SYS_SUPPORTS_1024HZ
+endif
 
 config HZ
 	int
@@ -2367,6 +2400,10 @@ config PCI
 config PCI_DOMAINS
 	bool
 
+if BCM_KF_MIPS_BCM963XX
+source "drivers/pci/pcie/Kconfig"
+endif
+
 source "drivers/pci/Kconfig"
 
 #
@@ -2489,7 +2526,6 @@ config MIPS32_N32
 config BINFMT_ELF32
 	bool
 	default y if MIPS32_O32 || MIPS32_N32
-
 endmenu
 
 menu "Power management options"
@@ -2521,3 +2557,8 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+
+	
+
+
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 4fedf5a51d965ab9cea3798ab1d2ed3c5298a10b..06767eabe9bf7e2b3bbe26de240e77e019ce4659 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -48,6 +48,11 @@ ifneq ($(SUBARCH),$(ARCH))
   endif
 endif
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+# removing -ffunction-sections from clfags-y
+cflags-y :=
+endif # BCM_KF # CONFIG_BCM_KF_MISC_MAKEFILE
+
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
   ifndef KBUILD_MCOUNT_RA_ADDRESS
     ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
@@ -101,7 +106,7 @@ cflags-y += -ffreestanding
 # carefully avoid to add it redundantly because gcc 3.3/3.4 complains
 # when fed the toolchain default!
 #
-# Certain gcc versions up to gcc 4.1.1 (probably 4.2-subversion as of
+# Certain gcc versions up to gcc 4.1.1 (probably 4.2-subversion as of
 # 2006-10-10 don't properly change the predefined symbols if -EB / -EL
 # are used, so we kludge that here.  A bug has been filed at
 # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
@@ -110,14 +115,21 @@ undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
 undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
 predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
 predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
-cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EB $(undef-all) $(predef-be))
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,-msmartmips)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+cflags-$(CONFIG_REMOTE_DEBUG)	+= -ggdb
+endif # BCM_KF # CONFIG_BCM_KF_MISC_MAKEFILE
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BALOO)
+endif # BCM_KF # CONFIG_BCM_KF_BALOO
+
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -225,9 +237,21 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
 LDFLAGS			+= -m $(ld-emul)
 
 ifdef CONFIG_MIPS
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+	egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+else
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
+	egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
 	egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
 	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 ifdef CONFIG_64BIT
 CHECKFLAGS		+= -m64
 endif
diff --git a/arch/mips/bcm963xx/Makefile b/arch/mips/bcm963xx/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..78ecc5996006f5f2bb6690a017936db7679c2f03
--- /dev/null
+++ b/arch/mips/bcm963xx/Makefile
@@ -0,0 +1,50 @@
+#
+# Makefile for generic Broadcom MIPS boards
+#
+# Copyright (C) 2004 Broadcom Corporation
+#
+ifeq ($(strip $(CONFIG_BRCM_IKOS)),)
+obj-y           := irq.o prom.o setup.o
+else
+obj-y           := irq.o ikos_setup.o
+endif
+obj-$(CONFIG_SMP)	+= smp-brcm.o
+
+ifneq ($(strip $(CONFIG_BCM_HOSTMIPS_PWRSAVE)),)
+obj-y += pwrmngtclk.o        
+else
+ifneq ($(strip $(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)),)
+obj-y += pwrmngtclk.o        
+endif
+endif
+
+obj-y += ktools/
+
+SRCBASE         := $(TOPDIR)
+EXTRA_CFLAGS    += -I$(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD) -I$(SRCBASE)/include -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/pmc
+#EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH) -DDBG
+EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH) 
+EXTRA_CFLAGS += -g
+EXTRA_CFLAGS += $(BRCM_WERROR_CFLAGS)
+
+ifneq ($(strip $(BUILD_SWMDK)),)
+EXTRA_CFLAGS += -DSUPPORT_SWMDK
+endif
+
+
+ifeq "$(ADSL)" "ANNEX_B"
+EXTRA_CFLAGS += -DADSL_ANNEXB
+endif
+ifeq "$(ADSL)" "SADSL"
+EXTRA_CFLAGS += -DADSL_SADSL
+endif
+ifeq "$(ADSL)" "ANNEX_C"
+EXTRA_CFLAGS += -DADSL_ANNEXC
+endif
+ifeq "$(BRCM_PHY_BONDING)" "y"
+EXTRA_CFLAGS += -DSUPPORT_DSL_BONDING
+endif
+ifeq "$(BRCM_PHY_BONDING5B)" "y"
+EXTRA_CFLAGS += -DSUPPORT_DSL_BONDING5B
+endif
+
diff --git a/arch/mips/bcm963xx/Platform b/arch/mips/bcm963xx/Platform
new file mode 100644
index 0000000000000000000000000000000000000000..32016fae95e3e999cdc1e6e244a9e34c6d41dc6a
--- /dev/null
+++ b/arch/mips/bcm963xx/Platform
@@ -0,0 +1,8 @@
+#
+# Broadcom BCM963XX boards
+#
+platform-$(CONFIG_MIPS_BCM963XX) += bcm963xx/
+cflags-$(CONFIG_MIPS_BCM963XX) += -I$(srctree)/arch/mips/include/asm/mach-bcm963xx
+cflags-$(CONFIG_MIPS_BCM963XX) += -I$(srctree)/../../bcmdrivers/opensource/include/bcm963xx
+cflags-$(CONFIG_MIPS_BCM963XX) += -I$(srctree)/../../shared/opensource/include/bcm963xx
+load-$(CONFIG_MIPS_BCM963XX) := 0x80010000
diff --git a/arch/mips/bcm963xx/ikos_setup.c b/arch/mips/bcm963xx/ikos_setup.c
new file mode 100644
index 0000000000000000000000000000000000000000..8e37e2a6cbd9362f3407241e12d4bb5b1c884fc3
--- /dev/null
+++ b/arch/mips/bcm963xx/ikos_setup.c
@@ -0,0 +1,263 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_IKOS) && defined(CONFIG_BRCM_IKOS)
+
+/*
+<:copyright-BRCM:2013:GPL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+/*
+ * Generic setup routines for Broadcom 963xx MIPS IKOS emulation environment
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/console.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+
+#include <asm/addrspace.h>
+#include <asm/bcache.h>
+#include <asm/irq.h>
+#include <asm/time.h>
+#include <asm/reboot.h>
+//#include <asm/gdb-stub.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+#include <bcm_map_part.h>
+#include <bcm_cpu.h>
+#include <bcm_intr.h>
+#include <boardparms.h>
+
+#include "shared_utils.h"
+
+unsigned long getMemorySize(void);
+
+#if 1
+
+/***************************************************************************
+ * C++ New and delete operator functions
+ ***************************************************************************/
+
+/* void *operator new(unsigned int sz) */
+void *_Znwj(unsigned int sz)
+{
+    return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* void *operator new[](unsigned int sz)*/
+void *_Znaj(unsigned int sz)
+{
+    return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* placement new operator */
+/* void *operator new (unsigned int size, void *ptr) */
+void *ZnwjPv(unsigned int size, void *ptr)
+{
+    return ptr;
+}
+
+/* void operator delete(void *m) */
+void _ZdlPv(void *m)
+{
+    kfree(m);
+}
+
+/* void operator delete[](void *m) */
+void _ZdaPv(void *m)
+{
+    kfree(m);
+}
+
+EXPORT_SYMBOL(_Znwj);
+EXPORT_SYMBOL(_Znaj);
+EXPORT_SYMBOL(ZnwjPv);
+EXPORT_SYMBOL(_ZdlPv);
+EXPORT_SYMBOL(_ZdaPv);
+
+#endif
+
+void __init plat_mem_setup(void)
+{
+    add_memory_region(0, (getMemorySize()), BOOT_MEM_RAM);
+    {
+        volatile unsigned long *cr;
+        uint32 mipsBaseAddr = MIPS_BASE;
+
+        cr = (void *)(mipsBaseAddr + MIPS_RAC_CR0);
+    	*cr = *cr | RAC_D | RAC_PF_D;
+
+#if defined(MIPS_RAC_CR1)
+        cr = (void *)(mipsBaseAddr + MIPS_RAC_CR1);
+    	*cr = *cr | RAC_D | RAC_PF_D;
+#endif        
+    }
+}
+
+void __init plat_time_init(void)
+{
+    /* hard-coded timer rate: 160MHz (CPU presumably 320MHz with the count register at half rate — confirm) */
+    mips_hpt_frequency = 160000000;
+    // Enable cp0 counter/compare interrupt when
+    // not using workaround for clock divide
+    write_c0_status(IE_IRQ5 | read_c0_status());
+}
+
+extern void stop_other_cpu(void);  // in arch/mips/kernel/smp.c
+
+/* IKOS does not need real restart. Same as halt */
+static void brcm_machine_restart(char *command)
+{
+#if defined(CONFIG_SMP)
+    stop_other_cpu();
+#endif
+    printk("IKOS restart --> system halted\n");
+    local_irq_disable();
+    while (1);
+}
+
+static void brcm_machine_halt(void)
+{
+    /*
+     * we don't support power off yet.  This halt will cause both CPU's to
+     * spin in a while(1) loop with interrupts disabled.  (Used for gathering
+     * wlan debug dump via JTAG)
+     */
+#if defined(CONFIG_SMP)
+    stop_other_cpu();
+#endif
+    printk("System halted\n");
+    local_irq_disable();
+    while (1);
+}
+
+/* this function implements any necessary hardware-related initialization for ikos */
+static int __init ikos_hw_init(void)
+{
+    return 0;
+}
+#define bcm63xx_specific_hw_init() ikos_hw_init()
+
+static int __init bcm63xx_hw_init(void)
+{
+    return bcm63xx_specific_hw_init();
+}
+arch_initcall(bcm63xx_hw_init);
+
+
+static int __init brcm63xx_setup(void)
+{
+    extern int panic_timeout;
+
+    _machine_restart = brcm_machine_restart;
+    _machine_halt = brcm_machine_halt;
+    pm_power_off = brcm_machine_halt;
+
+    panic_timeout = 1;
+
+    return 0;
+}
+
+arch_initcall(brcm63xx_setup);
+
+
+unsigned long getMemorySize(void)
+{
+    return(16 * 1024 * 1024); 
+}
+
+int kerSysGetSdramSize( void )
+{
+  return getMemorySize();
+} /* kerSysGetSdramSize */
+
+
+/* return the cmdline for ramdisk boot */
+static void __init create_cmdline(char *cmdline)
+{
+
+}
+
+extern struct plat_smp_ops brcm_smp_ops;
+
+void __init prom_init(void)
+{
+    int argc = fw_arg0;
+    u32 *argv = (u32 *)CKSEG0ADDR(fw_arg1);
+    int i;
+
+    PERF->IrqControl[0].IrqMask=0;
+
+    arcs_cmdline[0] = '\0';
+
+    create_cmdline(arcs_cmdline);
+
+    strcat(arcs_cmdline, " ");
+
+    for (i = 1; i < argc; i++) {
+        strcat(arcs_cmdline, (char *)CKSEG0ADDR(argv[i]));
+        if (i < (argc - 1))
+            strcat(arcs_cmdline, " ");
+    }
+
+#if defined (CONFIG_SMP)
+    register_smp_ops(&brcm_smp_ops);
+#endif
+
+}
+
+
+void __init allocDspModBuffers(void);
+/*
+*****************************************************************************
+*  stub functions for ikos build.
+*****************************************************************************
+*/
+void __init allocDspModBuffers(void)
+{
+}
+
+void __init prom_free_prom_memory(void)
+{
+
+}
+
+/* ikos does not use external interrupt */
+unsigned int kerSysGetExtIntInfo(unsigned int irq)
+{
+    return (unsigned int)(-1);
+}
+
+const char *get_system_type(void)
+{
+    return "ikos emulation system";
+}
+
+
+
+#endif
diff --git a/arch/mips/bcm963xx/irq.c b/arch/mips/bcm963xx/irq.c
new file mode 100644
index 0000000000000000000000000000000000000000..41c467887d8b3b90b75c6cb087b76577a1f91be4
--- /dev/null
+++ b/arch/mips/bcm963xx/irq.c
@@ -0,0 +1,885 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * Interrupt control functions for Broadcom 963xx MIPS boards
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/linkage.h>
+
+#include <asm/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/signal.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+#include <linux/bcm_assert.h>
+#include <boardparms.h>
+#include <board.h>
+#if defined(CONFIG_BCM_EXT_TIMER)
+#include <bcm_ext_timer.h>
+#endif
+
+#if defined(CONFIG_SMP)
+    #define AFFINITY_OF(d) (*(d)->affinity)
+#else
+    #define AFFINITY_OF(d) ((void)(d), CPU_MASK_CPU0)
+#endif
+
+#if IRQ_BITS == 64
+    #define IRQ_TYPE uint64
+#else
+    #define IRQ_TYPE uint32
+#endif
+
+
+volatile IrqControl_t * brcm_irq_ctrl[NR_CPUS];
+spinlock_t brcm_irqlock;
+
+#if defined(CONFIG_SMP)
+extern DEFINE_PER_CPU(unsigned int, ipi_pending);
+#endif
+
+static void irq_dispatch_int(void)
+{
+    int cpu = smp_processor_id();
+    IRQ_TYPE pendingIrqs;
+    static IRQ_TYPE irqBit[NR_CPUS];
+
+    static uint32 isrNumber[NR_CPUS] = {[0 ... NR_CPUS-1] = (sizeof(IRQ_TYPE) * 8) - 1};
+
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    IRQ_TYPE pendingExtIrqs;
+    static IRQ_TYPE extIrqBit[NR_CPUS];
+    static uint32 extIsrNumber[NR_CPUS] = {[0 ... NR_CPUS-1] = (sizeof(IRQ_TYPE) * 8) - 1};
+#endif
+
+    spin_lock(&brcm_irqlock);
+#if defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    pendingIrqs = PERF->IrqStatus & brcm_irq_ctrl[cpu]->IrqMask;
+    pendingExtIrqs = PERF->ExtIrqStatus & brcm_irq_ctrl[cpu]->ExtIrqMask;
+#else
+    pendingIrqs = brcm_irq_ctrl[cpu]->IrqStatus & brcm_irq_ctrl[cpu]->IrqMask;
+#if defined(CONFIG_BCM963268)
+    pendingExtIrqs = brcm_irq_ctrl[cpu]->ExtIrqStatus & brcm_irq_ctrl[cpu]->ExtIrqMask;
+#endif
+#if defined(CONFIG_BCM96838)
+    pendingExtIrqs = PERFEXT->IrqControl[cpu].IrqStatus & PERFEXT->IrqControl[cpu].IrqMask;
+#endif
+#endif
+    spin_unlock(&brcm_irqlock);
+
+    if (pendingIrqs) 
+    {
+        while (1) {
+            irqBit[cpu] <<= 1;
+            isrNumber[cpu]++;
+            if (isrNumber[cpu] == (sizeof(IRQ_TYPE) * 8)) {
+                isrNumber[cpu] = 0;
+                irqBit[cpu] = 0x1;
+            }
+            if (pendingIrqs & irqBit[cpu]) {
+                unsigned int irq = isrNumber[cpu] + INTERNAL_ISR_TABLE_OFFSET;
+#if defined(CONFIG_BCM96838)
+                if (irq == INTERRUPT_ID_EXTERNAL) 
+                {
+                    int i;
+                    unsigned int reg = PERF->ExtIrqCfg;
+                    unsigned int status = (reg & EI_STATUS_MASK) >> EI_STATUS_SHFT;
+                    unsigned int mask = (reg & EI_MASK_MASK) >> EI_MASK_SHFT;
+                    status &=mask;
+
+                    for(i = 0; i < 6; i++)
+                    {
+                        if (status & (1 << i))
+                        {
+                            irq = INTERRUPT_ID_EXTERNAL_0 + i;
+                            break;
+                        }
+                    }
+                    spin_lock(&brcm_irqlock);
+                    PERF->ExtIrqCfg |= (1 << (i + EI_CLEAR_SHFT));      // Clear
+                    spin_unlock(&brcm_irqlock);
+                }
+#elif !defined(CONFIG_BCM963381) 
+#if defined(CONFIG_BCM96848)
+                if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_7)
+#else
+                if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_3) 
+#endif
+                {   spin_lock(&brcm_irqlock);
+                    PERF->ExtIrqCfg |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_CLEAR_SHFT));      // Clear
+                    spin_unlock(&brcm_irqlock);
+                }
+#endif
+                do_IRQ(irq);
+                break;
+            }
+        }
+    }
+
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    if (pendingExtIrqs) 
+    {
+        while (1) {
+            extIrqBit[cpu] <<= 1;
+            extIsrNumber[cpu]++;
+            if (extIsrNumber[cpu] == (sizeof(IRQ_TYPE) * 8)) {
+                extIsrNumber[cpu] = 0;
+                extIrqBit[cpu] = 0x1;
+            }
+            if (pendingExtIrqs & extIrqBit[cpu]) {
+                unsigned int extIrq = extIsrNumber[cpu] + INTERNAL_EXT_ISR_TABLE_OFFSET;
+#if defined(CONFIG_BCM963381)
+                if (extIrq >= INTERRUPT_ID_EXTERNAL_0 && extIrq <= INTERRUPT_ID_EXTERNAL_7) {
+                    spin_lock(&brcm_irqlock);
+                    PERF->ExtIrqCfg |= (1 << (extIrq - INTERRUPT_ID_EXTERNAL_0 + EI_CLEAR_SHFT));      // Clear
+                    spin_unlock(&brcm_irqlock);
+                }
+#endif
+                do_IRQ(extIrq);
+                break;
+            }
+        }
+    }
+#endif
+}
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE
+extern void BcmPwrMngtResumeFullSpeed (void);
+#endif
+
+
+asmlinkage void plat_irq_dispatch(void)
+{
+    u32 cause;
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE
+    BcmPwrMngtResumeFullSpeed();
+#endif
+
+    while((cause = (read_c0_cause() & read_c0_status() & CAUSEF_IP))) {
+        if (cause & CAUSEF_IP7)
+            do_IRQ(MIPS_TIMER_INT);
+        else if (cause & CAUSEF_IP0)
+            do_IRQ(INTERRUPT_ID_SOFTWARE_0);
+        else if (cause & CAUSEF_IP1)
+            do_IRQ(INTERRUPT_ID_SOFTWARE_1);
+#if defined (CONFIG_SMP)
+#if defined(CONFIG_BCM96838)
+        else if (cause & (CAUSEF_IP3 | CAUSEF_IP4))
+#else
+        else if (cause & (CAUSEF_IP2 | CAUSEF_IP3))
+#endif
+#else 
+#if defined(CONFIG_BCM96838)
+        else if (cause & CAUSEF_IP3)
+#else
+        else if (cause & CAUSEF_IP2)
+#endif
+#endif
+            irq_dispatch_int();
+    }
+}
+
+#if !defined(CONFIG_BCM96838)
+// bill
+void disable_brcm_irqsave(struct irq_data *data, unsigned long stateSaveArray[])
+{
+    int cpu;
+    unsigned long flags;
+    unsigned int irq = data->irq;
+
+    // Test for valid interrupt.
+    if ((irq >= INTERNAL_ISR_TABLE_OFFSET ) && (irq <= INTERRUPT_ID_LAST))
+    {
+        // Disable this processor's interrupts and acquire spinlock.
+        spin_lock_irqsave(&brcm_irqlock, flags);
+
+        // Loop thru each processor.
+        for_each_cpu_mask(cpu, AFFINITY_OF(data))
+        {
+            // Save original interrupt's enable state.
+            stateSaveArray[cpu] = brcm_irq_ctrl[cpu]->IrqMask & (((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+
+            // Clear each cpu's selected interrupt enable.
+            brcm_irq_ctrl[cpu]->IrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+
+#if defined(CONFIG_BCM963268) || defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+            // Save original interrupt's enable state.
+            stateSaveArray[cpu] = brcm_irq_ctrl[cpu]->ExtIrqMask & (((IRQ_TYPE)1) << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+
+            // Clear each cpu's selected interrupt enable.
+            brcm_irq_ctrl[cpu]->ExtIrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#endif
+        }
+
+        // Release spinlock and enable this processor's interrupts.
+        spin_unlock_irqrestore(&brcm_irqlock, flags);
+    }
+}
+
+
+// bill
+void restore_brcm_irqsave(struct irq_data *data, unsigned long stateSaveArray[])
+{
+    int cpu;
+    unsigned long flags;
+
+    // Disable this processor's interrupts and acquire spinlock.
+    spin_lock_irqsave(&brcm_irqlock, flags);
+
+    // Loop thru each processor.
+    for_each_cpu_mask(cpu, AFFINITY_OF(data))
+    {
+        // Restore cpu's original interrupt enable (off or on).
+        brcm_irq_ctrl[cpu]->IrqMask |= stateSaveArray[cpu];
+#if defined(CONFIG_BCM963268) || defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+        brcm_irq_ctrl[cpu]->ExtIrqMask |= stateSaveArray[cpu];
+#endif
+    }
+
+    // Release spinlock and enable this processor's interrupts.
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+#endif //#if !defined(CONFIG_BCM96838)
+
+
+/*
+ * Core of irq enable: unmask 'irq' in the per-cpu controller registers for
+ * every CPU in 'affinity'.  Software irqs are enabled via the CP0 status IM
+ * bits instead.  For external interrupt pins, additionally program the
+ * trigger type (level/edge, sense) from the board's external-interrupt
+ * info, clear any latched status, and unmask the pin.
+ * Takes and releases brcm_irqlock internally.
+ */
+static __attribute__((__always_inline__)) void enable_brcm_irq_data_locked(unsigned long irq, cpumask_t affinity)
+{
+    int cpu;
+    unsigned long flags;
+    int levelOrEdge = 1;
+    int detectSense = 0;
+
+    spin_lock_irqsave(&brcm_irqlock, flags);
+
+    /* internal peripheral interrupts: set the per-cpu mask bit */
+    if(( irq >= INTERNAL_ISR_TABLE_OFFSET ) 
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+        && ( irq < (INTERNAL_ISR_TABLE_OFFSET+64) ) 
+#endif
+        ) 
+    {
+
+        for_each_cpu_mask(cpu, affinity) {
+            brcm_irq_ctrl[cpu]->IrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_ISR_TABLE_OFFSET));
+        }
+    }
+#if defined(CONFIG_BCM96838)
+    /* 6838: the dedicated external irq ids share one controller line */
+    else if((irq >= INTERRUPT_ID_EXTERNAL_0) && (irq <= INTERRUPT_ID_EXTERNAL_5))
+    {
+        for_each_cpu_mask(cpu, affinity) {
+            brcm_irq_ctrl[cpu]->IrqMask |= (((IRQ_TYPE)1)  << (INTERRUPT_ID_EXTERNAL - INTERNAL_ISR_TABLE_OFFSET));
+        }
+    }
+#endif
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    /* second (extended) bank of internal interrupts */
+    else if(( irq >= INTERNAL_EXT_ISR_TABLE_OFFSET ) &&
+            ( irq < (INTERNAL_EXT_ISR_TABLE_OFFSET+64) ) ) 
+    {
+        for_each_cpu_mask(cpu, affinity) {
+#if defined(CONFIG_BCM963268) || defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+            brcm_irq_ctrl[cpu]->ExtIrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#else
+            PERFEXT->IrqControl[cpu].IrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#endif
+        }
+    }
+#endif
+    /* software irqs live in the CP0 status register, not the controller */
+    else if ((irq == INTERRUPT_ID_SOFTWARE_0) || (irq == INTERRUPT_ID_SOFTWARE_1)) {
+        set_c0_status(0x1 << (STATUSB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
+    }
+
+    /* Determine the type of IRQ trigger required */
+    if ( (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_3) 
+#if defined(CONFIG_BCM96838)    
+     || (irq >= INTERRUPT_ID_EXTERNAL_4 && irq <= INTERRUPT_ID_EXTERNAL_5)
+#elif defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+     || (irq >= INTERRUPT_ID_EXTERNAL_4 && irq <= INTERRUPT_ID_EXTERNAL_7)
+#endif
+       )
+    {
+        if( IsExtIntrTypeActHigh(kerSysGetExtIntInfo(irq)) )
+            detectSense = 1;
+        else
+            detectSense = 0;
+
+        if( IsExtIntrTypeSenseLevel(kerSysGetExtIntInfo(irq)) )
+            levelOrEdge = 1;
+        else
+            levelOrEdge = 0;
+    }
+
+
+    /* program trigger configuration for external interrupt pins */
+#if defined(CONFIG_BCM96838)
+    if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_5) {
+#elif defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_7) {
+#else
+    if (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_3) {
+#endif
+        PERF->ExtIrqCfg &= ~(1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_INSENS_SHFT));    // Edge insensitive
+        if ( levelOrEdge ) {
+        PERF->ExtIrqCfg |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_LEVEL_SHFT));      // Level triggered
+        } else {
+            PERF->ExtIrqCfg &= ~(1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_LEVEL_SHFT));     // Edge triggered        
+        }
+        if ( detectSense ) {
+            PERF->ExtIrqCfg |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_SENSE_SHFT));      // High / Rising triggered
+        } else {
+            PERF->ExtIrqCfg &= ~(1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_SENSE_SHFT));     // Low / Falling triggered        
+        }
+        PERF->ExtIrqCfg |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_CLEAR_SHFT));      // Clear
+#if defined (CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+        PERF->ExtIrqSts |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_MASK_SHFT));       // Unmask
+#else
+        PERF->ExtIrqCfg |= (1 << (irq - INTERRUPT_ID_EXTERNAL_0 + EI_MASK_SHFT));       // Unmask
+#endif
+    }
+
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+
+/* irq_chip .irq_enable/.irq_unmask hook: enable on the irq's affinity mask. */
+__attribute__((__always_inline__)) void enable_brcm_irq_data(struct irq_data *data)
+{
+   enable_brcm_irq_data_locked(data->irq, AFFINITY_OF(data));
+}
+
+/*
+ * Enable 'irq' by number (exported for drivers).  On SMP the irq's current
+ * affinity is honoured; an empty affinity falls back to CPU0.
+ */
+void enable_brcm_irq_irq(unsigned int irq)
+{
+#if defined(CONFIG_SMP)
+    // Note: for performance, no bounds checks are done on the below two lines
+    struct irq_desc *desc = irq_desc + irq;
+    /* Fix: the old code declared a cpumask_var_t and used it without
+     * alloc_cpumask_var(); with CONFIG_CPUMASK_OFFSTACK that is an
+     * uninitialized pointer.  Use a plain on-stack cpumask instead. */
+    struct cpumask affinity;
+
+    cpumask_copy(&affinity, desc->irq_data.affinity);
+
+    // sanity check: fall back to CPU0 when the affinity mask is empty
+    if (affinity.bits[0] == 0)
+    {
+        //WARN_ONCE(1, "irq %d has no affinity!!!\n", irq);
+        cpumask_copy(&affinity, &CPU_MASK_CPU0);
+    }
+
+    enable_brcm_irq_data_locked(irq, affinity);
+#else
+    enable_brcm_irq_data_locked(irq, CPU_MASK_CPU0);
+#endif
+}
+
+/*
+ * Mask 'irq' in the per-cpu controller registers for every CPU in
+ * 'affinity'.  Caller must hold brcm_irqlock.  On 6838 the dedicated
+ * external irq ids are first folded onto the shared INTERRUPT_ID_EXTERNAL
+ * line.  Software irqs are not handled here (see the callers).
+ */
+static __attribute__((__always_inline__)) void __disable_ack_brcm_irq(unsigned int irq, cpumask_t affinity)
+{
+    int cpu;
+
+#if defined(CONFIG_BCM96838)
+    if((irq >= INTERRUPT_ID_EXTERNAL_0) && (irq <= INTERRUPT_ID_EXTERNAL_5))
+        irq = INTERRUPT_ID_EXTERNAL;
+#endif
+
+    /* primary bank of internal interrupts */
+    if(( irq >= INTERNAL_ISR_TABLE_OFFSET )  
+        && ( irq < (INTERNAL_ISR_TABLE_OFFSET+64) ))
+    {
+        for_each_cpu_mask(cpu, affinity) {
+            brcm_irq_ctrl[cpu]->IrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+        }
+    }
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    /* extended bank of internal interrupts */
+    else if(( irq >= INTERNAL_EXT_ISR_TABLE_OFFSET ) &&
+            ( irq < (INTERNAL_EXT_ISR_TABLE_OFFSET+64) )) 
+    {
+        for_each_cpu_mask(cpu, affinity) {
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+            brcm_irq_ctrl[cpu]->ExtIrqMask &= ~(((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#else
+            PERFEXT->IrqControl[cpu].IrqMask &= ~(((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#endif
+        }
+    }
+#endif
+}
+
+/*
+ * Mask 'irq' on the CPUs in 'affinity' under brcm_irqlock.  Software irqs
+ * are masked via the CP0 status IM bits rather than the controller.
+ */
+static __attribute__((__always_inline__)) void disable_brcm_irq_data_locked(unsigned long irq, cpumask_t affinity)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&brcm_irqlock, flags);
+    __disable_ack_brcm_irq(irq, affinity);
+    if ((irq == INTERRUPT_ID_SOFTWARE_0) || (irq == INTERRUPT_ID_SOFTWARE_1)) {
+        clear_c0_status(0x1 << (STATUSB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
+    }
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+
+/*
+ * Disable 'irq' by number (exported for drivers).  On SMP the irq's current
+ * affinity is honoured; an empty affinity falls back to CPU0.
+ */
+void disable_brcm_irq_irq(unsigned int irq)
+{
+#if defined(CONFIG_SMP)
+    struct irq_desc *desc = irq_desc + irq;
+    /* Fix: the old code declared a cpumask_var_t and used it without
+     * alloc_cpumask_var(); with CONFIG_CPUMASK_OFFSTACK that is an
+     * uninitialized pointer.  Use a plain on-stack cpumask instead. */
+    struct cpumask affinity;
+
+    cpumask_copy(&affinity, desc->irq_data.affinity);
+
+    if (unlikely(affinity.bits[0] == 0))
+    {
+        //WARN_ONCE(1, "irq %d has no affinity!!!\n", irq);
+        cpumask_copy(&affinity, &CPU_MASK_CPU0);
+    }
+
+    disable_brcm_irq_data_locked(irq, affinity);
+#else
+    disable_brcm_irq_data_locked(irq, CPU_MASK_CPU0);
+#endif
+}
+
+/* irq_chip .irq_disable/.irq_mask hook: mask on the irq's affinity mask. */
+void disable_brcm_irq_data(struct irq_data *data)
+{
+    disable_brcm_irq_data_locked(data->irq, AFFINITY_OF(data));
+}
+
+/*
+ * irq_chip .irq_ack hook: mask the source in the per-cpu controller and,
+ * for the software interrupts, clear the pending CP0 cause bit.  For SW0
+ * on SMP the per-cpu ipi_pending flag of this CPU is cleared first, and
+ * the cause bit is re-raised if the other CPU's flag is still set so that
+ * its IPI is not lost.
+ */
+void ack_brcm_irq(struct irq_data *data)
+{
+    unsigned long flags;
+    unsigned int irq = data->irq;
+
+    spin_lock_irqsave(&brcm_irqlock, flags);
+    __disable_ack_brcm_irq(irq, AFFINITY_OF(data));
+
+#if defined(CONFIG_SMP)
+    if (irq == INTERRUPT_ID_SOFTWARE_0) {
+        int this_cpu = smp_processor_id();
+        int other_cpu = !this_cpu;
+        per_cpu(ipi_pending, this_cpu) = 0;
+        mb();   /* flag update must be visible before clearing the cause bit */
+        clear_c0_cause(1<<CAUSEB_IP0);
+        if (per_cpu(ipi_pending, other_cpu)) {
+            set_c0_cause(1<<CAUSEB_IP0);
+        }
+    }
+#else
+    if (irq == INTERRUPT_ID_SOFTWARE_0) {
+        clear_c0_cause(1<<CAUSEB_IP0);
+    }
+#endif
+
+    if (irq == INTERRUPT_ID_SOFTWARE_1) {
+        clear_c0_cause(1<<CAUSEB_IP1);
+    }
+
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+
+
+/*
+ * irq_chip .irq_mask_ack hook: same as ack_brcm_irq(), but additionally
+ * masks the software interrupts in the CP0 status IM bits so they stay
+ * disabled until explicitly unmasked.
+ */
+void mask_ack_brcm_irq(struct irq_data *data)
+{
+    unsigned long flags;
+    unsigned int irq = data->irq;
+
+    spin_lock_irqsave(&brcm_irqlock, flags);
+    __disable_ack_brcm_irq(irq, AFFINITY_OF(data));
+
+#if defined(CONFIG_SMP)
+    if (irq == INTERRUPT_ID_SOFTWARE_0) {
+        int this_cpu = smp_processor_id();
+        int other_cpu = !this_cpu;
+        per_cpu(ipi_pending, this_cpu) = 0;
+        mb();   /* flag update must be visible before clearing the cause bit */
+        clear_c0_cause(1<<CAUSEB_IP0);
+        if (per_cpu(ipi_pending, other_cpu)) {
+            set_c0_cause(1<<CAUSEB_IP0);
+        }
+        clear_c0_status(1<<STATUSB_IP0);
+    }
+#else
+    if (irq == INTERRUPT_ID_SOFTWARE_0) {
+        clear_c0_status(1<<STATUSB_IP0);
+        clear_c0_cause(1<<CAUSEB_IP0);
+    }
+#endif
+
+    if (irq == INTERRUPT_ID_SOFTWARE_1) {
+        clear_c0_status(1<<STATUSB_IP1);
+        clear_c0_cause(1<<CAUSEB_IP1);
+    }
+
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+
+
+/* Intentionally empty .irq_unmask used by brcm_irq_chip_no_unmask: the irq
+ * stays masked after handling until the driver re-enables it explicitly. */
+void unmask_brcm_irq_noop(struct irq_data *data)
+{
+}
+
+/*
+ * irq_chip .irq_set_affinity hook: program the per-cpu mask registers so
+ * the irq is delivered only to online CPUs present in 'dest', and only
+ * while the irq is not disabled.  On 6838 the dedicated external irq ids
+ * are folded onto the shared INTERRUPT_ID_EXTERNAL line first.
+ * Always returns 0; 'force' is ignored.
+ */
+int set_brcm_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+    int cpu;
+    unsigned int irq = data->irq;
+    unsigned long flags;
+    int ret = 0;
+
+    spin_lock_irqsave(&brcm_irqlock, flags);
+
+#if defined(CONFIG_BCM96838)
+    if((irq >= INTERRUPT_ID_EXTERNAL_0) && (irq <= INTERRUPT_ID_EXTERNAL_5))
+        irq = INTERRUPT_ID_EXTERNAL;
+#endif
+
+    /* primary bank: set the bit on selected CPUs, clear it on the rest */
+    if(( irq >= INTERNAL_ISR_TABLE_OFFSET ) 
+        && ( irq < (INTERNAL_ISR_TABLE_OFFSET+64) ) 
+        ) 
+    {
+        for_each_online_cpu(cpu) {
+            if (cpu_isset(cpu, *dest) && !(irqd_irq_disabled(data))) {
+                brcm_irq_ctrl[cpu]->IrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_ISR_TABLE_OFFSET));
+            }
+            else {
+                brcm_irq_ctrl[cpu]->IrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_ISR_TABLE_OFFSET));
+            }
+        }
+    }
+
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96838) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848) 
+    /* extended bank: same policy on the ExtIrq/PERFEXT registers */
+    if(( irq >= INTERNAL_EXT_ISR_TABLE_OFFSET ) 
+        && ( irq < (INTERNAL_EXT_ISR_TABLE_OFFSET+64) ) 
+        )
+    {
+        for_each_online_cpu(cpu) {
+            if (cpu_isset(cpu, *dest) && !(irqd_irq_disabled(data))) {
+
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+                brcm_irq_ctrl[cpu]->ExtIrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#else
+                PERFEXT->IrqControl[cpu].IrqMask |= (((IRQ_TYPE)1)  << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#endif
+            }
+            else {
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+                brcm_irq_ctrl[cpu]->ExtIrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#else
+                PERFEXT->IrqControl[cpu].IrqMask &= ~(((IRQ_TYPE)1) << (irq - INTERNAL_EXT_ISR_TABLE_OFFSET));
+#endif
+            }
+        }
+    }
+#endif
+
+    spin_unlock_irqrestore(&brcm_irqlock, flags);
+
+    return ret;
+}
+
+
+/* Standard chip: the irq is unmasked again after a level irq is handled. */
+static struct irq_chip brcm_irq_chip = {
+    .name = "BCM63xx",
+    .irq_enable = enable_brcm_irq_data,
+    .irq_disable = disable_brcm_irq_data,
+    .irq_ack = ack_brcm_irq,
+    .irq_mask = disable_brcm_irq_data,
+    .irq_mask_ack = mask_ack_brcm_irq,
+    .irq_unmask = enable_brcm_irq_data,
+    .irq_set_affinity = set_brcm_affinity
+};
+
+/* Variant with a no-op unmask: the irq stays masked after handling until
+ * the driver re-enables it explicitly (used for INTR_REARM_NO, e.g. NAPI). */
+static struct irq_chip brcm_irq_chip_no_unmask = {
+    .name = "BCM63xx_no_unmask",
+    .irq_enable = enable_brcm_irq_data,
+    .irq_disable = disable_brcm_irq_data,
+    .irq_ack = ack_brcm_irq,
+    .irq_mask = disable_brcm_irq_data,
+    .irq_mask_ack = mask_ack_brcm_irq,
+    .irq_unmask = unmask_brcm_irq_noop,
+    .irq_set_affinity = set_brcm_affinity
+};
+
+
+/*
+ * Boot-time interrupt setup: point brcm_irq_ctrl[] at each CPU's controller
+ * registers, map every irq to brcm_irq_chip with level-type handling, clear
+ * latched external-irq status (6838), and open the CP0 interrupt mask lines
+ * used by this chip / SMP configuration.
+ */
+void __init arch_init_irq(void)
+{
+    int i;
+
+    spin_lock_init(&brcm_irqlock);
+
+    for (i = 0; i < NR_CPUS; i++) {
+        brcm_irq_ctrl[i] = &PERF->IrqControl[i];
+    }
+
+    for (i = 0; i < NR_IRQS; i++) {
+        irq_set_chip_and_handler(i, &brcm_irq_chip, handle_level_irq); 
+    }
+
+#if defined(CONFIG_BCM96838)
+    /* clear any external-irq status latched before boot */
+    PERF->ExtIrqCfg |= EI_CLEAR_MASK;
+#endif
+
+    clear_c0_status(ST0_BEV);
+#if defined(CONFIG_SMP)
+    // make interrupt mask same as TP1, miwang 6/14/10
+#if defined(CONFIG_BCM96838)
+    change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ2);
+#else
+    change_c0_status(ST0_IM, IE_IRQ0|IE_IRQ1);
+#endif
+#else
+#if defined(CONFIG_BCM96838)
+    change_c0_status(ST0_IM, IE_IRQ1);
+#else
+    change_c0_status(ST0_IM, IE_IRQ0);
+#endif
+#endif
+
+
+#ifdef CONFIG_REMOTE_DEBUG
+    rs_kgdb_hook(0);
+#endif
+}
+
+
+#define INTR_NAME_MAX_LENGTH 16
+
+// This is a wrapper to the standard Linux request_irq
+// Differences are:
+//    - The irq won't be re-enabled after the ISR is done and needs to be explicitly re-enabled, which is good for NAPI drivers.
+//      The change is implemented by filling in a no-op unmask function in brcm_irq_chip_no_unmask and setting it as the irq_chip
+//    - IRQ flags and interrupt names are automatically set
+// Either request_irq or BcmHalMapInterrupt can be used. Just make sure re-enabling IRQ is handled correctly.
+
+/*
+ * Register 'pfunc' for 'irq' with an auto-generated "brcm_<irq>" name,
+ * no auto-rearm (driver must re-enable) and default affinity.
+ * Returns 0 on success (see BcmHalMapInterruptEx).
+ */
+unsigned int BcmHalMapInterrupt(FN_HANDLER pfunc, unsigned int param, unsigned int irq)
+{
+    char devname[INTR_NAME_MAX_LENGTH];
+
+    /* snprintf (not sprintf): "brcm_" + 10 digits + NUL exactly fills the
+     * 16-byte buffer for the largest 32-bit irq number, so guard against
+     * truncation rather than relying on that boundary. */
+    snprintf(devname, sizeof(devname), "brcm_%d", irq);
+    return BcmHalMapInterruptEx(pfunc, param, irq, devname,
+                                INTR_REARM_NO, INTR_AFFINITY_DEFAULT);
+}
+
+
+// This is a wrapper to the standard Linux request_irq for the VOIP driver
+// Differences are:
+// The irq is not automatically enabled when the ISR is registered.
+// The irq is automatically re-enabled when the ISR is done.
+// Interrupts are re-enabled when the ISR is invoked.
+/*
+ * VOIP variant of BcmHalMapInterrupt(): registers with auto-rearm so the
+ * irq is re-enabled when the ISR completes.  Returns 0 on success.
+ */
+unsigned int BcmHalMapInterruptVoip(FN_HANDLER pfunc, unsigned int param, unsigned int irq)
+{
+    char devname[INTR_NAME_MAX_LENGTH];
+
+    /* snprintf (not sprintf) so the fixed 16-byte buffer can never overrun */
+    snprintf(devname, sizeof(devname), "brcm_%d", irq);
+    return BcmHalMapInterruptEx(pfunc, param, irq, devname,
+                                INTR_REARM_YES, INTR_AFFINITY_DEFAULT);
+}
+
+
+/** Broadcom wrapper to linux request_irq.  This version does more stuff.
+ *
+ * @param pfunc (IN) interrupt handler function
+ * @param param (IN) context/cookie that is passed to interrupt handler
+ * @param irq   (IN) interrupt number
+ * @param interruptName (IN) descriptive name for the interrupt.  15 chars
+ *                           or less.  This function will make a copy of
+ *                           the name.
+ * @param INTR_REARM_MODE    (IN) See bcm_intr.h
+ * @param INTR_AFFINITY_MODE (IN) See bcm_intr.h
+ *
+ * @return 0 on success.
+ */
+unsigned int BcmHalMapInterruptEx(FN_HANDLER pfunc,
+                                  unsigned int param,
+                                  unsigned int irq,
+                                  const char *interruptName,
+                                  INTR_REARM_MODE_ENUM rearmMode,
+                                  INTR_AFFINITY_MODE_ENUM affinMode)
+{
+    char *devname;
+    unsigned long irqflags;
+    struct irq_chip *chip;
+    unsigned int retval;
+
+#if defined(CONFIG_BCM_KF_ASSERT)
+    BCM_ASSERT_R(interruptName != NULL, -1);
+    BCM_ASSERT_R(strlen(interruptName) < INTR_NAME_MAX_LENGTH, -1);
+#endif
+
+    /* request_irq() keeps a pointer to the name, so take a heap copy.  It
+     * is freed on the error paths below; while the handler stays
+     * registered the copy is intentionally kept alive. */
+    if ((devname = kmalloc(INTR_NAME_MAX_LENGTH, GFP_ATOMIC)) == NULL)
+    {
+        printk(KERN_ERR "kmalloc(%d, GFP_ATOMIC) failed for intr name\n",
+                        INTR_NAME_MAX_LENGTH);
+        return -1;
+    }
+    /* snprintf rather than sprintf: never overrun the fixed-size buffer */
+    snprintf(devname, INTR_NAME_MAX_LENGTH, "%s", interruptName);
+
+    /* If this is for the timer interrupt, do not invoke the following code
+       because doing so kills the timer interrupts that may already be running */
+    if (irq != INTERRUPT_ID_TIMER) {
+        chip = (rearmMode == INTR_REARM_NO) ? &brcm_irq_chip_no_unmask :
+                                              &brcm_irq_chip;
+        irq_set_chip_and_handler(irq, chip, handle_level_irq);
+    }
+
+    if (rearmMode == INTR_REARM_YES)
+    {
+        irq_modify_status(irq, IRQ_NOAUTOEN, 0);
+    }
+
+    /* INTR_REARM_NO: irq stays masked during the handler and must be
+     * re-enabled explicitly by the driver (see brcm_irq_chip_no_unmask) */
+    irqflags = IRQF_SAMPLE_RANDOM;
+    irqflags |= (rearmMode == INTR_REARM_NO) ? IRQF_DISABLED : 0;
+#if defined(CONFIG_BCM_EXT_TIMER)
+    /* There are 3 timers with individual control, so the interrupt can be shared */
+    if ( (irq >= INTERRUPT_ID_TIMER) && (irq < (INTERRUPT_ID_TIMER+EXT_TIMER_NUM)) )
+         irqflags |= IRQF_SHARED;
+#endif
+    /* For external interrupts, honour the board's "shared" flag */
+    if ( (irq >= INTERRUPT_ID_EXTERNAL_0 && irq <= INTERRUPT_ID_EXTERNAL_3)
+#if defined(CONFIG_BCM96838)
+     || (irq >= INTERRUPT_ID_EXTERNAL_4 && irq <= INTERRUPT_ID_EXTERNAL_5)
+#elif defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+     || (irq >= INTERRUPT_ID_EXTERNAL_4 && irq <= INTERRUPT_ID_EXTERNAL_7)
+#endif
+       )
+    {
+        if( IsExtIntrShared(kerSysGetExtIntInfo(irq)) )
+            irqflags |= IRQF_SHARED;
+    }
+
+    retval = request_irq(irq, (void*)pfunc, irqflags, devname, (void *) param);
+    if (retval != 0)
+    {
+        printk(KERN_WARNING "request_irq failed for irq=%d (%s) retval=%d\n",
+               irq, devname, retval);
+        kfree(devname);
+        return retval;
+    }
+
+    // now deal with interrupt affinity requests
+    if (affinMode != INTR_AFFINITY_DEFAULT)
+    {
+        struct cpumask mask;
+
+        cpus_clear(mask);
+
+        if (affinMode == INTR_AFFINITY_TP1_ONLY ||
+            affinMode == INTR_AFFINITY_TP1_IF_POSSIBLE)
+        {
+            if (cpu_online(1))
+            {
+                cpu_set(1, mask);
+                irq_set_affinity(irq, &mask);
+            }
+            else
+            {
+                // TP1 is not on-line but caller insisted on it
+                if (affinMode == INTR_AFFINITY_TP1_ONLY)
+                {
+                    printk(KERN_WARNING
+                           "cannot assign intr %d to TP1, not online\n", irq);
+                    /* Fix: undo the registration with free_irq().  The old
+                     * code called request_irq(irq, NULL, ...) here, which
+                     * only returns -EINVAL and leaves the handler (and the
+                     * name buffer) registered. */
+                    free_irq(irq, (void *) param);
+                    kfree(devname);
+                    retval = -1;
+                }
+            }
+        }
+        else
+        {
+            // INTR_AFFINITY_BOTH_IF_POSSIBLE
+            cpu_set(0, mask);
+            if (cpu_online(1))
+            {
+                cpu_set(1, mask);
+                irq_set_affinity(irq, &mask);
+            }
+        }
+    }
+
+    return retval;
+}
+EXPORT_SYMBOL(BcmHalMapInterruptEx);
+
+
+//***************************************************************************
+//  void  BcmHalGenerateSoftInterrupt
+//
+//   Triggers a software interrupt.
+//
+//***************************************************************************
+void BcmHalGenerateSoftInterrupt( unsigned int irq )
+{
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    /* raise the CP0 cause bit for SW0/SW1; plat_irq_dispatch() routes it
+       to do_IRQ() once interrupts are re-enabled */
+    set_c0_cause(0x1 << (CAUSEB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
+
+    local_irq_restore(flags);
+}
+
+/* Mask an external interrupt pin.  On 63381/6848 the mask bit lives in
+ * ExtIrqSts; on other chips it is in ExtIrqCfg. */
+void BcmHalExternalIrqMask(unsigned int irq)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&brcm_irqlock, flags);
+#if defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    PERF->ExtIrqSts &= ~(1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+#else
+    PERF->ExtIrqCfg &= ~(1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+#endif
+    spin_unlock_irqrestore(&brcm_irqlock, flags); 
+}
+
+/* Unmask an external interrupt pin; register choice mirrors
+ * BcmHalExternalIrqMask(). */
+void BcmHalExternalIrqUnmask(unsigned int irq)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&brcm_irqlock, flags);
+#if defined(CONFIG_BCM963381) || defined(CONFIG_BCM96848)
+    PERF->ExtIrqSts |= (1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+#else
+    PERF->ExtIrqCfg |= (1 << (EI_MASK_SHFT + irq - INTERRUPT_ID_EXTERNAL_0));
+#endif
+    spin_unlock_irqrestore(&brcm_irqlock, flags); 
+}
+
+EXPORT_SYMBOL(enable_brcm_irq_irq);
+EXPORT_SYMBOL(disable_brcm_irq_irq);
+EXPORT_SYMBOL(BcmHalMapInterrupt);
+EXPORT_SYMBOL(BcmHalMapInterruptVoip);
+EXPORT_SYMBOL(BcmHalGenerateSoftInterrupt);
+EXPORT_SYMBOL(BcmHalExternalIrqMask);
+EXPORT_SYMBOL(BcmHalExternalIrqUnmask);
+
+#if !defined(CONFIG_BCM96838)
+// bill
+EXPORT_SYMBOL(disable_brcm_irqsave);
+EXPORT_SYMBOL(restore_brcm_irqsave);
+#endif
+
+#endif //defined(CONFIG_BCM_KF_MIPS_BCM963XX)
diff --git a/arch/mips/bcm963xx/ktools/Makefile b/arch/mips/bcm963xx/ktools/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..f12761899bfecdfbfdf4d51125b2d1f8587a9cf9
--- /dev/null
+++ b/arch/mips/bcm963xx/ktools/Makefile
@@ -0,0 +1,45 @@
+#
+# Makefile for generic Broadcom MIPS boards
+#
+# Copyright (C) 2004 Broadcom Corporation
+#
+#obj-y           := irq.o prom.o setup.o bcm63xx_flash.o bcm63xx_led.o flash_api.o boardparms.o board.o bcm63xx_cons.o spiflash.o
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BALOO)
+endif # BCM_KF # defined(CONFIG_BCM_KF_BALOO)
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BUZZZ)
+# BUZZZ must never be sent to customers !!!
+obj-$(CONFIG_BUZZZ) += buzzz.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_BUZZZ)
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BOUNCE)
+obj-$(CONFIG_BRCM_BOUNCE) += bounce.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_BOUNCE)
+ifdef BCM_KF # defined(CONFIG_BCM_KF_PMON)
+obj-$(CONFIG_PMON) += pmontool.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_PMON)
+
+SRCBASE         := $(TOPDIR)
+EXTRA_CFLAGS    += -I$(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD) -I$(SRCBASE)/include -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+#EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH) -DDBG
+EXTRA_CFLAGS    += -I$(INC_ADSLDRV_PATH)
+EXTRA_CFLAGS += -I$(BRCMDRIVERS_DIR)/../shared/broadcom/include/$(BRCM_BOARD)
+EXTRA_CFLAGS += -I$(BRCMDRIVERS_DIR)/broadcom/include/$(BRCM_BOARD)
+EXTRA_CFLAGS += -I$(BRCMDRIVERS_DIR)/opensource/include/$(BRCM_BOARD)
+# was "cflags-y +=": kbuild object Makefiles do not consume cflags-y, so this
+# include path was silently dropped; use EXTRA_CFLAGS like the rest of the file
+EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/bcm963xx
+EXTRA_CFLAGS += -g
+
+
+ifeq "$(ADSL)" "ANNEX_B"
+EXTRA_CFLAGS += -DADSL_ANNEXB
+endif
+ifeq "$(ADSL)" "SADSL"
+EXTRA_CFLAGS += -DADSL_SADSL
+endif
+ifeq "$(ADSL)" "ANNEX_C"
+EXTRA_CFLAGS += -DADSL_ANNEXC
+endif
+
diff --git a/arch/mips/bcm963xx/ktools/bounce.c b/arch/mips/bcm963xx/ktools/bounce.c
new file mode 100644
index 0000000000000000000000000000000000000000..1b61541c6b8c36d76823453d72484f98ce737aba
--- /dev/null
+++ b/arch/mips/bcm963xx/ktools/bounce.c
@@ -0,0 +1,575 @@
+#if defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE)
+/*
+<:copyright-BRCM:2007:DUAL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license 
+agreement governing use of this software, this software is licensed 
+to you under the terms of the GNU General Public License version 2 
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give 
+   you permission to link this software with independent modules, and 
+   to copy and distribute the resulting executable under terms of your 
+   choice, provided that you also meet, for each linked independent 
+   module, the terms and conditions of the license of that module. 
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications 
+   of the software.  
+
+Not withstanding the above, under no circumstances may you combine 
+this software in any way with any other Broadcom software provided 
+under a license other than the GPL, without Broadcom's express prior 
+written consent. 
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ * File Name   : bounce.c
+ *******************************************************************************
+ */
+
+#include <asm/bounce.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#ifdef BOUNCE_COLOR
+#define _H_						"\e[0;36;44m"
+#define _N_						"\e[0m"
+#define _R_						"\e[0;31m"
+#define _G_						"\e[0;32m"
+#else
+#define _H_
+#define _N_
+#define _R_
+#define _G_
+#endif
+
+#undef  BOUNCE_DECL
+#define BOUNCE_DECL(x)			#x,
+
+/*----- typedefs -----*/
+typedef char BounceFmt_t[BOUNCE_FMT_LENGTH];
+
+/* Global state of the bounce function tracer (one instance: bounce_g). */
+typedef struct bounceDev
+{
+
+	BounceMode_t	mode;		/* mode of operation */
+	BounceLog_t	  * log_p;	/* next free slot in log[] */
+
+	uint32_t		wrap;		/* buffer wrapped at least once */
+	uint32_t		run;		/* trace incarnation */
+	uint32_t		count;		/* log count .. (not function count) */
+
+	dev_t			dev;		/* char device number */
+	struct cdev		cdev;		/* char device for the ioctl interface */
+
+    BounceFmt_t     evtfmt[ BOUNCE_MAX_EVENTS ];	/* per-event name/format */
+
+	BounceLog_t		log[ BOUNCE_SIZE ];		/* the trace ring buffer */
+
+} BounceDev_t;
+
+
+/*----- Forward definition -----*/
+
+static int  bounce_open(struct inode *inodep, struct file *filep)BOUNCE_NOINSTR;
+static int  bounce_rel(struct inode *inodep, struct file *filep)	BOUNCE_NOINSTR;
+static long bounce_unlocked_ioctl( struct file *  file, 
+                                  unsigned int   cmd,
+                                  unsigned long  arg)			BOUNCE_NOINSTR;
+extern void bounce_up(BounceMode_t mode, uint32_t limit)		BOUNCE_NOINSTR;
+extern asmlinkage void bounce_dn(void);
+extern asmlinkage void bounce_panic(void);
+extern void bounce_reg(uint32_t event, char * eventName)        BOUNCE_NOINSTR;
+extern void bounce_dump(uint32_t last)							BOUNCE_NOINSTR;
+
+extern void bounce0(uint32_t event)                             BOUNCE_NOINSTR;
+extern void bounce1(uint32_t event, uint32_t arg1)              BOUNCE_NOINSTR;
+extern void bounce2(uint32_t event, uint32_t arg1, uint32_t arg2)
+                                                                BOUNCE_NOINSTR;
+extern void bounce3(uint32_t event, uint32_t arg1, uint32_t arg2, uint32_t arg3)
+                                                                BOUNCE_NOINSTR;
+
+static int  __init bounce_init(void)							BOUNCE_NOINSTR;
+static void __exit bounce_exit(void)							BOUNCE_NOINSTR;
+
+/*----- Globals -----*/
+
+BounceDev_t bounce_g = { .mode = BOUNCE_MODE_DISABLED };
+
+static struct file_operations bounce_fops_g =
+{
+	.unlocked_ioctl =    bounce_unlocked_ioctl,
+	.open =     bounce_open,
+	.release =  bounce_rel,
+	.owner =    THIS_MODULE
+};
+
+static const char * bounce_mode_str_g[] =
+{
+    BOUNCE_DECL(BOUNCE_MODE_DISABLED)
+    BOUNCE_DECL(BOUNCE_MODE_LIMITED)    /* auto disable when count goes to 0 */
+    BOUNCE_DECL(BOUNCE_MODE_CONTINUOUS) /* explicit disable via bounce_dn() */
+    BOUNCE_DECL(BOUNCE_MODE_MAXIMUM)
+};
+
+#ifdef BOUNCE_DEBUG
+static const char * bounce_ioctl_str_g[] =
+{
+    BOUNCE_DECL(BOUNCE_START_IOCTL)
+    BOUNCE_DECL(BOUNCE_STOP_IOCTL)
+    BOUNCE_DECL(BOUNCE_DUMP_IOCTL)
+    BOUNCE_DECL(BOUNCE_INVLD_IOCTL)
+};
+#endif
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#include "linux/spinlock.h"
+static DEFINE_SPINLOCK(bounce_lock_g);   /* FkBuff packet flow */
+#define BOUNCE_LOCK(flags)       spin_lock_irqsave( &bounce_lock_g, flags )
+#define BOUNCE_UNLOCK(flags)     spin_unlock_irqrestore( &bounce_lock_g, flags )
+#else
+#define BOUNCE_LOCK(flags)       local_irq_save(flags)
+#define BOUNCE_UNLOCK(flags)     local_irq_restore(flags)
+#endif
+
+/* MACROS used by __cyg_profile_func_ enter() and exit() */
+/* Common prologue: return immediately when tracing is disabled, take the
+ * bounce lock, and in LIMITED mode stop tracing (disable + return) once
+ * bounce_g.count reaches zero. */
+#define __BOUNCE_BGN(flags)												\
+																	\
+	if ( bounce_g.mode == BOUNCE_MODE_DISABLED )					\
+		return;														\
+																	\
+    BOUNCE_LOCK(flags);                                                  \
+	if ( bounce_g.mode == BOUNCE_MODE_LIMITED )						\
+	{																\
+		if ( bounce_g.count == 0 )									\
+		{															\
+			bounce_g.mode = BOUNCE_MODE_DISABLED;					\
+            BOUNCE_UNLOCK(flags);                                        \
+			return;													\
+		}															\
+		bounce_g.count--;											\
+	}
+
+
+/* Common epilogue: advance the ring cursor with wrap-around, then unlock. */
+#define __BOUNCE_END(flags)												\
+																	\
+	bounce_g.log_p++;												\
+																	\
+	if ( bounce_g.log_p == &bounce_g.log[ BOUNCE_SIZE ] )			\
+	{																\
+		bounce_g.wrap = 1;											\
+		bounce_g.log_p = &bounce_g.log[0];							\
+	}																\
+																	\
+    BOUNCE_UNLOCK(flags);
+
+
+/* Function entry stub provided by -finstrument-functions: logs the callee
+ * address with bit0 set (= entry) and the cpu id in bit1, plus the current
+ * task's pid. */
+void __cyg_profile_func_enter(void *ced, void *cer)
+{
+    unsigned long flags;
+	__BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (uint32_t)ced | (smp_processor_id() << 1) | 1;
+	bounce_g.log_p->pid = (uint32_t)(current_thread_info()->task->pid);
+
+	__BOUNCE_END(flags);
+}
+
+/* Function exit stub provided by -finstrument-functions: like the entry
+ * stub but with bit0 clear (= exit).  Compiled out unless
+ * CONFIG_BRCM_BOUNCE_EXIT is set. */
+void __cyg_profile_func_exit(void *ced, void *cer)
+{
+#if defined(CONFIG_BRCM_BOUNCE_EXIT)
+    unsigned long flags;
+	__BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (uint32_t)ced | (smp_processor_id() << 1);
+	bounce_g.log_p->pid = (uint32_t)(current_thread_info()->task->pid);
+
+	__BOUNCE_END(flags);
+
+#endif	/* defined(CONFIG_BRCM_BOUNCE_EXIT) */
+}
+
+/* Log 'event' with no arguments: word0 = event<<16 | argc(0)<<2 | cpu<<1. */
+void bounce0(uint32_t event)
+{
+    unsigned long flags;
+    __BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (event << 16) | (smp_processor_id() << 1);
+
+    __BOUNCE_END(flags);
+}
+
+/* Log 'event' with one argument: word0 = event<<16 | argc(1)<<2 | cpu<<1. */
+void bounce1(uint32_t event, uint32_t arg1)
+{
+    unsigned long flags;
+    __BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (event << 16)
+                                | (1 << 2) | (smp_processor_id() << 1);
+    bounce_g.log_p->arg1 = arg1;
+
+    __BOUNCE_END(flags);
+}
+
+/* Log 'event' with two arguments: word0 = event<<16 | argc(2)<<2 | cpu<<1. */
+void bounce2(uint32_t event, uint32_t arg1, uint32_t arg2)
+{
+    unsigned long flags;
+    __BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (event << 16)
+                                | (2 << 2) | (smp_processor_id() << 1);
+    bounce_g.log_p->arg1 = arg1;
+    bounce_g.log_p->arg2 = arg2;
+
+    __BOUNCE_END(flags);
+}
+
+/* Log 'event' with three arguments: word0 = event<<16 | argc(3)<<2 | cpu<<1. */
+void bounce3(uint32_t event, uint32_t arg1, uint32_t arg2, uint32_t arg3)
+{
+    unsigned long flags;
+    __BOUNCE_BGN(flags);
+
+    bounce_g.log_p->word0.u32 = (event << 16)
+                                | (3 << 2) | (smp_processor_id() << 1);
+    bounce_g.log_p->arg1 = arg1;
+    bounce_g.log_p->arg2 = arg2;
+    bounce_g.log_p->arg3 = arg3;
+
+    __BOUNCE_END(flags);
+}
+
+static int bounce_panic_dump = 0;
+
+/* Start tracing: reset the ring cursor and wrap flag, arm the stop count
+ * (used only in LIMITED mode), enable logging by setting the mode, and
+ * re-arm the one-shot dump performed by bounce_panic(). */
+void bounce_up(BounceMode_t mode, uint32_t limit)
+{
+	bounce_g.wrap = 0;						/* setup trace buffer */
+	bounce_g.log_p = &bounce_g.log[0];
+	bounce_g.count = limit;					/* setup stop semantics */
+	bounce_g.mode = mode;					/* tracing enabled now */
+
+	bounce_panic_dump = 1;
+}
+
+/* Stop tracing: log a marker for this call, then drop an active trace to
+ * LIMITED mode so logging ends once bounce_g.count is exhausted. */
+void bounce_dn(void)
+{
+	BOUNCE_LOGK(bounce_dn);
+
+    if ( bounce_g.mode != BOUNCE_MODE_DISABLED )
+		bounce_g.mode = BOUNCE_MODE_LIMITED;/* initiate stop semantics */
+}
+
+/* Auto dump last BOUNCE_PANIC items on a panic/bug */
+/* One-shot: the first panic disables tracing and dumps the most recent
+ * BOUNCE_PANIC records; subsequent calls are no-ops. */
+void bounce_panic(void)
+{
+	/* Fix: was BOUNCE_LOGK(bounce_dn) -- copy-paste from bounce_dn();
+	 * log this function so the trace's final entry identifies the
+	 * panic hook rather than a spurious bounce_dn call. */
+	BOUNCE_LOGK(bounce_panic);
+
+	if ( bounce_panic_dump ) {
+		bounce_panic_dump = 0;			/* consume the one-shot latch */
+		bounce_g.mode = BOUNCE_MODE_DISABLED;	/* stop tracing before dumping */
+		bounce_dump( BOUNCE_PANIC );
+	}
+}
+
+/* Register a printf-style format string for a user event id; out-of-range
+ * ids are silently ignored.  The copy is bounded at BOUNCE_FMT_LENGTH-1
+ * so the zero-initialized final byte keeps the slot NUL-terminated. */
+void bounce_reg(uint32_t event, char * eventName)
+{
+    if ( event >= BOUNCE_MAX_EVENTS )
+        return;
+
+    strncpy( bounce_g.evtfmt[event], eventName, BOUNCE_FMT_LENGTH-1 );
+}
+
+/* Dump the trace buffer via printk */
+/* last == 0: dump the whole buffer oldest-first (honouring wrap);
+ * last != 0: dump only the most recent 'last' records, capped at
+ * BOUNCE_SIZE.  Tracing is disabled on entry and left disabled. */
+void bounce_dump(uint32_t last)
+{
+	BounceLog_t * log_p;
+	uint32_t logs;
+	uint32_t wrap;
+	uint32_t count;
+	BounceMode_t mode;
+
+	/* Snapshot then clear the countdown and mode so logging cannot
+	 * advance log_p while the buffer is walked. */
+	count = bounce_g.count;
+	bounce_g.count = 0;
+
+	mode = bounce_g.mode;
+	bounce_g.mode  = BOUNCE_MODE_DISABLED;
+
+	printk(_H_ "BOUNCE DUMP BGN: FUNC_EXIT<%d> run<%u> wrap<%u> count<%u> %s\n"
+	       "B[0x%08x] L[0x%08x] E[0x%08x], %u:%u bounce_dn[<0x%08x>]\n\n" _N_,
+#if defined(CONFIG_BRCM_BOUNCE_EXIT)
+			1,
+#else
+			0,
+#endif
+		    bounce_g.run, bounce_g.wrap, count, bounce_mode_str_g[mode],
+            (int)&bounce_g.log[0],
+            (int)bounce_g.log_p, (int)&bounce_g.log[BOUNCE_SIZE],
+			(((uint32_t)bounce_g.log_p - (uint32_t)&bounce_g.log[0])
+            / sizeof(BounceLog_t)),
+			(((uint32_t)(&bounce_g.log[BOUNCE_SIZE])
+            - (uint32_t)bounce_g.log_p) / sizeof(BounceLog_t)),
+            (int)bounce_dn );
+
+	/* Dump the last few records */
+	if ( last != 0 )
+	{
+		uint32_t items;
+
+		if ( last > BOUNCE_SIZE )
+			last = BOUNCE_SIZE;
+
+		/* Number of records between the buffer start and the write
+		 * cursor. */
+		items = (((uint32_t)bounce_g.log_p - (uint32_t)&bounce_g.log[0])
+				 / sizeof(BounceLog_t));
+
+		if ( items > last )
+		{
+			/* All requested records lie below log_p: no wrap needed. */
+			log_p = (BounceLog_t*)
+				((uint32_t)bounce_g.log_p - (last * sizeof(BounceLog_t)));
+			wrap = 0;
+		}
+		else
+		{
+			/* Take the shortfall from the tail end of the ring. */
+			items = last - items; 	/* remaining items */
+			log_p = (BounceLog_t*)
+				((uint32_t)(&bounce_g.log[BOUNCE_SIZE]
+				 - (items * sizeof(BounceLog_t))));
+			wrap = 1;
+		}
+	}
+	else
+	{
+		/* Full dump: if the ring wrapped, the oldest record is at
+		 * log_p; otherwise start from slot 0. */
+		wrap = bounce_g.wrap;
+		if ( bounce_g.wrap )
+			log_p = bounce_g.log_p;
+		else
+			log_p = & bounce_g.log[0];
+	}
+
+	logs = 0;
+
+    /* Start from current and until end */
+	if ( wrap )
+	{
+		for ( ; log_p != & bounce_g.log[BOUNCE_SIZE]; logs++, log_p++ )
+		{
+            if ( BOUNCE_IS_FUNC_LOG(log_p->word0.u32) )
+            {
+                /* Function entry/exit record: pid plus symbolized pc. */
+			    printk( "%s %5u %pS" _N_ "\n",
+					    (log_p->word0.site.type) ? _R_ "=>" : _G_ "<=",
+					    log_p->pid, BOUNCE_GET_FUNCP(log_p->word0.u32) );
+            }
+            else
+            {
+                /* User event record: evtfmt[] holds a printf format
+                 * whose argument count is encoded in word0.event.args. */
+                switch (log_p->word0.event.args)
+                {
+                    case 0:
+                        printk(bounce_g.evtfmt[log_p->word0.event.evid]);
+                        break;
+                    case 1:
+                        printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                                log_p->arg1);
+                        break;
+                    case 2:
+                        printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                                log_p->arg1, log_p->arg2);
+                        break;
+                    case 3:
+                        printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                                log_p->arg1, log_p->arg2, log_p->arg3);
+                        break;
+                }
+                printk( " %s cpu<%u> %s evt<%6u>\n",
+                        (log_p->word0.event.cpu0) ? _R_ : _G_,
+                        log_p->word0.event.cpu0,  _N_,
+                        log_p->word0.event.evid );
+            }
+		}
+
+		/* Wrapped segment done; continue from the buffer start. */
+		log_p = & bounce_g.log[0];
+	}
+
+	for ( ; log_p != bounce_g.log_p; logs++, log_p++ )
+	{
+        if ( BOUNCE_IS_FUNC_LOG(log_p->word0.u32) )
+        {
+		    printk( "%s %5u %pS" _N_ "\n",
+				    (log_p->word0.site.type) ? _R_ "=>" : _G_ "<=",
+				    log_p->pid, BOUNCE_GET_FUNCP(log_p->word0.u32) );
+        }
+        else
+        {
+            switch (log_p->word0.event.args)
+            {
+                case 0:
+                    printk(bounce_g.evtfmt[log_p->word0.event.evid]);
+                    break;
+                case 1:
+                    printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                            log_p->arg1);
+                    break;
+                case 2:
+                    printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                            log_p->arg1, log_p->arg2);
+                    break;
+                case 3:
+                    printk( bounce_g.evtfmt[log_p->word0.event.evid],
+                            log_p->arg1, log_p->arg2, log_p->arg3);
+                    break;
+            }
+            printk( " %s cpu<%u> %s evt<%6u>\n",
+                    (log_p->word0.event.cpu0) ? _R_ : _G_,
+                    log_p->word0.event.cpu0,  _N_,
+                    log_p->word0.event.evid );
+        }
+	}
+
+	printk( _H_ "\nBOUNCE DUMP END: logs<%u>\n\n\n" _N_, logs );
+}
+
+static DEFINE_MUTEX(ioctlMutex);
+
+/* ioctl fileops */
+/* Serialized ioctl handler for the bounce control device.
+ * START: arg packs the mode in bits 2:0 and the stop limit in the
+ *        remaining bits.
+ * STOP:  initiate the limited-mode wind-down.
+ * DUMP:  print the trace buffer; arg = number of trailing records
+ *        (0 = whole buffer).
+ * Unknown commands are clamped to BOUNCE_INVLD_IOCTL and return -EINVAL. */
+long bounce_unlocked_ioctl( struct file *  file, 
+                            unsigned int   command,
+                            unsigned long  arg)
+{
+	BounceIoctl_t cmd;
+	long ret = -EINVAL;
+
+	mutex_lock(&ioctlMutex);
+
+	if ( command > BOUNCE_INVLD_IOCTL )
+		cmd = BOUNCE_INVLD_IOCTL;
+	else
+		cmd = (BounceIoctl_t)command;
+
+	BDBG( printk(KERN_DEBUG "BOUNCE DEV: ioctl cmd[%d,%s] arg[%lu 0x%08x]\n",
+		         command, bounce_ioctl_str_g[cmd], arg, (int)arg ); );
+
+	switch ( cmd )
+	{
+		case BOUNCE_START_IOCTL:
+			{
+				BounceMode_t mode = (BounceMode_t) ( arg & 7 );
+				uint32_t limit = ( arg >> 3 );
+
+				bounce_up( mode, limit );
+				ret = 0;
+				break;
+			}
+
+		case BOUNCE_STOP_IOCTL:
+			bounce_dn(); 
+			ret = 0;
+			break;
+
+		case BOUNCE_DUMP_IOCTL:
+			bounce_dump(arg);
+			ret = 0;
+			break;
+
+		default:
+			printk( KERN_ERR "BOUNCE DEV: invalid ioctl <%u>\n", command );
+	}
+	mutex_unlock(&ioctlMutex);
+	return ret;
+}
+
+/* open fileops */
+/* Accept opens only on minor 0; any other minor is rejected with
+ * -ENODEV. */
+int bounce_open(struct inode *inodep, struct file *filep)
+{
+	int minor = MINOR(inodep->i_rdev) & 0xf;    /* fetch minor */
+
+	if (minor > 0)
+	{
+		/* Fix: terminate the log line with '\n' so the warning is not
+		 * merged with the next printk in the kernel log. */
+		printk(KERN_WARNING "BOUNCE DEV: multiple open " BOUNCE_DEV_NAME "\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* release fileops */
+/* Nothing to tear down: open() allocates no per-file state. */
+int bounce_rel(struct inode *inodep, struct file *filep)
+{
+	return 0;
+}
+
+/* module init: register character device */
+/* Registers the bounce cdev on static major BOUNCE_DEV_MAJ, minor 0,
+ * with tracing initially disabled, and pre-fills every event format
+ * slot with a placeholder so unregistered events dump safely.
+ * Returns 0 on success or the cdev_add() error code. */
+int __init bounce_init(void)
+{
+	int i, ret;
+	memset(&bounce_g, 0, sizeof(BounceDev_t));
+	bounce_g.mode  = BOUNCE_MODE_DISABLED;
+	bounce_g.count = BOUNCE_SIZE;
+	bounce_g.log_p = &bounce_g.log[0];
+
+	bounce_g.dev = MKDEV(BOUNCE_DEV_MAJ, 0);
+
+	cdev_init(&bounce_g.cdev, &bounce_fops_g);
+	bounce_g.cdev.ops = &bounce_fops_g;
+
+	ret = cdev_add(&bounce_g.cdev, bounce_g.dev, 1);
+
+	/* Placeholder format for events never passed to bounce_reg(). */
+    for (i=0; i<BOUNCE_MAX_EVENTS; i++)
+        sprintf(bounce_g.evtfmt[i], "INVALID EVENT");
+
+	if (ret) {
+		printk( KERN_ERR _R_ "BOUNCE DEV: Error %d adding device "
+				BOUNCE_DEV_NAME " [%d,%d] added.\n" _N_,
+				ret, MAJOR(bounce_g.dev), MINOR(bounce_g.dev));
+		return ret;
+	} else {
+		printk( KERN_DEBUG _G_ "BOUNCE DEV: "
+				BOUNCE_DEV_NAME " [%d,%d] added.\n" _N_,
+				MAJOR(bounce_g.dev), MINOR(bounce_g.dev));
+	}
+
+	return ret;
+}
+
+/* cleanup : did not bother with char device de-registration */
+/* Deletes the cdev and scrubs all trace state; device-number
+ * de-registration (the comment above) is deliberately skipped. */
+void __exit bounce_exit(void)
+{
+	cdev_del(&bounce_g.cdev);
+	memset(&bounce_g, 0, sizeof(BounceDev_t));
+}
+
+module_init(bounce_init);
+module_exit(bounce_exit);
+
+EXPORT_SYMBOL(__cyg_profile_func_enter);
+EXPORT_SYMBOL(__cyg_profile_func_exit);
+
+EXPORT_SYMBOL(bounce_up);
+EXPORT_SYMBOL(bounce_dn);
+EXPORT_SYMBOL(bounce_reg);
+EXPORT_SYMBOL(bounce0);
+EXPORT_SYMBOL(bounce1);
+EXPORT_SYMBOL(bounce2);
+EXPORT_SYMBOL(bounce3);
+EXPORT_SYMBOL(bounce_dump);
+EXPORT_SYMBOL(bounce_panic);
+
+#endif
diff --git a/arch/mips/bcm963xx/ktools/pmontool.c b/arch/mips/bcm963xx/ktools/pmontool.c
new file mode 100644
index 0000000000000000000000000000000000000000..151e8eca45f5fd6028f726d0a003b93cc275dd6a
--- /dev/null
+++ b/arch/mips/bcm963xx/ktools/pmontool.c
@@ -0,0 +1,603 @@
+/*
+<:copyright-BRCM:2007:DUAL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license 
+agreement governing use of this software, this software is licensed 
+to you under the terms of the GNU General Public License version 2 
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give 
+   you permission to link this software with independent modules, and 
+   to copy and distribute the resulting executable under terms of your 
+   choice, provided that you also meet, for each linked independent 
+   module, the terms and conditions of the license of that module. 
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications 
+   of the software.  
+
+Not withstanding the above, under no circumstances may you combine 
+this software in any way with any other Broadcom software provided 
+under a license other than the GPL, without Broadcom's express prior 
+written consent. 
+
+:>
+*/
+/*
+ *------------------------------------------------------------------------------
+ *
+ * Filename: pmontool.c
+ *
+ * Implements a rudimentary API to the MIPS performance counters.
+ * Statically built into the kernel for usage in kernel profiling.
+ *
+ *  pmon_bgn: Sets up a baseline counter reference used in subsequent pmon_log
+ *  pmon_log(eventid): This API must be called with sequential eventid's.
+ *  pmon_end: Terminates one iteration, computes logged counter deltas and
+ *            accumulates them. After a sample number of iterations, the
+ *            accumulated counters are averaged and the next performance
+ *            counter type is applied.
+ *  pmon_clr: This function may be invoked to skip the current iteration from
+ *            the test average.
+ *
+ *  pmon_reg: Register a string description with an event point, for use in reports
+ *  pmon_enable: May be invoked by C code or by the control utility to start
+ *            the monitoring.
+ *
+ *  - Measurement configuration options:
+ *    skip: Delayed enabling after a number of iterations
+ *    iter: Average over number of iteration per metric
+ *    metric: Compute all=1 or only cyclecount=0 metrics
+ *
+ * Implements a character driver for configuration from pmon control utility
+ *
+ * Uncomment PMON_UT to enable the unit test mode of PMON tool. In this mode,
+ * upon enabling PMON via the control utility, the pmon_ut() will be invoked.
+ * pmon_ut() consists of a sample loop that has been instrumented with 
+ * microsecond delays (calibrated kernel function) in between instrumentation.
+ * The report function will be invoked on completion of the specified test.
+ *
+ *------------------------------------------------------------------------------
+ */
+
+#if defined( CONFIG_PMON )
+
+#include <asm/pmonapi.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <bcm_cpu.h>
+
+// #define PMON_UT     /* PMON Unit Testing */
+
+#ifdef PMON_COLOR
+#define _H_                     "\e[0;36;44m"
+#define _N_                     "\e[0m"
+#define _R_                     "\e[0;31m"
+#define _G_                     "\e[0;32m"
+#define _B_                     "\e[0;34m"
+#define _NL_                    _N_ "\n"
+#else
+#define _H_
+#define _N_
+#define _R_
+#define _G_
+#define _B_
+#define _NL_                    "\n"
+#endif
+
+/* Per-metric MIPS performance-counter programming, applied by
+ * pmon_bind(): perfGbl is written to the global perf control register,
+ * perfCtl to the counter-0 control register. */
+typedef struct PMetricConfig {
+    uint32_t perfGbl;
+    uint32_t perfCtl;
+} PMetricConfig_t;
+
+/* Default values correspond to CyclesCount */
+#define DEF_GBL_VAL             0x80000000u
+#define DEF_CTL_VAL             0x00008048u
+
+#undef  PMON_DECL
+#define PMON_DECL(x)    x,
+
+/* Metric ids.  PMON_DECL is an x-macro redefined before each expansion
+ * so this enum, PMetric_Str and pMetricConfig stay in lock-step: add a
+ * new metric to all three lists, before CyclesCount. */
+typedef enum PMetric {
+    PMON_DECL(DISABLED)
+#if defined(PMON_RAC_METRIC)
+    PMON_DECL(RACLookup)
+    PMON_DECL(RACHits)
+    PMON_DECL(Prefetch)
+#endif
+    PMON_DECL(DCacheHits)
+    PMON_DECL(DCacheMiss)
+    PMON_DECL(ICacheHits)
+    PMON_DECL(ICacheMiss)
+    PMON_DECL(InstrRate)
+    PMON_DECL(CyclesCount)     /* CyclesCount must be the last */
+    PMON_MAX_METRIC
+} PMetric_t;
+
+#undef  PMON_DECL
+#define PMON_DECL(x)    #x
+
+/* Printable metric names, same order as PMetric_t; the trailing
+ * "INVALID" entry covers PMON_MAX_METRIC itself. */
+const char * PMetric_Str[] = {
+    PMON_DECL(DISABLED)
+#if defined(PMON_RAC_METRIC)
+    PMON_DECL(RACLookup)
+    PMON_DECL(RACHits)
+    PMON_DECL(Prefetch)
+#endif
+    PMON_DECL(DCacheHits)
+    PMON_DECL(DCacheMiss)
+    PMON_DECL(ICacheHits)
+    PMON_DECL(ICacheMiss)
+    PMON_DECL(InstrRate)
+    PMON_DECL(CyclesCount)
+    "INVALID"
+};
+
+/* Control-register values for each PMetric, in enum order (indexed by
+ * pmon_bind()); the inline comments decode the MOD/SET and event-select
+ * bit fields.  The trailing default entry presumably guards one-past-end
+ * indexing at PMON_MAX_METRIC -- confirm against pmon_bind callers. */
+PMetricConfig_t pMetricConfig[] =
+{
+    { DEF_GBL_VAL, DEF_CTL_VAL },   /* EventType     MOD SET       EventId,00 */
+
+#if defined(PMON_RAC_METRIC)
+    { 0x8000002Cu, 0x00008108u },   /* RAC Lookup 2C=1011 00   108=1000010,00 */
+    { 0x8000002Eu, 0x00008114u },   /* RAC Hits   2E=1011 10   114=1000101,00 */
+    { 0x8000002Du, 0x0000812Cu },   /* Prefetch   2D=1011 01   12C=1001011,00 */
+#endif
+     
+    { 0x80000011u, 0x00008028u },   /* DCacheHit  11=0100 01   028=0001010,00 */
+    { 0x80000011u, 0x00008024u },   /* DCacheMiss 11=0100 01   024=0001001,00 */
+
+    { 0x80000018u, 0x00008018u },   /* ICacheHit  18=0110 00   018=0000110,00 */
+    { 0x80000018u, 0x00008014u },   /* ICacheMiss 18=0110 00   014=0000101,00 */
+
+    { 0x80000000u, 0x00008044u },   /* Instructn  00=xxxx xx   044=0010001,00 */
+    { 0x80000000u, 0x00008048u },   /* CycleCount 00=xxxx xx   048=0010010,00 */
+
+    { DEF_GBL_VAL, DEF_CTL_VAL }
+};
+
+/* Measurement configuration and progress state (set by pmon_enable(),
+ * advanced by pmon_bgn()/pmon_end()). */
+uint32_t    iteration   = 0;
+uint32_t    sample_size = 0;
+PMetric_t   metric      = DISABLED;
+uint32_t    skip_enable = 0;
+uint32_t    running     = 0;
+uint32_t    report_all  = 0;
+/* Calibrated against udelay(1000) in pmon_init(); 400 is the fallback. */
+uint32_t    cycles_per_usec = 400;
+
+uint32_t pfCtr[ PMON_OVFLW_EVENTS ]; /* Current sample values */
+uint32_t pfSum[ PMON_MAX_EVENTS ];   /* Accumulate sum of sample iterations */
+uint32_t pfTmp[ PMON_MAX_EVENTS ];   /* MIPS workaround for CP0 counters not
+                                        getting updated correctly. */
+
+/* Averaged per-metric results; row [metric][0] == ~0U marks a valid row. */
+uint32_t pfMon[ PMON_MAX_METRIC + 1 ][ PMON_MAX_EVENTS ];
+uint32_t pfTot[ PMON_MAX_METRIC ];
+
+typedef char PmonEventName_t[128];
+/* Event names registered via pmon_reg(). */
+PmonEventName_t pfEvt[ PMON_MAX_EVENTS ];
+
+/* Attach a human-readable name to an event id and announce it, unless
+ * the name is the placeholder used for unregistered events.  Slots are
+ * zero-initialized globals, so the 127-byte bounded copy stays
+ * NUL-terminated. */
+void pmon_reg(uint32_t event, char * eventName)
+{
+    if ( event >= PMON_MAX_EVENTS )
+        return;
+
+    strncpy( pfEvt[event], eventName, 127 );
+
+    if ( strcmp( pfEvt[event], PMON_DEF_UNREGEVT ) != 0 )
+        printk( _G_ "PMON Registering event %u : %s" _NL_,
+                event, pfEvt[event] );
+}
+
+#if defined( PMON_UT )
+
+/* Number of instrumented loop iterations to drive; set by pmon_enable(). */
+int pmon_loops = 0;
+
+/* Unit-test driver: runs pmon_loops iterations of a loop instrumented
+ * with calibrated udelay()s, including deliberately incomplete and
+ * deliberately voided iterations, then restores the default cycle-count
+ * configuration. */
+void pmon_ut(void)
+{
+    int iter;
+
+        /* Each iteration from bgn to end is a sample */
+    for (iter=0; iter<pmon_loops; iter++)
+    {
+        pmon_bgn();
+        udelay(1); pmon_log(1);
+        udelay(2); pmon_log(2);
+        udelay(3); pmon_log(3);
+        udelay(4); pmon_log(4);
+        if ( ( iter % 10 ) == 0 )
+            continue;                   /* fake an incomplete iteration */
+        udelay(5); pmon_log(5);
+        udelay(6); pmon_log(6);
+        if ( ( iter % 100 ) == 0 )      /* fake a skipped complete iteration */
+        {
+            pmon_clr();
+            goto skip;
+        }
+        udelay(7); pmon_log(7);
+        udelay(8); pmon_log(8);
+skip:
+        udelay(9); pmon_log(9);
+        pmon_end(9);                    /* iteration complete */
+
+        PMONDBG( printk("pmon_ut next iter<%u>\n", iter ); );
+    }
+
+    printk( _G_ "Done %u loops in pmon_ut()" _NL_, pmon_loops );
+
+    __write_pfgblctl(DEF_GBL_VAL);  /* Enable global PCE */
+    __write_pfctl_0 (DEF_CTL_VAL);  /* Enable cycle count in CTR0 */
+
+}
+#endif  /* defined( PMON_UT ) */
+
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_report
+ * Report all collected metrics measurements.
+ * Averages each event's accumulated counts over sample_size and converts
+ * cycles to nanoseconds via cycles_per_usec.
+ * NOTE(review): divides by sample_size and cycles_per_usec without a
+ * zero guard -- relies on pmon_enable()/pmon_init() having set both.
+ *-----------------------------------------------------------------------------
+ */
+int pmon_report(void)
+{
+    uint32_t evt, cycles;
+    PMetric_t metric;
+
+    if ( running )
+        printk( _R_ "WARNING: PMON is still running" _NL_ );
+
+    printk( " Poor Man's Performance Monitor\n"
+            "\trunning: %s\n"
+            "\tcaliberation cycles_per_usec: %u\n",
+            running ? "TRUE" : "FALSE",
+            cycles_per_usec );
+
+    memset( pfTot, 0, sizeof(pfTot) );
+
+    if ( report_all )
+        printk( _H_ "Evt:  Cycles-Count Nano-secs Instruction "
+                    "I-C_Hits I-C_Miss D-C_Hits D-C_Miss"
+#if defined(PMON_RAC_METRIC)
+                    " Prefetch RAC_Hits RAC_Look"
+#endif
+                    " : EventName" _NL_ );
+    else
+        printk( _H_ "Evt:  Cycles-Count Nano-secs : EventName" _NL_ );
+
+    /* Events must be contiguous from 1: a zero cycle count ends the
+     * report. */
+    for ( evt=1; evt<PMON_MAX_EVENTS; evt++ )
+    {
+        if ( pfMon[CyclesCount][evt] == 0 ) break;
+
+        cycles = pfMon[CyclesCount][evt]/sample_size;
+
+        pfTot[CyclesCount] += cycles;
+
+        if ( report_all )
+        {
+            for (metric=DISABLED+1; metric < CyclesCount; metric++)
+                pfTot[metric] += pfMon[metric][evt]/sample_size;
+
+            printk( "%3u:  %12u %9u %11u %8u %8u %8u %8u"
+#if defined(PMON_RAC_METRIC)
+                    " %8u %8u %8u"
+#endif
+                    " : %s\n",
+                    evt, cycles,
+                    (cycles * 1000) / cycles_per_usec,
+                    pfMon[InstrRate][evt]/sample_size,
+                    pfMon[ICacheHits][evt]/sample_size,
+                    pfMon[ICacheMiss][evt]/sample_size,
+                    pfMon[DCacheHits][evt]/sample_size,
+                    pfMon[DCacheMiss][evt]/sample_size,
+#if defined(PMON_RAC_METRIC)
+                    pfMon[Prefetch][evt]/sample_size,
+                    pfMon[RACHits][evt]/sample_size,
+                    pfMon[RACLookup][evt]/sample_size,
+#endif
+                    pfEvt[evt] );
+        }
+        else
+            printk( "%3u:  %12u %9u : %s\n",
+                    evt, cycles, (cycles * 1000) / cycles_per_usec, pfEvt[evt]);
+    }
+
+    if ( report_all )
+        printk( _B_ "\nTot:  %12u %9u %11u %8u %8u %8u %8u"
+#if defined(PMON_RAC_METRIC)
+                " %8u %8u %8u"
+#endif
+                "\n" _NL_,
+                pfTot[CyclesCount],
+                (pfTot[CyclesCount] * 1000) / cycles_per_usec,
+                pfTot[InstrRate],
+                pfTot[ICacheHits],
+                pfTot[ICacheMiss],
+                pfTot[DCacheHits],
+                pfTot[DCacheMiss]
+#if defined(PMON_RAC_METRIC)
+                , pfTot[Prefetch],
+                pfTot[RACHits],
+                pfTot[RACLookup]
+#endif
+                );
+
+    else
+        printk( _B_ "\nTot:  %12u %9u\n" _NL_,
+                pfTot[CyclesCount],
+                (pfTot[CyclesCount] * 1000) / cycles_per_usec );
+
+    /* Overflow slots hold events logged beyond PMON_MAX_EVENTS. */
+    for ( evt=PMON_MAX_EVENTS; evt<PMON_OVFLW_EVENTS; evt++ )
+        if ( pfCtr[evt] != 0 )
+            printk( _R_ "WARNING: %u event not reported" _NL_, evt );
+
+    return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_bind
+ * Program the MIPS performance-counter hardware for the given metric by
+ * writing its global (MOD/SET) and counter-0 event-select control words.
+ * The DISABLED entry holds the cycle-count defaults, so binding
+ * DISABLED leaves the cycle counter running.
+ *-----------------------------------------------------------------------------
+ */
+void pmon_bind(PMetric_t metric)
+{
+    const PMetricConfig_t *cfg_p = &pMetricConfig[metric];
+
+    PMONDBG( printk("bind() metric<%u:%s>\n", metric, PMetric_Str[metric]); );
+
+    __write_pfgblctl( cfg_p->perfGbl ); /* global MOD SET configuration */
+    __write_pfctl_0(  cfg_p->perfCtl ); /* event select for counter 0 */
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_enable
+ * Apply skip, sample and metric configuration.
+ * Clears all accumulators, then arms collection: all_metric != 0 walks
+ * every metric starting at DISABLED+1, otherwise only CyclesCount is
+ * measured.  skip is forced to at least 1 so the first pmon_bgn() pass
+ * performs the pmon_bind() arming step.  Always returns 0.
+ *-----------------------------------------------------------------------------
+ */
+int pmon_enable(uint32_t skip, uint32_t samples, uint32_t all_metric)
+{
+    printk(_G_ "PMON: skip<%u> samples<%u> all_metric<%u>" _NL_,
+            skip, samples, all_metric );
+
+    memset( pfCtr, 0, sizeof(pfCtr) );
+    memset( pfSum, 0, sizeof(pfSum) );
+    memset( pfMon, 0, sizeof(pfMon) );
+
+    sample_size = samples;
+    iteration = 0;  /* current iteration */
+    running = 0;
+
+    report_all = all_metric;
+    metric = ( report_all == 0 ) ? CyclesCount : (DISABLED+1);
+    skip_enable = (skip == 0) ? 1 : skip;
+
+#ifdef PMON_UT
+    /* Drive the built-in unit test immediately when compiled in. */
+    pmon_loops = (sample_size + skip) * 2; 
+    if ( all_metric ) pmon_loops *= 10;
+    pmon_ut();
+#endif
+
+    return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_bgn
+ * Start a new iteration. Ignore the first few iterations defined by skip.
+ * If pmon_bgn is invoked before pmon_end, then current iteration overwritten
+ *-----------------------------------------------------------------------------
+ */
+void pmon_bgn(void)
+{
+#ifdef PMON_UT
+    PMONDBG( printk("pmon_bgn() skip_enable<%u>\n", skip_enable ); );
+#endif
+
+    if ( unlikely(skip_enable) )    /* iterations to skip */
+    {
+        skip_enable--;
+        if ( skip_enable == 0 )
+        {
+            /* Last skipped iteration: arm the hardware and take the
+             * baseline sample at event 0. */
+            pmon_bind( metric );    /* setup pmon_fn pointer */
+            running = 1;            /* tracing begins now */
+            pmon_log(0);            /* record event0 : baseline value */
+        }
+        else
+            pmon_clr();
+    }
+    else
+        pmon_log(0);                /* record event0 : baseline value */
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_end
+ * Function to be invoked to demarcate the end of an iteration.
+ *
+ * Accumulates the elapsed count into an accumulator per event. Assumes that
+ * events will be invoked in order of event id. Each time pmon_end() is invoked
+ * an iteration completes, implying a sample is taken. Upon completion of the
+ * sample size of iterations, measurements for the next metric will commence.
+ *
+ *-----------------------------------------------------------------------------
+ */
+void pmon_end(uint32_t max_event)
+{
+    uint32_t evt, elapsed_count, previous_count, current_count;
+
+    if ( ! running )
+        return;
+
+    /* pfCtr[0] == ~0U presumably marks an iteration voided by pmon_clr()
+     * -- confirm against pmon_clr(), which is not in this file. */
+    if ( pfCtr[0] == ~0U )
+        return;
+
+    PMONDBG( printk("pmon_end iteration<%u> metric<%u,%s>\n",
+             iteration, metric, PMetric_Str[metric]); );
+
+    /* Accumulate elapsed counter values per event */
+    /* NOTE(review): when USE_CP0_CYCLES_COUNT is defined, the dangling
+     * 'else' below ends up binding to the pfSum accumulation loop after
+     * the #endif, so CyclesCount samples would never be accumulated.
+     * Harmless while the macro stays undefined, but fix before enabling. */
+#if defined(USE_CP0_CYCLES_COUNT)
+    if ( metric == CyclesCount )    /* cycles count up */
+    {
+        for ( evt=1; evt<=max_event; evt++ )
+        {
+            previous_count = pfCtr[evt-1];
+            current_count  = pfCtr[evt];
+
+            if ( current_count < previous_count )   /* rollover */
+                elapsed_count = current_count + (~0U - previous_count);
+            else
+                elapsed_count = (current_count - previous_count);
+
+            if ( elapsed_count > 0xFFFF0000 )
+                return;     /* exclude this entire iteration */
+            else
+                pfTmp[evt] = elapsed_count;
+        }
+    }
+    else        /* Performance counters count down */
+#else /* Performance counters : count down */
+    /* WORKAROUND : */
+    for ( evt=1; evt<=max_event; evt++ )
+    {
+        previous_count = pfCtr[evt-1];
+        current_count  = pfCtr[evt];
+
+        if ( current_count > previous_count )    /* rollover */
+            elapsed_count = previous_count + (~0U - current_count);
+        else
+            elapsed_count = previous_count - current_count;
+
+        if ( elapsed_count > 0xFFFF0000 )
+            return;     /* exclude this entire iteration */
+        else
+            pfTmp[evt] = elapsed_count;
+    }
+#endif
+
+    /* Deltas are staged in pfTmp first so an implausibly large value can
+     * discard the whole iteration before anything is accumulated. */
+    for ( evt=1; evt<=max_event; evt++ )
+        pfSum[evt] += pfTmp[evt];   /* Accumulate into average */
+
+
+    iteration++;    /* Completed the collection of one sample */
+
+
+    /* 
+     * Record accumulated into pfMon, if sample size number of iterations
+     * have been collected, and switch to next metric measurement.
+     */
+    if ( iteration >= sample_size )
+    {
+        pfMon[metric][0] = ~0U;     /* Tag measurement as valid */
+        for ( evt=1; evt<PMON_MAX_EVENTS; evt++ )
+            pfMon[metric][evt] = pfSum[evt];
+
+        metric++;                   /* Fetch next metric */
+        iteration = 0;              /* First iteration of next metric */
+        memset( pfSum, 0, sizeof(pfSum) );  /* Clear accumulator */
+
+        if ( metric >= PMON_MAX_METRIC)     /* Completed all metrics ? */
+        {
+            running = 0;            
+            metric = DISABLED;
+            pmon_report();
+        }
+
+        pmon_bind( metric );        /* Configure next metric (or disable) */
+    }
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Rudimentary pmon ctl char device ioctl handling of a single word argument
+ *-----------------------------------------------------------------------------
+ */
+/* No per-open state is kept, so open and release are trivial accept-alls. */
+static int pmon_open(struct inode *inode, struct file *filp) { return 0; }
+static int pmon_rel(struct inode *inode, struct file *file)  { return 0; }
+
+static DEFINE_MUTEX(ioctlMutex);
+
+/* Serialized ioctl entry point for the pmon control device.
+ * START ioctls: arg packs the skip count in the upper 16 bits and the
+ * sample count in the lower 16; cmd itself is forwarded as the
+ * all_metric argument of pmon_enable().
+ * PMON_REPORT_IOCTL: print the collected report.
+ * Unknown commands return -ENOTTY. */
+static long pmon_ioctl_unlocked(  struct file *  file, 
+                                  unsigned int   cmd,
+                                  unsigned long  arg)
+{
+    long ret = -ENOTTY;
+    mutex_lock(&ioctlMutex);
+    switch ( cmd )
+    {
+        case PMON_CPU_START_IOCTL:
+        case PMON_ALL_START_IOCTL:
+             ret = pmon_enable( (arg >>16), (arg & 0xFFFF), cmd );
+             /* Fix: missing break fell through into the report case,
+              * clobbering ret and dumping an empty report on every
+              * start request. */
+             break;
+        case PMON_REPORT_IOCTL:
+             ret = pmon_report();
+             break;
+    }
+    mutex_unlock(&ioctlMutex);
+    return ret;
+}
+
+/* File operations for the pmon control character device; all control is
+ * done through the single-word ioctl above. */
+static struct file_operations pmon_fops =
+{
+    .unlocked_ioctl  = pmon_ioctl_unlocked,
+    .open   = pmon_open,
+    .release = pmon_rel,
+    .owner  = THIS_MODULE
+};
+
+/*
+ *-----------------------------------------------------------------------------
+ * Function: pmon_init
+ * Upon module loading, the character device pmon is registered and the
+ * performance counter module is enabled for counting cycles in Counter 0.
+ * Also calibrates cycles_per_usec against a udelay(1000) reference.
+ * Returns 0 on success, -1 if the major number cannot be claimed.
+ *-----------------------------------------------------------------------------
+ */
+int __init pmon_init(void)
+{
+    uint32_t evt, elapsed_count, previous_count, current_count;
+
+    if ( register_chrdev(PMON_DEV_MAJ, PMON_DEV_NAME, &pmon_fops) )
+    {
+        printk( _R_ "Unable to get major number <%d>" _NL_, PMON_DEV_MAJ);
+        return -1;
+    }
+
+    /* Seed every event slot with the "unregistered" placeholder name. */
+    for (evt=0; evt < PMON_MAX_EVENTS; evt++)
+        pmon_reg(evt, PMON_DEF_UNREGEVT);
+
+    __write_pfgblctl(DEF_GBL_VAL);  /* Enable global PCE */
+    __write_pfctl_0 (DEF_CTL_VAL);  /* Enable cycle count in CTR0 */
+
+    /* Estimate PMON cycles to microseconds; the performance counters
+     * count down, hence previous - current below. */
+    previous_count = __read_pfctr_0();
+    udelay(1000);
+    current_count = __read_pfctr_0();
+
+    if ( current_count > previous_count )   /* rollover */
+        elapsed_count = previous_count + (~0U - current_count);
+    else
+        elapsed_count = previous_count - current_count;
+
+    /* Fix: guard against a zero quotient, which would later cause a
+     * divide-by-zero in pmon_report(); keep the compiled-in default
+     * when calibration yields less than one cycle per microsecond. */
+    if ( elapsed_count >= 1000 )
+        cycles_per_usec = elapsed_count / 1000;
+
+    printk( _H_ "PMON Char Driver Registered<%d> cycles_per_usec<%u>" _NL_,
+            PMON_DEV_MAJ, cycles_per_usec );
+
+    /* Fix: module init must follow the 0/-E convention; returning the
+     * positive major number triggers a kernel initcall warning. */
+    return 0;
+}
+
+module_init(pmon_init);
+
+EXPORT_SYMBOL(pfCtr);
+EXPORT_SYMBOL(pmon_log);
+EXPORT_SYMBOL(pmon_bgn);
+EXPORT_SYMBOL(pmon_end);
+EXPORT_SYMBOL(pmon_reg);
+EXPORT_SYMBOL(pmon_enable);
+
+#endif  /* defined( CONFIG_PMON ) */
diff --git a/arch/mips/bcm963xx/prom.c b/arch/mips/bcm963xx/prom.c
new file mode 100644
index 0000000000000000000000000000000000000000..678124032167efead9f65419ef10c47e72760654
--- /dev/null
+++ b/arch/mips/bcm963xx/prom.c
@@ -0,0 +1,405 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+/*
+* <:copyright-BRCM:2004:DUAL/GPL:standard
+* 
+*    Copyright (c) 2004 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+* :>
+ 
+*/
+/*
+ * prom.c: PROM library initialization code.
+ *
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/blkdev.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/time.h>
+
+#include <bcm_map_part.h>
+#include <bcm_cpu.h>
+#include <board.h>
+#include <boardparms.h>
+
+extern int  do_syslog(int, char *, int);
+
+unsigned char g_blparms_buf[1024];
+
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+unsigned int main_tp_num;
+#endif
+
+static void __init create_cmdline(char *cmdline);
+UINT32 __init calculateCpuSpeed(void);
+void __init retrieve_boot_loader_parameters(void);
+
+#if defined (CONFIG_BCM96328)
+const uint32 cpu_speed_table[0x20] = {
+    320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, 320,
+    0, 320, 160, 200, 160, 200, 400, 320, 320, 160, 384, 320, 192, 320, 320, 320
+};
+#endif
+
+#if defined (CONFIG_BCM96362)
+const uint32 cpu_speed_table[0x20] = {
+    320, 320, 320, 240, 160, 400, 440, 384, 320, 320, 320, 240, 160, 320, 400, 320,
+    320, 320, 320, 240, 160, 200, 400, 384, 320, 320, 320, 240, 160, 200, 400, 400
+};
+#endif
+
+#if defined (CONFIG_BCM963268)
+const uint32 cpu_speed_table[0x20] = {
+    0, 0, 400, 320, 0, 0, 0, 0, 0, 0, 333, 400, 0, 0, 320, 400,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+#endif
+
+#if defined (CONFIG_BCM96318)
+const uint32 cpu_speed_table[0x04] = {
+    166, 400, 250, 333
+};
+#endif
+
+#if defined (CONFIG_BCM960333)
+const uint32 cpu_speed_table[0x04] = {
+    200, 400, 333, 0
+};
+#endif
+
+
+
+
+
+
+#if defined (CONFIG_BCM96838)
+const uint32 cpu_speed_table[0x3] = {
+    600, 400, 240
+};
+#endif
+
+#if defined (CONFIG_BCM963381)
+const uint32 cpu_speed_table[0x04] = {
+    300, 800, 480, 600
+};
+#endif
+
+#if defined (CONFIG_BCM96848)
+const uint32 cpu_speed_table[8] = {
+    250, 250, 400, 400, 250, 250, 428, 600 
+};
+#endif
+
+static char promBoardIdStr[NVRAM_BOARD_ID_STRING_LEN];
+const char *get_system_type(void)
+{
+    kerSysNvRamGetBoardId(promBoardIdStr);
+    return(promBoardIdStr);
+}
+
+
+/* --------------------------------------------------------------------------
+    Name: prom_init
+ -------------------------------------------------------------------------- */
+
+extern struct plat_smp_ops brcm_smp_ops;
+
+void __init prom_init(void)
+{
+    int argc = fw_arg0;
+    u32 *argv = (u32 *)CKSEG0ADDR(fw_arg1);
+    int i;
+
+    retrieve_boot_loader_parameters();
+    kerSysEarlyFlashInit();
+
+    // too early in bootup sequence to acquire spinlock, not needed anyways
+    // only the kernel is running at this point
+    kerSysNvRamGetBoardIdLocked(promBoardIdStr);
+    printk( "%s prom init\n", promBoardIdStr );
+
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+    main_tp_num = ((read_c0_diag3() & CP0_CMT_TPID) == CP0_CMT_TPID) ? 1 : 0;
+    printk("Linux TP ID = %u \n", (unsigned int)main_tp_num);
+#endif
+
+    PERF->IrqControl[0].IrqMask=0;
+
+    arcs_cmdline[0] = '\0';
+
+    create_cmdline(arcs_cmdline);
+
+    strcat(arcs_cmdline, " ");
+
+    for (i = 1; i < argc; i++) {
+        strcat(arcs_cmdline, (char *)CKSEG0ADDR(argv[i]));
+        if (i < (argc - 1))
+            strcat(arcs_cmdline, " ");
+    }
+
+
+    /* Count register increments every other clock */
+    mips_hpt_frequency = calculateCpuSpeed() / 2;
+
+#if defined (CONFIG_SMP)
+    register_smp_ops(&brcm_smp_ops);
+#endif
+}
+
+
+/* --------------------------------------------------------------------------
+    Name: prom_free_prom_memory
+Abstract: 
+ -------------------------------------------------------------------------- */
+void __init prom_free_prom_memory(void)
+{
+
+}
+
+/*#if defined(CONFIG_ROOT_NFS) && defined(SUPPORT_SWMDK)*/
+#if 0 /* Using a different interface, e.g. a USB link cable, it works */
+  /* We can't use gendefconfig to automatically fix this, so instead we will
+     raise an error here */
+  #error "Kernel cannot be configured for both SWITCHMDK and NFS."
+#endif
+
+#define HEXDIGIT(d) ((d >= '0' && d <= '9') ? (d - '0') : ((d | 0x20) - 'W'))
+#define HEXBYTE(b)  (HEXDIGIT((b)[0]) << 4) + HEXDIGIT((b)[1])
+
+#ifndef CONFIG_ROOT_NFS_DIR
+#define NFS_ROOT_DIR	"/srv/rootfs/mips"
+#define NFS_HOST_IP	"172.22.33.1"
+#define NFS_LOCAL_IP	"172.22.33.2"
+#define NFS_IP_MASK	"255.255.255.0"
+#define NFS_IF		"usb0"
+#endif
+
+#ifdef CONFIG_BLK_DEV_RAM_SIZE
+#define RAMDISK_SIZE		CONFIG_BLK_DEV_RAM_SIZE
+#else
+#define RAMDISK_SIZE		0x800000
+#endif
+
+/*
+ * This function reads in a line that looks something like this from NvRam:
+ *
+ * CFE bootline=bcmEnet(0,0)host:vmlinux e=192.169.0.100:ffffff00 h=192.169.0.1
+ *
+ * and fills in the cmdline parameter based on the boot_type that CFE sets up.
+ *
+ * for boot from flash, it will use the definition in CONFIG_ROOT_FLASHFS
+ *
+ * for boot from NFS, it will look like below:
+ * CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/opt/targets/96345R/fs
+ * ip=192.168.0.100:192.168.0.1::255.255.255.0::eth0:off rw"
+ *
+ * for boot from tftp, it will look like below:
+ * CONFIG_CMDLINE="root=/dev/ram rw rd_start=0x81000000 rd_size=0x1800000"
+ */
+static void __init create_cmdline(char *cmdline)
+{
+	char boot_type = '\0', mask[16] = "";
+	char bootline[NVRAM_BOOTLINE_LEN] = "";
+	char *localip = NULL, *hostip = NULL, *p = bootline, *rdaddr = NULL;
+
+	/*
+	 * too early in bootup sequence to acquire spinlock, not needed anyways
+	 * only the kernel is running at this point
+	 */
+	kerSysNvRamGetBootlineLocked(bootline);
+
+	while (*p) {
+		if (p[0] == 'e' && p[1] == '=') {
+			/* Found local ip address */
+			p += 2;
+			localip = p;
+			while (*p && *p != ' ' && *p != ':')
+				p++;
+			if (*p == ':') {
+				/* Found network mask (eg FFFFFF00) */
+				*p++ = '\0';
+				sprintf(mask, "%u.%u.%u.%u", HEXBYTE(p),
+					HEXBYTE(p + 2),
+				HEXBYTE(p + 4), HEXBYTE(p + 6));
+				p += 4;
+			} else if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'h' && p[1] == '=') {
+			/* Found host ip address */
+			p += 2;
+			hostip = p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'r' && p[1] == '=') {
+			/* Found boot type */
+			p += 2;
+			boot_type = *p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else if (p[0] == 'a' && p[1] == '=') {
+			p += 2;
+			rdaddr = p;
+			while (*p && *p != ' ')
+				p++;
+			if (*p == ' ')
+				*p++ = '\0';
+		} else 
+			p++;
+	}
+
+#ifdef CONFIG_ROOT_NFS_DIR
+	if (boot_type == 'h' && localip && hostip) {
+		/* Boot from NFS with proper IP addresses */
+		sprintf(cmdline, "root=/dev/nfs nfsroot=%s:" CONFIG_ROOT_NFS_DIR
+				" ip=%s:%s::%s::eth0:off rw",
+				hostip, localip, hostip, mask);
+#else
+	if (boot_type == 'h') {
+		strcpy(cmdline, "root=/dev/nfs nfsroot=" NFS_HOST_IP ":" NFS_ROOT_DIR
+				" ip=" NFS_LOCAL_IP ":" NFS_HOST_IP "::" NFS_IP_MASK "::" NFS_IF ":off "
+				"rw rootwait loglevel=7");
+#endif
+	} else if (boot_type == 'c') {
+		/* boot from tftp */
+		sprintf(cmdline, "root=/dev/ram0 ro rd_start=%s rd_size=0x%x",
+				rdaddr, RAMDISK_SIZE << 10);
+	} else {
+		/* go with the default, boot from flash */
+#ifdef CONFIG_ROOT_FLASHFS
+		strcpy(cmdline, CONFIG_ROOT_FLASHFS);
+#endif
+	}
+}
+
+/*  *********************************************************************
+    *  calculateCpuSpeed()
+    *      Calculate the BCM63xx CPU speed by reading the PLL Config register
+    *      and applying the following formula:
+    *      Fcpu_clk = (25 * MIPSDDR_NDIV) / MIPS_MDIV
+    *  Input parameters:
+    *      none
+    *  Return value:
+    *      CPU clock frequency in Hz
+    ********************************************************************* */
+
+#if defined(CONFIG_BCM96328) || defined(CONFIG_BCM96362) ||         defined(CONFIG_BCM963268) ||                                         defined(CONFIG_BCM963381)
+UINT32 __init calculateCpuSpeed(void)
+{
+    UINT32 mips_pll_fvco;
+
+    mips_pll_fvco = MISC->miscStrapBus & MISC_STRAP_BUS_MIPS_PLL_FVCO_MASK;
+    mips_pll_fvco >>= MISC_STRAP_BUS_MIPS_PLL_FVCO_SHIFT;
+
+    return cpu_speed_table[mips_pll_fvco] * 1000000;
+}
+#endif
+
+#if defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333)
+UINT32 __init calculateCpuSpeed(void)
+{
+	UINT32 uiCpuSpeedTableIdx;				// Index into the CPU speed table (0 to 3)
+	
+	// Get the strapOverrideBus bits to index into the CPU speed table
+	uiCpuSpeedTableIdx = STRAP->strapOverrideBus & STRAP_BUS_MIPS_FREQ_MASK;
+	uiCpuSpeedTableIdx >>= STRAP_BUS_MIPS_FREQ_SHIFT;
+    
+    return cpu_speed_table[uiCpuSpeedTableIdx] * 1000000;
+}
+#endif
+
+#if defined(CONFIG_BCM96838)
+UINT32 __init calculateCpuSpeed(void)
+{ 
+#define OTP_BASE		   0xb4e00400
+#define OTP_SHADOW_BRCM_BITS_0_31               0x40
+#define OTP_BRCM_VIPER_FREQ_SHIFT               18
+#define OTP_BRCM_VIPER_FREQ_MASK                (0x7 << OTP_BRCM_VIPER_FREQ_SHIFT)
+
+    UINT32 otp_shadow_reg = *((volatile UINT32*)(OTP_BASE+OTP_SHADOW_BRCM_BITS_0_31));
+	UINT32 uiCpuSpeedTableIdx = (otp_shadow_reg & OTP_BRCM_VIPER_FREQ_MASK) >> OTP_BRCM_VIPER_FREQ_SHIFT;
+	
+	return cpu_speed_table[uiCpuSpeedTableIdx] * 1000000;
+}
+#endif
+
+#if defined(CONFIG_BCM96848)
+#define OTP_BITS_320_351_ROW                    10
+#define OTP_BRCM_MAX_CLOCK_FREQ_SHIFT			0
+#define OTP_BRCM_MAX_CLOCK_FREQ_MASK			(0x7<<OTP_BRCM_MAX_CLOCK_FREQ_SHIFT)
+int otp_get_row(int row);
+
+UINT32 __init calculateCpuSpeed(void)
+{
+    UINT32 clock_sel_strap = (MISC->miscStrapBus & MISC_STRAP_CLOCK_SEL_MASK) >> MISC_STRAP_CLOCK_SEL_SHIFT;
+    UINT32 clock_sel_otp = otp_get_row(OTP_BITS_320_351_ROW) & OTP_BRCM_MAX_CLOCK_FREQ_MASK;
+ 
+    if (cpu_speed_table[clock_sel_strap] <= cpu_speed_table[clock_sel_otp])
+        return cpu_speed_table[clock_sel_strap] * 1000000;
+    else
+        return cpu_speed_table[clock_sel_otp] * 1000000;
+}
+#endif
+
+
+/* Retrieve a buffer of parameters passed by the boot loader.  Functions in
+ * board.c can return requested parameter values to a calling Linux function.
+ */
+void __init retrieve_boot_loader_parameters(void)
+{
+    extern unsigned char _text;
+    unsigned long blparms_magic = *(unsigned long *) (&_text - 8);
+    unsigned long blparms_buf = *(unsigned long *) (&_text - 4);
+    unsigned char *src = (unsigned char *) blparms_buf;
+    unsigned char *dst = g_blparms_buf;
+
+    if( blparms_magic != BLPARMS_MAGIC )
+    {
+        /* Subtract four more bytes for NAND flash images. */
+        blparms_magic = *(unsigned long *) (&_text - 12);
+        blparms_buf = *(unsigned long *) (&_text - 8);
+        src = (unsigned char *) blparms_buf;
+    }
+
+    if( blparms_magic == BLPARMS_MAGIC )
+    {
+        do
+        {
+            *dst++ = *src++;
+        } while( (src[0] != '\0' || src[1] != '\0') &&
+          (unsigned long) (dst - g_blparms_buf) < sizeof(g_blparms_buf) - 2);
+    }
+
+    dst[0] = dst[1] = '\0';
+}
+
+#endif // defined(CONFIG_BCM_KF_MIPS_BCM963XX)
+
diff --git a/arch/mips/bcm963xx/pwrmngtclk.c b/arch/mips/bcm963xx/pwrmngtclk.c
new file mode 100644
index 0000000000000000000000000000000000000000..0875ae167b59bbb4aac0bc9a769031a2a451aa74
--- /dev/null
+++ b/arch/mips/bcm963xx/pwrmngtclk.c
@@ -0,0 +1,617 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+/***********************************************************
+ *
+ * Copyright (c) 2009 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * <:label-BRCM:2009:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license 
+ * agreement governing use of this software, this software is licensed 
+ * to you under the terms of the GNU General Public License version 2 
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give 
+ *    you permission to link this software with independent modules, and 
+ *    to copy and distribute the resulting executable under terms of your 
+ *    choice, provided that you also meet, for each linked independent 
+ *    module, the terms and conditions of the license of that module. 
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications 
+ *    of the software.  
+ * 
+ * Not withstanding the above, under no circumstances may you combine 
+ * this software in any way with any other Broadcom software provided 
+ * under a license other than the GPL, without Broadcom's express prior 
+ * written consent. 
+ * 
+ * :>
+ *
+ ************************************************************/
+#include <linux/module.h>
+#include <asm/time.h>
+#include <bcm_map_part.h>
+#include "board.h"
+
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+#define CLK_ALIGNMENT_REG   0xff410040
+#define KEEPME_MASK         0x00007F00 // bit[14:8]
+
+#if defined (CONFIG_BCM96318)
+#define RATIO_ONE_ASYNC     0x0 /* 0b00 */
+#define RATIO_ONE_HALF      0x1 /* 0b01 */
+#define RATIO_ONE_QUARTER   0x2 /* 0b10 */
+#define RATIO_ONE_EIGHTH    0x3 /* 0b11 */
+
+#define MASK_ASCR_BITS 0x3
+#define MASK_ASCR_SHFT 23
+#define MASK_ASCR (MASK_ASCR_BITS << MASK_ASCR_SHFT)
+#else
+#define RATIO_ONE_SYNC      0x0 /* 0b000 */
+#define RATIO_ONE_ASYNC     0x1 /* 0b001 */
+#define RATIO_ONE_HALF      0x3 /* 0b011 */
+#define RATIO_ONE_QUARTER   0x5 /* 0b101 */
+#define RATIO_ONE_EIGHTH    0x7 /* 0b111 */
+
+#define MASK_ASCR_BITS 0x7
+#define MASK_ASCR_SHFT 28
+#define MASK_ASCR (MASK_ASCR_BITS << MASK_ASCR_SHFT)
+#endif
+
+unsigned int originalMipsAscr = 0; // To keep track whether MIPS was in Async mode to start with at boot time
+unsigned int originalMipsAscrChecked = 0;
+unsigned int keepme;
+#endif
+
+#if defined(CONFIG_BCM_PWRMNGT_MODULE)
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+unsigned int self_refresh_enabled = 0; // Wait for the module to control if it is enabled or not
+#endif
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+unsigned int clock_divide_enabled = 0; // Wait for the module to control if it is enabled or not
+#endif
+#else
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+unsigned int self_refresh_enabled = 1;
+#endif
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+unsigned int clock_divide_enabled = 1;
+#endif
+#endif
+
+unsigned int clock_divide_low_power0 = 0;
+unsigned int clock_divide_active0 = 0;
+unsigned int wait_count0 = 0;
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+unsigned int TimerC0Snapshot0 = 0;
+unsigned int prevTimerCnt0, newTimerCnt0, TimerAdjust0;
+#endif
+
+#if defined(CONFIG_SMP)
+unsigned int clock_divide_low_power1 = 0;
+unsigned int clock_divide_active1 = 0;
+unsigned int wait_count1 = 0;
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+unsigned int TimerC0Snapshot1 = 0;
+unsigned int prevTimerCnt1, newTimerCnt1, TimerAdjust1;
+#endif
+#endif
+
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+unsigned int C0divider, C0multiplier, C0ratio, C0adder;
+#endif
+extern volatile int isVoiceIdle;
+ 
+DEFINE_SPINLOCK(pwrmgnt_clk_irqlock);
+ 
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+/* To put CPU in ASYNC mode and change CPU clock speed */
+void __BcmPwrMngtSetASCR(unsigned int freq_div)
+{
+   register unsigned int temp;
+#if defined(CONFIG_BCM96328) ||          defined(CONFIG_BCM96362)
+   register unsigned int cp0_ascr_asc;
+   volatile register unsigned int * clk_alignment_reg = (unsigned int *) CLK_ALIGNMENT_REG;
+
+   // A/ SYNC instruction // Step A, SYNC instruction
+   asm("sync" : : );
+
+   // B/ CMT mips : set cp0 reg 22 sel 5 bits [30:28] to 001 (RATIO_ONE_ASYNC)
+   asm("mfc0 %0,$22,5" : "=d"(cp0_ascr_asc) :);
+   if (!originalMipsAscrChecked) {
+      originalMipsAscr = cp0_ascr_asc & MASK_ASCR;
+      originalMipsAscrChecked = 1;
+   }
+   cp0_ascr_asc = ( cp0_ascr_asc & ~MASK_ASCR) | (RATIO_ONE_ASYNC << MASK_ASCR_SHFT);
+   asm("mtc0 %0,$22,5" : : "d" (cp0_ascr_asc));
+
+   // // These 3 steps (C,D and E) are needed to work around limitations on clock alignment logics [...]
+   // C/ read from 0xff410040   ( make sure you set this to volatile first)
+   temp = *clk_alignment_reg;    
+    
+   // D/ save bit[14:8] to some register, then zero out bit [14:8], write back to same address.
+   keepme = temp | KEEPME_MASK;
+   // E/ SYNC instruction   // Step E SYNC instruction  
+   asm("sync" : : );
+
+   // F/ change to 1/2, or 1/4, or 1/8 by setting cp0 sel 5 bits[30:28] (sel 4 bits[24:22] for single core mips)  to 011, 101, or 111 respectively
+   // Step F change to 1/2, or 1/4, or 1/8 by setting cp0 bits[30:28]
+   asm("mfc0 %0,$22,5" : "=d"(temp) :);
+   temp = ( temp & ~MASK_ASCR) | (freq_div << MASK_ASCR_SHFT);
+   asm("mtc0 %0,$22,5" : : "d" (temp));
+
+   // Step G/ 16 nops // Was 32 nops
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : ); 
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+#elif defined(CONFIG_BCM96318)
+   asm("sync" : : );
+   asm("mfc0 %0,$22,4" : "=d"(temp) :);
+   temp = ( temp & ~MASK_ASCR) | (freq_div << MASK_ASCR_SHFT);
+   asm("mtc0 %0,$22,4" : : "d" (temp));
+#else
+   if (freq_div == RATIO_ONE_ASYNC) {
+      // Gradually bring the processor speed back to 1:1
+      // If it is done in one step, CP0 timer interrupts are missed.
+
+      // E/ SYNC instruction   // Step E SYNC instruction  
+      asm("sync" : : );
+
+      // Step F1 change to 1/4
+      asm("mfc0 %0,$22,5" : "=d"(temp) :);
+      temp = ( temp & ~MASK_ASCR) | (RATIO_ONE_QUARTER << MASK_ASCR_SHFT);
+      asm("mtc0 %0,$22,5" : : "d" (temp));
+
+      // Step F2 change to 1/2
+      temp = ( temp & ~MASK_ASCR) | (RATIO_ONE_HALF << MASK_ASCR_SHFT);
+      asm("mtc0 %0,$22,5" : : "d" (temp));
+
+      // Step F3 change to 1/1, high performance memory access
+      temp = ( temp & ~MASK_ASCR);
+      asm("mtc0 %0,$22,5" : : "d" (temp));
+
+   } else {
+      // E/ SYNC instruction   // Step E SYNC instruction  
+      asm("sync" : : );
+
+      // F/ change to 1/2, or 1/4, or 1/8 by setting cp0 sel 5 bits[30:28] (sel 4 bits[24:22] for single core mips)  to 011, 101, or 111 respectively
+      // Step F change to 1/2, or 1/4, or 1/8 by setting cp0 bits[30:28]
+      asm("mfc0 %0,$22,5" : "=d"(temp) :);
+      temp = ( temp & ~MASK_ASCR) | (freq_div << MASK_ASCR_SHFT);
+      asm("mtc0 %0,$22,5" : : "d" (temp));
+   }
+#endif
+
+   return;
+} /* BcmPwrMngtSetASCR */
+
+void BcmPwrMngtSetASCR(unsigned int freq_div)
+{
+   unsigned long flags;
+
+   if (!freq_div) {
+      // Can't use this function to set to SYNC mode
+      return;
+   }
+
+   spin_lock_irqsave(&pwrmgnt_clk_irqlock, flags);
+   __BcmPwrMngtSetASCR(freq_div);
+   spin_unlock_irqrestore(&pwrmgnt_clk_irqlock, flags);
+   return;
+} /* BcmPwrMngtSetASCR */
+EXPORT_SYMBOL(BcmPwrMngtSetASCR);
+
+
+/* To put CPU in SYNC mode and change CPU clock speed to 1:1 ratio */
+/* No SYNC mode in newer MIPS core, use the __BcmPwrMngtSetASCR with ratio 1:1 instead */
+void __BcmPwrMngtSetSCR(void)
+{
+   register unsigned int cp0_ascr_asc;
+#if defined(CONFIG_BCM96328) ||          defined(CONFIG_BCM96362)
+   register unsigned int temp;
+   volatile register unsigned int * clk_alignment_reg = (unsigned int *) CLK_ALIGNMENT_REG;
+#endif
+
+   // It is important to go back to divide by 1 async mode first, don't jump directly from divided clock back to SYNC mode.
+   // A/ set cp0 reg 22 sel 5 bits[30:28]  (sel 4 bits[24:22] for single core mips)  to 001
+   asm("mfc0 %0,$22,5" : "=d"(cp0_ascr_asc) :);
+   if (!originalMipsAscrChecked) {
+      originalMipsAscr = cp0_ascr_asc & MASK_ASCR;
+      originalMipsAscrChecked = 1;
+   }
+   if (originalMipsAscr)
+      return;
+   cp0_ascr_asc = ( cp0_ascr_asc & ~MASK_ASCR) | (RATIO_ONE_ASYNC << MASK_ASCR_SHFT);
+   asm("mtc0 %0,$22,5" : : "d" (cp0_ascr_asc));
+
+   // B/ 16 nops // Was 32 nops (wait a while to make sure clk is back to full speed)
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : ); 
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+   asm("nop" : : ); asm("nop" : : );
+
+   // C/ SYNC instruction
+   asm("sync" : : );
+
+#if defined(CONFIG_BCM96328) ||          defined(CONFIG_BCM96362)
+   // This step is needed to work around limitations on clock alignment logics for chips from BCM3368 and before BCM6816.
+
+   // E/ set bit[14:8] to be 6'b1000001
+   *clk_alignment_reg = (*clk_alignment_reg & ~KEEPME_MASK) | 0x4100;
+
+   // F/ repeat
+   //  until caught rising edge
+
+   while (1) {
+      //  a/ sread bit[22:16] to check rising edge:
+      //   - When bit[22:16] of register 0xff410040 shows sequence of 1's (from bit[22]) followed by 0's (to bit[16]) means good alignment. 
+      //    eg [1100000] or [1111000]
+      temp = (*clk_alignment_reg & 0x007F0000) >> 16;
+      if (temp == 0x40 || temp == 0x60 || temp == 0x70 || temp == 0x78 || temp == 0x7C || temp == 0x7E) {
+        break;
+      }
+
+   }
+
+   // G/ restore the saved value of bit[14:8] of 0xff410040 back to the register.
+   *clk_alignment_reg = (*clk_alignment_reg & ~KEEPME_MASK) | (keepme & KEEPME_MASK);
+#endif
+
+   // H/ set cp0 reg 22 sel 5 bits[30:28]  (sel 4 bits[24:22] for single core mips)  to 000
+   asm("mfc0 %0,$22,5" : "=d"(cp0_ascr_asc) :);
+   cp0_ascr_asc = ( cp0_ascr_asc & ~MASK_ASCR);
+   asm("mtc0 %0,$22,5" : : "d" (cp0_ascr_asc));
+
+   // I/ SYNC instruction 
+   asm("sync" : : );
+
+   return;
+} /* BcmPwrMngtSetSCR */
+
+void BcmPwrMngtSetSCR(void)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&pwrmgnt_clk_irqlock, flags);
+   __BcmPwrMngtSetSCR();
+   spin_unlock_irqrestore(&pwrmgnt_clk_irqlock, flags);
+
+   return;
+} /* BcmPwrMngtSetSCR */
+EXPORT_SYMBOL(BcmPwrMngtSetSCR);
+
+
+void BcmPwrMngtSetAutoClkDivide(unsigned int enable)
+{
+   printk("Host MIPS Clock divider pwrsaving is %s\n", enable?"enabled":"disabled");
+   clock_divide_enabled = enable;
+}
+EXPORT_SYMBOL(BcmPwrMngtSetAutoClkDivide);
+
+
+int BcmPwrMngtGetAutoClkDivide(void)
+{
+   return (clock_divide_enabled);
+}
+EXPORT_SYMBOL(BcmPwrMngtGetAutoClkDivide);
+#endif
+
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+void BcmPwrMngtSetDRAMSelfRefresh(unsigned int enable)
+{
+#if defined (CONFIG_BCM963381)
+   if (0xA0 == ((PERF->RevID & REV_ID_MASK) & 0xF0)) {
+      printk("DDR Self Refresh pwrsaving must not be enabled on 63381A0/A1\n");
+      enable = 0;
+   }
+#endif
+
+   printk("DDR Self Refresh pwrsaving is %s\n", enable?"enabled":"disabled");
+   self_refresh_enabled = enable;
+
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE) && defined(CONFIG_USB) && defined(USBH_OHCI_MEM_REQ_DIS)
+#if defined (CONFIG_BCM963268)
+   if (0xD0 == (PERF->RevID & REV_ID_MASK)) {
+#endif
+      if (enable) {
+         // Configure USB port to not access DDR if unused, to save power
+         USBH->USBSimControl |= USBH_OHCI_MEM_REQ_DIS;
+      } else {
+         USBH->USBSimControl &= ~USBH_OHCI_MEM_REQ_DIS;
+      }
+#if defined (CONFIG_BCM963268)
+   }
+#endif
+#endif
+}
+EXPORT_SYMBOL(BcmPwrMngtSetDRAMSelfRefresh);
+
+
+int BcmPwrMngtGetDRAMSelfRefresh(void)
+{
+   return (self_refresh_enabled);
+}
+EXPORT_SYMBOL(BcmPwrMngtGetDRAMSelfRefresh);
+
+#if defined(CONFIG_BCM_ADSL_MODULE) || defined(CONFIG_BCM_ADSL)
+PWRMNGT_DDR_SR_CTRL *pDdrSrCtrl = NULL;
+void BcmPwrMngtRegisterLmemAddr(PWRMNGT_DDR_SR_CTRL *pDdrSr)
+{
+    pDdrSrCtrl = pDdrSr;
+
+    // Initialize tp0 to busy status and tp1 to idle
+    // for cases where SMP is not compiled in.
+    if(NULL != pDdrSrCtrl) {
+        pDdrSrCtrl->tp0Busy = 1;
+        pDdrSrCtrl->tp1Busy = 0;
+    }
+}
+EXPORT_SYMBOL(BcmPwrMngtRegisterLmemAddr);
+#else
+PWRMNGT_DDR_SR_CTRL ddrSrCtl = {{.word=0}};
+PWRMNGT_DDR_SR_CTRL *pDdrSrCtrl = &ddrSrCtl;
+#endif
+#endif
+
+// Determine if cpu is busy by checking the number of times we entered the wait
+// state in the last millisecond. If we entered the wait state only once or
+// twice, then the processor is very likely not busy and we can afford to slow
+// it down while on wait state. Otherwise, we don't slow down the processor
+// while on wait state in order to avoid affecting the time it takes to
+// process interrupts
+void BcmPwrMngtCheckWaitCount (void)
+{
+    int cpu = smp_processor_id();
+
+    if (cpu == 0) {
+#if defined(CONFIG_SMP) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+        if (isVoiceIdle && TimerC0Snapshot1) {
+#else
+        if (isVoiceIdle) {
+#endif
+           if (wait_count0 > 0 && wait_count0 < 3) {
+              clock_divide_low_power0 = 1;
+           }
+           else {
+              clock_divide_low_power0 = 0;
+           }
+        }
+        else {
+           clock_divide_low_power0 = 0;
+        }
+        wait_count0 = 0;
+    }
+#if defined(CONFIG_SMP)
+    else {
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+        if (TimerC0Snapshot1) {
+#else
+        {
+#endif
+           if (wait_count1 > 0 && wait_count1 < 3) {
+              clock_divide_low_power1 = 1;
+           }
+           else {
+              clock_divide_low_power1 = 0;
+           }
+        }
+        wait_count1 = 0;
+    }
+#endif
+}
+
+// When entering wait state, consider reducing the MIPS clock speed.
+// Clock speed is reduced if it has been determined that the cpu was
+// mostly idle in the previous millisecond. Clock speed is reduced only
+// once per 1 millisecond interval.
+void BcmPwrMngtReduceCpuSpeed (void)
+{
+    int cpu = smp_processor_id();
+    unsigned long flags;
+
+    spin_lock_irqsave(&pwrmgnt_clk_irqlock, flags);
+
+    if (cpu == 0) {
+        // Slow down the clock when entering wait instruction
+        // only if the cpu is not busy
+        if (clock_divide_low_power0) {
+            if (wait_count0 < 2) {
+                clock_divide_active0 = 1;
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+                if (pDdrSrCtrl && self_refresh_enabled) {
+                    // Communicate TP status to PHY MIPS
+                    pDdrSrCtrl->tp0Busy = 0;
+                }
+#endif
+            }
+        }
+        wait_count0++;
+    }
+#if defined(CONFIG_SMP)
+    else {
+        // Slow down the clock when entering wait instruction
+        // only if the cpu is not busy
+        if (clock_divide_low_power1) {
+            if (wait_count1 < 2) {
+                clock_divide_active1 = 1;
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+                if (pDdrSrCtrl && self_refresh_enabled) {
+                    // Communicate TP status to PHY MIPS
+                    pDdrSrCtrl->tp1Busy = 0;
+                }
+#endif
+            }
+        }
+        wait_count1++;
+    }
+#endif
+
+#if defined(CONFIG_SMP)
+    if (clock_divide_active0 && clock_divide_active1) {
+#else
+    if (clock_divide_active0) {
+#endif
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+        if (clock_divide_enabled) {
+            __BcmPwrMngtSetASCR(RATIO_ONE_EIGHTH);
+		}
+#endif
+
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+        // Place DDR in self-refresh mode if enabled and other processors are OK with it
+        if (pDdrSrCtrl && !pDdrSrCtrl->word && self_refresh_enabled) {
+            // Below defines are CHIP Specific - refer to xxxx_map_part.h
+#if defined(DMODE_1_DRAMSLEEP)
+            DDR->DMODE_1 |= DMODE_1_DRAMSLEEP;
+#elif defined(MEMC_SELF_REFRESH)
+            MEMC->Control |= MEMC_SELF_REFRESH;
+#elif defined(CFG_DRAMSLEEP)
+            MEMC->DRAM_CFG |= CFG_DRAMSLEEP;
+#elif defined(SELF_REFRESH_CMD)
+            MEMC->SDR_CFG.DRAM_CMD[SELF_REFRESH_CMD] = 0;
+#else
+            #error "DDR Self refresh definition missing in xxxx_map_part.h for this chip"
+#endif
+        }
+#endif
+    }
+    spin_unlock_irqrestore(&pwrmgnt_clk_irqlock, flags);
+}
+
+// Full MIPS clock speed is resumed on the first interrupt following
+// the wait instruction. If the clock speed was reduced, the MIPS
+// C0 counter was also slowed down and its value needs to be readjusted.
+// The adjustments are done based on a reliable timer from the peripheral
+// block, timer2. The adjustments are such that C0 will never drift
+// but will see minor jitter.
+void BcmPwrMngtResumeFullSpeed (void)
+{
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+    unsigned int mult, rem, new;
+#endif
+    int cpu = smp_processor_id();
+    unsigned long flags;
+
+    spin_lock_irqsave(&pwrmgnt_clk_irqlock, flags);
+
+#if defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+    if (pDdrSrCtrl) {
+        // Communicate TP status to PHY MIPS
+        // Here I don't check if Self-Refresh is enabled because when it is,
+        // I want PHY MIPS to think the Host MIPS is always busy so it won't assert SR
+        if (cpu == 0) {
+            pDdrSrCtrl->tp0Busy = 1;
+        } else {
+            pDdrSrCtrl->tp1Busy = 1;
+        }
+    }
+#endif
+
+
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE)
+
+#if defined(CONFIG_SMP)
+    if (clock_divide_enabled && clock_divide_active0 && clock_divide_active1) {
+#else
+    if (clock_divide_enabled && clock_divide_active0) {
+#endif
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM96328)
+        if (originalMipsAscr) {
+            __BcmPwrMngtSetASCR(RATIO_ONE_ASYNC);
+        } else {
+            __BcmPwrMngtSetSCR();
+        }
+#else
+        // In newer MIPS core, there is no SYNC mode, simply use 1:1 async
+        __BcmPwrMngtSetASCR(RATIO_ONE_ASYNC);
+#endif
+    }
+#endif
+
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+    if (cpu == 0) {
+        // Check for TimerCnt2 rollover
+        newTimerCnt0 = TIMER->TimerCnt2 & 0x3fffffff;
+        if (newTimerCnt0 < prevTimerCnt0) {
+           TimerAdjust0 += C0adder;
+        }
+
+        // fix the C0 counter because it slowed down while on wait state
+        if (clock_divide_active0) {
+           mult = newTimerCnt0/C0divider;
+           rem  = newTimerCnt0%C0divider;
+           new  = mult*C0multiplier + ((rem*C0ratio)>>10);
+           write_c0_count(TimerAdjust0 + TimerC0Snapshot0 + new);
+           clock_divide_active0 = 0;
+        }
+        prevTimerCnt0 = newTimerCnt0;
+    }
+#if defined(CONFIG_SMP)
+    else {
+        // Check for TimerCnt2 rollover
+        newTimerCnt1 = TIMER->TimerCnt2 & 0x3fffffff;
+        if (newTimerCnt1 < prevTimerCnt1) {
+           TimerAdjust1 += C0adder;
+        }
+
+        // fix the C0 counter because it slowed down while on wait state
+        if (clock_divide_active1) {
+           mult = newTimerCnt1/C0divider;
+           rem  = newTimerCnt1%C0divider;
+           new  = mult*C0multiplier + ((rem*C0ratio)>>10);
+           write_c0_count(TimerAdjust1 + TimerC0Snapshot1 + new);
+           clock_divide_active1 = 0;
+        }
+        prevTimerCnt1 = newTimerCnt1;
+    }
+#endif
+#else
+    // On chips not requiring the PERIPH Timers workaround,
+    // only need to clear the active flags, no need to adjust timers
+    if (cpu == 0) {
+       clock_divide_active0 = 0;
+    }
+#if defined(CONFIG_SMP)
+    else {
+       clock_divide_active1 = 0;
+    }
+#endif
+#endif
+    spin_unlock_irqrestore(&pwrmgnt_clk_irqlock, flags);
+}
+
+
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+// These numbers can be precomputed. The values are chosen such that the
+// calculations will never overflow as long as the MIPS frequency never
+// exceeds 850 MHz (hence mips_hpt_frequency must not exceed 425 MHz)
+void BcmPwrMngtInitC0Speed (void)
+{
+    unsigned int mult, rem;
+    if (mips_hpt_frequency > 425000000) {
+       printk("\n\nWarning!!! CPU frequency exceeds limits to support" \
+          " Clock Divider feature for Power Management\n");
+    }
+    C0divider = 50000000/128;
+    C0multiplier = mips_hpt_frequency/128;
+    C0ratio = ((mips_hpt_frequency/1000000)<<10)/50;
+    mult = 0x40000000/C0divider;
+    rem = 0x40000000%C0divider;
+    // Value below may overflow from 32 bits but that's ok
+    C0adder = mult*C0multiplier + ((rem*C0ratio)>>10);
+    spin_lock_init(&pwrmgnt_clk_irqlock);
+}
+#endif //CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+
+#endif //defined(CONFIG_BCM_KF_MIPS_BCM963XX)
+
diff --git a/arch/mips/bcm963xx/setup.c b/arch/mips/bcm963xx/setup.c
new file mode 100644
index 0000000000000000000000000000000000000000..9bff05435f063c21e83175176ca66dcc04abab02
--- /dev/null
+++ b/arch/mips/bcm963xx/setup.c
@@ -0,0 +1,1839 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+
+/*
+<:copyright-BRCM:2002:GPL/GPL:standard
+
+   Copyright (c) 2002 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+/*
+ * Generic setup routines for Broadcom 963xx MIPS boards
+ */
+
+//#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/console.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <../drivers/staging/android/persistent_ram.h>
+
+#include <asm/addrspace.h>
+#include <asm/bcache.h>
+#include <asm/irq.h>
+#include <asm/time.h>
+#include <asm/reboot.h>
+//#include <asm/gdb-stub.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+#include <linux/platform_device.h>
+
+#include <boardparms.h>
+
+extern unsigned long getMemorySize(void);
+extern irqreturn_t brcm_timer_interrupt(int irq, void *dev_id);
+
+#include <bcm_map_part.h>
+#include <bcm_cpu.h>
+#include <bcm_intr.h>
+#include <board.h>
+#include <boardparms.h>
+
+#if defined(CONFIG_PCI)
+#include <linux/pci.h>
+#include <bcmpci.h>
+#endif
+
+#if IS_ENABLED(CONFIG_BCM_ADSL)
+#include "softdsl/AdslCoreDefs.h"
+#endif
+
+#if defined(CONFIG_BCM_ENDPOINT_MODULE)
+#include <dsp_mod_size.h>
+#endif
+
+
+#include "shared_utils.h"
+#include <pmc_usb.h>
+
+#if 1
+
+/***************************************************************************
+ * C++ New and delete operator functions
+ ***************************************************************************/
+// Minimal C++ runtime support for kernel code built from C++ sources: the
+// Itanium-ABI mangled names of the global new/delete operators are defined
+// by hand and backed by kmalloc()/kfree().
+
+/* void *operator new(unsigned int sz) */
+void *_Znwj(unsigned int sz)
+{
+    return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* void *operator new[](unsigned int sz)*/
+void *_Znaj(unsigned int sz)
+{
+    return( kmalloc(sz, GFP_KERNEL) );
+}
+
+/* placement new operator */
+/* void *operator new (unsigned int size, void *ptr) */
+// NOTE(review): the Itanium-ABI mangled name for placement new is normally
+// "_ZnwjPv" (leading underscore); confirm callers really link to "ZnwjPv".
+void *ZnwjPv(unsigned int size, void *ptr)
+{
+    return ptr;
+}
+
+/* void operator delete(void *m) */
+void _ZdlPv(void *m)
+{
+    kfree(m);
+}
+
+/* void operator delete[](void *m) */
+void _ZdaPv(void *m)
+{
+    kfree(m);
+}
+
+EXPORT_SYMBOL(_Znwj);
+EXPORT_SYMBOL(_Znaj);
+EXPORT_SYMBOL(ZnwjPv);
+EXPORT_SYMBOL(_ZdlPv);
+EXPORT_SYMBOL(_ZdaPv);
+
+#endif
+
+#if IS_ENABLED(CONFIG_BCM_ADSL)
+/***************************************************************************
+ * Function Name: kerSysGetDslPhyMemory
+ * Description  : return the start address of the reserved DSL SDRAM. The memory
+ *                is reserved in the arch dependent setup.c
+ * Returns      : physical address of the reserved DSL SDRAM
+ ***************************************************************************/
+void* kerSysGetDslPhyMemory(void)
+{
+    // The DSL image occupies the last ADSL_SDRAM_IMAGE_SIZE bytes of DRAM
+    // (matching the BOOT_MEM_RESERVED region added in plat_mem_setup()).
+    return (void*)((getMemorySize() - ADSL_SDRAM_IMAGE_SIZE));
+}
+
+EXPORT_SYMBOL(kerSysGetDslPhyMemory);
+
+#endif
+
+/* Report whether the kernel command line names a root filesystem
+ * (i.e. contains a "root=" token). */
+bool kerSysIsRootfsSet(void)
+{
+	return (strstr(arcs_cmdline, "root=") != NULL);
+}
+EXPORT_SYMBOL(kerSysIsRootfsSet);
+
+#if defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848)
+// Human-readable name for each boot_mem_map entry: map_name[i] labels
+// boot_mem_map.map[i] (filled in by BcmMemReserveInit(), looked up by
+// BcmMemReserveGetByName()).
+struct boot_mem_name 
+{
+	char name[32];
+} map_name[BOOT_MEM_MAP_MAX];
+
+// Return the size of the first BOOT_MEM_RAM entry in boot_mem_map (the
+// usable-RAM amount registered so far), or 0 when no RAM entry exists.
+static unsigned long get_usable_mem_size(void)
+{
+	int i;
+	
+	for(i = 0; i < boot_mem_map.nr_map; i++)
+	{
+		if( boot_mem_map.map[i].type == BOOT_MEM_RAM )
+			return boot_mem_map.map[i].size;
+	}
+	
+	return 0;
+}
+// Carve the chip-specific reserved regions (Runner TM, TM multicast, and
+// DHD offload buffers) out of the top of DDR and rebuild boot_mem_map.
+// Each strcpy() into map_name[boot_mem_map.nr_map] labels the entry that
+// the following add_memory_region() call appends at that same index —
+// the two must stay adjacent.  Sizes read from NVRAM are in MB; the value
+// 0xff means "NVRAM erased".
+static void BcmMemReserveInit(void)
+{
+	unsigned long 		memsize = getMemorySize();
+	unsigned long		size = 0;
+	NVRAM_DATA NvramData;
+
+	// If the bootloader pre-registered a map, keep only its user-reserved
+	// amount (total minus usable) and rebuild everything else below.
+	if (boot_mem_map.nr_map != 0)
+	{
+		unsigned long usr_usable		= get_usable_mem_size();
+		unsigned long usr_reserved	= memsize - usr_usable;
+		
+		if( usr_usable == 0 )
+		{
+			printk("Error: No usable memory detected\n");
+			BUG_ON(1);
+		}
+		
+		if( memsize < usr_usable )
+		{
+			printk("Error: Detected usable memory greater than physical memory\n");
+			BUG_ON(1);
+		}
+
+		boot_mem_map.nr_map = 0;
+		if(usr_reserved)
+		{
+			memsize -= usr_reserved;
+			strcpy(map_name[boot_mem_map.nr_map].name, "mem_user_reserved");
+			add_memory_region(memsize, usr_reserved, BOOT_MEM_RESERVED);
+		}
+	}
+
+	kerSysNvRamGet((char *)&NvramData, sizeof(NVRAM_DATA), 0);
+
+	// Traffic Manager (TM) region, sized from NVRAM.
+	size = NvramData.allocs.alloc_rdp.tmsize;
+	if (size == 0xff) {
+		/* Erased NVRAM should be alloc TM_ERASED_NVRAM_DEF_DDR_SIZE to be
+         * backward compatible */
+		size = TM_ERASED_NVRAM_DEF_DDR_SIZE;
+	}
+	size = size * 0x100000;
+
+	
+	if(size < TM_DEF_DDR_SIZE)
+		size = TM_DEF_DDR_SIZE;
+
+    /* TM_BASE_ADDR_STR must be 2MB aligned, reserve unaligned block to heap */
+    if ((memsize - size) % (2 * 1024 * 1024) != 0)
+    {
+        int tempsize = (memsize - size) % (2 * 1024 * 1024);
+
+        memsize -= tempsize;
+        strcpy(map_name[boot_mem_map.nr_map].name, "BOOT_MEM_RAM");	
+        add_memory_region(memsize, tempsize, BOOT_MEM_RAM);
+    }
+
+	memsize -= size;
+	strcpy(map_name[boot_mem_map.nr_map].name, TM_BASE_ADDR_STR);	
+	add_memory_region(memsize, size, BOOT_MEM_RESERVED);
+
+	// TM multicast region; erased NVRAM counts as zero here (the minimum
+	// TM_MC_DEF_DDR_SIZE is still enforced below).
+	size = NvramData.allocs.alloc_rdp.mcsize;
+	if (size == 0xff) {
+		/* Erased NVRAM should be treated as zero */
+		size = 0;
+	}
+	size = size * 0x100000;
+	
+	if(size < TM_MC_DEF_DDR_SIZE)
+		size = TM_MC_DEF_DDR_SIZE;
+		
+	memsize -= size;
+	strcpy(map_name[boot_mem_map.nr_map].name, TM_MC_BASE_ADDR_STR);	
+	add_memory_region(memsize, size, BOOT_MEM_RESERVED);
+
+//#ifdef CONFIG_DHD_RUNNER
+    /* Add memory for DHD offload */
+    /* XXX: Temporary solution, should be under dedicated #ifdef */
+
+    // Up to three DHD regions; 0xff (erased) or 0 means "not allocated".
+    size = NvramData.alloc_dhd.dhd_size[0];
+    if (size != 0xff && size != 0) {
+        size = size * 0x100000;
+        memsize -= size;
+        strcpy(map_name[boot_mem_map.nr_map].name, DHD_BASE_ADDR_STR); 
+        add_memory_region(memsize, size, BOOT_MEM_RESERVED);
+    }
+    size = NvramData.alloc_dhd.dhd_size[1];
+    if (size != 0xff && size != 0) {
+        size = size * 0x100000;
+        memsize -= size;
+        strcpy(map_name[boot_mem_map.nr_map].name, DHD_BASE_ADDR_STR_1); 
+        add_memory_region(memsize, size, BOOT_MEM_RESERVED);
+    }
+    size = NvramData.alloc_dhd.dhd_size[2];
+    if (size != 0xff && size != 0) {
+        size = size * 0x100000;
+        memsize -= size;
+        strcpy(map_name[boot_mem_map.nr_map].name, DHD_BASE_ADDR_STR_2); 
+        add_memory_region(memsize, size, BOOT_MEM_RESERVED);
+    }
+
+//#endif
+
+	/* add the linux usable region */
+	add_memory_region(0, memsize, BOOT_MEM_RAM);
+}
+
+// Look up a reserved region by the name recorded in map_name[] and return
+// its uncached (KSEG1, | 0xA0000000) address and size through addr/size.
+// Returns 0 on success, -1 when no entry matches (addr/size zeroed).
+int BcmMemReserveGetByName(char *name, void **addr, uint32_t *size)
+{
+	int i;
+	*addr = NULL;
+	*size = 0;
+
+	/* the data in index i of boot_mem_map refers to the name in map_name with the same index i */
+	for (i=0; i<boot_mem_map.nr_map; i++)
+	{
+		if ( strcmp(name, map_name[i].name) == 0 )
+		{
+			*addr = (void*)(((unsigned int)boot_mem_map.map[i].addr) | 0xA0000000);
+			*size = boot_mem_map.map[i].size;
+			return 0;
+		}
+	}
+	return -1;
+}
+EXPORT_SYMBOL(BcmMemReserveGetByName);
+#endif
+
+#if defined(CONFIG_ANDROID_RAM_CONSOLE)
+// Persistent RAM console: a platform device plus a persistent_ram area
+// whose start address is filled in later by plat_mem_setup().
+static struct platform_device ramconsole_device = {
+        .name           = "ram_console",
+        .id             = -1,
+};
+
+// Register the ram_console platform device once core infrastructure is up.
+static int __init ram_console_init(void)
+{
+	return platform_device_register(&ramconsole_device);
+}
+postcore_initcall(ram_console_init);
+
+// Single descriptor: the whole area is one "ram_console" log buffer.
+static struct persistent_ram_descriptor inteno_prd[] __initdata = {
+        {
+                .name = "ram_console",
+                .size = 1 << CONFIG_LOG_BUF_SHIFT,
+        }
+};
+
+// .start is assigned in plat_mem_setup() once the reserved area is known.
+static struct persistent_ram inteno_pr __initdata = {
+        .descs = inteno_prd,
+        .num_descs = ARRAY_SIZE(inteno_prd),
+        .size = 1 << CONFIG_LOG_BUF_SHIFT,
+};
+
+static int __init inteno_pram_init_early(void)
+{
+        persistent_ram_early_init(&inteno_pr);
+        return 0;
+}
+/* Not sure when this needs to be called but in the android driver it was in early_init
+section so copy that. */
+early_initcall(inteno_pram_init_early);
+
+#endif
+
+// Register physical memory with the MIPS core.  Depending on configuration
+// this reserves the DSL PHY image (top of DRAM) and the persistent RAM
+// console buffer, then registers the remainder as usable RAM and enables
+// the RAC control bits.
+void __init plat_mem_setup(void)
+{
+        unsigned int reserved = 0;
+
+#if defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848)
+	BcmMemReserveInit();
+#elif !IS_ENABLED(CONFIG_BCM_ADSL)
+    add_memory_region(0, (getMemorySize()), BOOT_MEM_RAM);
+#else
+    /* reserve DSL memory */
+    reserved += ADSL_SDRAM_IMAGE_SIZE;
+    printk("DSL SDRAM reserved: 0x%x at 0x%lx\n",
+           ADSL_SDRAM_IMAGE_SIZE,
+           getMemorySize() - reserved);
+    add_memory_region(getMemorySize() - reserved,
+                      ADSL_SDRAM_IMAGE_SIZE, BOOT_MEM_RESERVED);
+#endif
+
+#if defined(CONFIG_ANDROID_RAM_CONSOLE)
+    /* persistent console */
+    reserved += 1 << CONFIG_LOG_BUF_SHIFT;
+    printk("Persistent log: 0x%x at 0x%lx\n",
+           1 << CONFIG_LOG_BUF_SHIFT,
+           getMemorySize() - reserved);
+
+    /* we can not call the persistent driver just yet, so we need to record
+       the mem area for later */
+    inteno_pr.start = getMemorySize() - reserved;
+    add_memory_region(getMemorySize() - reserved,
+                      1 << CONFIG_LOG_BUF_SHIFT , BOOT_MEM_RESERVED);
+#endif
+
+    /* whats left as usable dram */
+    // NOTE(review): on the BCM96838/96848 path BcmMemReserveInit() already
+    // registered the usable-RAM region, and on the !BCM_ADSL path the full
+    // RAM region was added above, so this call appears to add an overlapping
+    // BOOT_MEM_RAM entry in those configurations -- confirm intent.
+    add_memory_region(0, getMemorySize() - reserved, BOOT_MEM_RAM);
+
+    /* and this is ???? */
+    // Sets the RAC_D bit in the MIPS core's RAC control register(s);
+    // presumably enables the read-ahead cache for data -- confirm against
+    // the MIPS_RAC_CR0/CR1 definitions in bcm_cpu.h.
+    {
+        volatile unsigned long *cr;
+        uint32 mipsBaseAddr = MIPS_BASE;
+
+        cr = (void *)(mipsBaseAddr + MIPS_RAC_CR0);
+        *cr = *cr | RAC_D;
+
+#if defined(MIPS_RAC_CR1)
+        cr = (void *)(mipsBaseAddr + MIPS_RAC_CR1);
+        *cr = *cr | RAC_D;
+#endif
+    }
+}
+
+
+extern UINT32 __init calculateCpuSpeed(void);
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+extern void BcmPwrMngtInitC0Speed (void);
+#endif
+
+
+// Set the CP0 timer frequency (half the CPU clock on these chips) and,
+// when the clock-divider timer workaround is not configured, enable the
+// CP0 counter/compare interrupt line (IE_IRQ5).
+void __init plat_time_init(void)
+{
+    mips_hpt_frequency = calculateCpuSpeed() / 2;
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+    BcmPwrMngtInitC0Speed();
+#else
+    // Enable cp0 counter/compare interrupt when
+    // not using workaround for clock divide
+    write_c0_status(IE_IRQ5 | read_c0_status());
+#endif
+}
+
+
+// Machine restart hook: soft-reset the SoC via the board driver.
+// The command string is ignored.
+static void brcm_machine_restart(char *command)
+{
+    kerSysMipsSoftReset();
+}
+
+extern void stop_other_cpu(void);  // in arch/mips/kernel/smp.c
+
+// Machine halt hook: park all CPUs in a busy loop with interrupts off
+// (there is no power-off support).  Never returns.
+static void brcm_machine_halt(void)
+{
+    /*
+     * we don't support power off yet.  This halt will cause both CPU's to
+     * spin in a while(1) loop with interrupts disabled.  (Used for gathering
+     * wlan debug dump via JTAG)
+     */
+#if defined(CONFIG_SMP)
+    stop_other_cpu();
+#endif
+    printk("System halted\n");
+    local_irq_disable();
+    while (1);
+}
+
+#if defined(CONFIG_BCM96318) /* For 6318 for now but can be used for other chip if need */
+/*  *********************************************************************
+    *  setPinMuxGpio()
+    *
+    *  Set pin mux to GPIO function
+    *
+    *  Input parameters:
+    *      unsigned short gpio pin number
+    *
+    *  Return value:
+    *      nothing
+    ********************************************************************* */
+static void setPinMuxGpio(unsigned short gpio)
+{
+	uint32 reg, shift;
+	volatile uint32* pinMuxSel;
+
+        /* on 6318 device, gpio 13 to 41 pin is not default to gpio function. must change
+        * the pinmux to gpio function
+        */
+
+        if( (gpio&BP_GPIO_NUM_MASK) == BP_GPIO_NONE )
+                return;
+
+	/* not for any serial LED gpio as they don't take any gpio pins */
+	if( (gpio&BP_GPIO_SERIAL) == BP_GPIO_SERIAL )
+		return;
+
+	gpio = gpio&BP_GPIO_NUM_MASK;
+	if( gpio >= 13 && gpio <= 41 )
+	{
+        /* set the pad control to gpio */
+		/* PadControl packs 8 pins per register, 4 bits per pin. */
+		reg = GPIO->PadControl[gpio>>3];
+		shift = (gpio&0x7)<<2;
+		reg &= ~(PAD_CTRL_GPIO0_MASK<<shift);
+		reg |= (PAD_CTRL_GPIO<<shift);
+		GPIO->PadControl[gpio>>3] = reg;
+
+		/* set pin mux to gpio */
+		/* PinMuxSel packs 16 pins per register, 2 bits per pin. */
+		pinMuxSel = &GPIO->PinMuxSel0;
+		pinMuxSel += gpio>>4;
+		reg = *pinMuxSel;
+		shift = (gpio&0xf)<<1;
+		reg &= ~(PINMUX_SEL_GPIO0_MASK<<shift);
+		reg |= (PINMUX_SEL_GPIO<<shift);
+		*pinMuxSel = reg;
+	}
+
+	return;
+}
+
+/*  *********************************************************************
+    *  initGpioPinMux()
+    *
+    *  Initialize the gpio pin mux register setting. On some chip like 6318, Certain
+    *  gpio pin are muxed with other function and  are not default to gpio. so init
+    *  code needs to set the mux to gpio if they are used by led or gpio boardparm
+    *
+    *
+    *  Input parameters: none
+    *
+    *  Return value:
+    *      nothing
+    ********************************************************************* */
+static void initGpioPinMux(void)
+{
+    int i = 0, token = 0, rc;
+    unsigned short gpio;
+
+    /* walk through all the led bp */
+    /* Iteration protocol as used here: BP_SUCCESS yields one GPIO for board
+     * parameter index i (token is advanced by the Bp call itself),
+     * BP_MAX_ITEM_EXCEEDED ends the walk, and any other return moves on to
+     * the next index with token reset. */
+    for(;;)
+    {
+        rc = BpGetLedGpio(i, &token, &gpio);
+       	if( rc == BP_MAX_ITEM_EXCEEDED )
+       	    break;
+       	else if( rc == BP_SUCCESS )
+       	    setPinMuxGpio(gpio);
+        else
+	{
+	    token = 0;
+       	    i++;
+	}
+    }
+
+    /* walk through all the gpio bp */
+    i = 0;
+    token = 0;
+    for(;;)
+    {
+        rc = BpGetGpioGpio(i, &token, &gpio);
+       	if( rc == BP_MAX_ITEM_EXCEEDED )
+       	    break;
+       	else if( rc == BP_SUCCESS )
+       	    setPinMuxGpio(gpio);
+        else
+	{
+	    token = 0;
+       	    i++;
+	}
+    }
+
+    return;
+}
+#endif
+
+#if   defined(CONFIG_BCM96362)
+
+// One-time BCM6362 board bring-up driven by the boardparms (Bp*) database:
+// LED controller routing and HW-LED enables, GPIO overlays, USB host block
+// power, FAP/WLAN clock+reset sequencing, and DDR pad power savings.
+static int __init bcm6362_hw_init(void)
+{
+    unsigned long GPIOOverlays, DeviceOptions = 0;
+    unsigned short gpio;
+
+    if( BpGetDeviceOptions(&DeviceOptions) == BP_SUCCESS ) {
+        if(DeviceOptions&BP_DEVICE_OPTION_DISABLE_LED_INVERSION)
+            MISC->miscLed_inv = 0;
+    }
+    
+    /* Set LED blink rate for activity LEDs to 80mS */
+    LED->ledInit &= ~LED_FAST_INTV_MASK;
+    LED->ledInit |= (LED_INTERVAL_20MS * 4) << LED_FAST_INTV_SHIFT;
+
+    if( BpGetGPIOverlays(&GPIOOverlays) == BP_SUCCESS ) {
+        /* Start with all HW LEDs disabled */
+        LED->ledHWDis |= 0xFFFFFF;
+        if (GPIOOverlays & BP_OVERLAY_SERIAL_LEDS) {
+            GPIO->GPIOMode |= (GPIO_MODE_SERIAL_LED_CLK | GPIO_MODE_SERIAL_LED_DATA);
+            LED->ledInit |= LED_SERIAL_LED_EN;
+        }
+
+        if (GPIOOverlays & BP_OVERLAY_SPI_SSB2_EXT_CS) {
+            /* Enable Overlay for SPI SS2 Pin */
+             GPIO->GPIOMode |= GPIO_MODE_LS_SPIM_SSB2;
+        }
+
+        if (GPIOOverlays & BP_OVERLAY_SPI_SSB3_EXT_CS) {
+            /* Enable Overlay for SPI SS3 Pin */
+             GPIO->GPIOMode |= GPIO_MODE_LS_SPIM_SSB3;
+        }
+
+        /* Map HW LEDs to LED controller inputs and enable LED controller to drive GPIO */
+        if (GPIOOverlays & BP_OVERLAY_USB_LED) {
+            LED->ledLinkActSelLow |= ((1 << LED_USB_ACT) << LED_0_ACT_SHIFT);
+            LED->ledLinkActSelLow |= ((1 << LED_USB_ACT) << LED_0_LINK_SHIFT);
+            GPIO->LEDCtrl |= (1 << LED_USB_ACT);
+            LED->ledHWDis &= ~(1 << LED_USB_ACT);
+        }
+        if ( BpGetWanDataLedGpio(&gpio) == BP_SUCCESS ) {
+            if ((gpio & BP_GPIO_NUM_MASK) == LED_INET_ACT) {
+                /* WAN Data LED must be LED 1 */
+                LED->ledLinkActSelLow |= ((1 << LED_INET_ACT) << LED_1_ACT_SHIFT);
+                GPIO->LEDCtrl |= GPIO_NUM_TO_MASK(gpio);
+            }
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_0) {
+            LED->ledLinkActSelHigh |= ((1 << (LED_ENET0 - 4)) << LED_4_LINK_SHIFT);
+            GPIO->LEDCtrl |= (1 << LED_ENET0);
+            LED->ledHWDis &= ~(1 << LED_ENET0);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_1) {
+            LED->ledLinkActSelHigh |= ((1 << (LED_ENET1 - 4)) << LED_5_LINK_SHIFT);
+            GPIO->LEDCtrl |= (1 << LED_ENET1);
+            LED->ledHWDis &= ~(1 << LED_ENET1);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_2) {
+            LED->ledLinkActSelHigh |= ((1 << (LED_ENET2 - 4)) << LED_6_LINK_SHIFT);
+            GPIO->LEDCtrl |= (1 << LED_ENET2);
+            LED->ledHWDis &= ~(1 << LED_ENET2);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_3) {
+            LED->ledLinkActSelHigh |= ((1 << (LED_ENET3 - 4)) << LED_7_LINK_SHIFT);
+            GPIO->LEDCtrl |= (1 << LED_ENET3);
+            LED->ledHWDis &= ~(1 << LED_ENET3);
+        }
+    }
+
+    // Power the USB host block up (or fully down when USB is not built in).
+#if defined(CONFIG_USB)
+    PERF->blkEnables |= USBH_CLK_EN;
+    mdelay(100);
+    USBH->SwapControl = EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP;
+    USBH->Setup |= USBH_IOC;
+#else
+    MISC->miscIddqCtrl |= MISC_IDDQ_CTRL_USBH;
+    PERF->blkEnables &= ~USBH_CLK_EN;
+#endif
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+#else
+    PERF->blkEnables &= ~FAP_CLK_EN;
+#endif
+
+#if defined(CONFIG_PCI)
+    /* Enable WOC */
+    // Clock+reset sequence for the on-chip WLAN block; the mdelay()s between
+    // steps give the shim/macro time to settle.
+    PERF->blkEnables |=WLAN_OCP_CLK_EN;
+    mdelay(10);
+    PERF->softResetB &= ~(SOFT_RST_WLAN_SHIM_UBUS | SOFT_RST_WLAN_SHIM);
+    mdelay(1);
+    PERF->softResetB |= (SOFT_RST_WLAN_SHIM_UBUS | SOFT_RST_WLAN_SHIM);
+    mdelay(1);
+
+    WLAN_SHIM->ShimMisc = (WLAN_SHIM_FORCE_CLOCKS_ON|WLAN_SHIM_MACRO_SOFT_RESET);
+    mdelay(1);
+    WLAN_SHIM->MacControl = (SICF_FGC|SICF_CLOCK_EN);
+    WLAN_SHIM->ShimMisc = WLAN_SHIM_FORCE_CLOCKS_ON;
+    WLAN_SHIM->ShimMisc = 0;
+    WLAN_SHIM->MacControl = SICF_CLOCK_EN;
+
+#endif
+
+#if defined(CONFIG_BCM_ETH_PWRSAVE)
+    // Turn off pll_use_lock to allow watchdog timer to reset the chip when
+    // ephy_pwr_down_dll is set in ethernet sleep mode
+    MISC->miscStrapBus &= ~MISC_STRAP_BUS_PLL_USE_LOCK;
+    MISC->miscStrapOverride |= 1;
+#endif
+
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+    /* Enable power savings from DDR pads on this chip when DDR goes in Self-Refresh mode */
+    DDR->PhyControl.IDLE_PAD_CONTROL = 0x00000172;
+    DDR->PhyByteLane0Control.IDLE_PAD_CONTROL = 0x000fffff;
+    DDR->PhyByteLane1Control.IDLE_PAD_CONTROL = 0x000fffff;
+#endif
+
+    // Release the femtocell reset line if the board defines one.
+    if ( BpGetFemtoResetGpio(&gpio) == BP_SUCCESS ) {
+        kerSysSetGpioState(gpio, kGpioActive);
+    }
+
+    return 0;
+}
+#define bcm63xx_specific_hw_init() bcm6362_hw_init()
+
+#elif defined(CONFIG_BCM96328)
+
+// One-time BCM6328 board bring-up driven by the boardparms (Bp*) database:
+// LED pin-muxing and HW-LED enables, SPI/PCIe overlays, USB host block
+// power, optional ADSL block power-down, and DDR pad power savings.
+static int __init bcm6328_hw_init(void)
+{
+    unsigned long GPIOOverlays, DeviceOptions = 0;
+    unsigned short gpio;
+    
+    if( BpGetDeviceOptions(&DeviceOptions) == BP_SUCCESS ) {
+        if(DeviceOptions&BP_DEVICE_OPTION_DISABLE_LED_INVERSION)
+            MISC->miscLedXorReg = 0;
+    }
+
+    /* Set LED blink rate for activity LEDs to 80mS */
+    LED->ledInit &= ~LED_FAST_INTV_MASK;
+    LED->ledInit |= (LED_INTERVAL_20MS * 4) << LED_FAST_INTV_SHIFT;
+
+    if( BpGetGPIOverlays(&GPIOOverlays) == BP_SUCCESS ) {
+        /* Start with all HW LEDs disabled */
+        LED->ledHWDis |= 0xFFFFFF;
+        if (GPIOOverlays & BP_OVERLAY_SERIAL_LEDS) {
+            GPIO->PinMuxSel |= PINMUX_SERIAL_LED_DATA;
+            GPIO->PinMuxSel |= PINMUX_SERIAL_LED_CLK;
+            LED->ledInit |= LED_SERIAL_LED_EN;
+        }
+        if ( BpGetWanDataLedGpio(&gpio) == BP_SUCCESS ) {
+            if ((gpio & BP_GPIO_NUM_MASK) < 4) {
+                /* WAN Data LED must be LED 0-3 */
+                LED->ledLinkActSelLow |= ((1 << LED_INET_ACT) << ((gpio & BP_GPIO_NUM_MASK) * 4));
+                LED->ledLinkActSelLow |= ((1 << LED_INET_ACT) << (((gpio & BP_GPIO_NUM_MASK) * 4) + LED_0_LINK_SHIFT));
+                GPIO->GPIOMode |= GPIO_NUM_TO_MASK(gpio);
+
+                /* The following two checks are for legacy schematics */
+                if (gpio & BP_GPIO_SERIAL) {
+                    /* If WAN Data LED is serial, then configure serial controller to shift it out */
+                    LED->ledSerialMuxSelect |= GPIO_NUM_TO_MASK(gpio);
+                }
+                if ((gpio & BP_GPIO_NUM_MASK) == 0) {
+                    /* In case INET_ACT LED is connected to GPIO_11 */
+                    GPIO->PinMuxSel |= PINMUX_INET_ACT_LED;
+                }
+            }
+        }
+        /* Enable LED controller to drive GPIO */
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_0) {
+            GPIO->PinMuxSel |= PINMUX_EPHY0_ACT_LED;
+            GPIO->GPIOMode |= (1 << EPHY0_SPD_LED);
+            LED->ledHWDis &= ~(1 << EPHY0_SPD_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_1) {
+            GPIO->PinMuxSel |= PINMUX_EPHY1_ACT_LED;
+            GPIO->GPIOMode |= (1 << EPHY1_SPD_LED);
+            LED->ledHWDis &= ~(1 << EPHY1_SPD_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_2) {
+            GPIO->PinMuxSel |= PINMUX_EPHY2_ACT_LED;
+            GPIO->GPIOMode |= (1 << EPHY2_SPD_LED);
+            LED->ledHWDis &= ~(1 << EPHY2_SPD_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_3) {
+            GPIO->PinMuxSel |= PINMUX_EPHY3_ACT_LED;
+            GPIO->GPIOMode |= (1 << EPHY3_SPD_LED);
+            LED->ledHWDis &= ~(1 << EPHY3_SPD_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_SPI_EXT_CS) {
+            GPIO->PinMuxSelOther &= ~PINMUX_SEL_SPI2_MASK;
+            GPIO->PinMuxSelOther |= PINMUX_SEL_SPI2;
+        }
+
+        /* Enable PCIe CLKREQ signal */
+        if (GPIOOverlays & BP_OVERLAY_PCIE_CLKREQ) {
+            GPIO->PinMuxSel |= PINMUX_PCIE_CLKREQ;
+        }
+    }
+
+    // Power the USB host block up (or fully down when USB is not built in).
+#if defined(CONFIG_USB)
+    PERF->blkEnables |= USBH_CLK_EN;
+    mdelay(100);
+    USBH->SwapControl = EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP;
+    USBH->Setup |= USBH_IOC;
+#else
+    MISC->miscIddqCtrl |= MISC_IDDQ_CTRL_USBH;
+    PERF->blkEnables &= ~USBH_CLK_EN;
+#endif
+
+    // Without ADSL support, power down the whole xDSL/SAR complex.
+#if !(defined(CONFIG_BCM_ADSL) || defined(CONFIG_BCM_ADSL_MODULE))
+    MISC->miscIddqCtrl |= MISC_IDDQ_CTRL_SAR | MISC_IDDQ_CTRL_ADSL2_AFE | MISC_IDDQ_CTRL_ADSL2_PHY | MISC_IDDQ_CTRL_ADSL2_MIPS;
+    PERF->blkEnables &= ~(SAR_CLK_EN | ADSL_CLK_EN | ADSL_AFE_EN | ADSL_QPROC_EN | PHYMIPS_CLK_EN);
+    MISC->miscPllCtrlSysPll2 |= (1<<6); // Channel 5
+    MISC->miscPllCtrlDdrPll |= 1; // Channel 5
+#endif
+
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+    /* Enable power savings from DDR pads on this chip when DDR goes in Self-Refresh mode */
+    DDR->PhyControl.IDLE_PAD_CONTROL = 0x00000172;
+    DDR->PhyByteLane0Control.IDLE_PAD_CONTROL = 0x000fffff;
+    DDR->PhyByteLane1Control.IDLE_PAD_CONTROL = 0x000fffff;
+#endif
+    return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm6328_hw_init()
+
+#elif defined(CONFIG_BCM963268)
+
+/* Map a boardparms AFE/LD GPIO (active-level bits stripped) to the matching
+ * VDSL PHY override bit for GPIOBaseMode; returns 0 when the GPIO has no
+ * associated override. */
+int map_63268_vdsl_override(int val) {
+    int pin = val & ~BP_ACTIVE_MASK;
+
+    if (pin == (BP_GPIO_10_AH & BP_GPIO_NUM_MASK) ||
+        pin == (BP_GPIO_11_AH & BP_GPIO_NUM_MASK))
+        return GPIO_BASE_VDSL_PHY_OVERRIDE_0;
+    if (pin == (BP_GPIO_12_AH & BP_GPIO_NUM_MASK) ||
+        pin == (BP_GPIO_13_AH & BP_GPIO_NUM_MASK))
+        return GPIO_BASE_VDSL_PHY_OVERRIDE_1;
+    if (pin == (BP_GPIO_24_AH & BP_GPIO_NUM_MASK) ||
+        pin == (BP_GPIO_25_AH & BP_GPIO_NUM_MASK))
+        return GPIO_BASE_VDSL_PHY_OVERRIDE_2;
+    if (pin == (BP_GPIO_26_AH & BP_GPIO_NUM_MASK) ||
+        pin == (BP_GPIO_27_AH & BP_GPIO_NUM_MASK))
+        return GPIO_BASE_VDSL_PHY_OVERRIDE_3;
+    return 0;
+}
+
+/* Map a boardparms AFE/LD GPIO (active-level bits stripped) to the matching
+ * DSL GPIO override bit for miscMisc_ctrl; returns 0 when the GPIO has no
+ * associated override. */
+int map_63268_misc_misc_override(int val) {
+    int pin = val & ~BP_ACTIVE_MASK;
+
+    if (pin == (BP_GPIO_8_AH & BP_GPIO_NUM_MASK))
+        return MISC_MISC_DSL_GPIO_8_OVERRIDE;
+    if (pin == (BP_GPIO_9_AH & BP_GPIO_NUM_MASK))
+        return MISC_MISC_DSL_GPIO_9_OVERRIDE;
+    return 0;
+}
+
+static int __init bcm63268_hw_init(void)
+{
+    unsigned long GPIOOverlays, DeviceOptions = 0;
+    unsigned short gpio;
+    const ETHERNET_MAC_INFO *EnetInfo;
+    unsigned char vreg1p8;
+#if defined(CONFIG_BCM_1V2REG_AUTO_SHUTDOWN)
+    uint32 startCount, endCount;
+    int diff; 
+#endif
+    
+    /* Turn off test bus */
+    PERF->blkEnables &= ~TBUS_CLK_EN;
+
+
+#if !(defined(CONFIG_BCM_XTMRT) || defined(CONFIG_BCM_XTMRT_MODULE))
+    // Disable SAR if unused
+    PERF->blkEnables &= ~( SAR_CLK_EN );
+    MISC->miscIddqCtrl |= MISC_IDDQ_CTRL_SAR;
+#endif
+
+#if defined(CONFIG_BCM_XTMRT) || defined(CONFIG_BCM_XTMRT_MODULE)
+    // Phy should always be powered down if XTM is deselected
+    if (kerSysGetDslPhyEnable()) {
+#else
+    if (0) {
+#endif
+        MISC->miscIddqCtrl &= ~(MISC_IDDQ_CTRL_VDSL_PHY 
+				| MISC_IDDQ_CTRL_VDSL_MIPS
+				| MISC_IDDQ_CTRL_SAR);
+    } 
+    else 
+    {
+        /* If there is no phy support, shut off power */
+        PERF->blkEnables &= ~( PHYMIPS_CLK_EN
+				| VDSL_CLK_EN 
+				| VDSL_AFE_EN | VDSL_QPROC_EN );
+        MISC->miscIddqCtrl |= (MISC_IDDQ_CTRL_VDSL_PHY 
+				| MISC_IDDQ_CTRL_VDSL_MIPS);
+    }
+
+    if( BpGetDeviceOptions(&DeviceOptions) == BP_SUCCESS ) {
+        if(DeviceOptions&BP_DEVICE_OPTION_DISABLE_LED_INVERSION)
+            MISC->miscLed_inv = 0;
+    }
+
+    /* Set LED blink rate for activity LEDs to 80mS */
+    LED->ledInit &= ~LED_FAST_INTV_MASK;
+    LED->ledInit |= (LED_INTERVAL_20MS * 4) << LED_FAST_INTV_SHIFT;
+
+    /* Start with all HW LEDs disabled */
+    LED->ledHWDis |= 0xFFFFFF;
+
+
+    EnetInfo = BpGetEthernetMacInfoArrayPtr();
+
+    /* Enable HW to drive LEDs for Ethernet ports in use */
+    if (EnetInfo[0].sw.port_map & (1 << 0)) {
+        LED->ledHWDis &= ~(1 << LED_EPHY0_ACT);
+        LED->ledHWDis &= ~(1 << LED_EPHY0_SPD);
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 1)) {
+        LED->ledHWDis &= ~(1 << LED_EPHY1_ACT);
+        LED->ledHWDis &= ~(1 << LED_EPHY1_SPD);
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 2)) {
+        LED->ledHWDis &= ~(1 << LED_EPHY2_ACT);
+        LED->ledHWDis &= ~(1 << LED_EPHY2_SPD);
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 3)) {
+        LED->ledHWDis &= ~(1 << LED_GPHY0_ACT);
+        LED->ledHWDis &= ~(1 << LED_GPHY0_SPD0);
+        LED->ledHWDis &= ~(1 << LED_GPHY0_SPD1);
+        LED->ledLinkActSelLow |= ((1 << LED_GPHY0_SPD0) << LED_0_LINK_SHIFT);
+        LED->ledLinkActSelLow |= ((1 << LED_GPHY0_SPD1) << LED_1_LINK_SHIFT);
+        GPIO->RoboSWLEDControl |= LED_BICOLOR_SPD;
+    }
+
+    /* UART2 - SDIN and SDOUT are separate for flexibility */
+    {
+        unsigned short Uart2Sdin;
+        unsigned short Uart2Sdout;
+        if (BpGetUart2SdinGpio(&Uart2Sdin) == BP_SUCCESS) {
+            switch (Uart2Sdin & BP_GPIO_NUM_MASK) {
+            case (BP_GPIO_12_AH & BP_GPIO_NUM_MASK):
+                GPIO->GPIOMode |= (GPIO_MODE_UART2_SDIN);
+                break;
+            case (BP_GPIO_26_AH & BP_GPIO_NUM_MASK):
+                GPIO->GPIOMode |= (GPIO_MODE_UART2_SDIN2);
+                break;
+            }
+        }
+        if (BpGetUart2SdoutGpio(&Uart2Sdout) == BP_SUCCESS) {
+            switch (Uart2Sdout & BP_GPIO_NUM_MASK) {
+            case (BP_GPIO_13_AH & BP_GPIO_NUM_MASK):
+                GPIO->GPIOMode |= (GPIO_MODE_UART2_SDOUT);
+                break;
+            case (BP_GPIO_27_AH & BP_GPIO_NUM_MASK):
+                GPIO->GPIOMode |= (GPIO_MODE_UART2_SDOUT2);
+                break;
+            }
+        }
+    }
+
+
+    if( BpGetGPIOverlays(&GPIOOverlays) == BP_SUCCESS ) {
+        if (GPIOOverlays & BP_OVERLAY_SERIAL_LEDS) {
+            GPIO->GPIOMode |= (GPIO_MODE_SERIAL_LED_CLK | GPIO_MODE_SERIAL_LED_DATA);
+            LED->ledInit |= LED_SERIAL_LED_EN;
+        }
+        if ( BpGetWanDataLedGpio(&gpio) == BP_SUCCESS ) {
+            if ((gpio & BP_GPIO_NUM_MASK) == LED_INET_ACT) {
+                /* WAN Data LED must be LED 8 */
+                if (!(gpio & BP_GPIO_SERIAL)) {
+                    /* If LED is not serial, enable corresponding GPIO */
+                    GPIO->LEDCtrl |= GPIO_NUM_TO_MASK(gpio);
+                }
+            }
+        }
+        /* Enable LED controller to drive GPIO when LEDs are connected to GPIO pins */
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_0) {
+            GPIO->LEDCtrl |= (1 << LED_EPHY0_ACT);
+            GPIO->LEDCtrl |= (1 << LED_EPHY0_SPD);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_1) {
+            GPIO->LEDCtrl |= (1 << LED_EPHY1_ACT);
+            GPIO->LEDCtrl |= (1 << LED_EPHY1_SPD);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_2) {
+            GPIO->LEDCtrl |= (1 << LED_EPHY2_ACT);
+            GPIO->LEDCtrl |= (1 << LED_EPHY2_SPD);
+        }
+        if (GPIOOverlays & BP_OVERLAY_GPHY_LED_0) {
+            GPIO->LEDCtrl |= (1 << LED_GPHY0_ACT);
+            GPIO->LEDCtrl |= (1 << LED_GPHY0_SPD0);
+            GPIO->LEDCtrl |= (1 << LED_GPHY0_SPD1);
+        }
+
+        /* DG301 workaround */
+        if (GPIOOverlays & BP_OVERLAY_DG301) {
+            GPIO->LEDCtrl |= (1 << LED_GPHY0_ACT);
+        }
+
+        /* VG50 workaround */
+        if (GPIOOverlays & BP_OVERLAY_VG50) {
+            LED->ledHWDis |= (1 << LED_EPHY0_SPD);
+            LED->ledHWDis |= (1 << LED_EPHY1_SPD);
+            LED->ledHWDis |= (1 << LED_EPHY2_SPD);
+        }
+
+        if (GPIOOverlays & BP_OVERLAY_PHY) {
+            unsigned short IntLdMode = 0xffff;
+            unsigned short IntLdPwr = 0xffff;
+            unsigned short ExtLdMode = 0xffff;
+            unsigned short ExtLdPwr = 0xffff;
+            unsigned short ExtLdClk = 0xffff;
+            unsigned short ExtLdData = 0xffff;
+            unsigned long ul;
+            int ExplicitLdControl ;
+            ExplicitLdControl = (BpGetIntAFELDModeGpio(&IntLdMode) == BP_SUCCESS) ? 1 : 0;
+            ExplicitLdControl = ExplicitLdControl + ((BpGetIntAFELDPwrGpio(&IntLdPwr) == BP_SUCCESS) ? 1 : 0);
+            ExplicitLdControl = ExplicitLdControl + ((BpGetExtAFELDModeGpio(&ExtLdMode) == BP_SUCCESS) ? 1 : 0);
+            ExplicitLdControl = ExplicitLdControl + ((BpGetExtAFELDPwrGpio(&ExtLdPwr) == BP_SUCCESS) ? 1 : 0);
+            ExplicitLdControl = ExplicitLdControl + ((BpGetExtAFELDClkGpio(&ExtLdClk) == BP_SUCCESS) ? 1 : 0);
+            ExplicitLdControl = ExplicitLdControl + ((BpGetExtAFELDDataGpio(&ExtLdData) == BP_SUCCESS) ? 1 : 0);
+            if (ExplicitLdControl == 0) {
+                /* default if boardparms doesn't specify a subset */
+                GPIO->GPIOBaseMode |= GPIO_BASE_VDSL_PHY_OVERRIDE_0  | GPIO_BASE_VDSL_PHY_OVERRIDE_1;
+            } else {
+                GPIO->GPIOBaseMode |= map_63268_vdsl_override(IntLdMode) 
+                    |  map_63268_vdsl_override(IntLdPwr) 
+                    |  map_63268_vdsl_override(ExtLdMode)
+                    |  map_63268_vdsl_override(ExtLdPwr)
+                    |  map_63268_vdsl_override(ExtLdClk)
+                    |  map_63268_vdsl_override(ExtLdData) ;
+                ul = map_63268_misc_misc_override(IntLdMode) 
+                    |  map_63268_misc_misc_override(IntLdPwr) 
+                    |  map_63268_misc_misc_override(ExtLdMode)
+                    |  map_63268_misc_misc_override(ExtLdPwr)
+                    |  map_63268_misc_misc_override(ExtLdClk)
+                    |  map_63268_misc_misc_override(ExtLdData) ;
+		if (ul != 0) {
+			MISC->miscMisc_ctrl |= ul;
+  		}
+            } 
+        }
+
+        /* Enable PCIe CLKREQ signal */
+        if (GPIOOverlays & BP_OVERLAY_PCIE_CLKREQ) {
+            GPIO->GPIOMode |= GPIO_MODE_PCIE_CLKREQ_B;
+        }
+
+        if (GPIOOverlays & BP_OVERLAY_USB_LED) {
+            LED->ledHWDis &= ~(1 << LED_USB_ACT);
+        }
+        /* Enable HS SPI SS Pins */
+        if (GPIOOverlays & BP_OVERLAY_HS_SPI_SSB4_EXT_CS) {
+             GPIO->GPIOMode |= GPIO_MODE_HS_SPI_SS_4;
+        }
+        if (GPIOOverlays & BP_OVERLAY_HS_SPI_SSB5_EXT_CS) {
+             GPIO->GPIOMode |= GPIO_MODE_HS_SPI_SS_5;
+        }
+        if (GPIOOverlays & BP_OVERLAY_HS_SPI_SSB6_EXT_CS) {
+             GPIO->GPIOMode |= GPIO_MODE_HS_SPI_SS_6;
+        }
+        if (GPIOOverlays & BP_OVERLAY_HS_SPI_SSB7_EXT_CS) {
+             GPIO->GPIOMode |= GPIO_MODE_HS_SPI_SS_7;
+        }
+    }
+
+    {
+        unsigned short PhyBaseAddr;
+        /* clear the base address first. hw does not clear upon soft reset*/
+        GPIO->RoboswEphyCtrl &= ~EPHY_PHYAD_BASE_ADDR_MASK;
+        if( BpGetEphyBaseAddress(&PhyBaseAddr) == BP_SUCCESS ) {
+            GPIO->RoboswEphyCtrl |= ((PhyBaseAddr >>3) & 0x3) << EPHY_PHYAD_BASE_ADDR_SHIFT;
+        }
+
+        /* clear the base address first. hw does not clear upon soft reset*/
+        GPIO->RoboswGphyCtrl &= ~GPHY_PHYAD_BASE_ADDR_MASK;
+        if( BpGetGphyBaseAddress(&PhyBaseAddr) == BP_SUCCESS ) {
+            GPIO->RoboswGphyCtrl |= ((PhyBaseAddr >>3) & 0x3) << GPHY_PHYAD_BASE_ADDR_SHIFT;
+        }
+    }
+
+
+#if defined(CONFIG_USB)
+    PERF->blkEnables |= USBH_CLK_EN;
+    PERF->softResetB |= SOFT_RST_USBH;
+    TIMER->ClkRstCtl |= USB_REF_CLKEN;
+    MISC->miscIddqCtrl &= ~MISC_IDDQ_CTRL_USBH;
+    mdelay(100);
+    USBH->SwapControl = EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP;
+    USBH->Setup |= USBH_IOC;
+    USBH->Setup &= ~USBH_IPP;
+    USBH->PllControl1 &= ~(PLLC_PLL_IDDQ_PWRDN | PLLC_PLL_PWRDN_DELAY);
+#else
+    MISC->miscIddqCtrl |= MISC_IDDQ_CTRL_USBH;
+    PERF->blkEnables &= ~USBH_CLK_EN;
+#endif
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+#else
+    PERF->blkEnables &= ~FAP0_CLK_EN;
+    PERF->blkEnables &= ~FAP1_CLK_EN;
+#endif
+
+#if defined(CONFIG_PCI)
+    /* Enable WOC */  
+    PERF->blkEnables |=WLAN_OCP_CLK_EN;
+    mdelay(10);
+    PERF->softResetB &= ~(SOFT_RST_WLAN_SHIM_UBUS | SOFT_RST_WLAN_SHIM);
+    mdelay(1);
+    PERF->softResetB |= (SOFT_RST_WLAN_SHIM_UBUS | SOFT_RST_WLAN_SHIM);
+    mdelay(1);
+ 
+    WLAN_SHIM->ShimMisc = (WLAN_SHIM_FORCE_CLOCKS_ON|WLAN_SHIM_MACRO_SOFT_RESET);
+    mdelay(1);
+    WLAN_SHIM->MacControl = (SICF_FGC|SICF_CLOCK_EN);
+    WLAN_SHIM->ShimMisc = WLAN_SHIM_FORCE_CLOCKS_ON;
+    WLAN_SHIM->ShimMisc = 0;
+    WLAN_SHIM->MacControl = SICF_CLOCK_EN;
+#endif    
+
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+    /* Enable power savings from DDR pads on this chip when DDR goes in Self-Refresh mode */
+    MEMC->PhyControl.IDLE_PAD_CONTROL = 0x00000172;
+    MEMC->PhyByteLane0Control.IDLE_PAD_CONTROL = 0x000fffff;
+    MEMC->PhyByteLane1Control.IDLE_PAD_CONTROL = 0x000fffff;
+#endif
+
+#if defined(CONFIG_BCM_1V2REG_AUTO_SHUTDOWN)
+    /*
+     * Determine if internal VREG is used.
+     * If not, disable it to improve WLAN performance at 5GHz
+     * The ring oscillators are affected when varying the 1V2 voltage
+     * So take a measure of the ring osc count, then raise the internal regulator voltage and remeasure
+     * If the ring osc count changed as expected than internal regulators are used
+     */
+    printk("Internal 1P2 VREG will be shutdown if unused...");
+
+    /* Take the first ring osc measurement */
+    GPIO->RingOscCtrl1 = RING_OSC_ENABLE_MASK | RING_OSC_COUNT_RESET | RING_OSC_IRQ;
+    GPIO->RingOscCtrl1 = RING_OSC_ENABLE_MASK | (2 << RING_OSC_SELECT_SHIFT);
+    GPIO->RingOscCtrl0 = RING_OSC_512_CYCLES;
+    while (!(GPIO->RingOscCtrl1 & RING_OSC_IRQ));
+    startCount = GPIO->RingOscCtrl1 & RING_OSC_COUNT_MASK;
+
+    /* Increase internal 1V2 slightly and see if the ring osc is speeding up */
+    MISC->miscVregCtrl1 += 8;
+    MISC->miscVregCtrl0 |= MISC_VREG_CONTROL0_REG_RESET_B;
+
+    /* Take the second ring osc measurement */
+    GPIO->RingOscCtrl1 = RING_OSC_ENABLE_MASK | RING_OSC_COUNT_RESET | RING_OSC_IRQ;
+    GPIO->RingOscCtrl1 = RING_OSC_ENABLE_MASK | (2 << RING_OSC_SELECT_SHIFT);
+    GPIO->RingOscCtrl0 = RING_OSC_512_CYCLES;
+    while (!(GPIO->RingOscCtrl1 & RING_OSC_IRQ));
+    endCount = GPIO->RingOscCtrl1 & RING_OSC_COUNT_MASK;
+
+    /* Reset the internal 1V2 to its original value */
+    MISC->miscVregCtrl1 -= 8;
+
+    /*
+     * A negative difference or a small positive difference indicates that an external regulator is used
+     * This code was calibrated by repeating the measurements thousands of times and looking for a safe value
+     * Safe means avoiding at all costs being wrong by shutting down the internal regulator when it is in use
+     * It is better to be wrong by leaving the internal regulator running when an external regulator is used
+     */
+    diff = startCount - endCount;
+    if (diff < 300) {
+        printk("Unused, turn it off (%08lx-%08lx=%d<300)\n", startCount, endCount,diff);
+        /* Turn off internal 1P2 regulator */
+        MISC->miscVregCtrl0 |= MISC_VREG_CONTROL0_REG_RESET_B | MISC_VREG_CONTROL0_POWER_DOWN_1;
+    } else {
+        printk("Used, leave it on (%08lx-%08lx=%d>=300)\n", startCount, endCount, diff);
+    }
+#elif defined(CONFIG_BCM_1V2REG_ALWAYS_SHUTDOWN)
+    printk("Internal 1P2 VREG is forced to be shutdown\n");
+    MISC->miscVregCtrl0 |= MISC_VREG_CONTROL0_REG_RESET_B | MISC_VREG_CONTROL0_POWER_DOWN_1;
+#elif defined(CONFIG_BCM_1V2REG_NEVER_SHUTDOWN)
+    printk("Internal 1P2 VREG is forced to remain enabled\n");
+#endif
+
+    if ( BpGetVreg1P8(&vreg1p8) == BP_SUCCESS ) {
+        if (vreg1p8 == BP_VREG_EXTERNAL) {
+            printk("Internal 1P8 VREG is forced by boardparms to be shutdown\n");
+            MISC->miscVregCtrl0 |= MISC_VREG_CONTROL0_REG_RESET_B | MISC_VREG_CONTROL0_POWER_DOWN_2;
+        }
+    }	
+
+    if ( BpGetFemtoResetGpio(&gpio) == BP_SUCCESS ) {
+        kerSysSetGpioState(gpio, kGpioActive);
+    }
+    return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm63268_hw_init()
+
+#elif defined(CONFIG_BCM96318)
+
+/*
+ * bcm6318_hw_init - chip specific hardware init for the BCM6318.
+ *
+ * Sets the activity-LED blink rate, powers down unused USB/ADSL blocks,
+ * programs the LED controller from boardparms (HW-driven Ethernet port
+ * LEDs, serial LED shifting, GPIO overlays) and optionally enables the
+ * PCIe CLKREQ workaround.  The write ordering against the LED/GPIO/PERF
+ * register blocks is deliberate -- do not reorder.
+ *
+ * Returns: 0 (always succeeds).
+ */
+static int __init bcm6318_hw_init(void)
+{
+    const ETHERNET_MAC_INFO *EnetInfo;
+    unsigned long GPIOOverlays;
+    unsigned short gpio;
+    unsigned short SerialMuxSel;
+    /* Several A0-silicon-only LED polarity workarounds below key off this. */
+    unsigned int chipRev = UtilGetChipRev();
+
+    /* Set LED blink rate for activity LEDs to 80mS */
+    LED->ledInit &= ~LED_FAST_INTV_MASK;
+    LED->ledInit |= (LED_INTERVAL_20MS * 4) << LED_FAST_INTV_SHIFT;
+
+
+    EnetInfo = BpGetEthernetMacInfoArrayPtr();
+
+
+#if defined(CONFIG_USB)
+    /* USB host in use: enable its clock, let it settle, then configure
+     * the PHY/PLL and route GPIO13 to the USB power-on function. */
+    PERF->blkEnables |= USBH_CLK_EN;
+    mdelay(100);
+    USBH->PllControl1 |= PLLC_PLL_SUSPEND_EN;  
+    USBH->SwapControl = EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP;
+    USBH->Setup |= USBH_IOC;
+    USBH->Setup &= ~USBH_IPP;
+    USBH->USBSimControl |= USBH_LADDR_SEL; /* choose A1 implementation mode for Last ADDR calculation */
+    USBH->PllControl1 &= ~(PLLC_PLL_IDDQ_PWRDN);
+    GPIO->PinMuxSel0 &= ~(PINMUX_SEL_GPIO13_MASK << PINMUX_SEL_GPIO13_SHIFT);
+    GPIO->PinMuxSel0 |= (PINMUX_SEL_USB_PWRON << PINMUX_SEL_GPIO13_SHIFT);  
+#endif
+
+#if !defined(CONFIG_BCM_USB) && !defined(CONFIG_USB)
+    /* USB entirely unused: gate its clocks, hold host and device in
+     * reset and power the USB island down to save power. */
+    PERF->blkEnables &= ~USBH_CLK_EN;
+    PERF->blkEnablesUbus &= ~USB_UBUS_CLK_EN;
+    PERF->softResetB &= ~(SOFT_RST_USBH | SOFT_RST_USBD);
+    PLL_PWR->PllPwrControlIddqCtrl |= IDDQ_USB;
+    PLL_PWR->PllPwrControlPsmVddCtrl |= PSM_VDD_USBH | PSM_VDD_USBD;
+#endif
+
+#if !defined(CONFIG_BCM_ADSL) && !defined(CONFIG_BCM_ADSL_MODULE)
+    /* No ADSL support built in: power down the ADSL/SAR/PHY-MIPS
+     * domains, gate their clocks and hold them in reset. */
+    PLL_PWR->PllPwrControlPsmVddCtrl |= PSM_VDD_ADSL | PSM_VDD_PHY_MIPS | PSM_VDD_SAR | PSM_VDD_PHY_MIPS_CACHE;
+    PLL_PWR->PllPwrControlIddqCtrl |= IDDQ_LDO2P9;
+    PERF->blkEnables &= ~(SAR_CLK_EN | ADSL_CLK_EN | ADSL_AFE_EN | ADSL_QPROC_EN | PHYMIPS_CLK_EN);
+    PERF->blkEnablesUbus &= ~(ADSL_UBUS_CLK_EN | PHYMIPS_UBUS_CLK_EN | SAR_UBUS_CLK_EN);
+    PERF->softResetB &= ~(SOFT_RST_SAR | SOFT_RST_ADSL | SOFT_RST_PHYMIPS);
+#endif
+
+    /* set any in use led/gpio pin mux to gpio function */
+    initGpioPinMux();
+
+    /* Start with all HW LEDs disabled */
+    LED->ledHWDis |= 0xFFFFFF;
+    LED->ledMode = 0;
+
+    /* Enable HW to drive LEDs for Ethernet ports in use.  For each port
+     * present in the boardparms port map: un-disable its SPD/ACT LEDs,
+     * program the link/activity select registers, and on A0 silicon
+     * invert the output polarity via the XOR register (HW workaround). */
+    if (EnetInfo[0].sw.port_map & (1 << 0)) {
+        LED->ledHWDis &= ~(1 << EPHY0_SPD_LED);
+        LED->ledHWDis &= ~(1 << EPHY0_ACT_LED);
+        /* set up link and speed mapping */
+        LED->ledLinkActSelLow |= ((1<<(EPHY0_ACT_LED-4))<<LED_0_LINK_SHIFT);
+        LED->ledLinkActSelHigh |= ((1<<(EPHY0_ACT_LED-4))<<LED_0_LINK_SHIFT);
+        /* workaround for hw which invert the active low to active high */
+        if(chipRev == 0xa0)
+        {
+            LED->ledXorReg |= (1 << EPHY0_SPD_LED);
+            LED->ledXorReg |= (1 << EPHY0_ACT_LED);
+        }
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 1)) {
+        LED->ledHWDis &= ~(1 << EPHY1_SPD_LED);
+        LED->ledHWDis &= ~(1 << EPHY1_ACT_LED);
+        LED->ledLinkActSelLow |= ((1<<(EPHY1_ACT_LED-4))<<LED_1_LINK_SHIFT);
+        LED->ledLinkActSelHigh |= ((1<<(EPHY1_ACT_LED-4))<<LED_1_LINK_SHIFT);
+        if(chipRev == 0xa0)
+        {
+            LED->ledXorReg |= (1 << EPHY1_SPD_LED);
+            LED->ledXorReg |= (1 << EPHY1_ACT_LED);
+        }
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 2)) {
+        LED->ledHWDis &= ~(1 << EPHY2_SPD_LED);
+        LED->ledHWDis &= ~(1 << EPHY2_ACT_LED);
+        LED->ledLinkActSelLow |= ((1<<(EPHY2_ACT_LED-4))<<LED_2_LINK_SHIFT);
+        LED->ledLinkActSelHigh |= ((1<<(EPHY2_ACT_LED-4))<<LED_2_LINK_SHIFT);
+        if(chipRev == 0xa0)
+        {
+            LED->ledXorReg |= (1 << EPHY2_SPD_LED);
+            LED->ledXorReg |= (1 << EPHY2_ACT_LED);
+        }
+    }
+    if (EnetInfo[0].sw.port_map & (1 << 3)) {
+        LED->ledHWDis &= ~(1 << EPHY3_SPD_LED);
+        LED->ledHWDis &= ~(1 << EPHY3_ACT_LED);
+        LED->ledLinkActSelLow |= ((1<<(EPHY3_ACT_LED-4))<<LED_3_LINK_SHIFT);
+        LED->ledLinkActSelHigh |= ((1<<(EPHY3_ACT_LED-4))<<LED_3_LINK_SHIFT);
+        if(chipRev == 0xa0)
+        {
+            LED->ledXorReg |= (1 << EPHY3_SPD_LED);
+            LED->ledXorReg |= (1 << EPHY3_ACT_LED);
+        }
+    }
+
+    if( BpGetGPIOverlays(&GPIOOverlays) == BP_SUCCESS ) 
+    {      
+        if (GPIOOverlays & BP_OVERLAY_SERIAL_LEDS) {
+            /* Route the serial-LED data/clock pins to the LED block. */
+            GPIO->GPIOMode |= (1 << SERIAL_LED_DATA);
+            GPIO->GPIOMode |= (1 << SERIAL_LED_CLK);
+            /* enable shift only on led status change mode to solve the dim led issue. only available in b0 or newer chip */
+            if(chipRev != 0xa0)
+            {
+            	LED->ledInit &= ~LED_SERIAL_SHIFT_MODE_MASK;
+            	LED->ledInit |= LED_SERIAL_SHIFT_MODE_CHANGE;
+            }
+
+            LED->ledSerialMuxSelect = 0x0;
+            if( BpGetSerialLEDMuxSel(&SerialMuxSel) == BP_SUCCESS )
+            {
+                if( SerialMuxSel == (BP_SERIAL_MUX_SEL_GROUP0|BP_SERIAL_MUX_SEL_GROUP2) )
+                     LED->ledSerialMuxSelect = 0xff;
+        	    /* otherwise either non supported combination or default 8 to 23 LED*/
+            }
+
+
+            /* For default Serial MUX selection, XOR workaround is not needed for EPHY 3 SPD and ACT
+        	 * as EPHY 3 LED function is not available anyway. Otherwise, serial data/clk will be inverted too.
+        	 * But for non default Serial MUX selection, we need it to make all EPHY LINK/SPD LED work.
+        	 * However LED 16 to 23 are inverted too. Will fix in next hw revision */
+            if(chipRev == 0xa0)
+            {
+                if( LED->ledSerialMuxSelect == 0x0 )
+                {
+                    LED->ledXorReg &= ~(1 << SERIAL_LED_DATA);
+                    LED->ledXorReg &= ~(1 << SERIAL_LED_CLK);
+                    LED->ledInit |= (LED_SERIAL_LED_EN|LED_SERIAL_LED_MUX_SEL);
+        	    }
+                else
+                    LED->ledInit |= (LED_SERIAL_LED_EN|LED_SERIAL_LED_MUX_SEL|LED_SERIAL_LED_CLK_NPOL);
+            }
+            else
+            	LED->ledInit |= (LED_SERIAL_LED_EN|LED_SERIAL_LED_MUX_SEL);
+        }
+
+        /* Enable LED controller to drive GPIO */
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_0) {
+            GPIO->GPIOMode |= (1 << EPHY0_SPD_LED);
+            GPIO->GPIOMode |= (1 << EPHY0_ACT_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_1) {
+            GPIO->GPIOMode |= (1 << EPHY1_SPD_LED);
+            GPIO->GPIOMode |= (1 << EPHY1_ACT_LED);
+		}
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_2) {
+            GPIO->GPIOMode |= (1 << EPHY2_SPD_LED);
+            GPIO->GPIOMode |= (1 << EPHY2_ACT_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_EPHY_LED_3) {
+            GPIO->GPIOMode |= (1 << EPHY3_SPD_LED);
+            GPIO->GPIOMode |= (1 << EPHY3_ACT_LED);
+        }
+
+        /* USB device vs. host LED: HW-driven for device, GPIO for host. */
+        if (GPIOOverlays & BP_OVERLAY_USB_DEVICE) {
+            LED->ledHWDis &= ~(1 << USB_ACT_LED);
+        }
+        if (GPIOOverlays & BP_OVERLAY_USB_LED) {
+            GPIO->GPIOMode |= (1 << USB_ACT_LED);
+        }
+
+        if ( BpGetWanDataLedGpio(&gpio) == BP_SUCCESS ) {
+            if ((gpio & BP_GPIO_NUM_MASK) == INET_ACT_LED) {
+            	/* WAN Data LED must be LED 8 */
+                if (!(gpio & BP_GPIO_SERIAL)) {
+                    /* If LED is not serial, enable corresponding GPIO */
+                    GPIO->GPIOMode |= GPIO_NUM_TO_MASK(gpio);
+                }
+            }
+        }
+
+#if defined(CONFIG_PCI) && defined(PCIEH)
+        /* enable PCIE_REQ if boardparam says so */
+        if (GPIOOverlays & BP_OVERLAY_PCIE_CLKREQ)
+        {
+            /* PCIE CLK req use GPIO 10 input so make sure GPIO 10 is not used when this flag is set.*/
+            if( !BpIsGpioInUse(BP_GPIO_10_AL&BP_GPIO_NUM_MASK) )
+            {
+            	kerSysSetGpioDirInput(BP_GPIO_10_AL&BP_GPIO_NUM_MASK);
+            	MISC->miscPeriph_ECO_Register |= MISC_PCIE_CLKREQ_EN;
+            	PCIEH_MISC_HARD_REGS->hard_pcie_hard_debug |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE;
+            }
+        }
+#endif
+    }
+    
+    return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm6318_hw_init()
+
+#elif defined(CONFIG_BCM960333)
+
+static int __init bcm60333_hw_init(void)
+{
+    /*
+     * Nothing to configure for the BCM60333 yet.  A minimal GPIO MUX
+     * setup to enable the UART Tx/Rx pins could be added here later.
+     */
+    return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm60333_hw_init()
+
+#elif defined(CONFIG_BCM96838)
+
+/*
+ * bcm6838_hw_init - chip specific hardware init for the BCM6838.
+ *
+ * Routes each boardparms-described external interrupt source
+ * (reset-to-default button, WPS/SES button, PMD alarm and signal
+ * detect, triplexer TX-fail / signal-detect, WiFi on/off, LTE) onto
+ * the external IRQ mux, sets the IRQ polarity for active-high lines,
+ * pin-muxes the second UART, and powers/initializes the USB host and
+ * device blocks according to the stored port-enable settings.
+ *
+ * NOTE(review): the seven IRQ-mux blocks below are near-identical and
+ * are a candidate for extraction into a shared helper.
+ *
+ * Returns: 0 (always succeeds).
+ */
+static int __init bcm6838_hw_init(void)
+{
+    unsigned short irq, gpio;
+	
+    /* Common pattern for each source: the low bits of the GPIO number
+     * go into ext_irq_muxsel0, the high bits into Dbg_extirqmuxsel0_1,
+     * each at an offset selected by the external IRQ index; active-high
+     * sources additionally get their ExtIrqCfg polarity bit set. */
+    if( BpGetResetToDefaultExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetResetToDefaultExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+                PERF->ExtIrqCfg |= (1<<irq);
+        }
+    }
+
+    if( BpGetWirelessSesExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetWirelessSesExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+                PERF->ExtIrqCfg |= (1<<irq);
+        }
+    }
+
+    if( BpGetPmdAlarmExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetPmdAlarmExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+			{
+                PERF->ExtIrqCfg |= (1<<irq);
+                /* PMD alarm additionally sets bit 26 -- presumably a
+                 * level/edge or global-enable control; confirm against
+                 * the 6838 register documentation. */
+                PERF->ExtIrqCfg |= (1<<26);
+            }
+        }
+    }
+
+    if( BpGetPmdSDExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetPmdSDExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+            {
+                PERF->ExtIrqCfg |= (1<<irq);
+                /* PERF->ExtIrqCfg |= (1<<27); change interrupt to level */
+            }
+        }
+    }
+
+    if( BpGetTrplxrTxFailExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetTrplxrTxFailExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+			{
+                PERF->ExtIrqCfg |= (1<<irq);
+            }
+        }
+    }
+
+    if( BpGetTrplxrSdExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetTrplxrSdExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+			{
+                PERF->ExtIrqCfg |= (1<<irq);
+            }
+        }
+    }
+
+    if( BpGetWifiOnOffExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetWifiOnOffExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+			{
+                PERF->ExtIrqCfg |= (1<<irq);
+            }
+        }
+    }
+
+    if( BpGetLteExtIntr(&irq) == BP_SUCCESS )
+    {
+        if(BpGetLteExtIntrGpio(&gpio) == BP_SUCCESS)
+        {
+            int gpio_polarity = gpio & BP_ACTIVE_MASK;
+            gpio &= BP_GPIO_NUM_MASK;
+            PERF->ext_irq_muxsel0 |= ( (gpio&EXT_IRQ_MASK_LOW) << (irq*EXT_IRQ_OFF_LOW) );
+            DBGPERF->Dbg_extirqmuxsel0_1 |= ( ((gpio&EXT_IRQ_MASK_HIGH)>>EXT_IRQ_OFF_LOW) << (irq*EXT_IRQ_OFF_HIGH) );
+            if (gpio_polarity == BP_ACTIVE_HIGH)
+			{
+                PERF->ExtIrqCfg |= (1<<irq);
+            }
+        }
+    }
+
+
+    /* Pin-mux the second UART's input/output pins (mux function 1). */
+    if (BpGetUart2SdinGpio(&gpio) == BP_SUCCESS)
+    {
+        gpio &= BP_GPIO_NUM_MASK;
+        set_pinmux(gpio, 1);
+    }
+    if (BpGetUart2SdoutGpio(&gpio) == BP_SUCCESS)
+    {
+        gpio &= BP_GPIO_NUM_MASK;
+        set_pinmux(gpio, 1);
+    }
+
+#if defined(CONFIG_USB)
+    if(kerSysGetUsbHostPortEnable(0) || kerSysGetUsbHostPortEnable(1))
+    {
+        /* enable power to USB ports */
+        /* The port_block_data/port_command writes below program pin
+         * functions through the GPIO command interface; the magic
+         * values encode pin+function (see the per-line comments).
+         * Command 0x21 presumably means "set pinmux" -- confirm with
+         * the 6838 GPIO block documentation. */
+        GPIO->port_block_data1 = 0x0;
+        if(kerSysGetUsbHostPortEnable(0))
+        {
+            GPIO->port_block_data2 = 0x1045; /*USB0_PWRFLT */
+            GPIO->port_command = 0x21;
+            GPIO->port_block_data2 = 0x1046; /*USB0_PWRON */
+            GPIO->port_command = 0x21;
+        }
+        if(kerSysGetUsbHostPortEnable(1))
+        {
+            GPIO->port_block_data2 = 0x5047; /*USB1_PWRFLT */
+            GPIO->port_command = 0x21;
+            GPIO->port_block_data2 = 0x5048; /*USB1_PWRON */
+            GPIO->port_command = 0x21;
+        }
+        /* Let power settle before touching the host controller. */
+        mdelay(100);
+        USBH->SwapControl = EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP;
+        USBH->Setup |= USB_IOC;
+        USBH->Setup &= ~USB_IPP;
+        USBH->PllControl1 &= ~(PLL_IDDQ_PWRDN);
+    }
+    else
+    { /* no USB HOST */
+        /*
+         * Power to USB Host controller is on by default,
+         * shutdown power to USB Host controller
+         */
+        kerSysSetUsbPower(0, USB_HOST_FUNC);
+    }
+
+    if(!kerSysGetUsbDeviceEnable())
+    {
+        /* USB device not supported shutdown power to USB device */
+        kerSysSetUsbPower(0, USB_DEVICE_FUNC);
+    }
+#endif
+	return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm6838_hw_init()
+
+#elif defined(CONFIG_BCM963381)
+
+extern void bcm_set_pinmux(unsigned int pin_num, unsigned int mux_num);
+
+#if defined(CONFIG_USB)
+
+#define CAP_TYPE_EHCI       0x00 
+#define CAP_TYPE_OHCI       0x01 
+#define CAP_TYPE_XHCI       0x02 
+
+static struct platform_device *xhci_dev;
+
+/*
+ * bcm63381_manual_usb_ldo_start - hand-sequenced USB PHY PLL/LDO power-up.
+ *
+ * Performs the documented bring-up order: hold the PLL in reset, clear
+ * UTMI control, enable the LDO core-ready state, pulse IDDQ with a
+ * settle delay, power up the AFE LDO and bandgap, release UTMI reset,
+ * then finally release the PLL reset.  The mdelay() settle times and
+ * the exact write order are part of the hardware sequence -- do not
+ * reorder or coalesce these writes.
+ */
+static void bcm63381_manual_usb_ldo_start(void)
+{
+    USBH_CTRL->pll_ctl &= ~(1 << 30); /*pll_resetb=0*/
+    USBH_CTRL->utmi_ctl_1 = 0; 
+    USBH_CTRL->pll_ldo_ctl = 4; /*ldo_ctl=core_rdy */
+    USBH_CTRL->pll_ctl |= ( 1 << 31); /*pll_iddq=1*/
+    mdelay(10);
+    USBH_CTRL->pll_ctl &= ~( 1 << 31); /*pll_iddq=0*/
+    USBH_CTRL->pll_ldo_ctl |= 1; /*ldo_ctl.AFE_LDO_PWRDWNB=1*/
+    USBH_CTRL->pll_ldo_ctl |= 2; /*ldo_ctl.AFE_BG_PWRDWNB=1*/
+    mdelay(1);
+    USBH_CTRL->utmi_ctl_1 = 0x00020002;/* utmi_resetb &ref_clk_sel=0; */ 
+    USBH_CTRL->pll_ctl |= ( 1 << 30); /*pll_resetb=1*/
+    mdelay(10);
+}    
+
+#define MDIO_USB2   0
+#define MDIO_USB3   (1 << 31)
+/*
+ * usb_mdio_write - write a value to a USB PHY register over MDIO.
+ * @mdio: MMIO address of the MDIO command register
+ * @reg:  PHY register number (shifted into bit 16 of the command word)
+ * @val:  value to write (low bits of the command word)
+ * @mode: MDIO_USB2 (0) or MDIO_USB3 (bit 31) bank select
+ *
+ * Latches the transaction by writing the command word, then pulsing
+ * bit 25 high and low again; the 1 ms delay lets the MDIO transfer
+ * complete before the strobe is deasserted.
+ */
+static void usb_mdio_write(volatile u32 *mdio, u32 reg, u32 val, int mode)
+{
+    uint32_t data;
+    data = (reg << 16) | val | mode;
+    *mdio = data;
+    /* Pulse the strobe bit (25) to latch the command. */
+    data |= (1 << 25);
+    *mdio = data;
+    mdelay(1);
+    data &= ~(1 << 25);
+    *mdio = data;
+}
+
+/*
+ * usb2_eye_fix - USB 2.0 PHY register tweak (signal "eye" fix).
+ *
+ * Writes 0x80a0 to register 0x1f (presumably a bank/page select --
+ * confirm against the PHY datasheet) then 0xc6a0 to register 0x0a on
+ * the USB2 MDIO bank.
+ */
+static void usb2_eye_fix(void)
+{
+    /* Updating USB 2.0 PHY registers */
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x1f, 0x80a0, MDIO_USB2);
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x0a, 0xc6a0, MDIO_USB2);
+}
+
+/*
+ * usb3_pll_fix - USB 3.0 PHY PLL register tweak.
+ *
+ * Writes 0x8000 to register 0x1f (presumably a bank/page select --
+ * confirm against the PHY datasheet) then 0x1503 to register 0x07 on
+ * the USB3 MDIO bank.
+ */
+static void usb3_pll_fix(void)
+{
+    /* Updating USB 3.0 PHY registers */
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x1f, 0x8000, MDIO_USB3);
+    usb_mdio_write((void *)&USBH_CTRL->mdio, 0x07, 0x1503, MDIO_USB3);
+}
+
+
+/*
+ * bcm_add_usb_host - register a USB host controller platform device.
+ * @type:         controller capability type (CAP_TYPE_*); currently
+ *                unused in this function, kept for the caller's API
+ * @id:           platform device instance id
+ * @mem_base:     physical base of the controller's register window
+ * @mem_size:     size of the register window in bytes
+ * @irq:          interrupt line for the controller
+ * @devname:      platform driver name to bind (e.g. "xhci-hcd")
+ * @private_data: optional driver platform_data, may be NULL
+ *
+ * Builds MEM+IRQ resources, allocates the platform device, sets a
+ * 32-bit DMA mask and registers the device with the driver core.
+ *
+ * Returns: the registered platform device, or NULL on failure.
+ *
+ * Fix vs. previous revision: a failed platform_device_alloc() was only
+ * logged and then the NULL pointer was dereferenced; we now bail out
+ * early, and a failed platform_device_add() drops the device reference
+ * instead of leaking it.
+ */
+static __init struct platform_device *bcm_add_usb_host(int type, int id,
+                            uint32_t mem_base, uint32_t mem_size, int irq,
+                            const char *devname, void *private_data)
+{
+    struct resource res[2];
+    struct platform_device *pdev;
+    /* 32-bit DMA mask; static so the address stays valid after return. */
+    static const u64 usb_dmamask = 0xffffffff;
+
+    memset(&res, 0, sizeof(res));
+    res[0].start = mem_base;
+    res[0].end   = mem_base + (mem_size -1);
+    res[0].flags = IORESOURCE_MEM;
+
+    res[1].flags = IORESOURCE_IRQ;
+    res[1].start = res[1].end = irq;
+
+    pdev = platform_device_alloc(devname, id);
+    if(!pdev)
+    {
+        printk(KERN_ERR "Error Failed to allocate platform device for devname=%s id=%d\n",
+                devname, id);
+        return NULL;
+    }
+
+    platform_device_add_resources(pdev, res, 2);
+
+    pdev->dev.dma_mask = (u64 *)&usb_dmamask;
+    pdev->dev.coherent_dma_mask = 0xffffffff;
+    
+    if(private_data)
+    {
+        pdev->dev.platform_data = private_data;
+    } 
+
+    if( platform_device_add(pdev))
+    {
+        printk(KERN_ERR "Error Failed to add platform device for devname=%s id=%d\n",
+                devname, id);
+        /* Drop the reference taken by platform_device_alloc(). */
+        platform_device_put(pdev);
+        return NULL;
+    }
+
+    return pdev;
+}
+
+/*
+ * bcm63381_usb30_init - configure the USB 3.0 host block and register
+ * the xhci-hcd platform device (handle kept in file-scope xhci_dev).
+ */
+static void bcm63381_usb30_init(void)
+{
+
+    /*initialize XHCI settings*/
+    //USB30H_CTRL->setup |= (USBH_IPP);
+
+    USB30H_CTRL->usb30_ctl2 = 0x1; /*swap data & control */
+
+    USB30H_CTRL->usb30_ctl1 |= (1<<30); /*disable over current*/
+    USB30H_CTRL->usb30_ctl1 |= USB3_IOC;
+    //USB30H_CTRL->usb30_ctl1 |= USB3_IPP;
+    USB30H_CTRL->usb30_ctl1 |= XHC_SOFT_RESETB;
+    USB30H_CTRL->usb30_ctl1 |= PHY3_PLL_SEQ_START;
+
+    /* work around to avoid USB3.0 issue of controller being reset when UBUS is loaded */ 
+    /* NOTE(review): this reads USBH_CTRL->bridge_ctl but writes
+     * USB30H_CTRL->bridge_ctl -- confirm the mixed register blocks are
+     * intentional and not a copy/paste slip. */
+    USB30H_CTRL->bridge_ctl = (USBH_CTRL->bridge_ctl & 0xFFFF00FF) | (0x1000);
+    
+    usb3_pll_fix();
+
+    xhci_dev = bcm_add_usb_host(CAP_TYPE_XHCI, 0, USB_XHCI_BASE, 0x1000,
+                                INTERRUPT_ID_USBH30, "xhci-hcd", NULL); 
+}
+
+#endif
+
+/*
+ * bcm63381_hw_init - chip specific hardware init for the BCM63381.
+ *
+ * Powers up the USB 2.0 host through the PMC, runs the manual PLL/LDO
+ * bring-up, sets over-current (IOC) and power polarity (IPP) from the
+ * boardparms GPIO active levels, selects the endian-swap mode by chip
+ * revision, applies the USB2 PHY eye fix, and optionally powers up and
+ * configures the USB 3.0 host.
+ *
+ * Returns: 0 on success, -1 if a PMC power-up request fails.
+ */
+static int __init bcm63381_hw_init(void)
+{ 
+#if defined(CONFIG_USB)
+    short usb_gpio;
+    unsigned int chipRev = UtilGetChipRev();
+    if(pmc_usb_power_up(PMC_USB_HOST_20))
+    {
+        printk(KERN_ERR "+++ Failed to Power Up USB20 Host\n");
+        return -1;
+    }
+    
+    bcm63381_manual_usb_ldo_start();
+
+    /* Default to over-current checking enabled; an active-high
+     * power-fault GPIO in boardparms turns it off. */
+    USBH_CTRL->setup |= (USBH_IOC);
+    if(BpGetUsbPwrFlt0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) != BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IOC);
+       }
+    }
+    /* Power-on polarity (IPP) follows the PWRON GPIO's active level. */
+    if(BpGetUsbPwrOn0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) != BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IPP);
+       }
+       else
+       {
+            USBH_CTRL->setup |= (USBH_IPP);
+       }
+    }
+
+    /* A0 silicon uses the simple swap bits; later revisions use the
+     * swap-mode fields instead. */
+    if ((chipRev & 0xf0) == 0xa0)
+    {
+        USBH_CTRL->bridge_ctl |= (EHCI_ENDIAN_SWAP | OHCI_ENDIAN_SWAP);
+    } else {
+        USBH_CTRL->bridge_ctl = (USBH_CTRL->bridge_ctl & ~EHCI_SWAP_MODE_MASK & ~OHCI_SWAP_MODE_MASK) 
+                                | ((EHCI_SWAP_MODE_BOTH << EHCI_SWAP_MODE_SHIFT) | (OHCI_SWAP_MODE_BOTH << OHCI_SWAP_MODE_SHIFT));
+    }
+
+    usb2_eye_fix();
+
+    if(kerSysGetUsb30HostEnable())
+    {
+        if(pmc_usb_power_up(PMC_USB_HOST_30))
+        {
+            printk(KERN_ERR "+++ Failed to Power Up USB30 Host\n");
+            return -1;
+        }
+        mdelay(10);
+        bcm63381_usb30_init();
+    }
+#endif
+	return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm63381_hw_init()
+
+#elif defined(CONFIG_BCM96848)
+
+/*
+ * bcm6848_hw_init - chip specific hardware init for the BCM6848.
+ *
+ * If USB host port 0 is enabled, powers up the USB 2.0 host through
+ * the PMC, sets over-current (IOC) and power polarity (IPP) from the
+ * boardparms GPIO active levels and programs the endian-swap mode.
+ *
+ * Returns: 0 on success (or if USB is disabled), -1 if the PMC
+ * power-up request fails.
+ */
+static int __init bcm6848_hw_init(void)
+{ 
+#if defined(CONFIG_USB)
+    short usb_gpio;
+
+    if (!kerSysGetUsbHostPortEnable(0))
+        return 0;
+
+    if(pmc_usb_power_up(PMC_USB_HOST_20))
+    {
+        printk(KERN_ERR "+++ Failed to Power Up USB20 Host\n");
+        return -1;
+    }
+
+    /* Default to over-current checking enabled; an active-high
+     * power-fault GPIO in boardparms turns it off. */
+    USBH_CTRL->setup |= (USBH_IOC);
+    if(BpGetUsbPwrFlt0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) != BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IOC);
+       }
+    }
+    /* Power-on polarity (IPP) follows the PWRON GPIO's active level. */
+    if(BpGetUsbPwrOn0(&usb_gpio) == BP_SUCCESS)
+    {
+       if((usb_gpio & BP_ACTIVE_MASK) != BP_ACTIVE_LOW)
+       {
+          USBH_CTRL->setup &= ~(USBH_IPP);
+       }
+       else
+       {
+            USBH_CTRL->setup |= (USBH_IPP);
+       }
+    }
+
+    USBH_CTRL->bridge_ctl = (USBH_CTRL->bridge_ctl & ~EHCI_SWAP_MODE_MASK & ~OHCI_SWAP_MODE_MASK) 
+        | ((EHCI_SWAP_MODE_BOTH << EHCI_SWAP_MODE_SHIFT) | (OHCI_SWAP_MODE_BOTH << OHCI_SWAP_MODE_SHIFT));
+#endif
+	return 0;
+}
+
+#define bcm63xx_specific_hw_init() bcm6848_hw_init()
+
+#endif
+
+/*
+ * bcm63xx_hw_init - common early hardware init entry point (arch_initcall).
+ *
+ * Initializes the flash subsystem (skipped on IKOS emulation builds)
+ * and then dispatches to the chip-specific init routine selected at
+ * compile time via the bcm63xx_specific_hw_init() macro.
+ *
+ * Returns: the chip-specific init routine's result.
+ */
+static int __init bcm63xx_hw_init(void)
+{
+#if !defined(CONFIG_BRCM_IKOS)
+    kerSysFlashInit();
+#endif
+
+    return bcm63xx_specific_hw_init();
+}
+arch_initcall(bcm63xx_hw_init);
+
+
+/*
+ * brcm63xx_setup - install board-level restart/halt handlers
+ * (arch_initcall).
+ *
+ * Hooks the Broadcom restart/halt routines into the MIPS machine
+ * vectors, routes power-off to the halt handler, and makes the kernel
+ * reboot one second after a panic instead of hanging.
+ *
+ * Returns: 0 (always succeeds).
+ */
+static int __init brcm63xx_setup(void)
+{
+    extern int panic_timeout;
+
+    _machine_restart = brcm_machine_restart;
+    _machine_halt = brcm_machine_halt;
+    /* No dedicated power-off on this board: fall back to halt. */
+    pm_power_off = brcm_machine_halt;
+
+    /* Auto-reboot one second after a kernel panic. */
+    panic_timeout = 1;
+
+    return 0;
+}
+
+arch_initcall(brcm63xx_setup);
+
+
+/*
+ * getMemorySize - return the size, in bytes, of usable SDRAM.
+ *
+ * Chip-family specific: most variants read the memory controller's
+ * size/config register; the IKOS and memory-restriction builds return
+ * a hard-coded cap instead.
+ */
+unsigned long getMemorySize(void)
+{
+#if defined(CONFIG_BRCM_IKOS)
+    return(31 * 1024 * 1024); /* voice DSP is loaded after this amount */
+#elif defined(CONFIG_BRCM_MEMORY_RESTRICTION_16M)
+    return(16 * 1024 * 1024); 
+#elif defined(CONFIG_BRCM_MEMORY_RESTRICTION_32M)
+    return(32 * 1024 * 1024); 
+#elif defined(CONFIG_BRCM_MEMORY_RESTRICTION_64M)
+    return(64 * 1024 * 1024); 
+#elif defined(CONFIG_BCM96362) || defined(CONFIG_BCM96328) 
+    /* CSEND is scaled by 16MB (<< 24) to get the end address. */
+    return (DDR->CSEND << 24);
+#elif defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333) || defined(CONFIG_BCM963381)
+    uint32 memCfg;
+
+#if defined (CONFIG_BCM963381)
+    memCfg = MEMC->SDR_CFG.SDR_CFG;
+#else
+    memCfg = MEMC->SDR_CFG;
+#endif
+    /* SDRAM-space field encodes the size as a power of two with a
+     * 1MB (1 << 20) base. */
+    memCfg = (memCfg&MEMC_SDRAM_SPACE_MASK)>>MEMC_SDRAM_SPACE_SHIFT;
+
+    return 1<<(memCfg+20);
+#elif defined(CONFIG_BCM96848)
+    return 1<<(((MEMC->GLB_GCFG&MEMC_GLB_GCFG_SIZE1_MASK)>>MEMC_GLB_GCFG_SIZE1_SHIFT)+20);
+#else
+    /* CSEND in 16MB units, clamped to 16 (i.e. 256MB max). */
+    return (((MEMC->CSEND > 16) ? 16 : MEMC->CSEND) << 24);
+#endif
+}
+
+
+/* Pointers to memory buffers allocated for the DSP module */
+void *dsp_core;
+void *dsp_init;
+EXPORT_SYMBOL(dsp_core);
+EXPORT_SYMBOL(dsp_init);
+void __init allocDspModBuffers(void);
+/*
+*****************************************************************************
+** FUNCTION:   allocDspModBuffers
+**
+** PURPOSE:    Allocates buffers for the init and core sections of the DSP
+**             module. This module is special since it has to be allocated
+**             in the 0x800.. memory range which is not mapped by the TLB.
+**
+** PARAMETERS: None
+** RETURNS:    Nothing
+*****************************************************************************
+*/
+void __init allocDspModBuffers(void)
+{
+#if defined(CONFIG_BCM_ENDPOINT_MODULE)
+    printk("Allocating memory for DSP module core and initialization code\n");
+
+  /* alloc_bootmem() reserves memory from the boot-time allocator (it
+   * is not freed at runtime).  A zero-sized section yields NULL rather
+   * than a zero-byte allocation. */
+  dsp_core = (void*)((DSP_CORE_SIZE > 0) ? alloc_bootmem((unsigned long)DSP_CORE_SIZE) : NULL);
+  dsp_init = (void*)((DSP_INIT_SIZE > 0) ? alloc_bootmem((unsigned long)DSP_INIT_SIZE) : NULL);
+
+  printk("Allocated DSP module memory - CORE=0x%x SIZE=%d, INIT=0x%x SIZE=%d\n",
+         (unsigned int)dsp_core, DSP_CORE_SIZE, (unsigned int)dsp_init , DSP_INIT_SIZE);
+#endif
+}
+
+#endif
diff --git a/arch/mips/bcm963xx/smp-brcm.c b/arch/mips/bcm963xx/smp-brcm.c
new file mode 100644
index 0000000000000000000000000000000000000000..71b32c7b7dd609f70cd1e9af4d4c0445becb40f2
--- /dev/null
+++ b/arch/mips/bcm963xx/smp-brcm.c
@@ -0,0 +1,326 @@
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+/***********************************************************
+ *
+ * Copyright (c) 2009 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * <:label-BRCM:2011:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license 
+ * agreement governing use of this software, this software is licensed 
+ * to you under the terms of the GNU General Public License version 2 
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give 
+ *    you permission to link this software with independent modules, and 
+ *    to copy and distribute the resulting executable under terms of your 
+ *    choice, provided that you also meet, for each linked independent 
+ *    module, the terms and conditions of the license of that module. 
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications 
+ *    of the software.  
+ * 
+ * Not withstanding the above, under no circumstances may you combine 
+ * this software in any way with any other Broadcom software provided 
+ * under a license other than the GPL, without Broadcom's express prior 
+ * written consent. 
+ * 
+ * :>
+ *
+ ************************************************************/
+
+/***********************************************************
+ *
+ *    SMP support for Broadcom 63xx and 68xx chips
+ *
+ *    05/2009    Created by Xi Wang
+ *
+ ************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/irq.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+
+#include <bcm_cpu.h>
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+
+
+static int cpu_ipi_irq;
+DEFINE_PER_CPU(unsigned int, ipi_pending);
+DEFINE_PER_CPU(unsigned int, ipi_flags);
+
+extern spinlock_t brcm_irqlock;
+
+// boot parameters
+struct BootConfig {
+    unsigned int func_addr;
+    unsigned int gp;
+    unsigned int sp;
+};
+
+static struct BootConfig boot_config;
+
+void install_smp_boot_ex_handler(void);
+static void core_send_ipi_single(int cpu, unsigned int action);
+static void core_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+
+
+void install_smp_boot_ex_handler(void)
+{
+
+	asm (
+        ".set push\n"
+        ".set noreorder\n"
+        "lui    $8, 0xa000 \n"
+        "ori    $8, $8, 0x0200  # alternative mips exception vector\n"
+        "la     $9, 2f\n"
+        "la     $10, 3f\n"
+    "1:\n"
+        "lw     $11, 0($9)\n"
+        "sw     $11, 0($8)\n"
+        "addiu  $9, $9, 4\n"
+        "bne    $9, $10, 1b\n"
+        "addiu  $8, $8, 4\n"
+        "b      3f\n"
+        "nop\n"
+    "2:    # begin exception handler code\n"
+        "mfc0   $26, $13, 0\n"
+        "li     $27, 0x800100   # change back to normal exception vector & ack interrupt\n"
+        "xor    $26, $27\n"
+        "mtc0   $26, $13, 0\n"
+        "la     $27, %0         # pointer to boot_config structure\n"
+        "lw     $24, 0($27)     # func_addr - will be loaded into EPC before eret\n"
+        "lw     $28, 4($27)     # gp\n"
+        "lw     $29, 8($27)     # sp\n"
+        "mtc0   $24, $14        # load return address into EPC\n"
+        "eret\n"
+    "3:\n"
+        ".set pop\n"
+        :
+        : "X" (&boot_config)
+	);
+}
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+extern void bcm_timer_interrupt_handler_TP1(void);
+#endif
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+	unsigned int old_ipi_flags;
+
+	spin_lock(&brcm_irqlock);
+	old_ipi_flags = __get_cpu_var(ipi_flags);
+	__get_cpu_var(ipi_flags) = 0;
+	spin_unlock(&brcm_irqlock);
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+	/* Process TIMER related interrupt first */
+	if (old_ipi_flags & 1<<2) {
+		bcm_timer_interrupt_handler_TP1();
+	}
+#endif
+
+	if (old_ipi_flags & 1<<0) {
+		scheduler_ipi();
+	}
+
+	if (old_ipi_flags & 1<<1) {
+		smp_call_function_interrupt();
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+static struct irqaction irq_ipi = {
+	.handler	= ipi_interrupt,
+	.flags		= IRQF_DISABLED|IRQF_PERCPU,
+	.name		= "IPI"
+};
+
+
+static void __init brcm_smp_setup(void)
+{
+	int i;
+
+	init_cpu_possible(cpu_possible_mask);
+
+	for (i = 0; i < 2; i++) {
+		set_cpu_possible(i, true);
+		__cpu_number_map[i]	= i;
+		__cpu_logical_map[i]	= i;
+	}
+}
+
+
+static void __init brcm_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int c0tmp;
+	int cpu;
+
+	c0tmp = __read_32bit_c0_register($22, 0);
+	c0tmp |= CP0_BCM_CFG_NBK;    /* non blocking */
+	__write_32bit_c0_register($22, 0, c0tmp);
+
+	c0tmp = __read_32bit_c0_register($22, 2);
+	c0tmp &= ~(CP0_CMT_PRIO_TP0 | CP0_CMT_PRIO_TP1); /* equal (random) D-cache priority */
+	__write_32bit_c0_register($22, 2, c0tmp);
+
+	//printk("bcm config0 %08x\n", __read_32bit_c0_register($22, 0));
+	//printk("cmt control %08x\n", __read_32bit_c0_register($22, 2));
+	//printk("cmt local %08x\n", __read_32bit_c0_register($22, 3));
+	//printk("bcm config1 %08x\n", __read_32bit_c0_register($22, 5));
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(ipi_pending, cpu) = 0;
+		per_cpu(ipi_flags, cpu) = 0;
+	}
+
+	c0tmp = __read_32bit_c0_register($22, 1);
+	c0tmp |= CP0_CMT_SIR_0;
+	__write_32bit_c0_register($22, 1, c0tmp);
+
+	cpu_ipi_irq = INTERRUPT_ID_SOFTWARE_0;
+	setup_irq(cpu_ipi_irq, &irq_ipi);
+	irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
+}
+
+
+// Pass PC, SP, and GP to a secondary core and start it up by sending an inter-core interrupt
+static void __cpuinit brcm_boot_secondary(int cpu, struct task_struct *idle)
+{
+	unsigned int cause;
+
+	boot_config.func_addr = (unsigned long) smp_bootstrap;
+	boot_config.sp = (unsigned int) __KSTK_TOS(idle);
+	boot_config.gp = (unsigned int) task_thread_info(idle);
+
+	install_smp_boot_ex_handler();
+
+	cause = read_c0_cause();
+	cause |= CAUSEF_IP0;
+	write_c0_cause(cause);
+}
+
+
+static void __cpuinit brcm_init_secondary(void)
+{
+	//printk("bcm config0 %08x\n", __read_32bit_c0_register($22, 0));
+	//printk("cmt control %08x\n", __read_32bit_c0_register($22, 2));
+	//printk("cmt local %08x\n", __read_32bit_c0_register($22, 3));
+	//printk("bcm config1 %08x\n", __read_32bit_c0_register($22, 5));
+
+	clear_c0_status(ST0_BEV);
+
+#if defined(CONFIG_BCM96838) 
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+	// CP0 timer interrupt (IRQ5) is not used for TP1 when pwrsave is enabled
+	change_c0_status(ST0_IM, IE_SW0 | IE_IRQ1 | IE_IRQ2 /*| IE_IRQ5*/);
+#else
+	change_c0_status(ST0_IM, IE_SW0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ5);
+#endif
+
+#else
+
+#ifdef CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS
+	// CP0 timer interrupt (IRQ5) is not used for TP1 when pwrsave is enabled
+	change_c0_status(ST0_IM, IE_SW0 | IE_IRQ0 | IE_IRQ1 /*| IE_IRQ5*/);
+#else
+	change_c0_status(ST0_IM, IE_SW0 | IE_IRQ0 | IE_IRQ1 | IE_IRQ5);
+#endif
+
+#endif
+}
+
+
+static void __cpuinit brcm_smp_finish(void)
+{
+}
+
+
+static void brcm_cpus_done(void)
+{
+}
+
+
+// send inter-core interrupts
+static void core_send_ipi_single(int cpu, unsigned int action)
+{
+	unsigned long flags;
+	unsigned int cause;
+	
+	//	printk("== from_cpu %d    to_cpu %d    action %u\n", smp_processor_id(), cpu, action);
+
+	spin_lock_irqsave(&brcm_irqlock, flags);
+
+	switch (action) {
+	case SMP_RESCHEDULE_YOURSELF:
+		per_cpu(ipi_pending, cpu) = 1;
+		per_cpu(ipi_flags, cpu) |= 1<<0;
+		break;
+	case SMP_CALL_FUNCTION:
+		per_cpu(ipi_pending, cpu) = 1;
+		per_cpu(ipi_flags, cpu) |= 1<<1;
+		break;
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+	case SMP_BCM_PWRSAVE_TIMER:
+		per_cpu(ipi_pending, cpu) = 1;
+		per_cpu(ipi_flags, cpu) |= 1<<2;
+		break;
+#endif
+	default:
+		goto errexit;
+	}
+
+	mb();
+
+	cause = read_c0_cause();
+	cause |= CAUSEF_IP0;
+	write_c0_cause(cause);
+
+errexit:
+	spin_unlock_irqrestore(&brcm_irqlock, flags);
+}
+
+
+static void core_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, *mask) {
+		core_send_ipi_single(cpu, action);
+	}
+}
+
+
+struct plat_smp_ops brcm_smp_ops = {
+	.send_ipi_single	= core_send_ipi_single,
+	.send_ipi_mask		= core_send_ipi_mask,
+	.init_secondary		= brcm_init_secondary,
+	.smp_finish		= brcm_smp_finish,
+	.cpus_done		= brcm_cpus_done,
+	.boot_secondary		= brcm_boot_secondary,
+	.smp_setup		= brcm_smp_setup,
+	.prepare_cpus		= brcm_prepare_cpus
+};
+
+#endif // defined(CONFIG_BCM_KF_MIPS_BCM963XX)
diff --git a/arch/mips/include/asm/bounce.h b/arch/mips/include/asm/bounce.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc28bbac1d0329b3ca08324ba9cc254ab8f3b4
--- /dev/null
+++ b/arch/mips/include/asm/bounce.h
@@ -0,0 +1,212 @@
+#if defined(CONFIG_BCM_KF_BOUNCE) || !defined(CONFIG_BCM_IN_KERNEL)
+
+#ifndef __BOUNCE_H_INCLUDED__
+#define __BOUNCE_H_INCLUDED__
+
+#if defined(CONFIG_BRCM_BOUNCE) || defined(CONFIG_BOUNCE)
+/*
+<:copyright-BRCM:2007:DUAL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license 
+agreement governing use of this software, this software is licensed 
+to you under the terms of the GNU General Public License version 2 
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give 
+   you permission to link this software with independent modules, and 
+   to copy and distribute the resulting executable under terms of your 
+   choice, provided that you also meet, for each linked independent 
+   module, the terms and conditions of the license of that module. 
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications 
+   of the software.  
+
+Not withstanding the above, under no circumstances may you combine 
+this software in any way with any other Broadcom software provided 
+under a license other than the GPL, without Broadcom's express prior 
+written consent. 
+
+:>
+*/
+/*
+ *******************************************************************************
+ * File Name   : bounce.h
+ * Description : Tracing function call entry and exit, using compiler support
+ *				 for instrumenting function entry and exit.
+ *				 The GCC -finstrument-functions compiler option enables this.
+ *
+ *				 Files that need to be instrumented may be compiled with the
+ *				 compiler option -finstrument-functions via the Makefile.
+ *
+ *				 To disable instrumentation of specific functions in a file
+ *				 that is compiled with the option -finstrument-functions, you
+ *				 may append __attribute__ ((no_instrument_function)) to its
+ *				 definition, e.g.
+ *				 	void hello( void ) __attribute__ ((no_instrument_function));
+ *
+ *				 You may enable tracing by invoking bounce_up().
+ *
+ *				 Two modes of tracing are defined:
+ *				 - Continuous tracing with an EXPLICIT bounce_dn() to stop.
+ *				 - Auto stop, when a limited number of functions are logged.
+ *                 bounce_dn() may also be invoked to stop in this mode.
+ *
+ *				 The collected trace is retained until the next start.
+ *******************************************************************************
+ */
+#ifndef __ASSEMBLY__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined( __KERNEL__ )
+#include <linux/types.h>        /* ISO C99 7.18 Integer types */
+#else
+#include <stdint.h>             /* ISO C99 7.18 Integer types */
+#endif
+
+#define BOUNCE_ERROR				(-1)
+
+#define BOUNCE_NOINSTR __attribute__((no_instrument_function))
+
+#if defined(CONFIG_BOUNCE_EXIT)
+#define BOUNCE_SIZE					(32*1024)
+#define BOUNCE_PANIC				(32*1024)
+#else
+#define BOUNCE_SIZE					(256*1024)
+#define BOUNCE_PANIC				20000
+#endif
+
+#define BOUNCE_COLOR
+
+//#define BOUNCE_DEBUG
+#ifdef BOUNCE_DEBUG
+#define BDBG(code)      			code
+#else
+#define BDBG(code)					do {} while(0)
+#endif
+
+#define BOUNCE_VERSION(a,b,c)		(((a) << 16) + ((b) << 8) + ((c) << 0))
+#define BOUNCE_VER_A(version)		((version >>16) & 0xff)
+#define BOUNCE_VER_B(version)		((version >> 8) & 0xff)
+#define BOUNCE_VER_C(version)		((version >> 0) & 0xff)
+
+#define BOUNCE_DEV_VERSION			(BOUNCE_VERSION(01,00,00))
+#define BOUNCE_CTL_VERSION			(BOUNCE_VERSION(01,00,00))
+
+    /* Device name in : /proc/devices */
+#define BOUNCE_DEV_NAME				"bounce"
+#define BOUNCE_DEV_PATH          	"/dev/" BOUNCE_DEV_NAME
+#define BOUNCE_DEV_MAJ           	213
+
+#undef  BOUNCE_DECL
+#define BOUNCE_DECL(x)				x,
+
+typedef enum bounceMode
+{
+	BOUNCE_DECL(BOUNCE_MODE_DISABLED)
+	BOUNCE_DECL(BOUNCE_MODE_CONTINUOUS)	/* explicit disable via bounce_dn() */
+	BOUNCE_DECL(BOUNCE_MODE_LIMITED)	/* auto disable when count goes to 0 */
+    BOUNCE_DECL(BOUNCE_MODE_MAXIMUM)
+} BounceMode_t;
+
+typedef enum bounceIoctl
+{
+	BOUNCE_DECL(BOUNCE_START_IOCTL)
+	BOUNCE_DECL(BOUNCE_STOP_IOCTL)
+	BOUNCE_DECL(BOUNCE_DUMP_IOCTL)
+	BOUNCE_DECL(BOUNCE_INVLD_IOCTL)
+} BounceIoctl_t;
+
+
+#ifdef __KERNEL__
+
+#define BOUNCE_ADDR_MASK			(0xFFFFFFFC)
+#define BOUNCE_ARGS_MASK            (0xF0000000)
+#define BOUNCE_GET_FUNCP(u32)       (void*)((u32) & BOUNCE_ADDR_MASK)
+
+#define BOUNCE_IS_ARGS_LOG(u32)     (((u32) & BOUNCE_ARGS_MASK) == 0)
+#define BOUNCE_IS_FUNC_LOG(u32)     (((u32) & BOUNCE_ARGS_MASK) != 0)
+
+#define BOUNCE_MAX_EVENTS           1024
+#define BOUNCE_FMT_LENGTH           126     /* Bytes in format string */
+typedef struct bounceLog
+{
+	union {
+        uint32_t u32;
+		void * func;
+
+        struct {
+            uint32_t evid   : 16;
+            uint32_t args   : 14;
+            uint32_t cpu0   :  1;
+            uint32_t type   :  1;
+        } event;
+
+		struct {
+			uint32_t addr	: 30;
+            uint32_t cpu0   :  1;   /* CPU0 or CPU1 */
+			uint32_t type	:  1;	/* entry=1 or exit=0 */
+		} site;
+
+	} word0;						/* called function */
+
+    union {
+	    uint32_t pid; 				/* task context */
+        uint32_t arg1;
+    };
+    uint32_t arg2;
+    uint32_t arg3;
+} BounceLog_t;
+
+
+extern void	bounce_up(BounceMode_t mode, unsigned int limit);
+extern void bounce_dn(void);
+extern void bounce_panic(void);
+extern void	bounce_dump(unsigned int last);
+
+extern void __cyg_profile_func_enter(void *ced, void *cer) BOUNCE_NOINSTR;
+extern void __cyg_profile_func_exit( void *ced, void *cer) BOUNCE_NOINSTR;
+
+extern void bounce_reg(uint32_t event, char * eventName)        BOUNCE_NOINSTR;
+extern void bounce0(uint32_t event)                             BOUNCE_NOINSTR;
+extern void bounce1(uint32_t event, uint32_t arg1)              BOUNCE_NOINSTR;
+extern void bounce2(uint32_t event, uint32_t arg1, uint32_t arg2)
+                                                                BOUNCE_NOINSTR;
+extern void bounce3(uint32_t event, uint32_t arg1, uint32_t arg2, uint32_t arg3)
+                                                                BOUNCE_NOINSTR;
+
+#define			BOUNCE_LOGK(func)	__cyg_profile_func_enter((void*)func,	\
+								 			__builtin_return_address(0))
+#endif	/* defined __KERNEL__ */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  /* defined __ASSEMBLY__ */
+
+
+#else	/* !defined(CONFIG_BRCM_BOUNCE) */
+
+#define			bounce_up(m,l)		do {} while(0)
+#define			bounce_dn()			do {} while(0)
+#define			bounce_dump(l)		do {} while(0)
+#define			bounce_panic()		do {} while(0)
+#define         bounce0(e)          do {} while(0)
+#define         bounce1(e,a)        do {} while(0)
+#define         bounce2(e,a,b)      do {} while(0)
+#define         bounce3(e,a,b,c)    do {} while(0)
+
+#define			BOUNCE_LOGK(f)		do {} while(0)
+
+#endif	/* #defined(CONFIG_BRCM_BOUNCE) */
+
+#endif	/* !defined(__BOUNCE_H_INCLUDED__) */
+
+#endif	/* defined(CONFIG_BCM_KF_BOUNCE) || !defined(CONFIG_BCM_IN_KERNEL) */
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index f2f7c6c264da38bbe409dba7c44b84c0bb5dd70f..3e66547448897e0d6891d488315f1b648cba53d3 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -98,6 +98,64 @@ static inline __sum16 csum_fold(__wsum sum)
  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
  *	Arnt Gulbrandsen.
  */
+
+#if defined(CONFIG_BCM_KF_CSUM_UNALIGNED)
+
+/* Brcm version can handle unaligned data. Merged from brcm 2.6.8 kernel.*/
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	if (((__u32)iph&0x3) == 0) {
+		unsigned int *word = (unsigned int *) iph;
+		unsigned int *stop = word + ihl;
+		unsigned int csum;
+		int carry;
+
+		csum = word[0];
+		csum += word[1];
+		carry = (csum < word[1]);
+		csum += carry;
+
+		csum += word[2];
+		carry = (csum < word[2]);
+		csum += carry;
+
+		csum += word[3];
+		carry = (csum < word[3]);
+		csum += carry;
+
+		word += 4;
+		do {
+			csum += *word;
+			carry = (csum < *word);
+			csum += carry;
+			word++;
+		} while ((unsigned int) word < (unsigned int) stop);
+
+		return csum_fold(csum);
+	} else {
+	        __u16 * buff = (__u16 *) iph;
+	        __u32 sum=0;
+	        __u16 i;
+
+	        // make 16 bit words out of every two adjacent 8 bit words in the packet
+	        // and add them up
+	        for (i=0;i<ihl*2;i++){
+	                sum = sum + (__u32) buff[i];
+	        }
+
+	        // take only 16 bits out of the 32 bit sum and add up the carries
+	        while (sum>>16)
+	          sum = (sum & 0xFFFF)+(sum >> 16);
+
+	        // one's complement the result
+	        sum = ~sum;
+
+	        return ((__sum16) sum);
+	}
+}
+
+#else
+
 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	const unsigned int *word = iph;
@@ -129,6 +187,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	return csum_fold(csum);
 }
 
+#endif
+
 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 	__be32 daddr, unsigned short len, unsigned short proto,
 	__wsum sum)
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index c454550eb0c07195874926969578c1367a6bd19b..ab584a03385b00adf511e8ec690c3c8ac3430c66 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -82,7 +82,11 @@ struct cpuinfo_mips {
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
+#if defined(CONFIG_BCM_KF_CPU_DATA_CPUID)
+#define current_cpu_data cpu_data[raw_smp_processor_id()]
+#else
 #define current_cpu_data cpu_data[smp_processor_id()]
+#endif
 #define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
 
 extern void cpu_probe(void);
diff --git a/arch/mips/include/asm/mach-bcm963xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm963xx/cpu-feature-overrides.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed8d22124f19855ca028730bfd1fe8a04c555807
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm963xx/cpu-feature-overrides.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb			1
+#define cpu_has_4kex			4
+#define cpu_has_4ktlb			8
+#define cpu_has_4k_cache	1
+#define cpu_has_fpu			0
+#define cpu_has_32fpr			0
+#define cpu_has_counter			0x40
+#define cpu_has_watch			0
+#define cpu_has_mips16			0
+// Use IVEC for TP0
+#define cpu_has_divec			0
+#define cpu_has_vce			0
+#define cpu_has_cache_cdex_p		0
+#define cpu_has_cache_cdex_s		0
+#define cpu_has_prefetch		0
+#define cpu_has_mcheck			0x2000
+#define cpu_has_ejtag			0x4000
+#define cpu_has_llsc			0x10000
+#define cpu_has_vtag_icache		0
+/* #define cpu_has_dc_aliases	? */
+#define cpu_has_ic_fills_f_dc		0
+
+#define cpu_has_nofpuex			0
+#define cpu_has_64bits			0
+#define cpu_has_64bit_zero_reg		0
+#define cpu_has_64bit_gp_regs		0
+/* #define cpu_has_inclusive_pcaches ? */
+#define cpu_has_64bit_addresses		0
+
+#define cpu_has_subset_pcaches		0
+
+#define cpu_dcache_line_size()		16
+#define cpu_icache_line_size()		16
+#define cpu_scache_line_size()		0
+/*#define cpu_icache_snoops_remote_store 1 ? */
+
+#endif /* __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bcm963xx/war.h b/arch/mips/include/asm/mach-bcm963xx/war.h
new file mode 100644
index 0000000000000000000000000000000000000000..87cd4651dda380a9438564f5b76cbd94dc9767c0
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm963xx/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_BCM47XX_WAR_H
+#define __ASM_MIPS_MACH_BCM47XX_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR	0
+#define R4600_V1_HIT_CACHEOP_WAR	0
+#define R4600_V2_HIT_CACHEOP_WAR	0
+#define R5432_CP0_INTERRUPT_WAR		0
+#define BCM1250_M3_WAR			0
+#define SIBYTE_1956_WAR			0
+#define MIPS4K_ICACHE_REFILL_WAR	0
+#define MIPS_CACHE_SYNC_WAR		0
+#define TX49XX_ICACHE_INDEX_INV_WAR	0
+#define RM9000_CDEX_SMP_WAR		0
+#define ICACHE_REFILLS_WORKAROUND_WAR	0
+#define R10000_LLSC_WAR			0
+#define MIPS34K_MISSED_ITLB_WAR		0
+
+#endif /* __ASM_MIPS_MACH_BCM47XX_WAR_H */
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index 70d9a25132c5265846febc873c0a90626e7c0f9f..8a5eae31155e275bdfd7984edaee0e2bae0b46b3 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -8,6 +8,11 @@
 #ifndef __ASM_MACH_GENERIC_IRQ_H
 #define __ASM_MACH_GENERIC_IRQ_H
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && (defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848))
+#ifndef NR_IRQS
+#define NR_IRQS	256
+#endif
+#endif
 #ifndef NR_IRQS
 #define NR_IRQS	128
 #endif
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index d7a9efd3a5ce217973ecebb0e2ce4c09be083efa..6712bae3e270c06e726b8907c8ea7bd1e5d7fe83 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -83,7 +83,11 @@
 #endif
 
 #ifndef FIXADDR_TOP
+#if defined(CONFIG_BCM_KF_FIXADDR_TOP)
+#define FIXADDR_TOP     ((unsigned long)(long)(int)0xff000000)
+#else
 #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
 #endif
+#endif
 
 #endif /* __ASM_MACH_GENERIC_SPACES_H */
diff --git a/arch/mips/include/asm/pmonapi.h b/arch/mips/include/asm/pmonapi.h
new file mode 100644
index 0000000000000000000000000000000000000000..4129d68c2352a5df82ed2b96a8eda1827bf686fd
--- /dev/null
+++ b/arch/mips/include/asm/pmonapi.h
@@ -0,0 +1,173 @@
+#if defined(CONFIG_BCM_KF_PMON) || !defined(CONFIG_BCM_IN_KERNEL)
+
+#ifndef __PMONAPI_H_INCLUDED_
+#define __PMONAPI_H_INCLUDED_
+/*
+<:copyright-BRCM:2007:DUAL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#if defined( CONFIG_PMON )
+
+#if ! defined( __ASSEMBLY__ )
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined( __KERNEL__ )
+#include <linux/types.h>            /* ISO C99 7.18 Integer types */
+#include <asm/mipsregs.h>
+#include <bcm_cpu.h>
+#else
+#include <stdint.h>                 /* ISO C99 7.18 Integer types */
+#endif /* __KERNEL__ */
+
+#define PMON_ERROR                  (-1)
+#define PMON_COLOR
+// #define PMON_DEBUG
+
+#if defined( PMON_DEBUG )
+#define PMONDBG(code)           code
+#else
+#define PMONDBG(code)           do {} while(0)
+#endif  /* PMON_DEBUG */
+
+#define PMON_VERSION(a,b,c)     (((a) << 16) + ((b) << 8) + ((c) << 0))
+#define PMON_VER_A(version)     ((version >>16) & 0xff)
+#define PMON_VER_B(version)     ((version >> 8) & 0xff)
+#define PMON_VER_C(version)     ((version >> 0) & 0xff)
+
+#define PMON_DEV_VERSION        (PMON_VERSION(01,00,00))
+#define PMON_CTL_VERSION        (PMON_VERSION(01,00,00))
+
+/* Device name in : /dev */
+#define PMON_DEV_NAME           "pmon"
+#define PMON_DEV_PATH           "/dev/" PMON_DEV_NAME
+#define PMON_DEV_MAJ            214
+
+// #define PMON_RAC_METRIC
+
+#undef  PMON_DECL
+#define PMON_DECL(x)            x,
+
+typedef enum PmonIoctl
+{
+    PMON_DECL(PMON_CPU_START_IOCTL)
+    PMON_DECL(PMON_ALL_START_IOCTL)
+    PMON_DECL(PMON_REPORT_IOCTL)
+    PMON_DECL(PMON_INVLD_IOCTL)
+} PmonIoctl_t;
+
+#if defined( __KERNEL__ )
+/*
+ * Enable PMON with configuration:
+ *    skip: Delayed enabling after a number of iterations
+ *    iter: Average over number of iteration per metric
+ *    metric: Compute all=1 or only cyclecount=0 metrics
+ *
+ * An iteration could be a single packet processing path.
+ */
+#define PMON_MAX_EVENTS         32
+#define PMON_DEF_UNREGEVT       "Unregistered Event"
+
+/*
+ * MIPS3300 and MIPS4350 Performance Counting Module configuration
+ * Only Performance counter #0 is used.
+ */
+#define __read_cycles()         __read_32bit_c0_register($9, 0)
+
+#if defined( CONFIG_BCM96362 ) ||      defined( CONFIG_BCM96328 ) ||                defined(CONFIG_BCM963268) ||                                                    defined(CONFIG_BCM96838)
+
+/* MIPS4350 Performance Counters */
+#define PF_GBLCTL               0xBFA20000u
+#define PF_CTL_0                0xBFA20004u
+#define PF_CTL_1                0xBFA20008u
+#define PF_CTR_0                0xBFA20010u
+#define PF_CTR_1                0xBFA20014u
+#define PF_CTR_2                0xBFA20018u
+#define PF_CTR_3                0xBFA2001Cu
+
+#define MIPS4350_ADDR(reg)      (((reg-MIPS_BASE_BOOT) + MIPS_BASE))
+
+#define MIPS4350_RD(reg)        (*(volatile uint32_t *)(MIPS4350_ADDR(reg)))
+#define MIPS4350_WR(reg,v)      (*(volatile uint32_t *)(MIPS4350_ADDR(reg)))=(v)
+
+#define __read_pfgblctl()       MIPS4350_RD( PF_GBLCTL )
+#define __read_pfctl_0()        MIPS4350_RD( PF_CTL_0 )
+#define __read_pfctr_0()        MIPS4350_RD( PF_CTR_0 )
+#define __write_pfgblctl(val)   MIPS4350_WR( PF_GBLCTL, val )
+#define __write_pfctl_0(val)    MIPS4350_WR( PF_CTL_0, val )
+#define __write_pfctl_1(val)    MIPS4350_WR( PF_CTL_1, val )
+#define __write_pfctr_0(val)    MIPS4350_WR( PF_CTR_0, val )
+
+#else /* MIPS ???? */
+
+#define __read_pfgblctl()       0
+#define __read_pfctl_0()        0
+#define __read_pfctr_0()        0
+#define __write_pfgblctl(val)   FUNC_NULL
+#define __write_pfctl_0(val)    FUNC_NULL
+#define __write_pfctr_0(val)    FUNC_NULL
+
+#endif  /* MIPS: */
+
+#define PMON_OVFLW_EVENTS       1024
+
+extern uint32_t pfCtr[PMON_OVFLW_EVENTS];
+extern void pmon_bgn(void);             /* Begin an iteration */
+extern void pmon_end(uint32_t event);   /* End of an iteration */
+static inline void pmon_log(uint32_t event) { pfCtr[event] = __read_pfctr_0(); }
+static inline void pmon_clr(void) { pfCtr[0] = ~0U; }
+extern int  pmon_enable(uint32_t skip, uint32_t iter, uint32_t metric);
+extern void pmon_reg(uint32_t event, char * name);
+
+#endif  /* __KERNEL__ */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  /* __ASSEMBLY__ */
+
+#else   /* ! CONFIG_PMON */
+
+#undef  FUNC_NULL
+#define FUNC_NULL               do {} while(0)
+
+#define pmon_log(e)             FUNC_NULL
+#define pmon_clr()              FUNC_NULL
+#define pmon_bgn()              FUNC_NULL
+#define pmon_end(e)             FUNC_NULL
+#define pmon_enable(s,i,m)      0
+#define pmon_reg(e,n)           0
+
+#endif  /* ! CONFIG_PMON */
+
+#endif  /* __PMONAPI_H_INCLUDED_ */
+
+#endif
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index d4fb4d852a6db5e1778bbc6542807ca2c0919e56..af057908d3ae337ddf6132ea265d31af953fc231 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -38,6 +38,11 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
+
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)
+#define SMP_BCM_PWRSAVE_TIMER   0x3
+#endif
+
 /* Octeon - Tell another core to flush its icache */
 #define SMP_ICACHE_FLUSH	0x4
 
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index abb13e83dfdae58c0d2b70dfae2a2493cf45b351..d1fc0d47966468c6cadcd69df0751cbc95bde61a 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -64,8 +64,12 @@ register struct thread_info *__current_thread_info __asm__("$28");
 
 /* thread information allocation */
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+#if defined(CONFIG_BCM_KF_THREAD_SIZE_FIX)
+#define THREAD_SIZE_ORDER (2)
+#else
 #define THREAD_SIZE_ORDER (1)
 #endif
+#endif
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_64BIT)
 #define THREAD_SIZE_ORDER (2)
 #endif
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index d8dad5340ea30d22eac825883012a14341fbf157..3a13aaaaf3b93b59777fc96d93e1f49ae71f78f9 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -373,6 +373,8 @@
  */
 #define __NR_Linux_syscalls		346
 
+
+
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 0c6877ea900495f547e087be39c11870d1d95dd7..68160263b8fe74634d8fd50a1d85447b549bc9ff 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -8,6 +8,10 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 		   ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_TSTAMP) 
+obj-$(CONFIG_BCM_KF_TSTAMP)	+= bcm_tstamp.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_TSTAMP)
+
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
@@ -16,7 +20,17 @@ CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
 endif
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
+
+ifdef BCM_KF # defined (CONFIG_BCM_KF_POWER_SAVE)
+ifneq ($(strip $(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)),)
+obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k-bcm-pwr.o
+else
+obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
+endif
+else # BCM_KF
 obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
+endif # BCM_KF
+
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
@@ -25,7 +39,17 @@ obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)	+= csrc-powertv.o
+
+ifdef BCM_KF # defined (CONFIG_BCM_KF_POWER_SAVE)
+ifneq ($(strip $(CONFIG_BCM_HOSTMIPS_PWRSAVE_TIMERS)),)
+obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k-bcm-pwr.o
+else
 obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
+endif
+else # BCM_KF
+obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
+endif # BCM_KF
+
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
@@ -104,7 +128,15 @@ obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
 
 obj-$(CONFIG_OF)		+= prom.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+else
+CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
 
diff --git a/arch/mips/kernel/bcm_tstamp.c b/arch/mips/kernel/bcm_tstamp.c
new file mode 100644
index 0000000000000000000000000000000000000000..3296381932c863f69f39950376df78ab5f0bccee
--- /dev/null
+++ b/arch/mips/kernel/bcm_tstamp.c
@@ -0,0 +1,115 @@
+#if defined(CONFIG_BCM_KF_TSTAMP)
+/*
+<:copyright-BRCM:2011:GPL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bcm_tstamp.h>
+#include <asm/time.h>
+
+static u32 _2us_divisor;
+static u32 _2ns_shift;
+static u32 _2ns_multiplier;
+
+u32 bcm_tstamp_read(void)
+{
+	return read_c0_count();
+}
+EXPORT_SYMBOL(bcm_tstamp_read);
+
+
+u32 bcm_tstamp_delta(u32 start, u32 end)
+{
+	// start and end could have been read from different CPU's.
+	// Typically, I have seen the counters on the CPU's to be within
+	// 20 cycles of each other.  Allow a bit more for a margin of error
+	if (start <= 100 && ((end > 0xffffffc0) ||
+	                     (start >= end && end <= 100)))
+		return 1;
+	else if (end > start)
+		return end-start;  // simplest case
+	else if (start > 100 && (start-end < 100))
+		return 1;
+	else
+		return (0xffffffff - start + end);  // simple rollover
+}
+EXPORT_SYMBOL(bcm_tstamp_delta);
+
+
+u32 bcm_tstamp_elapsed(u32 start)
+{
+	u32 end = read_c0_count();
+	return bcm_tstamp_delta(start, end);
+}
+EXPORT_SYMBOL(bcm_tstamp_elapsed);
+
+
+u32 bcm_tstamp2us(u32 i)
+{
+	return (i/_2us_divisor);
+}
+EXPORT_SYMBOL(bcm_tstamp2us);
+
+
+u64 bcm_tstamp2ns(u32 i)
+{
+	u64 ns = (u64) i;
+	ns = (ns * _2ns_multiplier) >> _2ns_shift;
+	return ns;
+}
+EXPORT_SYMBOL(bcm_tstamp2ns);
+
+
+int __init init_bcm_tstamp(void)
+{
+	if (mips_hpt_frequency == 0)
+		mips_hpt_frequency = 160000000;
+
+	_2us_divisor = mips_hpt_frequency / 1000000;
+
+	if (mips_hpt_frequency == 200000000) { //400MHz
+		_2ns_multiplier = 5;  //5ns
+		_2ns_shift = 0;
+	} else if (mips_hpt_frequency == 166500000) { //333MHz
+		_2ns_multiplier = 6;  //6ns
+		_2ns_shift = 0;
+	} else if (mips_hpt_frequency == 160000000) { //320MHz
+		_2ns_multiplier = 25;  //6.25ns
+		_2ns_shift = 2;
+	} else if (mips_hpt_frequency == 15000000) { // NOTE(review): comment says 300MHz CPU, but 15000000 is 15MHz -- likely missing a zero (150000000); confirm intended value
+		_2ns_multiplier = 13;  // approximate 6.5ns; actual for a 150MHz hpt clock is 6.667ns
+		_2ns_shift = 1;
+	} else {
+		printk("init_bcm_tstamp: unhandled mips_hpt_freq=%d, "
+		       "adjust constants in bcm_tstamp.c\n", mips_hpt_frequency);
+	}
+
+	printk(KERN_INFO "bcm_tstamp initialized, (hpt_freq=%d 2us_div=%u "
+	                 "2ns_mult=%u 2ns_shift=%u)\n", mips_hpt_frequency,
+	                 _2us_divisor, _2ns_multiplier, _2ns_shift);
+
+	return 0;
+}
+__initcall(init_bcm_tstamp);
+
+#endif /* defined(CONFIG_BCM_KF_TSTAMP) */
+
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d735d0e58f5c9caea2ae5936054e0ce1a66dd08..37b76f30bc9afe7a270da3a80e829dd13066a053 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -35,6 +35,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	long epc = regs->cp0_epc;
 	int ret = 0;
 
+
 	switch (insn.i_format.opcode) {
 	/*
 	 * jr and jalr are in r_format format.
diff --git a/arch/mips/kernel/cevt-r4k-bcm-pwr.c b/arch/mips/kernel/cevt-r4k-bcm-pwr.c
new file mode 100644
index 0000000000000000000000000000000000000000..5860b436d44406896946ff5efb1e9e5e61f6b8d2
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k-bcm-pwr.c
@@ -0,0 +1,244 @@
+/***********************************************************
+ *
+ * Copyright (c) 2009 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * <:label-BRCM:2009:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license 
+ * agreement governing use of this software, this software is licensed 
+ * to you under the terms of the GNU General Public License version 2 
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give 
+ *    you permission to link this software with independent modules, and 
+ *    to copy and distribute the resulting executable under terms of your 
+ *    choice, provided that you also meet, for each linked independent 
+ *    module, the terms and conditions of the license of that module. 
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications 
+ *    of the software.  
+ * 
+ * Not withstanding the above, under no circumstances may you combine 
+ * this software in any way with any other Broadcom software provided 
+ * under a license other than the GPL, without Broadcom's express prior 
+ * written consent. 
+ * 
+ * :>
+ *
+ ************************************************************/
+/***********************************************************
+ *
+ * This file implements clock events for the Broadcom DSL and GPON CPE
+ * when the power management feature is enabled. When the processor
+ * is found to be mostly idle, the main CPU clock is slowed down to
+ * save power. By slowing down the clock, the C0 counter unfortunately
+ * also slows down. This file replaces the (typical) 1 msec clock tick
+ * interrupt processing with a reliable timer source which is unaffected
+ * by the change in MIPS clock changes.
+ *
+ * The timer available to replace the C0 timer works differently.
+ * The design needs to be adjusted accordingly. The C0 counter is a free
+ * running counter which wraps at 0xFFFFFFFF and which runs at different
+ * frequencies depending on the MIPS frequency. The C0 compare register
+ * requires to be programmed to stay ahead of the C0 counter, to generate
+ * an interrupt in the future.
+ *
+ * The peripheral timers (there are 3 of them) wrap at 0x3fffffff and
+ * run at 50 MHz. When the timer reaches a programmed value, it can generate
+ * and interrupt and then either stops counting or restarts at 0.
+ * This difference in behavior between the C0 counter and the peripheral timers
+ * required to use 2 timers for power management. One to generate the periodic
+ * interrupts required by the clock events (Timer 0), and one to keep an accurate
+ * reference when the clock is slowed down for saving power (Timer 2). Timer 1
+ * is planned to be used by the second processor to support SMP.
+ *
+ ************************************************************/
+
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+#include <bcm_map_part.h>
+#include <bcm_intr.h>
+
+extern void BcmPwrMngtCheckWaitCount(void);
+extern unsigned int TimerC0Snapshot0;
+#if defined(CONFIG_SMP)
+extern unsigned int TimerC0Snapshot1;
+extern unsigned int C0divider, C0multiplier, C0ratio;
+#endif
+
+DEFINE_PER_CPU(struct clock_event_device, bcm_mips_clockevent_device);
+int bcm_timer_irq_installed;
+
+static int bcm_mips_next_event0(unsigned long delta,
+						struct clock_event_device *evt)
+{
+	// Timer may be reprogrammed while it is already running, so clear it first
+	TIMER->TimerCtl0 = 0;
+	TIMER->TimerCnt0 = 0;
+	TIMER->TimerCtl0 = TIMERENABLE | RSTCNTCLR | delta;
+
+	return 0;
+}
+
+#if defined(CONFIG_SMP)
+static int bcm_mips_next_event1(unsigned long delta,
+						struct clock_event_device *evt)
+{
+	// Timer may be reprogrammed while it is already running, so clear it first
+	TIMER->TimerCtl1 = 0;
+	TIMER->TimerCnt1 = 0;
+	TIMER->TimerCtl1 = TIMERENABLE | RSTCNTCLR | delta;
+
+	return 0;
+}
+#endif
+
+void bcm_mips_set_clock_mode(enum clock_event_mode mode,
+						struct clock_event_device *evt)
+{
+}
+
+void bcm_mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+#if defined(CONFIG_SMP)
+extern struct plat_smp_ops *mp_ops;
+#endif
+irqreturn_t bcm_timer_interrupt_handler_TP0(int irq, void *dev_id)
+{
+	struct clock_event_device *cd;
+	irqreturn_t rc = IRQ_NONE;
+	byte timer_ints = TIMER->TimerInts & (TIMER0|TIMER1);
+
+	if (timer_ints & TIMER0) {
+		TIMER->TimerCtl0 = 0;
+	}
+	if (timer_ints & TIMER1) {
+		TIMER->TimerCtl1 = 0;
+	}
+	TIMER->TimerInts = timer_ints;
+
+	if (timer_ints & TIMER0) {
+		// Turn off timer
+		TIMER->TimerCtl0 = 0;
+
+		cd = &per_cpu(bcm_mips_clockevent_device, 0);
+		cd->event_handler(cd);
+
+		BcmPwrMngtCheckWaitCount();
+
+		rc = IRQ_HANDLED;
+	}
+#if defined(CONFIG_SMP)
+	if (timer_ints & TIMER1) {
+		// Turn off timer
+		TIMER->TimerCtl1 = 0;
+		mp_ops->send_ipi_single(1, SMP_BCM_PWRSAVE_TIMER);
+
+		rc = IRQ_HANDLED;
+	}
+#endif
+
+	return rc;
+}
+
+struct irqaction perf_timer_irqaction = {
+	.handler = bcm_timer_interrupt_handler_TP0,
+	.flags = IRQF_DISABLED|IRQF_SHARED,
+	.name = "Periph Timer",
+};
+
+#if defined(CONFIG_SMP)
+void bcm_timer_interrupt_handler_TP1(void)
+{
+	struct clock_event_device *cd;
+
+	cd = &per_cpu(bcm_mips_clockevent_device, 1);
+	cd->event_handler(cd);
+
+	BcmPwrMngtCheckWaitCount();
+
+	return;
+}
+#endif
+
+int __cpuinit r4k_clockevent_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+
+	cd = &per_cpu(bcm_mips_clockevent_device, cpu);
+
+	cd->name		= "BCM Periph Timer";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	clockevent_set_clock(cd, mips_hpt_frequency);
+
+	/* Calculate the min / max delta */
+	cd->max_delta_ns	= clockevent_delta2ns(0x3fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= INTERRUPT_ID_TIMER;
+	cd->cpumask		= cpumask_of(cpu);
+	if (cpu == 0)
+		cd->set_next_event	= bcm_mips_next_event0;
+#if defined(CONFIG_SMP)
+	else
+		cd->set_next_event	= bcm_mips_next_event1;
+#endif
+
+	cd->set_mode		= bcm_mips_set_clock_mode;
+	cd->event_handler	= bcm_mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	if (cpu == 0) {
+		// Start the BCM Timer interrupt
+		irq_set_affinity(INTERRUPT_ID_TIMER, cpumask_of(0));
+		setup_irq(INTERRUPT_ID_TIMER, &perf_timer_irqaction);
+
+		// Start the BCM Timer0 - keep accurate 1 msec tick count
+		TIMER->TimerCtl0 = TIMERENABLE | RSTCNTCLR | (50000-1);
+		TIMER->TimerMask |= TIMER0EN;
+
+		// Take a snapshot of the C0 timer when Timer2 was started
+		// This will be needed later when having to make adjustments
+		TimerC0Snapshot0 = read_c0_count();
+
+		// Start the BCM Timer2
+		// to keep an accurate free running high precision counter
+		// Count up to its maximum value so it can be used by csrc-r4k-bcm-pwr.c
+		TIMER->TimerCtl2 = TIMERENABLE | 0x3fffffff;
+	}
+#if defined(CONFIG_SMP)
+	else {
+		unsigned int newTimerCnt, mult, rem, result;
+		// Start the BCM Timer1 - keep accurate 1 msec tick count
+		TIMER->TimerCtl1 = TIMERENABLE | RSTCNTCLR | (50000-1);
+		TIMER->TimerMask |= TIMER1EN;
+
+		// Take a snapshot of the C0 timer when Timer1 was started
+		// This will be needed later when having to make adjustments
+		TimerC0Snapshot1 = read_c0_count();
+		newTimerCnt = TIMER->TimerCnt2 & 0x3fffffff;
+		mult = newTimerCnt/C0divider;
+		rem  = newTimerCnt%C0divider;
+		result  = mult*C0multiplier + ((rem*C0ratio)>>10);
+		TimerC0Snapshot1 -= result;
+	}
+#endif
+
+	return 0;
+}
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 51095dd9599d307605caeb8c2aca9e99a0e8dce1..4df7ffe6c1ea3a3d9925a4a9c2b4ed7ab34482b8 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -16,6 +16,10 @@
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
 
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && (defined(CONFIG_BCM_HOSTMIPS_PWRSAVE) || defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE))
+extern void BcmPwrMngtCheckWaitCount(void);
+#endif
+
 /*
  * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
  * of these routines with SMTC-specific variants.
@@ -77,6 +81,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	}
 
 out:
+#if defined(CONFIG_BCM_KF_POWER_SAVE) && (defined(CONFIG_BCM_HOSTMIPS_PWRSAVE) || defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE))
+		BcmPwrMngtCheckWaitCount();
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -161,7 +168,6 @@ int c0_compare_int_usable(void)
 }
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 int __cpuinit r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -209,8 +215,8 @@ int __cpuinit r4k_clockevent_init(void)
 	cp0_timer_irq_installed = 1;
 
 	setup_irq(irq, &c0_compare_irqaction);
-
 	return 0;
 }
 
 #endif /* Not CONFIG_MIPS_MT_SMTC */
+
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5099201fb7bc9933f6b939697a5d93fc02520ea9..a64fbe0db702d834e9140b3c346c85b1d8c1a015 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -53,6 +53,55 @@ static void r39xx_wait(void)
 
 extern void r4k_wait(void);
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+/* Bcm version minimizes the chance of an irq sneaking in between checking
+need_resched and wait instruction, or eliminates it completely (depending on 
+pipeline design). This avoids delayed processing of softirq. (The delayed 
+softirq problem can happen when preemption is disabled and softirq runs in 
+process context.) */
+
+extern void BcmPwrMngtReduceCpuSpeed(void);
+extern void BcmPwrMngtResumeFullSpeed(void);
+
+static void bcm_r4k_wait(void)
+{
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE) || defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+	BcmPwrMngtReduceCpuSpeed();
+#endif
+
+	/* Always try to treat the segment below as an atomic entity and try not 
+	to insert code or move code around */
+	/* Begin fixed safe code pattern for the particular MIPS pipeline */
+	raw_local_irq_disable();
+	if (!need_resched() &&  !(read_c0_cause() & read_c0_status())) {
+		/* Perform SYNC, enable interrupts, then WAIT */
+		__asm__ __volatile__ (
+			".set push\n"
+			".set noreorder\n"
+			".set noat\n"
+			"sync\n"
+			"mfc0	$1, $12\n"
+			"ori $1, $1, 0x1f\n"
+			"xori	$1, $1, 0x1e\n"
+			"mtc0	$1, $12\n"
+			"nop\n"  // Recommended by MIPS team
+			"wait\n"
+			"nop\n"  // Needed to ensure next instruction is safe
+			"nop\n"  // When speed is reduced to 1/8, need one more to get DG interrupt
+			"nop\n"  // Safety net...
+			".set pop\n");
+	}
+	else {
+#if defined(CONFIG_BCM_HOSTMIPS_PWRSAVE) || defined(CONFIG_BCM_DDR_SELF_REFRESH_PWRSAVE)
+		BcmPwrMngtResumeFullSpeed();
+#endif
+		raw_local_irq_enable();
+	}
+	/* End fixed code pattern */
+}
+#endif /* CONFIG_BCM_KF_MIPS_BCM963XX */
+
+
 /*
  * This variant is preferable as it allows testing need_resched and going to
  * sleep depending on the outcome atomically.  Unfortunately the "It is
@@ -183,7 +232,9 @@ void __init check_wait(void)
 	case CPU_25KF:
 	case CPU_PR4450:
 	case CPU_BMIPS3300:
+#if !defined(CONFIG_BCM_KF_MIPS_BCM963XX)
 	case CPU_BMIPS4350:
+#endif
 	case CPU_BMIPS4380:
 	case CPU_BMIPS5000:
 	case CPU_CAVIUM_OCTEON:
@@ -195,6 +246,12 @@ void __init check_wait(void)
 		cpu_wait = r4k_wait;
 		break;
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+	case CPU_BMIPS4350:
+		cpu_wait = bcm_r4k_wait;
+		break;
+#endif
+
 	case CPU_RM7000:
 		cpu_wait = rm7k_wait_irqoff;
 		break;
@@ -246,6 +303,33 @@ void __init check_wait(void)
 	}
 }
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_POWER_SAVE)
+/* for power management */
+static void set_cpu_r4k_wait(int enable)
+{
+	if(enable) {
+		cpu_wait = bcm_r4k_wait;
+		printk("wait instruction: enabled\n");
+    }
+	else {
+		cpu_wait = NULL;
+		printk("wait instruction: disabled\n");
+    }
+}
+
+static int get_cpu_r4k_wait(void)
+{
+	if(cpu_wait == bcm_r4k_wait)
+		return 1;
+	else
+		return 0;
+}
+
+#include <linux/module.h> // just for EXPORT_SYMBOL
+EXPORT_SYMBOL(set_cpu_r4k_wait);
+EXPORT_SYMBOL(get_cpu_r4k_wait);
+#endif 
+
 static inline void check_errata(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
diff --git a/arch/mips/kernel/csrc-r4k-bcm-pwr.c b/arch/mips/kernel/csrc-r4k-bcm-pwr.c
new file mode 100644
index 0000000000000000000000000000000000000000..24b2d3d12e60b147f7dc50b576180b92c2eada8f
--- /dev/null
+++ b/arch/mips/kernel/csrc-r4k-bcm-pwr.c
@@ -0,0 +1,60 @@
+/***********************************************************
+ *
+ * Copyright (c) 2009 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * <:label-BRCM:2009:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license 
+ * agreement governing use of this software, this software is licensed 
+ * to you under the terms of the GNU General Public License version 2 
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give 
+ *    you permission to link this software with independent modules, and 
+ *    to copy and distribute the resulting executable under terms of your 
+ *    choice, provided that you also meet, for each linked independent 
+ *    module, the terms and conditions of the license of that module. 
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications 
+ *    of the software.  
+ * 
+ * Not withstanding the above, under no circumstances may you combine 
+ * this software in any way with any other Broadcom software provided 
+ * under a license other than the GPL, without Broadcom's express prior 
+ * written consent. 
+ * 
+ * :>
+ *
+ ************************************************************/
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <bcm_map_part.h>
+
+static cycle_t timer2_hpt_read(struct clocksource *cs)
+{
+    return (TIMER->TimerCnt2);
+}
+
+static struct clocksource clocksource_mips = {
+	.name		= "MIPS",
+	.read		= timer2_hpt_read,
+	.mask		= CLOCKSOURCE_MASK(30),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_r4k_clocksource(void)
+{
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+
+	/* Calculate a somewhat reasonable rating value */
+	clocksource_mips.rating = 300;
+
+	clocksource_register_hz(&clocksource_mips, 50000000);
+
+	return 0;
+}
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 8882e5766f27ce5b7a8a57932b4b103b7a69eaa7..104a345c2b35d9531ddb68e003f1516ce4bb9b4b 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -52,6 +52,17 @@ NESTED(except_vec1_generic, 0, sp)
 NESTED(except_vec3_generic, 0, sp)
 	.set	push
 	.set	noat
+
+#if defined(CONFIG_BCM_KF_DSP_EXCEPT) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	/* if we are running on DSP TP branch to brcm_dsp_except */
+	nop
+	mfc0	k0, $22, 3
+	srl	k0, k0, 31
+	la	k1, main_tp_num
+	lw	k1, 0(k1)
+	bne	k0, k1, brcm_dsp_except
+#endif
+
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0	k0, CP0_INDEX
 #endif
@@ -62,6 +73,14 @@ NESTED(except_vec3_generic, 0, sp)
 #endif
 	PTR_L	k0, exception_handlers(k1)
 	jr	k0
+
+#if defined(CONFIG_BCM_KF_DSP_EXCEPT) && defined(CONFIG_BCM_BCMDSP_MODULE)
+brcm_dsp_except:
+	PTR_LA	ra, ret_from_exception 
+	PTR_LA	k0, 0xa0000200
+	jr	k0 
+#endif
+
 	.set	pop
 	END(except_vec3_generic)
 
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c504b212f8f3f968ede65e514cbcc96168c7c330..c541ac975854174f93a418fe307302dfd94427ec 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -70,7 +70,16 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
  * mips_io_port_base is the begin of the address space to which x86 style
  * I/O ports are mapped.
  */
+#if !defined(CONFIG_BCM_KF_MIPS_BCM9685XX) && defined(CONFIG_BCM_KF_MIPS_IOPORT_BASE)
+/* mips_io_port_base is normally set using set_io_port_base.  The
+   only reason we would need it here is to get around a race condition.
+   I don't know what the race condition is, but some investigation can be done
+   later to determine if we can remove it.  For now, leave it in so it
+   doesn't hinder the development. */
+const unsigned long mips_io_port_base = KSEG1;
+#else
 const unsigned long mips_io_port_base = -1;
+#endif
 EXPORT_SYMBOL(mips_io_port_base);
 
 static struct resource code_resource = { .name = "Kernel code", };
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ba9376bf52a1e80bdaaef76201b50d973c932d0c..f2a4998dd6fd359174e7c19115d5b3dd319d61c6 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -143,8 +143,64 @@ void __irq_entry smp_call_function_interrupt(void)
 	irq_exit();
 }
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+
+// yeah, I know, this won't work if numcpus>2, but it's good enough for now
+int other_cpu_stopped=0;
+EXPORT_SYMBOL(other_cpu_stopped);
+
+void stop_other_cpu(void)
+{
+	int count=0;
+	smp_send_stop();
+
+	// make sure the other CPU is really stopped
+	do
+	{
+		udelay(1000);
+		count++;
+		if (count % 4000 == 0)
+		{
+			printk(KERN_WARNING "still waiting for other cpu to stop, "
+			                    "jiffies=%lu\n", jiffies);
+		}
+	} while (!other_cpu_stopped);
+}
+EXPORT_SYMBOL(stop_other_cpu);
+
+#endif /* CONFIG_MIPS_BCM963XX */
+
+
 static void stop_this_cpu(void *dummy)
 {
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX)
+        printk(KERN_INFO "\nstopping CPU %d\n", smp_processor_id());
+    
+        /*
+         * Do not allow any more processing of any kind on this CPU.
+         * interrupts may trigger processing, so disable it.
+         * Hmm, this may cause us problems.  If there are any threads on this
+         * CPU which is holding a mutex or spinlock which does not block
+         * interrupts, and this mutex or spinlock is needed by the other
+         * processor (e.g. to write the firmware image), we will deadlock.
+         * PROBABLY should be very rare.....
+         */
+        local_irq_disable();
+    
+        /*
+         * Remove this CPU:
+         */
+        set_cpu_online(smp_processor_id(), false); 
+    
+        other_cpu_stopped=1;
+    
+        /*
+         * just spin, do not call cpu_wait because some implementations,
+         * namely, brcm_wait, will re-enable interrupts.
+         */
+        for (;;) {
+        }
+#else
 	/*
 	 * Remove this CPU:
 	 */
@@ -153,6 +209,7 @@ static void stop_this_cpu(void *dummy)
 		if (cpu_wait)
 			(*cpu_wait)();		/* Wait if available. */
 	}
+#endif /* CONFIG_BCM_KF_MIPS_BCM963XX */
 }
 
 void smp_send_stop(void)
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d896e20bdfd3ffc03bd7b03c99d6e7246d1..774a0ac930bf7084fa9e39f0317939c45a23dde2 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -18,6 +18,7 @@
 #include <asm/r4kcache.h>
 #include <asm/hazards.h>
 
+
 /*
  * These definitions are correct for the 24K/34K/74K SPRAM sample
  * implementation. The 4KS interpreted the tags differently...
@@ -217,3 +218,5 @@ void __cpuinit spram_config(void)
 				    &dspram_load_tag, &dspram_store_tag);
 	}
 }
+
+
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cfdaaa4cffc0a28fa60b662d6eccf992396ebe58..416fb17597c71f54c5d757d9eb3dc0353598e203 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -125,6 +125,34 @@ static int __init set_raw_show_trace(char *str)
 __setup("raw_show_trace", set_raw_show_trace);
 #endif
 
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+
+long * traps_fap0DbgVals = NULL;
+long * traps_fap1DbgVals = NULL;
+EXPORT_SYMBOL(traps_fap0DbgVals);
+EXPORT_SYMBOL(traps_fap1DbgVals);
+
+static void dumpFapInfo(void)
+{
+    int i;
+    printk("FAP0: ");
+    if (traps_fap0DbgVals != NULL)
+        for (i = 0; i < 10; i++)
+        {
+            printk("[%d]:%08lx ", i, traps_fap0DbgVals[i]);
+        }
+    printk("\n");
+    
+    printk("FAP1: ");
+    if (traps_fap1DbgVals != NULL)
+        for (i = 0; i < 10; i++)
+        {
+            printk("[%d]:%08lx ", i, traps_fap1DbgVals[i]);
+        }
+    printk("\n");
+}
+#endif
+
 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
@@ -135,12 +163,29 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 		show_raw_backtrace(sp);
 		return;
 	}
+
+#if defined(CONFIG_BCM_KF_SHOW_RAW_BACKTRACE)&&defined(CONFIG_KALLSYMS)
+	/*
+	 * Always print the raw backtrace, this will be helpful
+	 * if unwind_stack fails before giving a proper decoded backtrace
+	 */ 
+	show_raw_backtrace(sp);
+	printk("\n");
+#endif
+
 	printk("Call Trace:\n");
 	do {
 		print_ip_sym(pc);
 		pc = unwind_stack(task, &sp, pc, &ra);
 	} while (pc);
 	printk("\n");
+    
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+        printk("FAP Information:\n");
+        dumpFapInfo();
+        printk("\n");
+#endif
+    
 }
 
 /*
@@ -208,6 +253,11 @@ void dump_stack(void)
 {
 	struct pt_regs regs;
 
+#if (defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE))
+	extern void bounce_panic(void);
+	bounce_panic();
+#endif
+
 	prepare_frametrace(&regs);
 	show_backtrace(current, &regs);
 }
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index fd35daa45314a370b89f521e3ec401d39e5dfdc9..50c187ed345fb2900f921a27492823720dfd076e 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <asm/io.h>
 
+#if !defined(CONFIG_BCM_KF_KERN_WARNING) || defined(CONFIG_NO_GENERIC_PCI_IOPORT_MAP)
 void __iomem *__pci_ioport_map(struct pci_dev *dev,
 			       unsigned long port, unsigned int nr)
 {
@@ -39,6 +40,7 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
 
 	return (void __iomem *) (ctrl->io_map_base + port);
 }
+#endif
 
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bda8eb26ece74098ecaa49c2693ab40dd2bcb772..99e45da7b59ea606acca60e385b3e5c0eeaf3101 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -543,10 +543,14 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
 
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
+#if defined(CONFIG_BCM_KF_DCACHE_SHARED) && defined(CONFIG_BCM_DCACHE_SHARED)
+        local_r4k_flush_data_cache_page((void *) addr);
+#else
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
+#endif
 }
 
 struct flush_icache_range_args {
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 33aadbcf170bb4428020ddb87a17383180719400..e9ae564a537bbc5c070d09951fc18563a4db6a6a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -1,3 +1,10 @@
+#if (defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX))
+/* get_user_pages_fast() is not working properly on BMIPS4350; sometimes wrong
+ * data is seen when the pages returned by this function are used. The problem might
+ * be related to cache flushing. Disabling this architecture-related function, and
+ * the kernel will fallback to use of get_user_pages(), see mm/util.c
+ */
+#else
 /*
  * Lockless get_user_pages_fast for MIPS
  *
@@ -313,3 +320,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	}
 	return ret;
 }
+#endif
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1a85ba92eb5c274aaf7d96020bc34b6d7338ea27..908748f6b7fb1e771d296d6c983c00c58ee8d0ac 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -133,7 +133,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
 #ifdef CONFIG_MIPS_MT_SMTC
 	idx += FIX_N_COLOURS * smp_processor_id() +
-		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
+			(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
 #else
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
 #endif
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c3ac4b086eb203738d2c2bfdbd32b61171ded9ab..cffb464bd85b83809dd2903cb85f4514045e18c0 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -18,6 +18,15 @@ obj-$(CONFIG_PCI_TX4927)	+= ops-tx4927.o
 obj-$(CONFIG_BCM47XX)		+= pci-bcm47xx.o
 obj-$(CONFIG_BCM63XX)		+= pci-bcm63xx.o fixup-bcm63xx.o \
 					ops-bcm63xx.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_PCI_FIXUP)
+ifdef CONFIG_MIPS_BCM963XX
+obj-$(CONFIG_BCM_PCI)		+= pci-bcm963xx.o fixup-bcm963xx.o \
+					ops-bcm963xx.o
+ifneq ($(findstring _$(strip $(BRCM_CHIP))_,,_63381_6838_6848_),)
+EXTRA_CFLAGS += -I$(INC_BRCMSHARED_PUB_PATH)/pmc
+endif
+endif
+endif # BCM_KF
 obj-$(CONFIG_MIPS_ALCHEMY)	+= pci-alchemy.o
 obj-$(CONFIG_SOC_AR724X)	+= pci-ath724x.o
 
@@ -61,3 +70,9 @@ obj-$(CONFIG_CPU_XLR)		+= pci-xlr.o
 ifdef CONFIG_PCI_MSI
 obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= msi-octeon.o
 endif
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_PCI_FIXUP)
+ifeq "$(CONFIG_BCM_PCI)" "y"
+EXTRA_CFLAGS += -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+endif
+endif # BCM_KF
\ No newline at end of file
diff --git a/arch/mips/pci/fixup-bcm963xx.c b/arch/mips/pci/fixup-bcm963xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..2e115ba2767ee76e8e8ffd9aa913da1b80a8bdcd
--- /dev/null
+++ b/arch/mips/pci/fixup-bcm963xx.c
@@ -0,0 +1,120 @@
+/* 
+* <:copyright-BRCM:2011:GPL/GPL:standard
+* 
+*    Copyright (c) 2011 Broadcom Corporation
+*    All Rights Reserved
+* 
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, version 2, as published by
+* the Free Software Foundation (the "GPL").
+* 
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+* 
+* 
+* A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+* writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+* Boston, MA 02111-1307, USA.
+* 
+* :>
+*/
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <bcm_intr.h>
+#include <bcm_map_part.h>
+#include <bcmpci.h>
+
+#if defined(CONFIG_BCM96362) ||     defined(CONFIG_BCM963268) ||         defined(CONFIG_USB)
+static char irq_tab_bcm63xx[] __initdata = {
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+    [WLAN_ONCHIP_DEV_SLOT] = INTERRUPT_ID_WLAN,
+#endif
+#if defined(CONFIG_USB)
+    [USB_HOST_SLOT] = INTERRUPT_ID_USBH,
+    [USB20_HOST_SLOT] = INTERRUPT_ID_USBH20
+#endif
+};
+#endif
+
+static void bcm63xx_fixup_header(struct pci_dev *dev)
+{
+    uint32 memaddr;
+    uint32 size;
+
+    memaddr = pci_resource_start(dev, 0);
+    size = pci_resource_len(dev, 0);
+
+    if (dev->bus->number == BCM_BUS_PCI) {
+        switch (PCI_SLOT(dev->devfn)) {
+#if defined(CONFIG_USB)
+            case USB_HOST_SLOT:
+                dev->resource[0].flags |= IORESOURCE_PCI_FIXED; // prevent linux from reallocating resources
+                break;
+    
+            case USB20_HOST_SLOT:
+               dev->resource[0].flags |= IORESOURCE_PCI_FIXED; // prevent linux from reallocating resources
+               break;
+#endif
+#if defined(WLAN_CHIPC_BASE)
+            case WLAN_ONCHIP_DEV_SLOT:
+               dev->resource[0].flags |= IORESOURCE_PCI_FIXED; // prevent linux from reallocating resources
+               break;
+#endif   
+        }
+    }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup_header);
+
+
+static void bcm63xx_fixup_final(struct pci_dev *dev)
+{
+    uint32 memaddr;
+    uint32 size;
+
+    memaddr = pci_resource_start(dev, 0);
+    size = pci_resource_len(dev, 0);
+
+    if (dev->bus->number == BCM_BUS_PCI) {
+        switch (PCI_SLOT(dev->devfn)) {
+         }
+    } 
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup_final);
+
+
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+    return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+
+#if defined(PCIEH)
+    if (pci_is_pcie((struct pci_dev*)dev)) {
+
+#if defined(PCIEH_1)
+        if ((dev->bus->number >= BCM_BUS_PCIE1_ROOT)&& (dev->bus->number <= BCM_BUS_PCIE1_DEVICE))
+            return INTERRUPT_ID_PCIE1_RC;
+        
+        if ((dev->bus->number >= BCM_BUS_PCIE_ROOT) && (dev->bus->number <= BCM_BUS_PCIE_DEVICE))
+            return INTERRUPT_ID_PCIE_RC;
+#endif
+        /* single RC */
+        return INTERRUPT_ID_PCIE_RC;
+    }
+#endif /* PCIEH */
+    
+#if defined(PCI_CFG) ||  defined(WLAN_CHIPC_BASE) ||   defined(CONFIG_USB)
+    return irq_tab_bcm63xx[slot];
+#else
+    return 0;    
+#endif
+
+}
+
diff --git a/arch/mips/pci/ops-bcm963xx.c b/arch/mips/pci/ops-bcm963xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..9efffb0e5114d3039177a2cf87d5f36886828810
--- /dev/null
+++ b/arch/mips/pci/ops-bcm963xx.c
@@ -0,0 +1,725 @@
+/* 
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/addrspace.h>
+
+#include <bcm_intr.h>
+#include <bcm_map_part.h>
+#include <bcmpci.h>
+#include <boardparms.h>
+#include <board.h>
+
+#include <linux/delay.h>
+
+
+#if 0
+#define DPRINT(x...)        printk(x)
+#else
+#undef DPRINT
+#define DPRINT(x...)
+#endif
+
+#if defined(CONFIG_USB)
+static int pci63xx_int_usb_read(unsigned int devfn, int where, u32 * value, int size);
+static int pci63xx_int_usb_write(unsigned int devfn, int where, u32 * value, int size);
+
+static bool usb_mem_size_rd = FALSE;
+static uint32 usb_mem_base = 0;
+static uint32 usb_cfg_space_cmd_reg = 0;
+
+static int pci63xx_int_usb20_read(unsigned int devfn, int where, u32 * value, int size);
+static int pci63xx_int_usb20_write(unsigned int devfn, int where, u32 * value, int size);
+
+static bool usb20_mem_size_rd = FALSE;
+static uint32 usb20_mem_base = 0;
+static uint32 usb20_cfg_space_cmd_reg = 0;
+#endif /* CONFIG_USB */
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+static int pci63xx_wlan_pci_read(unsigned int devfn, int where, u32 * value, int size);
+static int pci63xx_wlan_pci_write(unsigned int devfn, int where, u32 val, int size);
+uint32 pci63xx_wlan_soft_config_space[WLAN_ONCHIP_DEV_NUM][WLAN_ONCHIP_PCI_HDR_DW_LEN] = {
+	{WLAN_ONCHIP_PCI_ID, 0x00000006, 0x02800000, 0x00000010,
+	 WLAN_CHIPC_BASE,    0x00000000, 0x00000000, 0x00000000,
+	 0x00000000,  	     0x00000000, 0x00000000, 0x051314e4,
+	 0x00000000,  	     0x00000040, 0x00000000, 0x0000010f,
+	 0xce035801,         0x00004008, 0x0080d005, 0x00000000,
+	 0x00000000,         0x00000000, 0x00784809, 0x00000010,
+	 0x00000000,         0x00000000, 0x00000000, 0x00000000,
+	 0x00000000,         0x00000000, 0x00000000, 0x00000000,
+	 0x18001000,         0x00000000, 0xffffffff, 0x00000003,
+	 0x00000000,         0x00000100, 0x00000000, 0x00000000,
+	 0x00000000,         0x00000000, 0x00010000, 0x18101000,
+	 0x00000000,         0x00000000, 0x00000000, 0x00000000,
+	 0x00000000,         0x00000000, 0x00000000, 0x00000000,
+	 0x00010010,         0x00288fa0, 0x00190100, 0x00176c11,
+	 0x30110040,         0x00000000, 0x00000000, 0x00000000,
+	 0x00000000,         0x00000000, 0x00000000, 0x00000000,
+	 },
+};                                      
+#endif
+
+#if defined(CONFIG_USB)
+/* --------------------------------------------------------------------------
+    Name: pci63xx_int_usb_write
+Abstract: PCI Config write on internal device(s)
+ -------------------------------------------------------------------------- */
+static int pci63xx_int_usb_write(unsigned int devfn, int where, u32 * value, int size)
+{
+    switch (size) {
+        case 1:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %02X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 2:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %04X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            switch (where) {
+                case PCI_COMMAND:
+                    usb_cfg_space_cmd_reg = *value;
+                    break;
+                default:
+                    break;
+            }
+            break;
+        case 4:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %08X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            switch (where) {
+                case PCI_BASE_ADDRESS_0:
+                    if (*value == 0xffffffff) {
+                        usb_mem_size_rd = TRUE;
+                    } else {
+                        usb_mem_base = *value;
+                    }
+                    break;
+                default:
+                    break;
+            }
+            break;
+        default:
+            break;
+    }
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+/* --------------------------------------------------------------------------
+    Name: pci63xx_int_usb_read
+Abstract: PCI Config read on internal device(s)
+ -------------------------------------------------------------------------- */
+static int pci63xx_int_usb_read(unsigned int devfn, int where, u32 * value, int size)
+{
+    uint32 retValue = 0xFFFFFFFF;
+
+    // For now, this is specific to the USB Host controller. We can
+    // make it more general if we have to...
+    // Emulate PCI Config accesses
+    switch (where) {
+        case PCI_VENDOR_ID:
+        case PCI_DEVICE_ID:
+            retValue = PCI_VENDOR_ID_BROADCOM | 0x63000000;
+            break;
+        case PCI_COMMAND:
+        case PCI_STATUS:
+            retValue = (0x0006 << 16) | usb_cfg_space_cmd_reg;
+            break;
+        case PCI_CLASS_REVISION:
+        case PCI_CLASS_DEVICE:
+            retValue = (PCI_CLASS_SERIAL_USB << 16) | (0x10 << 8) | 0x01;
+            break;
+        case PCI_BASE_ADDRESS_0:
+            if (usb_mem_size_rd) {
+                retValue = USB_BAR0_MEM_SIZE;
+            } else {
+                if (usb_mem_base != 0)
+                    retValue = usb_mem_base;
+                else
+                    retValue = USB_OHCI_BASE;
+            }
+            usb_mem_size_rd = FALSE;
+            break;
+        case PCI_CACHE_LINE_SIZE:
+            retValue = L1_CACHE_BYTES/4;
+            break;
+        case PCI_LATENCY_TIMER:
+            retValue = 0;
+            break;
+        case PCI_HEADER_TYPE:
+            retValue = PCI_HEADER_TYPE_NORMAL;
+            break;
+        case PCI_SUBSYSTEM_VENDOR_ID:
+            retValue = PCI_VENDOR_ID_BROADCOM;
+            break;
+        case PCI_SUBSYSTEM_ID:
+            retValue = 0x6300;
+            break;
+        case PCI_INTERRUPT_LINE:
+            retValue = INTERRUPT_ID_USBH;
+            break;
+        default:
+            break;
+    }
+
+    switch (size) {
+        case 1:
+            *value = (retValue >> ((where & 3) << 3)) & 0xff;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %02X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 2:
+            *value = (retValue >> ((where & 3) << 3)) & 0xffff;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %04X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 4:
+            *value = retValue;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %08X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        default:
+            break;
+    }
+    return PCIBIOS_SUCCESSFUL;
+}
+
+/* --------------------------------------------------------------------------
+    Name: pci63xx_int_usb20_write
+Abstract: PCI Config write on internal device(s)
+ -------------------------------------------------------------------------- */
+static int pci63xx_int_usb20_write(unsigned int devfn, int where, u32 * value, int size)
+{
+    switch (size) {
+        case 1:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %02X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 2:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %04X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            switch (where) {
+                case PCI_COMMAND:
+                    usb20_cfg_space_cmd_reg = *value;
+                    break;
+                default:
+                    break;
+            }
+            break;
+        case 4:
+            DPRINT("W => Slot: %d Where: %2X Len: %d Data: %08X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            switch (where) {
+                case PCI_BASE_ADDRESS_0:
+                    if (*value == 0xffffffff) {
+                        usb20_mem_size_rd = TRUE;
+                    } else {
+                        usb20_mem_base = *value;
+                    }
+                    break;
+                default:
+                    break;
+            }
+            break;
+        default:
+            break;
+    }
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+/* --------------------------------------------------------------------------
+    Name: pci63xx_int_usb20_read
+Abstract: PCI Config read on internal device(s)
+ -------------------------------------------------------------------------- */
+static int pci63xx_int_usb20_read(unsigned int devfn, int where, u32 * value, int size)
+{
+    uint32 retValue = 0xFFFFFFFF;
+
+    // For now, this is specific to the USB Host controller. We can
+    // make it more general if we have to...
+    // Emulate PCI Config accesses
+    switch (where) {
+        case PCI_VENDOR_ID:
+        case PCI_DEVICE_ID:
+            retValue = PCI_VENDOR_ID_BROADCOM | 0x63000000;
+            break;
+        case PCI_COMMAND:
+        case PCI_STATUS:
+            retValue = (0x0006 << 16) | usb20_cfg_space_cmd_reg;
+            break;
+        case PCI_CLASS_REVISION:
+        case PCI_CLASS_DEVICE:
+            retValue = (PCI_CLASS_SERIAL_USB << 16) | (0x20 << 8) | 0x01;
+            break;
+        case PCI_BASE_ADDRESS_0:
+            if (usb20_mem_size_rd) {
+                retValue = USB_BAR0_MEM_SIZE;
+            } else {
+                if (usb20_mem_base != 0)
+                    retValue = usb20_mem_base;
+                else
+                    retValue = USB_EHCI_BASE;
+            }
+            usb20_mem_size_rd = FALSE;
+            break;
+        case PCI_CACHE_LINE_SIZE:
+            retValue = L1_CACHE_BYTES/4;
+            break;
+        case PCI_LATENCY_TIMER:
+            retValue = 0;
+            break;
+        case PCI_HEADER_TYPE:
+            retValue = PCI_HEADER_TYPE_NORMAL;
+            break;
+        case PCI_SUBSYSTEM_VENDOR_ID:
+            retValue = PCI_VENDOR_ID_BROADCOM;
+            break;
+        case PCI_SUBSYSTEM_ID:
+            retValue = 0x6300;
+            break;
+        case PCI_INTERRUPT_LINE:
+            retValue = INTERRUPT_ID_USBH20;
+            break;
+        default:
+            break;
+    }
+
+    switch (size) {
+        case 1:
+            *value = (retValue >> ((where & 3) << 3)) & 0xff;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %02X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 2:
+            *value = (retValue >> ((where & 3) << 3)) & 0xffff;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %04X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        case 4:
+            *value = retValue;
+            DPRINT("R <= Slot: %d Where: %2X Len: %d Data: %08X\n",
+                PCI_SLOT(devfn), where, size, *value);
+            break;
+        default:
+            break;
+    }
+
+    return PCIBIOS_SUCCESSFUL;
+}
+#endif
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+/* --------------------------------------------------------------------------
+    Name: pci63xx_wlan_pci_write
+    Abstract: PCI Config write on internal device(s)
+    
+    extra read/write to take care / pass through 
+    BASE ADDR 0, where=16 , R/W
+    INTERRUPT PIN, where=60, R/W
+    COMMAND, where=4, R/W
+    LATENCY TIMER, where=13
+ -------------------------------------------------------------------------- */
+static int pci63xx_wlan_pci_write(unsigned int devfn, int where, u32 val, int size)
+{
+    uint32 data;
+        
+    if(where >= 256) {
+    	return PCIBIOS_BAD_REGISTER_NUMBER;    
+    }
+    data = pci63xx_wlan_soft_config_space[PCI_SLOT(devfn)-WLAN_ONCHIP_DEV_SLOT][where/4];
+
+    switch(size) {
+        case 1:
+            data = (data & ~(0xff << ((where & 3) << 3))) |
+                (val << ((where & 3) << 3));
+            break;
+        case 2:
+            data = (data & ~(0xffff << ((where & 3) << 3))) |
+                (val << ((where & 3) << 3));
+            break;
+        case 4:
+            data = val;
+            break;
+        default:
+            break;
+    }
+    pci63xx_wlan_soft_config_space[PCI_SLOT(devfn)-WLAN_ONCHIP_DEV_SLOT][where/4] = data;
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+/* --------------------------------------------------------------------------
+    Name: pci63xx_wlan_pci_read
+Abstract: PCI Config read on internal device(s)
+ -------------------------------------------------------------------------- */
+static int pci63xx_wlan_pci_read(unsigned int devfn, int where, u32 * val, int size)
+{
+    uint32 data;    
+    
+    if(where >= 256) {
+    	data = 0xffffffff;
+    }	
+    else	
+    	data = pci63xx_wlan_soft_config_space[PCI_SLOT(devfn)-WLAN_ONCHIP_DEV_SLOT][where/4];
+
+    switch(size) {
+        case 1:
+            *val = (data >> ((where & 3) << 3)) & 0xff;
+            break;
+        case 2:
+            *val = (data >> ((where & 3) << 3)) & 0xffff;
+            break;
+        case 4:
+            *val = data;
+             /* Special case for reading PCI device range */
+            if ((where >= PCI_BASE_ADDRESS_0) && (where <= PCI_BASE_ADDRESS_5)) {
+                if (data == 0xffffffff) {
+                	if (where == PCI_BASE_ADDRESS_0)
+                        *val = 0xffffe000;/* PCI_SIZE_8K */
+                	else 
+                        *val = 0xffffffff;
+                }
+            }
+            if(where == PCI_ROM_ADDRESS)
+                *val = 0xffffffff;
+            break;
+        default:
+            break;
+    }
+    return PCIBIOS_SUCCESSFUL;
+}
+#endif
+
+#if defined(CONFIG_USB)
+static int isUsbHostPresent(void)
+{
+	if(kerSysGetUsbHostPortEnable(0) || kerSysGetUsbHostPortEnable(1))
+		return 1;
+	else
+		return 0;
+}
+#endif
+
+int bcm63xx_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+    int where, int size, u32 * val)
+{
+    *val = 0xffffffff;
+
+#if defined(CONFIG_USB)
+    if(isUsbHostPresent())
+    {
+        if (PCI_SLOT(devfn) == USB_HOST_SLOT)
+            return pci63xx_int_usb_read(devfn, where, val, size);
+        if (PCI_SLOT(devfn) == USB20_HOST_SLOT)
+            return pci63xx_int_usb20_read(devfn, where, val, size);
+    }
+#endif
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+    if (PCI_SLOT(devfn) == WLAN_ONCHIP_DEV_SLOT)
+       	return pci63xx_wlan_pci_read(devfn, where, val, size);
+#endif
+
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+int bcm63xx_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+    int where, int size, u32 val)
+{
+
+#if defined(CONFIG_USB)
+    if(isUsbHostPresent())
+    {
+        if (PCI_SLOT(devfn) == USB_HOST_SLOT)
+            return pci63xx_int_usb_write(devfn, where, &val, size);
+        if (PCI_SLOT(devfn) == USB20_HOST_SLOT)
+            return pci63xx_int_usb20_write(devfn, where, &val, size);
+    }
+#endif
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+    if (PCI_SLOT(devfn) == WLAN_ONCHIP_DEV_SLOT)
+       	return pci63xx_wlan_pci_write(devfn, where, val, size);
+#endif
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops bcm63xx_pci_ops = {
+    .read   = bcm63xx_pcibios_read,
+    .write  = bcm63xx_pcibios_write
+};
+
+
+#if defined(PCIEH)
+/* supported external PCIE devices */
+enum pcie_device_supported {UNSPECIFIED, SWITCH_8232104c};
+#define PCIID_SWITCH_8232104c  0x8232104c
+/* supported devices */
+enum pcie_device_supported first_device_detected = UNSPECIFIED;
+
+/* check supported devices, setup config properly before calling */
+static u32 bcm63xx_pcie_detect_first_device(void)
+{	
+    u32 devid;
+	
+    devid = *(u32*)(((u8*)PCIEH)+PCIEH_DEV_OFFSET);
+
+    switch(devid) {
+        case PCIID_SWITCH_8232104c:
+            first_device_detected = SWITCH_8232104c;
+            break;
+        default:
+            first_device_detected = UNSPECIFIED;
+    }
+
+    return devid;
+}
+
+static inline u32 CFGOFFSET(u32 bus, u32 devfn, u32 where)
+{
+    if(bus == BCM_BUS_PCIE_ROOT ) {
+        /* access offset 0 */
+        return where;
+#if defined(PCIEH_1)
+     } else if(bus == BCM_BUS_PCIE1_ROOT){
+        /* access offset 0 */
+        return where;
+#endif
+    } else {
+        /* access offset */
+        return PCIEH_DEV_OFFSET|where;
+    }
+}
+
+#define CFGADDR(bus,devfn,where)   CFGOFFSET((bus)->number,(devfn),where)
+
+static inline u32 READCFG32(u32 bus, u32 addr)
+{
+#if defined(PCIEH_1)
+    if(bus >= BCM_BUS_PCIE1_ROOT) {
+        DPRINT("pcie_reading addr(0x%x) = 0x%x\n", (unsigned int)(((u8*)PCIEH_1) + (addr&~3)),*(u32*)(((u8*)PCIEH_1) + (addr&~3)));
+        return *(u32 *)(((u8*)PCIEH_1) + (addr&~3));
+    } else 
+#endif
+    {
+        DPRINT("pcie_reading addr(0x%x) = 0x%x\n", (unsigned int)(((u8*)PCIEH) + (addr&~3)),*(u32*)(((u8*)PCIEH) + (addr&~3)));
+        return *(u32 *)(((u8*)PCIEH) + (addr&~3));
+    }
+}
+
+static inline void WRITECFG32(u32 bus, u32 addr, u32 data)
+{
+#if defined(PCIEH_1)
+    if(bus >= BCM_BUS_PCIE1_ROOT) {
+        *(u32 *)(((u8*)PCIEH_1) + (addr & ~3)) = data;
+    } else 
+#endif
+    {
+    DPRINT("pcie_writing addr(0x%x) = 0x%x\n", (unsigned int)(((u8*)PCIEH) + (addr & ~3)), data);
+    *(u32 *)(((u8*)PCIEH) + (addr & ~3)) = data;
+    }
+}
+
+static void pci63xx_pcie_config_select_by_numbers(u32 bus_no, u32 dev_no, u32 func_no)
+{
+    /* select the target device's bus/device/function numbers */
+#if defined(UBUS2_PCIE)
+#if defined(PCIEH_1)
+   if(bus_no >= BCM_BUS_PCIE1_ROOT) {
+        //PCIEH_1_MISC_REGS->misc_ctrl |= PCIE_MISC_CTRL_CFG_READ_UR_MODE;
+        PCIEH_1_PCIE_EXT_CFG_REGS->index = ((bus_no<<PCIE_EXT_CFG_BUS_NUM_SHIFT)|(dev_no <<PCIE_EXT_CFG_DEV_NUM_SHIFT)|(func_no<<PCIE_EXT_CFG_FUNC_NUM_SHIFT));
+        DPRINT("PCIEH_PCIE_EXT_CFG_REGS->index(%d/%d/%d) = 0x%x\n", bus_no, dev_no, func_no, (unsigned int) PCIEH_1_PCIE_EXT_CFG_REGS->index );   	
+   } else
+#endif
+    {
+    /* disable data bus error for enumeration */
+        //PCIEH_MISC_REGS->misc_ctrl |= PCIE_MISC_CTRL_CFG_READ_UR_MODE;
+        PCIEH_PCIE_EXT_CFG_REGS->index = ((bus_no<<PCIE_EXT_CFG_BUS_NUM_SHIFT)|(dev_no <<PCIE_EXT_CFG_DEV_NUM_SHIFT)|(func_no<<PCIE_EXT_CFG_FUNC_NUM_SHIFT));
+        DPRINT("PCIEH_PCIE_EXT_CFG_REGS->index(%d/%d/%d) = 0x%x\n", bus_no, dev_no, func_no, (unsigned int) PCIEH_PCIE_EXT_CFG_REGS->index );
+    }
+#else    
+    PCIEH_BRIDGE_REGS->bridgeOptReg2 &= ~(PCIE_BRIDGE_OPT_REG2_cfg_type1_bus_no_MASK |
+    		PCIE_BRIDGE_OPT_REG2_cfg_type1_dev_no_MASK|PCIE_BRIDGE_OPT_REG2_cfg_type1_func_no_MASK);
+     
+    PCIEH_BRIDGE_REGS->bridgeOptReg2 |= ((bus_no<<PCIE_BRIDGE_OPT_REG2_cfg_type1_bus_no_SHIFT) |
+    		(dev_no<<PCIE_BRIDGE_OPT_REG2_cfg_type1_dev_no_SHIFT) |
+    		(func_no<<PCIE_BRIDGE_OPT_REG2_cfg_type1_func_no_SHIFT) |
+        PCIE_BRIDGE_OPT_REG2_cfg_type1_bd_sel_MASK );
+    DPRINT("PCIEH_BRIDGE_REGS->bridgeOptReg2 = 0x%x\n", (unsigned int) PCIEH_BRIDGE_REGS->bridgeOptReg2 );
+#endif
+
+}
+#if !defined(PCIEH_1)
+/* this is the topology of deviceid/vendorid 0x8232104c
+  (US, bus N)
+    |
+    |--- (DS #0, bus N+1) --- Dev #0 (bus N+2)
+    |--- (DS #1, bus N+1) --- Dev #1 (bus N+3)
+    |--- (DS #2, bus N+1) --- Dev #2 (bus N+4)
+*/
+static int pci63xx_pcie_can_access_switch8232104c_by_numbers(u32 bus_no, u32 dev_no, u32 func_no)
+{
+
+  /* disable data bus error for enumeration */
+#if defined(UBUS2_PCIE)
+    PCIEH_MISC_REGS->misc_ctrl |= PCIE_MISC_CTRL_CFG_READ_UR_MODE;
+#else
+    PCIEH_BRIDGE_REGS->bridgeOptReg2 |= (PCIE_BRIDGE_OPT_REG2_dis_pcie_abort_MASK );
+#endif
+																					
+    if ((bus_no == BCM_BUS_PCIE_DEVICE + 1) && (dev_no <= 2)) {
+        return TRUE;
+    } else if ((bus_no >= BCM_BUS_PCIE_DEVICE + 2 ) && (bus_no <= BCM_BUS_PCIE_DEVICE + 4)) {
+    /*support single function device*/
+    return (dev_no == 0); 
+    }
+    return FALSE;	
+}
+#endif
+
+static int pci63xx_pcie_can_access_by_numbers(u32 bus_no, u32 dev_no, u32 func_no)
+{
+		
+    /* select device */
+    pci63xx_pcie_config_select_by_numbers(bus_no, dev_no, func_no);
+
+#if defined(PCIEH_1)
+    if (bus_no == BCM_BUS_PCIE1_ROOT) {
+        /* bridge */
+        return (dev_no == 0); /*otherwise will loop for the rest of the device*/
+    } else if (bus_no == BCM_BUS_PCIE1_DEVICE) {
+        /* upstream port or end device */
+        /* check link up*/
+        if(!(PCIEH_1_BLK_1000_REGS->dlStatus&PCIE_IP_BLK1000_DL_STATUS_PHYLINKUP_MASK)) {
+            return 0;
+        }
+       	return (dev_no == 0); /*otherwise will loop for the rest of the device*/
+     } else
+#endif
+
+    if (bus_no == BCM_BUS_PCIE_ROOT ) {
+        /* bridge */
+        return (dev_no == 0); /*otherwise will loop for the rest of the device*/
+    } else if (bus_no == BCM_BUS_PCIE_DEVICE) {
+        /* upstream port or end device */
+        /* check link up*/
+        if(!(PCIEH_BLK_1000_REGS->dlStatus&PCIE_IP_BLK1000_DL_STATUS_PHYLINKUP_MASK)) {
+            return 0;
+        }
+        bcm63xx_pcie_detect_first_device();
+
+       	return (dev_no == 0); /*otherwise will loop for the rest of the device*/
+#if !defined(PCIEH_1)
+		/* no support on external bridge */
+    } else { 	
+        if(first_device_detected == SWITCH_8232104c)
+            return pci63xx_pcie_can_access_switch8232104c_by_numbers(bus_no, dev_no, func_no);
+#endif
+    }
+    return 0;
+	
+}
+
+static int pci63xx_pcie_can_access(struct pci_bus *bus, int devfn)
+{
+    return pci63xx_pcie_can_access_by_numbers(bus->number,PCI_SLOT(devfn), PCI_FUNC(devfn));     
+}
+
+static int bcm63xx_pciebios_read(struct pci_bus *bus, unsigned int devfn,
+    int where, int size, u32 * val)
+{
+    u32 data = 0;
+    DPRINT("pcie_read, bus=%d, devfn=%d, where=%d, size=%d, val=0x%x\n", bus->number, devfn, where, size, *val);
+    if ((size == 2) && (where & 1)) {
+        return PCIBIOS_BAD_REGISTER_NUMBER;
+    }
+    else if ((size == 4) && (where & 3)) {
+        return PCIBIOS_BAD_REGISTER_NUMBER;
+    }
+
+    if (pci63xx_pcie_can_access(bus, devfn)) {
+        data = READCFG32((bus)->number,CFGADDR(bus, devfn, where));
+        if (data == 0xdeaddead) {
+            data = 0xffffffff;
+            return PCIBIOS_DEVICE_NOT_FOUND;
+        }
+    }            
+    else {
+        data = 0xffffffff;
+        return PCIBIOS_DEVICE_NOT_FOUND;
+    }
+       
+    if (size == 1)
+        *val = (data >> ((where & 3) << 3)) & 0xff;
+    else if (size == 2)
+        *val = (data >> ((where & 3) << 3)) & 0xffff;
+    else
+        *val = data;
+
+    return PCIBIOS_SUCCESSFUL;
+
+}
+
+static int bcm63xx_pciebios_write(struct pci_bus *bus, unsigned int devfn,
+    int where, int size, u32 val)
+{
+    u32 cfgaddr = CFGADDR(bus, devfn, where);
+    u32 data = 0;
+
+    DPRINT("pcie_write, bus=%d, devfn=%d, where=%d, size=%d, val=0x%x\n", bus->number, devfn, where, size, val);
+
+    if ((size == 2) && (where & 1))
+        return PCIBIOS_BAD_REGISTER_NUMBER;
+    else if ((size == 4) && (where & 3))
+        return PCIBIOS_BAD_REGISTER_NUMBER;
+
+    if (!pci63xx_pcie_can_access(bus, devfn))
+        return PCIBIOS_DEVICE_NOT_FOUND;
+
+    data = READCFG32((bus)->number,cfgaddr);
+
+    if (size == 1)
+        data = (data & ~(0xff << ((where & 3) << 3))) |
+            (val << ((where & 3) << 3));
+    else if (size == 2)
+        data = (data & ~(0xffff << ((where & 3) << 3))) |
+            (val << ((where & 3) << 3));
+    else
+        data = val;
+
+    WRITECFG32((bus)->number, cfgaddr, data);
+
+    return PCIBIOS_SUCCESSFUL;
+
+}
+
+struct pci_ops bcm63xx_pcie_ops = {
+    .read   = bcm63xx_pciebios_read,
+    .write  = bcm63xx_pciebios_write
+};
+#endif
diff --git a/arch/mips/pci/pci-bcm963xx.c b/arch/mips/pci/pci-bcm963xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..466768354d677aa1e3f02a8668d037bb1a9b433c
--- /dev/null
+++ b/arch/mips/pci/pci-bcm963xx.c
@@ -0,0 +1,725 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <bcm_map_part.h>
+#include <bcmpci.h>
+#include <boardparms.h>
+#include <board.h>
+#if defined(CONFIG_BCM963381) || defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848)
+#include <pmc_pcie.h>
+#include <pmc_drv.h>
+#endif
+#include <shared_utils.h>  
+
+extern struct pci_ops bcm63xx_pci_ops;
+/* Legacy PCI (bus 0): I/O port aperture */
+static struct resource bcm_pci_io_resource = {
+    .name   = "bcm63xx pci IO space",
+    .start  = BCM_PCI_IO_BASE,
+    .end    = BCM_PCI_IO_BASE + BCM_PCI_IO_SIZE - 1,
+    .flags  = IORESOURCE_IO
+};
+
+/* Legacy PCI (bus 0): memory aperture */
+static struct resource bcm_pci_mem_resource = {
+    .name   = "bcm63xx pci memory space",
+    .start  = BCM_PCI_MEM_BASE,
+    .end    = BCM_PCI_MEM_BASE + BCM_PCI_MEM_SIZE - 1,
+    .flags  = IORESOURCE_MEM
+};
+
+/* Host controller for the legacy PCI bus */
+struct pci_controller bcm63xx_controller = {
+    .pci_ops	= &bcm63xx_pci_ops,
+    .io_resource	= &bcm_pci_io_resource,
+    .mem_resource	= &bcm_pci_mem_resource,
+};
+
+#if defined(PCIEH)
+extern struct pci_ops bcm63xx_pcie_ops;
+/* PCIe has no I/O space on this platform: register an empty (0-0,
+ * flags=0) I/O resource so the core has something to attach */
+static struct resource bcm_pcie_io_resource = {
+    .name   = "bcm63xx pcie null io space",
+    .start  = 0,
+    .end    = 0,
+    .flags  = 0
+};
+
+/* PCIe port 0: outgoing memory window (MEM1) */
+static struct resource bcm_pcie_mem_resource = {
+    .name   = "bcm63xx pcie memory space",
+    .start  = BCM_PCIE_MEM1_BASE,
+    .end    = BCM_PCIE_MEM1_BASE + BCM_PCIE_MEM1_SIZE - 1,
+    .flags  = IORESOURCE_MEM
+};
+
+/* Host controller for PCIe port 0 */
+struct pci_controller bcm63xx_pcie_controller = {
+    .pci_ops	= &bcm63xx_pcie_ops,
+    .io_resource	= &bcm_pcie_io_resource,
+    .mem_resource	= &bcm_pcie_mem_resource,
+};
+
+#if defined(PCIEH_1)
+/* PCIe port 1: outgoing memory window (MEM2) */
+static struct resource bcm_pcie1_mem_resource = {
+    .name   = "bcm63xx pcie memory space",
+    .start  = BCM_PCIE_MEM2_BASE,
+    .end    = BCM_PCIE_MEM2_BASE + BCM_PCIE_MEM2_SIZE - 1,
+    .flags  = IORESOURCE_MEM
+};
+
+/* Host controller for PCIe port 1 (shares the null I/O resource) */
+struct pci_controller bcm63xx_pcie1_controller = {
+    .pci_ops	= &bcm63xx_pcie_ops,
+    .io_resource	= &bcm_pcie_io_resource,
+    .mem_resource	= &bcm_pcie1_mem_resource,
+};
+#endif
+#endif
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+extern uint32 pci63xx_wlan_soft_config_space[WLAN_ONCHIP_DEV_NUM][WLAN_ONCHIP_PCI_HDR_DW_LEN];
+/* Patch the soft PCI config-space header of every on-chip wireless
+ * device with the board-specific values.  Always returns 0. */
+static int __init bcm63xx_pci_swhdr_patch(void)
+{
+    int dev;
+
+    for (dev = 0; dev < WLAN_ONCHIP_DEV_NUM; dev++)
+        BpUpdateWirelessPciConfig(pci63xx_wlan_soft_config_space[dev][0],
+                                  pci63xx_wlan_soft_config_space[dev],
+                                  WLAN_ONCHIP_PCI_HDR_DW_LEN);
+
+    return 0;
+}
+#endif
+
+#if defined(PCIEH)
+#if defined(CONFIG_BCM963268) || defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333) || defined(CONFIG_BCM96838) || defined(PCIE3_CORE)
+/* 
+  Function pcie_mdio_read (phyad, regad)
+
+   Parameters:
+     phyad ... MDIO PHY address (typically 0!)
+     regad ... Register address in range 0-0x1f
+
+   Description:
+     Perform PCIE MDIO read on specified PHY (typically 0), and Register.
+     Access is through an indirect command/status mechanism, and timeout
+     is possible. If command is not immediately complete, which would
+     be typically the case, one more attempt is made after a 1ms delay.
+
+   Return: 16-bit data item or 0xdead on MDIO timeout
+*/
+static uint16 bcm63xx_pcie_mdio_read (int port, uint16 phyad, uint16 regad) 
+{
+    int timeout;
+    uint32 data;
+    uint16 retval;
+    volatile PcieBlk1000Regs *RcDLReg;
+#if defined(PCIEH_1)
+    RcDLReg = port ? PCIEH_1_BLK_1000_REGS : PCIEH_BLK_1000_REGS;
+#else 
+    RcDLReg = PCIEH_BLK_1000_REGS; 
+#endif
+    /* Bit-20=1 to initiate READ, bits 19:16 is the phyad, bits 4:0 is the regad */
+    data = 0x100000;
+    data = data |((phyad & 0xf)<<16);
+    data = data |(regad & 0x1F);
+
+    RcDLReg->mdioAddr = data;
+    /* critical delay */
+    udelay(1000);
+
+    /* Poll for completion, decrementing the counter exactly once per
+     * failed attempt.  The original code decremented 'timeout' both in
+     * the loop condition and in the body: that halved the retry count,
+     * left 'timeout' at -1 (never 0) on expiry so a timeout was
+     * reported as valid data, and left it at 0 when the final attempt
+     * succeeded so a good read was reported as 0xdead. */
+    timeout = 2;
+    while (timeout > 0) {
+        data = RcDLReg->mdioRdData;
+        /* Bit-31=1 is DONE */
+        if (data & 0x80000000)
+            break;
+        timeout--;
+        udelay(1000);
+    }
+
+    if (timeout == 0) {
+        retval = 0xdead;      /* MDIO timeout sentinel, per contract above */
+    } else
+        /* Bits 15:0 is read data*/
+        retval = (data&0xffff);
+
+    return retval;
+}
+
+/* 
+ Function pcie_mdio_write (phyad, regad, wrdata)
+
+   Parameters:
+     phyad ... MDIO PHY address (typically 0!)
+     regad  ... Register address in range 0-0x1f
+     wrdata ... 16-bit write data
+
+   Description:
+     Perform PCIE MDIO write on specified PHY (typically 0), and Register.
+     Access is through an indirect command/status mechanism, and timeout
+     is possible. If command is not immediately complete, which would
+     be typically the case, one more attempt is made after a 1ms delay.
+
+   Return: 1 on success, 0 on timeout
+*/
+static int bcm63xx_pcie_mdio_write (int port, uint16 phyad, uint16 regad, uint16 wrdata)
+{
+    int timeout;
+    uint32 data;
+    volatile PcieBlk1000Regs *RcDLReg;
+#if defined(PCIEH_1)
+    RcDLReg = port ? PCIEH_1_BLK_1000_REGS : PCIEH_BLK_1000_REGS;
+#else 
+    RcDLReg = PCIEH_BLK_1000_REGS; 
+#endif
+
+    /* bits 19:16 is the phyad, bits 4:0 is the regad */
+    data = ((phyad & 0xf) << 16);
+    data = data | (regad & 0x1F);
+
+    RcDLReg->mdioAddr = data;
+    udelay(1000);
+
+    /* Bit-31=1 to initiate the WRITE, bits 15:0 is the write data */
+    data = 0x80000000;
+    data = data | (wrdata & 0xFFFF);
+
+    RcDLReg->mdioWrData = data;
+    udelay(1000);
+
+    /* Bit-31 clears when the write has completed.  Decrement the poll
+     * counter once per failed attempt: the original double decrement
+     * ('timeout--' in both the condition and the body) made the counter
+     * expire at -1, so the 'timeout == 0' check below never fired on a
+     * real timeout (failure reported as success) and fired spuriously
+     * when the last attempt succeeded. */
+    timeout = 2;
+    while (timeout > 0) {
+        data = RcDLReg->mdioWrData;
+        /* Bit-31=0 is DONE */
+        if ((data & 0x80000000) == 0)
+            break;
+        timeout--;
+        udelay(1000);
+    }
+
+    /* 1 on success, 0 on timeout, per the contract documented above */
+    return (timeout == 0) ? 0 : 1;
+}
+
+/*
+ * Apply chip-specific PCIe SerDes/PHY tuning for the given port.  Each
+ * workaround selects a SerDes register block by writing MDIO register
+ * 0x1f, then adjusts registers inside that block.  The sequence follows
+ * the design-team workaround list; do not reorder the writes.
+ */
+static void bcm63xx_pcie_phy_mode_config(int port)
+{
+    uint16 data;
+
+#if defined(CONFIG_BCM963268)
+   /*
+    * PCIe Serdes register at block 820, register 18, bit 3:0 from 7 to F. Help reduce EMI spur.
+    */
+    bcm63xx_pcie_mdio_write(port, 1, 0x1f , 0x8200); 
+    data = bcm63xx_pcie_mdio_read (port, 1, 0x18);
+    data = ((data&0xfff0) | 0xf);
+    bcm63xx_pcie_mdio_write(port, 1, 0x18, data);
+#endif
+
+#if defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333) || defined(CONFIG_BCM96838)
+   /*
+    * PCIe Serdes register at block 808, register 1a, bit 11=1, 16-bit default 0x0283, new value 0x0a83.
+    * Help reduce SerDes Tx jitter
+    */
+    bcm63xx_pcie_mdio_write(port, 0, 0x1f , 0x8080); 
+    data = bcm63xx_pcie_mdio_read (port, 0, 0x1a);
+    /* NOTE(review): (data & 0xffff) is a no-op on a 16-bit register
+     * value; the statement only sets bit 11, as intended above */
+    data = ((data&0xffff) | 0x800);
+    bcm63xx_pcie_mdio_write(port, 0, 0x1a, data);
+    
+   /*
+    * Signal detect level at block 840, register 1D, bits[5:3], default 0xf000, new value 0xf008.
+    * Help to have enough margin
+    */
+    bcm63xx_pcie_mdio_write(port, 0, 0x1f , 0x8400); 
+    data = bcm63xx_pcie_mdio_read (port, 0, 0x1d);
+    data = ((data&0xffc7) | 0x8);
+    bcm63xx_pcie_mdio_write(port, 0, 0x1d, data);
+#endif
+
+#if defined(RCAL_1UM_VERT)
+	/*
+	 * Rcal Calibration Timers
+	 *   Block 0x1000, Register 1, bit 4(enable), and 3:0 (value)
+	 */
+    {
+        int val = 0;
+        uint16 data = 0;
+        if(GetRCalSetting(RCAL_1UM_VERT, &val)== kPMC_NO_ERROR) {
+            printk("bcm63xx_pcie: setting resistor calibration value to 0x%x\n", val);
+            bcm63xx_pcie_mdio_write(port, 0, 0x1f , 0x1000);
+            data = bcm63xx_pcie_mdio_read (port, 0, 1);
+            data = ((data & 0xffe0) | (val & 0xf) | (1 << 4)); /*enable*/
+            bcm63xx_pcie_mdio_write(port, 0, 1, data);
+        }
+    }
+#endif
+#if defined(PCIE3_CORE) /* CONFIG_BCM963381, CONFIG_BCM96848 */
+    //printk("chipid:0x%x , chiprev:0x%x \n", kerSysGetChipId(), (UtilGetChipRev()));
+    {
+        printk("bcm63xx_pcie: applying serdes parameters\n");
+        /*
+         * VCO Calibration Timers
+         * Workaround: 
+         * Block 0x3000, Register 0xB = 0x40
+         * Block 0x3000, Register 0xD = 7
+         * Notes: 
+         * -Fixed in 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         */ 
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x3000);
+        data = bcm63xx_pcie_mdio_read (port, 0, 0x1f);  /* just to exericise the read */
+        bcm63xx_pcie_mdio_write(port, 0, 0xB, 0x40);
+        bcm63xx_pcie_mdio_write(port, 0, 0xD, 7);
+        
+        /*	
+         * Reference clock output level
+         * Workaround:
+         * Block 0x2200, Register 3 = 0xaba4
+         * Note: 
+         * -Fixed in 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x2200);
+        bcm63xx_pcie_mdio_write(port, 0, 3, 0xaba4);
+        
+        /* 
+         * Tx Pre-emphasis
+         * Workaround:
+         * Block 0x4000, Register 0 = 0x1d20  // Gen1
+         * Block 0x4000, Register 1 = 0x12cd  // Gen1
+         * Block 0x4000, Register 3 = 0x0016  // Gen1, Gen2
+         * Block 0x4000, Register 4 = 0x5920  // Gen2
+         * Block 0x4000, Register 5 = 0x13cd  // Gen2
+         * Notes: 
+         * -Fixed in 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x4000);
+        bcm63xx_pcie_mdio_write(port, 0, 0, 0x1D20); 
+        bcm63xx_pcie_mdio_write(port, 0, 1, 0x12CD);
+        bcm63xx_pcie_mdio_write(port, 0, 3, 0x0016);
+        bcm63xx_pcie_mdio_write(port, 0, 4, 0x5920);
+        bcm63xx_pcie_mdio_write(port, 0, 5, 0x13CD);
+        
+        /*
+         * Rx Signal Detect
+         * Workaround:
+         * Block 0x6000, Register 5 = 0x2c0d 
+         * Notes:
+         * -Fixed in 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x6000);
+        bcm63xx_pcie_mdio_write(port, 0, 0x5, 0x2C0D);	
+        
+        /*
+         * Rx Jitter Tolerance
+         * Workaround:
+         * Block 0x7300, Register 3 = 0x190  // Gen1
+         * Block 0x7300, Register 9 = 0x194  // Gen2
+         * Notes:
+         * -Gen1 setting 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         * -Gen2 setting only in latest SerDes RTL  / future tapeouts
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x7300);
+        bcm63xx_pcie_mdio_write(port, 0, 3, 0x190);
+        bcm63xx_pcie_mdio_write(port, 0, 9, 0x194);
+        
+        /* 
+         * Gen2 Rx Equalizer
+         * Workaround:
+         * Block 0x6000 Register 7 = 0xf0c8  // Gen2
+         * Notes:
+         * -New setting only in latest SerDes RTL / future tapeouts
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x6000);
+        bcm63xx_pcie_mdio_write(port, 0, 7, 0xf0c8);
+        
+        /*
+         * SSC Parameters
+         * Workaround:
+         * Block 0x1100, Register 0xA = 0xea3c  
+         * Block 0x1100, Register 0xB = 0x04e7
+         * Block 0x1100, Register 0xC = 0x0039 
+         * Block 0x2200, Register 5 = 0x5044    // VCO parameters for fractional mode, -175ppm
+         * Block 0x2200, Register 6 = 0xfef1    // VCO parameters for fractional mode, -175ppm
+         * Block 0x2200, Register 7 = 0xe818    // VCO parameters for fractional mode, -175ppm
+         * Notes:
+         * -Only need to apply these fixes when enabling Spread Spectrum Clocking (SSC), which would likely be a flash option
+         * -Block 0x1100 fixed in 63148A0, 63381B0, 63138B0, 6848 but ok to write anyway
+         */
+        
+        /*
+         * EP Mode PLL Bandwidth and Peaking
+         * Workaround:
+         * Block 0x2100, Register 0 = 0x5174
+         * Block 0x2100, Register 4 = 0x6023
+         * Notes:
+         * -Only needed for EP mode, but ok to write in RC mode too
+         * -New setting only in latest SerDes RTL / future tapeouts
+         */
+        bcm63xx_pcie_mdio_write(port, 0, 0x1f, 0x2100);
+        bcm63xx_pcie_mdio_write(port, 0, 0, 0x5174);
+        bcm63xx_pcie_mdio_write(port, 0, 4, 0x6023);
+    }
+#endif
+
+    return;
+}
+#endif 
+
+#if defined(UBUS2_PCIE)
+/* Power up the PCIe core for 'port' via the PMC, where a PMC driver is
+ * compiled in.  NOTE(review): the PowerOn argument is currently unused
+ * -- the function only ever powers up; confirm intent before calling
+ * with PowerOn == FALSE. */
+static void __init bcm63xx_pcie_hw_powerup(int port, bool PowerOn)
+{
+#if defined(PMC_PCIE_H)
+    pmc_pcie_power_up(port);
+#endif
+    return;
+}
+#endif
+
+/*
+ * Sequence the PCIe core(s) for 'port' through reset, running the PHY
+ * workarounds between the de-assert steps.  The mdelay() calls pace the
+ * hardware and must not be removed.
+ * NOTE(review): only the PCIE3_CORE path honours PowerOn == FALSE; the
+ * UBUS1/UBUS2 sequences below always release the port from reset.
+ */
+static void __init bcm63xx_pcie_pcie_reset(int port, bool PowerOn)
+{
+#if defined(PCIE3_CORE) /* CONFIG_BCM963381, CONFIG_BCM96848 */
+    u32 val = MISC->miscPCIECtrl;
+    if(PowerOn) {
+        val &= ~(1<<port);
+        MISC->miscPCIECtrl = val;
+        mdelay(10);
+        /* adjust pcie phy */
+        bcm63xx_pcie_phy_mode_config(port);
+        mdelay(10);
+        val |= (1<<port);
+        MISC->miscPCIECtrl = val;
+        mdelay(10);
+    } else {
+        val &= ~(1<<port);
+        MISC->miscPCIECtrl = val;
+    }
+#endif
+    /* ubus2 pcie architecture*/
+#if defined(UBUS2_PCIE)
+    if(port == 0){
+#if defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333)
+        PERF->blkEnables |= PCIE_CLK_EN;
+
+#if defined(CONFIG_BCM96318)
+        PERF->blkEnables |= PCIE25_CLK_EN;
+        PERF->blkEnablesUbus |= PCIE_UBUS_CLK_EN;
+#endif
+        /*
+         * SOFT_RST_PCIE_EXT is the software equivalent of a power-on or push-button reset, clears PCIe sticky bits, 
+         * Hard Reset registers, and SerDes MDIO Registers, and because of this is only appropriate to assert in RC mode.
+         * SOFT_RST_PCIE_HARD (hard reset) is also available. It is directly equivalent to the device hard reset to PCIe, and should not be required
+         */
+        PERF->softResetB &= ~(SOFT_RST_PCIE_EXT |SOFT_RST_PCIE|SOFT_RST_PCIE_CORE);
+        mdelay(10);
+        PERF->softResetB |= (SOFT_RST_PCIE_EXT);
+        mdelay(10);
+        PERF->softResetB |= SOFT_RST_PCIE;
+        mdelay(10);
+
+#if defined(CONFIG_BCM96318) || defined(CONFIG_BCM960333)
+        /* adjust pcie phy */
+        bcm63xx_pcie_phy_mode_config(port);
+#endif
+        /* optional serdes initialization de-asserts */
+        PCIEH_MISC_HARD_REGS->hard_pcie_hard_debug &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ;
+        mdelay(10);
+        PERF->softResetB |= SOFT_RST_PCIE_CORE;
+        mdelay(10);
+#endif
+#if defined(CONFIG_BCM96838)
+        PERF->pcie_softResetB_lo &= ~(SOFT_RST_PCIE0_CORE);
+        /* adjust pcie phy */
+        bcm63xx_pcie_phy_mode_config(port);         
+        mdelay(10);               
+        PERF->pcie_softResetB_lo |= (SOFT_RST_PCIE0_CORE);
+        mdelay(10);
+#endif        
+    }
+
+    if(port == 1){
+#if defined(CONFIG_BCM96838)
+        PERF->pcie_softResetB_lo &= ~(SOFT_RST_PCIE1_CORE);
+        /* adjust pcie phy */
+        bcm63xx_pcie_phy_mode_config(port);
+        mdelay(10);        
+        PERF->pcie_softResetB_lo |= (SOFT_RST_PCIE1_CORE);
+        mdelay(10); 
+#endif
+    }
+#else /* ubus1 pcie architecture*/
+    PERF->blkEnables |= PCIE_CLK_EN;
+
+    /* pcie serdes enable */
+
+#if defined(CONFIG_BCM96328) || defined(CONFIG_BCM96362) ||  defined(CONFIG_BCM963268)
+    MISC->miscSerdesCtrl |= (SERDES_PCIE_ENABLE|SERDES_PCIE_EXD_ENABLE);
+#endif    
+
+    /* reset pcie and ext device */
+    PERF->softResetB &= ~(SOFT_RST_PCIE|SOFT_RST_PCIE_EXT|SOFT_RST_PCIE_CORE);
+
+#if defined(CONFIG_BCM96328)  || defined(CONFIG_BCM963268)
+    PERF->softResetB &= ~SOFT_RST_PCIE_HARD;
+    mdelay(10);
+   
+    PERF->softResetB |= SOFT_RST_PCIE_HARD;
+#endif
+
+    mdelay(10);
+    
+    PERF->softResetB |= (SOFT_RST_PCIE|SOFT_RST_PCIE_CORE);
+    mdelay(10);
+    
+#if defined(CONFIG_BCM963268)
+    /* adjust pcie phy */
+    bcm63xx_pcie_phy_mode_config(0);
+#endif    
+    
+    PERF->softResetB |= (SOFT_RST_PCIE_EXT);
+
+#endif 
+    /* this is a critical delay */
+    mdelay(200);
+}
+
+#if defined(UBUS2_PCIE)
+#ifndef PCIEH_0_CPU_INTR1_REGS
+#define PCIEH_0_CPU_INTR1_REGS        PCIEH_CPU_INTR1_REGS
+#endif
+#ifndef PCIEH_0_REGS
+#define PCIEH_0_REGS                  PCIEH_REGS
+#endif
+#ifndef PCIEH_0_MISC_REGS
+#define PCIEH_0_MISC_REGS             PCIEH_MISC_REGS
+#endif
+#ifndef PCIEH_0_RC_CFG_VENDOR_REGS
+#define PCIEH_0_RC_CFG_VENDOR_REGS    PCIEH_RC_CFG_VENDOR_REGS
+#endif
+#ifndef PCIEH_0_PCIE_EXT_CFG_REGS
+#define PCIEH_0_PCIE_EXT_CFG_REGS     PCIEH_PCIE_EXT_CFG_REGS
+#endif
+#ifndef PCIEH_0_BLK_428_REGS
+#define PCIEH_0_BLK_428_REGS          PCIEH_BLK_428_REGS
+#endif
+#ifndef PCIEH_0_BLK_404_REGS
+#define PCIEH_0_BLK_404_REGS          PCIEH_BLK_404_REGS
+#endif
+#ifndef BCM_BUS_PCIE_0_DEVICE
+#define BCM_BUS_PCIE_0_DEVICE         BCM_BUS_PCIE_DEVICE
+#endif
+#ifndef BCM_PCIE_0_MEM_BASE
+#define BCM_PCIE_0_MEM_BASE           BCM_PCIE_MEM1_BASE
+#endif
+#ifndef BCM_PCIE_0_MEM_SIZE
+#define BCM_PCIE_0_MEM_SIZE           BCM_PCIE_MEM1_SIZE
+#endif
+
+#if defined(PCIEH_1)
+#ifndef BCM_BUS_PCIE_1_DEVICE
+#define BCM_BUS_PCIE_1_DEVICE         BCM_BUS_PCIE1_DEVICE
+#endif
+#ifndef BCM_PCIE_1_MEM_BASE
+#define BCM_PCIE_1_MEM_BASE           BCM_PCIE_MEM2_BASE
+#endif
+#ifndef BCM_PCIE_1_MEM_SIZE
+#define BCM_PCIE_1_MEM_SIZE           BCM_PCIE_MEM2_SIZE
+#endif
+#endif /* PCIEH_1 */
+
+/*
+ * Per-port UBUS2 PCIe bring-up: unmask INTA-INTD, program the outgoing
+ * CPU->PCIe memory window, map incoming BAR1 to DDR, select the
+ * downstream bus number for config cycles, and present the port as a
+ * PCI-PCI bridge.
+ */
+#define BCM63XX_PCIE_UBUS2_INIT_PORT(X) {            \
+        PCIEH_##X##_CPU_INTR1_REGS->maskClear = (    \
+            PCIE_CPU_INTR1_PCIE_INTD_CPU_INTR |      \
+            PCIE_CPU_INTR1_PCIE_INTC_CPU_INTR |      \
+            PCIE_CPU_INTR1_PCIE_INTB_CPU_INTR |      \
+            PCIE_CPU_INTR1_PCIE_INTA_CPU_INTR );     \
+       /* setup outgoing mem resource window */      \
+        PCIEH_##X##_MISC_REGS->cpu_2_pcie_mem_win0_base_limit = (((BCM_PCIE_##X##_MEM_BASE+BCM_PCIE_##X##_MEM_SIZE-1)&PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_LIMIT_MASK) \
+                                                                 |((BCM_PCIE_##X##_MEM_BASE>>PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_LIMIT_SHIFT)<<PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_LIMIT_BASE_SHIFT)); \
+        \
+        PCIEH_##X##_MISC_REGS->cpu_2_pcie_mem_win0_lo |= (BCM_PCIE_##X##_MEM_BASE&PCIE_MISC_CPU_2_PCI_MEM_WIN_LO_BASE_ADDR_MASK); \
+        \
+        /* setup incoming DDR memory BAR(1) */        \
+        PCIEH_##X##_MISC_REGS->rc_bar1_config_lo = ((DDR_UBUS_ADDRESS_BASE&PCIE_MISC_RC_BAR_CONFIG_LO_MATCH_ADDRESS_MASK) \
+                                                    | PCIE_MISC_RC_BAR_CONFIG_LO_SIZE_256MB); \
+        \
+        PCIEH_##X##_MISC_REGS->ubus_bar1_config_remap = PCIE_MISC_UBUS_BAR_CONFIG_ACCESS_EN; \
+        \
+        /* set the downstream bus number for config cycles */ \
+        PCIEH_##X##_PCIE_EXT_CFG_REGS->index = (BCM_BUS_PCIE_##X##_DEVICE<<PCIE_EXT_CFG_BUS_NUM_SHIFT); \
+        \
+        /* setup class code, as bridge */ \
+        PCIEH_##X##_BLK_428_REGS->idVal3 &= ~PCIE_IP_BLK428_ID_VAL3_CLASS_CODE_MASK; \
+        PCIEH_##X##_BLK_428_REGS->idVal3 |= (PCI_CLASS_BRIDGE_PCI << 8);             \
+        /* disable BAR size (NOTE(review): mask is ..._BAR1_SIZE_MASK -- confirm bar0 vs bar1) */ \
+        PCIEH_##X##_BLK_404_REGS->config2 &= ~PCIE_IP_BLK404_CONFIG_2_BAR1_SIZE_MASK; \
+}
+
+/*
+  * Program the timeouts
+  *   MISC_UBUS_TIMEOUT:                        0x0180_0000 (250 msec, 10ns increments, based on curent PCIE Clock)
+  *   RC_CFG_PCIE_DEVICE_STATUS_CONTROL_2:      0x0006      (210ms)
+  *
+  * Note: PCI structure endianness is not handled properly, so deviceStatus2 must be written in addition to deviceControl2.
+  *       Writing deviceStatus2 has no effect, as it is a read-only field.
+  */
+#define BCM63XX_PCIE_CONFIG_TIMEOUTS(X) {                  \
+        PCIEH_##X##_MISC_REGS->ubus_timeout = 0x01800000;  \
+        PCIEH_##X##_REGS->deviceControl2 = 0x0006;         \
+        PCIEH_##X##_REGS->deviceStatus2 = 0x0006;          \
+}
+
+#else
+
+
+/*
+  * Program the timeouts
+  *   MISC_UBUS_TIMEOUT:                        (default is large 1sec. No need to program)
+  *   RC_CFG_PCIE_DEVICE_STATUS_CONTROL_2:      0x0006      (210ms)
+  *
+  * Note: PCI structure endianness is not handled properly, so deviceStatus2 must be written in addition to deviceControl2.
+  *       Writing deviceStatus2 has no effect, as it is a read-only field.
+  */
+#define BCM63XX_PCIE_CONFIG_TIMEOUTS(X) {            \
+        PCIEH_REGS->deviceControl2 = 0x0006;         \
+        PCIEH_REGS->deviceStatus2 = 0x0006;          \
+}
+
+#endif /* UBUS2_PCIE */
+
+#if defined(PCIE3_CORE)
+#ifndef PCIEH_0_MISC_REGS
+#define PCIEH_0_MISC_REGS	PCIEH_MISC_REGS
+#endif
+#define BCM63XX_PCIE_CORE3_INIT_PORT(X) {        \
+        PCIEH_##X##_MISC_REGS->misc_ctrl |= (    \
+            PCIE_MISC_CTRL_BURST_ALIGN           \
+            |PCIE_MISC_CTRL_PCIE_IN_WR_COMBINE   \
+            |PCIE_MISC_CTRL_PCIE_RCB_MPS_MODE    \
+            |PCIE_MISC_CTRL_PCIE_RCB_64B_MODE);  \
+}
+#else
+#define BCM63XX_PCIE_CORE3_INIT_PORT(X)
+#endif /* PCIE3_CORE */
+#endif /* PCIEH */
+
+/*
+ * Arch initcall: register the legacy PCI controller (bus 0) and, when
+ * PCIe cores are present, power up / reset / configure each enabled
+ * PCIe port and register its controller.
+ */
+static int __init bcm63xx_pci_init(void)
+{
+    /* adjust global io port range */
+    ioport_resource.start = BCM_PCI_IO_BASE;
+    ioport_resource.end = BCM_PCI_IO_BASE + BCM_PCI_IO_SIZE-1;
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM963268)
+    bcm63xx_pci_swhdr_patch();
+#endif
+    /* bus 0 */
+    register_pci_controller(&bcm63xx_controller);
+
+#if defined(PCIEH)
+#if defined(UBUS2_PCIE)
+/* defined(CONFIG_BCM96318) || defined(CONFIG_BCM96838) */
+    if(kerSysGetPciePortEnable(0)){
+        bcm63xx_pcie_hw_powerup(0, TRUE);
+        bcm63xx_pcie_pcie_reset(0, TRUE);
+        BCM63XX_PCIE_CORE3_INIT_PORT(0);
+        BCM63XX_PCIE_UBUS2_INIT_PORT(0);
+#if !defined(CONFIG_CPU_LITTLE_ENDIAN)
+        PCIEH_RC_CFG_VENDOR_REGS->specificReg1 = PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BYTE_ALIGN;
+#endif
+        /* Currently disabled until good timeout values are available from design team
+        BCM63XX_PCIE_CONFIG_TIMEOUTS(0);
+        */
+        /*bus 1 and 2 */
+        register_pci_controller(&bcm63xx_pcie_controller);
+    }
+#if defined(PCIEH_1)
+    if(kerSysGetPciePortEnable(1)){
+        bcm63xx_pcie_hw_powerup(1, TRUE);
+        bcm63xx_pcie_pcie_reset(1, TRUE);
+        BCM63XX_PCIE_CORE3_INIT_PORT(1);
+        BCM63XX_PCIE_UBUS2_INIT_PORT(1);
+#if !defined(CONFIG_CPU_LITTLE_ENDIAN)
+        PCIEH_1_RC_CFG_VENDOR_REGS->specificReg1 = PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BYTE_ALIGN;
+#endif
+        /* Currently disabled until good timeout values are available from design team
+        BCM63XX_PCIE_CONFIG_TIMEOUTS(1);
+        */
+        /*bus 3 and 4 */
+        register_pci_controller(&bcm63xx_pcie1_controller);
+    }
+#endif
+
+#else	/* UBUS2_PCIE */
+
+    bcm63xx_pcie_pcie_reset(0, TRUE);
+
+#if defined(CONFIG_BCM96362) || defined(CONFIG_BCM96328) ||   defined(CONFIG_BCM963268)
+    PCIEH_BRIDGE_REGS->bridgeOptReg1 |= (PCIE_BRIDGE_OPT_REG1_en_l1_int_status_mask_polarity |
+        PCIE_BRIDGE_OPT_REG1_en_pcie_bridge_hole_detection  |
+        PCIE_BRIDGE_OPT_REG1_en_rd_reply_be_fix |
+        PCIE_BRIDGE_OPT_REG1_enable_rd_be_opt);
+
+    /* Unmask all four legacy interrupts.  The original code listed
+     * int_c twice and never unmasked int_d (copy/paste fix). */
+    PCIEH_BRIDGE_REGS->rcInterruptMask |= (
+        PCIE_BRIDGE_INTERRUPT_MASK_int_a_MASK |
+        PCIE_BRIDGE_INTERRUPT_MASK_int_b_MASK |
+        PCIE_BRIDGE_INTERRUPT_MASK_int_c_MASK |
+        PCIE_BRIDGE_INTERRUPT_MASK_int_d_MASK );
+
+    /* enable credit checking and error checking */
+    PCIEH_BRIDGE_REGS->bridgeOptReg2 |= ( PCIE_BRIDGE_OPT_REG2_enable_tx_crd_chk_MASK |
+                                          PCIE_BRIDGE_OPT_REG2_dis_ubus_ur_decode_MASK );
+#endif
+
+#if defined(CONFIG_BCM963268)
+    /* setup outgoing window */
+    PCIEH_BRIDGE_REGS->Ubus2PcieBar0BaseMask |= ((BCM_PCIE_MEM1_BASE&PCIE_BRIDGE_BAR0_BASE_base_MASK)|
+                                                 (((BCM_PCIE_MEM1_BASE+BCM_PCIE_MEM1_SIZE-1) >>PCIE_BRIDGE_BAR0_BASE_base_MASK_SHIFT)
+                                                    << PCIE_BRIDGE_BAR0_BASE_mask_MASK_SHIFT)) | PCIE_BRIDGE_BAR0_BASE_swap_enable;
+#endif
+
+#if defined(CONFIG_BCM96362) ||     defined(CONFIG_BCM96328) ||              defined(CONFIG_BCM963268)
+    /* set the downstream bus number for type-1 config cycles */
+    PCIEH_BRIDGE_REGS->bridgeOptReg2 |= ((BCM_BUS_PCIE_DEVICE<<PCIE_BRIDGE_OPT_REG2_cfg_type1_bus_no_SHIFT) |
+        PCIE_BRIDGE_OPT_REG2_cfg_type1_bd_sel_MASK );
+#endif
+
+    /* setup class code, as bridge */
+    PCIEH_BLK_428_REGS->idVal3 &= ~PCIE_IP_BLK428_ID_VAL3_CLASS_CODE_MASK;
+    PCIEH_BLK_428_REGS->idVal3 |= (PCI_CLASS_BRIDGE_PCI << 8);
+    /* disable BAR size (NOTE(review): mask is ..._BAR1_SIZE_MASK -- confirm bar0 vs bar1) */
+    PCIEH_BLK_404_REGS->config2 &= ~PCIE_IP_BLK404_CONFIG_2_BAR1_SIZE_MASK;
+
+    /* Currently disabled until good timeout values are available from design team
+    BCM63XX_PCIE_CONFIG_TIMEOUTS(0);
+    */
+
+    /*bus 1 and 2 */
+    register_pci_controller(&bcm63xx_pcie_controller);
+#endif /* UBUS2_PCIE */
+#endif /* PCIEH */
+
+    return 0;
+}
+
+arch_initcall(bcm63xx_pci_init);
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0514866fa9255f13f8cc2578c840b22ad7626482..beb7e5084e396c7e75a763aac317f81de388cdaf 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -124,8 +124,15 @@ void __devinit register_pci_controller(struct pci_controller *hose)
 	if (request_resource(&iomem_resource, hose->mem_resource) < 0)
 		goto out;
 	if (request_resource(&ioport_resource, hose->io_resource) < 0) {
-		release_resource(hose->mem_resource);
-		goto out;
+#if defined(CONFIG_BCM_KF_PCI_FIXUP)
+		if(!((hose->io_resource->start == 0) && (hose->io_resource->end == 0)))
+		{
+#endif	
+			release_resource(hose->mem_resource);
+			goto out;
+#if defined(CONFIG_BCM_KF_PCI_FIXUP)
+		}
+#endif
 	}
 
 	*hose_tail = hose;
diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr
deleted file mode 100644
index 862d748082369a24eee471b493c4b07d3220ed36..0000000000000000000000000000000000000000
--- a/arch/sh/boot/compressed/vmlinux.scr
+++ /dev/null
@@ -1,10 +0,0 @@
-SECTIONS
-{
-  .rodata..compressed : {
-	input_len = .;
-	LONG(input_data_end - input_data) input_data = .;
-	*(.data)
-	output_len = . - 4;
-	input_data_end = .;
-	}
-}
diff --git a/arch/sh/boot/romimage/vmlinux.scr b/arch/sh/boot/romimage/vmlinux.scr
deleted file mode 100644
index 590394e2f5f2e48db6e500bf3fb0e6bebeab703f..0000000000000000000000000000000000000000
--- a/arch/sh/boot/romimage/vmlinux.scr
+++ /dev/null
@@ -1,8 +0,0 @@
-SECTIONS
-{
-  .text : {
-	zero_page_pos = .;
-	*(.data)
-	end_data = .;
-	}
-}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8e84225c096b6adfafcde59d08e066e5751af9c2..3da6db0727e437886df454b5d0f00bf7d06c73a1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -935,6 +935,14 @@ config CRYPTO_ZLIB
 	help
 	  This is the zlib algorithm.
 
+config CRYPTO_LZMA
+	tristate "LZMA compression algorithm"
+	select CRYPTO_ALGAPI
+	select LZMA_COMPRESS
+	select LZMA_DECOMPRESS
+	help
+	  This is the LZMA algorithm.
+
 config CRYPTO_LZO
 	tristate "LZO compression algorithm"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 30f33d675330e4adfe680a343e98448a82a6171d..6050a2bf6b5c041e198dc6f4559925a29e83aadf 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_LZMA) += lzma.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
diff --git a/crypto/lzma.c b/crypto/lzma.c
new file mode 100644
index 0000000000000000000000000000000000000000..2f4d8e139c6fd93b98d117998826d309dc7e5ca9
--- /dev/null
+++ b/crypto/lzma.c
@@ -0,0 +1,146 @@
+/*
+ * Cryptographic API.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/lzma.h>
+
+/* Per-tfm state: the LZMA SDK encoder handle plus the serialized
+ * encoder properties that LzmaDecode() needs for decompression. */
+struct lzma_ctx {
+	CLzmaEncHandle *p;                   /* LZMA SDK encoder handle */
+	SizeT propsSize;                     /* valid bytes in propsEncoded */
+	Byte propsEncoded[LZMA_PROPS_SIZE];  /* serialized encoder properties */
+};
+
+/* Release the encoder created by lzma_alloc_workspace(). */
+static void lzma_free_workspace(struct lzma_ctx *ctx)
+{
+	LzmaEnc_Destroy(ctx->p, &lzma_alloc, &lzma_alloc);
+}
+
+/*
+ * Create the LZMA encoder, apply 'props' to it, and cache the encoded
+ * property bytes in the context for later use by the decoder.
+ *
+ * Returns 0 on success, -ENOMEM if the encoder cannot be allocated, or
+ * -EINVAL if the properties are rejected (the encoder is freed again).
+ */
+static int lzma_alloc_workspace(struct lzma_ctx *ctx, CLzmaEncProps *props)
+{
+	ctx->p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc);
+	if (ctx->p == NULL)
+		return -ENOMEM;
+
+	if (LzmaEnc_SetProps(ctx->p, props) != SZ_OK)
+		goto err_free;
+
+	ctx->propsSize = sizeof(ctx->propsEncoded);
+	if (LzmaEnc_WriteProperties(ctx->p, ctx->propsEncoded,
+				    &ctx->propsSize) != SZ_OK)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	lzma_free_workspace(ctx);
+	return -EINVAL;
+}
+
+/* crypto_tfm init hook: configure and allocate the per-tfm encoder
+ * using the compile-time LZMA_BEST_* settings. */
+static int lzma_init(struct crypto_tfm *tfm)
+{
+	CLzmaEncProps props;
+	struct lzma_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	LzmaEncProps_Init(&props);
+	props.level = LZMA_BEST_LEVEL;
+	props.dictSize = LZMA_BEST_DICT(0x2000);
+	props.lc = LZMA_BEST_LC;
+	props.lp = LZMA_BEST_LP;
+	props.pb = LZMA_BEST_PB;
+	props.fb = LZMA_BEST_FB;
+
+	return lzma_alloc_workspace(ctx, &props);
+}
+
+/* crypto_tfm exit hook: release the per-tfm LZMA encoder. */
+static void lzma_exit(struct crypto_tfm *tfm)
+{
+	struct lzma_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	lzma_free_workspace(ctx);
+}
+
+/*
+ * Compress slen bytes from 'src' into 'dst'.  On entry *dlen holds the
+ * capacity of 'dst'; on success it is updated to the compressed size.
+ * Returns 0 on success or -EINVAL on any encoder error.
+ */
+static int lzma_compress(struct crypto_tfm *tfm, const u8 *src,
+			 unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct lzma_ctx *ctx = crypto_tfm_ctx(tfm);
+	SizeT out_size = (SizeT)(*dlen);
+
+	if (LzmaEnc_MemEncode(ctx->p, dst, &out_size, src, slen,
+			      1, NULL, &lzma_alloc, &lzma_alloc) != SZ_OK)
+		return -EINVAL;
+
+	*dlen = (unsigned int)out_size;
+	return 0;
+}
+
+/*
+ * Decompress slen bytes from 'src' into 'dst' using the property bytes
+ * cached at init time.  On entry *dlen holds the capacity of 'dst'; on
+ * success it is updated to the decompressed size.  Returns 0 on
+ * success, -EINVAL on decoder error or a truncated stream.
+ */
+static int lzma_decompress(struct crypto_tfm *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct lzma_ctx *ctx = crypto_tfm_ctx(tfm);
+	SizeT out_len = (SizeT)*dlen;
+	SizeT in_len = (SizeT)slen;
+	ELzmaStatus status;
+
+	if (LzmaDecode(dst, &out_len, src, &in_len, ctx->propsEncoded,
+		       ctx->propsSize, LZMA_FINISH_END, &status,
+		       &lzma_alloc) != SZ_OK ||
+	    status == LZMA_STATUS_NOT_FINISHED)
+		return -EINVAL;
+
+	*dlen = (unsigned int)out_len;
+	return 0;
+}
+
+/* Registration record for the "lzma" compression algorithm */
+static struct crypto_alg lzma_alg = {
+	.cra_name		= "lzma",
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct lzma_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(lzma_alg.cra_list),
+	.cra_init		= lzma_init,
+	.cra_exit		= lzma_exit,
+	.cra_u			= { .compress = {
+	.coa_compress 		= lzma_compress,
+	.coa_decompress  	= lzma_decompress } }
+};
+
+/* Register the "lzma" algorithm with the crypto API on module load. */
+static int __init lzma_mod_init(void)
+{
+	return crypto_register_alg(&lzma_alg);
+}
+
+/* Unregister the algorithm on module unload. */
+static void __exit lzma_mod_exit(void)
+{
+	crypto_unregister_alg(&lzma_alg);
+}
+
+module_init(lzma_mod_init);
+module_exit(lzma_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZMA Compression Algorithm");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5674878ff6c18d244ec23b4f8b7815647a814c9b..568b4f9785f4f6a7c36be4d7ad161789e83bfd8a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -197,6 +197,11 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 	char *xbuf[XBUFSIZE];
 	int ret = -ENOMEM;
 
+#if defined(CONFIG_BCM_KF_IP)
+/* disable testing spu alg*/
+	return 0;
+#endif
+
 	if (testmgr_alloc_buf(xbuf))
 		goto out_nobuf;
 
@@ -2803,7 +2808,9 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 	return rc;
 
 notest:
+#if !defined(CONFIG_BCM_KF_IP)
 	printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
+#endif
 	return 0;
 non_fips_alg:
 	return -EINVAL;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d236aef7e59fff7b7bc255cea2a86ce0a40425c1..2427669455fb5f52b3efa0397e707ac694ed9a01 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,6 +96,10 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+if BCM_KF_ANDROID
+source "drivers/switch/Kconfig"
+endif
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 95952c82bf16653e40aa46ef1eeaa2f4fe96f8fa..424d6d9c20c4b5860b93f6e2c2dbb76871ae5077 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -100,6 +100,11 @@ obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
 obj-y				+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-y				+= leds/
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_SWITCH)		+= switch/
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618acfe97434dd90f58e37c30ce7b672139f2..e35cec85d6b89fbc47168404d49e87fb059a31bf 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,33 @@ config DMA_SHARED_BUFFER
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
+config SYNC
+	bool "Synchronization framework"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	select ANON_INODES
+	help
+	  This option enables the framework for synchronization between multiple
+	  drivers.  Sync implementations can take advantage of hardware
+	  synchronization built into devices like GPUs.
+
+config SW_SYNC
+	bool "Software synchronization objects"
+	default n
+	depends on SYNC
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  A sync object driver that uses a 32bit counter to coordinate
+	  synchronization.  Useful when there is no hardware primitive backing
+	  the synchronization.
+
+config SW_SYNC_USER
+       bool "Userspace API for SW_SYNC"
+       default n
+       depends on SW_SYNC
+       depends on BCM_KF_ANDROID && BCM_ANDROID
+       help
+         Provides a user space API to the sw sync object.
+         *WARNING* improper use of this can result in deadlocking kernel
+         drivers from userspace.
 endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c4200ca2756b313e9a55392b9884cde051..ac1856c4a78f2f2d5f2f845b1705ac9d315f6a44 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,5 +21,12 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)	+= regmap/
 obj-$(CONFIG_SOC_BUS) += soc.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_SYNC)	+= sync.o
+obj-$(CONFIG_SW_SYNC)	+= sw_sync.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ee946865d6cb271ab001ed7837501843c7c31f75..36348cc1a4abc51e08d4b5b097d7c146ebfe7af0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,20 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+	bool "Memory device driver"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	  The memory driver provides two character devices, mem and kmem, which
+	  provide access to the system's memory. The mem device is a view of
+	  physical memory, and each byte in the device corresponds to the
+	  matching physical address. The kmem device is the same as mem, but
+	  the addresses correspond to the kernel's virtual address space rather
+	  than physical memory. These devices are standard parts of a Linux
+	  system and most users should say Y here. You might say N if very
+	  security conscious or memory is tight.
+
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
 	default y
@@ -583,6 +597,11 @@ config DEVPORT
 	depends on ISA || PCI
 	default y
 
+config DCC_TTY
+	tristate "DCC tty driver"
+	depends on ARM
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+
 source "drivers/s390/char/Kconfig"
 
 config RAMOOPS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 0dc5d7ce4864b5f61d7c811b2cf7c7218df6739c..3d571801a0d2ec805337fd79875834e734d2d828 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -57,6 +57,11 @@ obj-$(CONFIG_IPMI_HANDLER)	+= ipmi/
 obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)		+= tpm/
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_DCC_TTY)		+= dcc_tty.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
 obj-$(CONFIG_RAMOOPS)		+= ramoops.o
 
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e24a2a1b666661aab161c508bc6696bd3fe7979f..36176416c587f24624e6d24f8d194636921d365a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -99,6 +99,17 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 	  Be aware that not all cpufreq drivers support the conservative
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+	bool "interactive"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select CPU_FREQ_GOV_INTERACTIVE
+	help
+	  Use the CPUFreq governor 'interactive' as default. This allows
+	  you to get a full dynamic cpu frequency capable system by simply
+	  loading your cpufreq low-level hardware driver, using the
+	  'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -156,6 +167,24 @@ config CPU_FREQ_GOV_ONDEMAND
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_interactive.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda22d15591f8ce867950d84a6cadbca9..cf9941dd2540505d621997b3567c9e01f7175fe4 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,11 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7f2f149ae40fd4efa933052aa1caf9e1c3834b45..003329382c11c19dc99a5335348383dd19f2ed24 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -968,6 +968,13 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	if (ret)
 		goto err_out_unregister;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	if (cpufreq_driver->init_sysfs) {
+		ret = cpufreq_driver->init_sysfs(policy);
+		if (ret)
+			goto err_out_unregister;
+	}
+#endif
 	unlock_policy_rwsem_write(cpu);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
@@ -1725,6 +1732,61 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 	return ret;
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+int cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
+{
+	return __cpufreq_set_policy(data, policy);
+}
+EXPORT_SYMBOL(cpufreq_set_policy);
+
+// set governor, and if supported, set speed to specified fraction of max
+int cpufreq_set_speed(const char *govstr, int fraction)
+{
+	struct cpufreq_policy *data, policy;
+	struct cpufreq_governor *governor;
+	unsigned cpu = smp_processor_id();
+	int rc;
+
+	governor = __find_governor(govstr);
+	if (governor == 0) {
+		printk("governor %s unavailable\n", govstr);
+		return -ENOENT;
+	}
+
+	data = cpufreq_cpu_get(cpu);
+	policy = *data;
+	policy.governor = governor;
+	rc = __cpufreq_set_policy(data, &policy);
+	if (rc == 0) {
+		data->user_policy.policy = data->policy;
+		data->user_policy.governor = data->governor;
+		if (fraction && data->governor->store_setspeed) {
+			data->governor->store_setspeed(data, data->max / fraction);
+		}
+	}
+	cpufreq_cpu_put(data);
+	return rc;
+}
+EXPORT_SYMBOL(cpufreq_set_speed);
+
+// return current frequency (0 if dynamic) and max frequency
+unsigned cpufreq_get_freq_max(unsigned *max)
+{
+	unsigned cpu = smp_processor_id();
+	struct cpufreq_policy *data;
+	unsigned freq = 0;
+
+	data = cpufreq_cpu_get(cpu);
+	if (max) *max = data->max;
+	if (data->governor && data->governor->store_setspeed) {
+		freq = data->cur;
+	}
+	cpufreq_cpu_put(data);
+
+	return freq;
+}
+EXPORT_SYMBOL(cpufreq_get_freq_max);
+#endif
 /**
  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
  *	@cpu: CPU which shall be re-evaluated
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 836e9b062e5ec4a2e08c935d4c49f17a79ef124b..5d1e0588b736674be28639bda80e20e275b26faa 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -29,11 +29,21 @@
  */
 
 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+#define DEF_FREQUENCY_UP_THRESHOLD		(20)
+#define DEF_SAMPLING_DOWN_FACTOR		(10)
+#else
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
+#endif
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(10)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(40)
+#else
 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
+#endif
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
@@ -431,7 +441,169 @@ define_one_global_rw(sampling_down_factor);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
 
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+// clear current frequency table indices
+static void min_request_clr(struct cpufreq_frequency_table *table)
+{
+	int idx;
+
+	for (idx = 0; table[idx].frequency != CPUFREQ_TABLE_END; idx++)
+		table[idx].index = 0;
+}
+
+// change governor policy min
+static void min_request_chg(struct cpufreq_policy *policy,
+	struct cpufreq_frequency_table *table)
+{
+	unsigned freq, max = 0;
+	int idx = 0;
+
+	// find highest entry whose request count is non-zero
+	while ((freq = table[idx].frequency) != CPUFREQ_TABLE_END) {
+		if (table[idx].index && freq > max)
+			max = freq;
+		idx++;
+	}
+
+	// use lowest min freq if no requests
+	if (max == 0)
+		max = table[0].frequency;
+
+	// update policy if current min != max
+	if (policy->min != max) {
+		struct cpufreq_policy new_policy = *policy;
+		new_policy.min = max;
+
+		if (policy->min < max)
+			__cpufreq_driver_target(policy, max, CPUFREQ_RELATION_L);
+		cpufreq_set_policy(policy, &new_policy);
+	}
+}
+
+// decrement request count for min freq of 'freq'
+static int min_request_dec(struct cpufreq_policy *policy,
+	struct cpufreq_frequency_table *table, unsigned freqmin)
+{
+	unsigned freq;
+	int idx = 0;
+
+	// assume table ordered by frequency; find first entry >= freqmin
+	// (cpufreq_frequency_table_target() honors current policy->min)
+	while ((freq = table[idx].frequency) != CPUFREQ_TABLE_END) {
+		if (freq != CPUFREQ_ENTRY_INVALID && freq >= freqmin) {
+			if (table[idx].index && --table[idx].index == 0) {
+				// expired minimum ... update policy
+				min_request_chg(policy, table);
+			}
+			return 0;
+		}
+		idx++;
+	}
+	return idx;
+}
+
+// increment request count for min freq of 'freq'
+static int min_request_inc(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table, unsigned freqmin)
+{
+	unsigned freq;
+	int idx = 0;
+
+	// assume table ordered by frequency; find first entry >= freqmin
+	// (cpufreq_frequency_table_target() honors current policy->min)
+	while ((freq = table[idx].frequency) != CPUFREQ_TABLE_END) {
+		if (freq != CPUFREQ_ENTRY_INVALID && freq >= freqmin) {
+			if (table[idx].index++ == 0 && freq > policy->min) {
+				// higher minimum ... update policy
+				min_request_chg(policy, table);
+			}
+			return 0;
+		}
+		idx++;
+	}
+	return idx;
+}
+
+static int reservation_update(int freq)
+{
+	struct cpufreq_policy *policy;
+	struct cpu_dbs_info_s *dbs_info;
+	int ret, cpu;
+
+	if (!dbs_enable) return -EINVAL;
+	cpu = get_cpu();
+	policy = cpufreq_cpu_get(cpu);
+	put_cpu();
+	if (!policy) return -EFAULT;
+	dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+	cpufreq_cpu_put(policy);
+
+	if (freq > 0)
+		ret = min_request_inc(policy, dbs_info->freq_table, freq);
+	else
+		ret = min_request_dec(policy, dbs_info->freq_table, -freq);
+
+	return ret ? -ENOENT : 0;
+}
+
+// request to reserve minimum frequency
+static ssize_t show_reserve(struct kobject *a, struct attribute *b,
+	char *buf)
+{
+	struct cpufreq_policy *policy;
+	struct cpu_dbs_info_s *dbs_info;
+	unsigned count = 0;
+	unsigned freq;
+	int idx = 0;
+	int cpu;
+
+	cpu = get_cpu();
+	policy = cpufreq_cpu_get(cpu);
+	put_cpu();
+	if (!policy) return -ENOENT;
+	dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+	cpufreq_cpu_put(policy);
+
+	while ((freq = dbs_info->freq_table[idx].frequency) != CPUFREQ_TABLE_END) {
+		count += sprintf(buf + count, "%u:%u ",
+			dbs_info->freq_table[idx].frequency,
+			dbs_info->freq_table[idx].index);
+		idx++;
+	}
+	count += sprintf(buf + count - 1, "\n") - 1;
+	return count;
+}
+
+static ssize_t store_reserve(struct kobject *a, struct attribute *b,
+	const char *buf, size_t count)
+{
+	int freq;
+
+	if (sscanf(buf, "%d", &freq) != 1)
+		return -EINVAL;
+
+	return reservation_update(freq) ?: count;
+}
+
+define_one_global_rw(reserve);
+
+int cpufreq_minimum_reserve(int freq)
+{
+	return reservation_update(freq);
+}
+EXPORT_SYMBOL(cpufreq_minimum_reserve);
+
+int cpufreq_minimum_unreserve(int freq)
+{
+	return reservation_update(-freq);
+}
+EXPORT_SYMBOL(cpufreq_minimum_unreserve);
+#endif
+
 static struct attribute *dbs_attributes[] = {
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+	&reserve.attr,
+#endif
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
 	&up_threshold.attr,
@@ -623,7 +795,12 @@ static void do_dbs_timer(struct work_struct *work)
 			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
 				* dbs_info->rate_mult);
 
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+			// only align "normal" samples
+			if (dbs_info->rate_mult == 1 && num_online_cpus() > 1)
+#else
 			if (num_online_cpus() > 1)
+#endif
 				delay -= jiffies % delay;
 		}
 	} else {
@@ -708,6 +885,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
 		ondemand_powersave_bias_init_cpu(cpu);
+#if defined(CONFIG_BCM_KF_ONDEMAND)
+		min_request_clr(this_dbs_info->freq_table);
+#endif
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d1e5f589892620edc200b24c4b254ec273..6908a80e74c8dd7b8cd8ba840c4c8e0ddd4249ee 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,7 @@ config CPU_IDLE_GOV_MENU
 	bool
 	depends on CPU_IDLE && NO_HZ
 	default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+	def_bool n
+	depends on BCM_KF_ANDROID && BCM_ANDROID
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88379df9797fac371a3b3b8376df5d631db..21248239edbd929cb7a8d0471de6212c31c8f036 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,8 @@
 #
 
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd7912b1ce9a2bd236e84188fdd81aace3..3bfda5f371f3220df4e4ec9d21c33341d5154d3e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1475,6 +1475,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 
 	return false;
 }
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+EXPORT_SYMBOL(pl08x_filter_id);
+#endif /* CONFIG_BCM_KF_ARM_BCM963XX */
 
 /*
  * Just check that the device is there and active
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffddcba32af62b637baa09fd9487f515451fca4f..dc503de12b1dac22a8eb72a454ca47f3ff996742 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,28 @@ config HIDRAW
 
 	If unsure, say Y.
 
+config UHID
+	tristate "User-space I/O driver support for HID subsystem"
+	depends on HID
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	---help---
+	Say Y here if you want to provide HID I/O Drivers from user-space.
+	This allows to write I/O drivers in user-space and feed the data from
+	the device into the kernel. The kernel parses the HID reports, loads the
+	corresponding HID Device Driver or provides input devices on top of your
+	user-space device.
+
+	This driver cannot be used to parse HID-reports in user-space and write
+	special HID-drivers. You should use hidraw for that.
+	Instead, this driver allows to write the transport-layer driver in
+	user-space like USB-HID and Bluetooth-HID do in kernel-space.
+
+	If unsure, say N.
+
+	To compile this driver as a module, choose M here: the
+	module will be called uhid.
+
 source "drivers/hid/usbhid/Kconfig"
 
 menu "Special HID drivers"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22f1d16cd79c7a680a6983812146459fd76ff1ab..55370b030e56e334feac849b54dfe9e09338448e 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,11 @@ ifdef CONFIG_DEBUG_FS
 endif
 
 obj-$(CONFIG_HID)		+= hid.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_UHID)		+= uhid.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 hid-$(CONFIG_HIDRAW)		+= hidraw.o
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ea8736bc257df7e2f0bd998aed42eec4259ba411..a6eb1b554ed868d2f78b752fc24cca77403d0ffc 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -913,4 +913,15 @@ config SCx200_ACB
 	  This support is also available as a module.  If so, the module
 	  will be called scx200_acb.
 
+config I2C_GPIO_CUSTOM
+	tristate "Custom GPIO-based I2C driver"
+	depends on GENERIC_GPIO
+	select I2C_GPIO
+	help
+	  This is an I2C driver to register 1 to 4 custom I2C buses using
+	  GPIO lines.
+
+	  This support is also available as a module.  If so, the module
+	  will be called i2c-gpio-custom.
+
 endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 2f05d7b6c4140b70ed4acaf59dbf8dc660ecc512..39dade0d76dcd3a48ea1fdb845a13944bebbc19f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -89,4 +89,6 @@ obj-$(CONFIG_I2C_STUB)		+= i2c-stub.o
 obj-$(CONFIG_SCx200_ACB)	+= scx200_acb.o
 obj-$(CONFIG_SCx200_I2C)	+= scx200_i2c.o
 
+obj-$(CONFIG_I2C_GPIO_CUSTOM)   += i2c-gpio-custom.o
+
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-gpio-custom.c b/drivers/i2c/busses/i2c-gpio-custom.c
new file mode 100644
index 0000000000000000000000000000000000000000..c8b96ddc3094a7c0789608fe8e813cd7977f8b58
--- /dev/null
+++ b/drivers/i2c/busses/i2c-gpio-custom.c
@@ -0,0 +1,203 @@
+/*
+ *  Custom GPIO-based I2C driver
+ *
+ *  Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ *  The behaviour of this driver can be altered by setting some parameters
+ *  from the insmod command line.
+ *
+ *  The following parameters are adjustable:
+ *
+ *	bus0	These four arguments can be arrays of
+ *	bus1	1-8 unsigned integers as follows:
+ *	bus2
+ *	bus3	<id>,<sda>,<scl>,<udelay>,<timeout>,<sda_od>,<scl_od>,<scl_oo>
+ *
+ *  where:
+ *
+ *  <id>	ID to use as device_id for the corresponding bus (required)
+ *  <sda>	GPIO pin ID to use for SDA (required)
+ *  <scl>	GPIO pin ID to use for SCL (required)
+ *  <udelay>	signal toggle delay.
+ *  <timeout>	clock stretching timeout.
+ *  <sda_od>	SDA is configured as open drain.
+ *  <scl_od>	SCL is configured as open drain.
+ *  <scl_oo>	SCL output drivers cannot be turned off.
+ *
+ *  See include/i2c-gpio.h for more information about the parameters.
+ *
+ *  If this driver is built into the kernel, you can use the following kernel
+ *  command line parameters, with the same values as the corresponding module
+ *  parameters listed above:
+ *
+ *	i2c-gpio-custom.bus0
+ *	i2c-gpio-custom.bus1
+ *	i2c-gpio-custom.bus2
+ *	i2c-gpio-custom.bus3
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <linux/i2c-gpio.h>
+
+#define DRV_NAME	"i2c-gpio-custom"
+#define DRV_DESC	"Custom GPIO-based I2C driver"
+#define DRV_VERSION	"0.1.1"
+
+#define PFX		DRV_NAME ": "
+
+#define BUS_PARAM_ID		0
+#define BUS_PARAM_SDA		1
+#define BUS_PARAM_SCL		2
+#define BUS_PARAM_UDELAY	3
+#define BUS_PARAM_TIMEOUT	4
+#define BUS_PARAM_SDA_OD	5
+#define BUS_PARAM_SCL_OD	6
+#define BUS_PARAM_SCL_OO	7
+
+#define BUS_PARAM_REQUIRED	3
+#define BUS_PARAM_COUNT		8
+#define BUS_COUNT_MAX		4
+
+static unsigned int bus0[BUS_PARAM_COUNT] __initdata;
+static unsigned int bus1[BUS_PARAM_COUNT] __initdata;
+static unsigned int bus2[BUS_PARAM_COUNT] __initdata;
+static unsigned int bus3[BUS_PARAM_COUNT] __initdata;
+
+static unsigned int bus_nump[BUS_COUNT_MAX] __initdata;
+
+#define BUS_PARM_DESC \
+	" config -> id,sda,scl[,udelay,timeout,sda_od,scl_od,scl_oo]"
+
+module_param_array(bus0, uint, &bus_nump[0], 0);
+MODULE_PARM_DESC(bus0, "bus0" BUS_PARM_DESC);
+module_param_array(bus1, uint, &bus_nump[1], 0);
+MODULE_PARM_DESC(bus1, "bus1" BUS_PARM_DESC);
+module_param_array(bus2, uint, &bus_nump[2], 0);
+MODULE_PARM_DESC(bus2, "bus2" BUS_PARM_DESC);
+module_param_array(bus3, uint, &bus_nump[3], 0);
+MODULE_PARM_DESC(bus3, "bus3" BUS_PARM_DESC);
+
+static struct platform_device *devices[BUS_COUNT_MAX];
+static unsigned int nr_devices;
+
+static void i2c_gpio_custom_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < nr_devices; i++)
+		if (devices[i])
+			platform_device_put(devices[i]);
+}
+
+static int __init i2c_gpio_custom_add_one(unsigned int id, unsigned int *params)
+{
+	struct platform_device *pdev;
+	struct i2c_gpio_platform_data pdata;
+	int err;
+
+	if (!bus_nump[id])
+		return 0;
+
+	if (bus_nump[id] < BUS_PARAM_REQUIRED) {
+		printk(KERN_ERR PFX "not enough parameters for bus%d\n", id);
+		err = -EINVAL;
+		goto err;
+	}
+
+	pdev = platform_device_alloc("i2c-gpio", params[BUS_PARAM_ID]);
+	if (!pdev) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	pdata.sda_pin = params[BUS_PARAM_SDA];
+	pdata.scl_pin = params[BUS_PARAM_SCL];
+	pdata.udelay = params[BUS_PARAM_UDELAY];
+	pdata.timeout = params[BUS_PARAM_TIMEOUT];
+	pdata.sda_is_open_drain = params[BUS_PARAM_SDA_OD] != 0;
+	pdata.scl_is_open_drain = params[BUS_PARAM_SCL_OD] != 0;
+	pdata.scl_is_output_only = params[BUS_PARAM_SCL_OO] != 0;
+
+	err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+	if (err)
+		goto err_put;
+
+	err = platform_device_add(pdev);
+	if (err)
+		goto err_put;
+
+	devices[nr_devices++] = pdev;
+	return 0;
+
+err_put:
+	platform_device_put(pdev);
+err:
+	return err;
+}
+
+static int __init i2c_gpio_custom_probe(void)
+{
+	int err;
+
+	printk(KERN_INFO DRV_DESC " version " DRV_VERSION "\n");
+
+	err = i2c_gpio_custom_add_one(0, bus0);
+	if (err)
+		goto err;
+
+	err = i2c_gpio_custom_add_one(1, bus1);
+	if (err)
+		goto err;
+
+	err = i2c_gpio_custom_add_one(2, bus2);
+	if (err)
+		goto err;
+
+	err = i2c_gpio_custom_add_one(3, bus3);
+	if (err)
+		goto err;
+
+	if (!nr_devices) {
+		printk(KERN_ERR PFX "no bus parameter(s) specified\n");
+		err = -ENODEV;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	i2c_gpio_custom_cleanup();
+	return err;
+}
+
+#ifdef MODULE
+static int __init i2c_gpio_custom_init(void)
+{
+	return i2c_gpio_custom_probe();
+}
+module_init(i2c_gpio_custom_init);
+
+static void __exit i2c_gpio_custom_exit(void)
+{
+	i2c_gpio_custom_cleanup();
+}
+module_exit(i2c_gpio_custom_exit);
+#else
+subsys_initcall(i2c_gpio_custom_probe);
+#endif /* MODULE*/
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org >");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 332597980817d4d97165f7172c7ebd4bc494a7a2..cfd2c48bc58348c978de4ccb3f3ad8271da0e367 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -165,6 +165,16 @@ config INPUT_APMPOWER
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	tristate "Reset key"
+	depends on INPUT
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	---help---
+	  Say Y here if you want to reboot when some keys are pressed;
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keyreset.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index b173a13a73caa4b8e93eee97289cd55fd0eb9e74..81419ee8655da21b9b47c75310649ec7e65ba114 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -25,3 +25,8 @@ obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
 obj-$(CONFIG_INPUT_OF_MATRIX_KEYMAP) += of_keymap.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7faf4a7fcaa9219d278b67b75a77a6c10081f678..d699a0cc5278a8f38b5d8364a988ff2538ff5bda 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -279,6 +279,18 @@ config INPUT_ATI_REMOTE2
 	  To compile this driver as a module, choose M here: the module will be
 	  called ati_remote2.
 
+config INPUT_KEYCHORD
+	tristate "Key chord input driver support"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Say Y here if you want to enable the key chord driver
+	  accessible at /dev/keychord.  This driver can be used
+	  for receiving notifications when client specified key
+	  combinations are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -407,6 +419,12 @@ config INPUT_SGI_BTNS
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Say Y here if you want to support gpio based keys, wheels etc...
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f55cdf4916fae2d717a3828099f130c71bbffd02..19fc8ac57f57260c5e6276d79e56a1eb01e6cbe5 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -25,8 +25,18 @@ obj-$(CONFIG_INPUT_DA9052_ONKEY)	+= da9052_onkey.o
 obj-$(CONFIG_INPUT_DM355EVM)		+= dm355evm_keys.o
 obj-$(CONFIG_INPUT_GP2A)		+= gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)	+= gpio_tilt_polled.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_INPUT_GPIO)		+= gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda585b6461309824f277e52d263e0dcb6..e677d553d8af8dc5c79e784c93981095e4b67bca 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -468,6 +468,13 @@ config LEDS_TRIGGER_DEFAULT_ON
 	  This allows LEDs to be initialised in the ON state.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_SLEEP
+	tristate "LED Sleep Mode Trigger"
+	depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  This turns LEDs on when the screen is off but the cpu still running.
+
 comment "iptables trigger is under Netfilter config (LED target)"
 	depends on LEDS_TRIGGERS
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f6b23aa762a242b4a9b4e98998fb91..6b8bb55a7d7529e1040d5bb09f780c8263c7db04 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -56,3 +56,8 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT)	+= ledtrig-heartbeat.o
 obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT)	+= ledtrig-backlight.o
 obj-$(CONFIG_LEDS_TRIGGER_GPIO)		+= ledtrig-gpio.o
 obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON)	+= ledtrig-default-on.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_LEDS_TRIGGER_SLEEP)	+= ledtrig-sleep.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff8439dc68a7e8c6424f0cbed3746cd9820efb..28381117f3b0c147b857a076b293431aabf21439 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -185,8 +185,13 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 	led_trigger_set_default(led_cdev);
 #endif
 
+#if defined(CONFIG_BCM_KF_LEDS)
+	dev_dbg(parent, "Registered led device: %s\n",
+			led_cdev->name);
+#else
 	printk(KERN_DEBUG "Registered led device: %s\n",
 			led_cdev->name);
+#endif //CONFIG_BCM_KF_LEDS
 
 	return 0;
 }
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ecda1c4c75c793178690792a0b62c4b99b87c374..eb84e0cf67a8138480d02abee62d950228df0f30 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -419,6 +419,15 @@ config HMC6352
 	  This driver provides support for the Honeywell HMC6352 compass,
 	  providing configuration and heading data via sysfs.
 
+config SENSORS_AK8975
+	tristate "AK8975 compass support"
+	default n
+	depends on I2C
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  If you say yes here you get support for Asahi Kasei's
+	  orientation sensor AK8975.
+
 config EP93XX_PWM
 	tristate "EP93xx PWM support"
 	depends on ARCH_EP93XX
@@ -462,6 +471,11 @@ config TI_DAC7512
 	  This driver can also be built as a module. If so, the module
 	  will be called ti_dac7512.
 
+config UID_STAT
+	bool "UID based statistics tracking exported to /proc/uid_stat"
+	default n
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
 	depends on X86
@@ -535,6 +549,15 @@ config MAX8997_MUIC
 	  Maxim MAX8997 PMIC.
 	  The MAX8997 MUIC is a USB port accessory detector and switch.
 
+config WL127X_RFKILL
+	tristate "Bluetooth power control driver for TI wl127x"
+	depends on RFKILL
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	---help---
+	 Creates an rfkill entry in sysfs for power control of Bluetooth
+	 TI wl127x chips.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c7c058ab3be41c2a0f0e1d2f4d8b47209640a6f6..6a86afe5d9aca5d3d9afc70040b9b5ce94666149 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,6 +33,11 @@ obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_UID_STAT)		+= uid_stat.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
@@ -50,3 +55,9 @@ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
 obj-$(CONFIG_ALTERA_STAPL)	+=altera-stapl/
 obj-$(CONFIG_MAX8997_MUIC)	+= max8997-muic.o
 obj-$(CONFIG_HWLAT_DETECTOR)	+= hwlat_detector.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_WL127X_RFKILL)	+= wl127x-rfkill.o
+obj-$(CONFIG_SENSORS_AK8975)	+= akm8975.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924fccc2168aea37b9a1056d5095dd6..44d2c15313621c4e75820d6d95359096c63fec83 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,16 @@ config MMC_BLOCK_BOUNCE
 
 	  If unsure, say Y here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested. This will reduce overall resume latency and
+	  save power when there's an SD card inserted but not being used.
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	help
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517f1bb4adf33a5b8218a297ed487c54..ad43b594bf470dc06b755a894e1ec8b745ed4f10 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,22 @@ config MMC_CLKGATE
 	  support handling this in order for it to be of any use.
 
 	  If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+	boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  If you say Y here, support will be added for embedded SDIO
+	  devices which do not contain the necessary enumeration
+	  support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+	bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  If you say Y here, the MMC layer will be extra paranoid
+	  about re-trying SD init requests. This can be a useful
+	  work-around for buggy controllers and hardware. Enable
+	  if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b2fe9bfd4a9b863dbc95d0e76e2eae..476aa54ffa7fc0fee19fc701da912697d11d5087 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,8 @@ static int mmc_bus_remove(struct device *dev)
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_EMMC)
+#ifdef CONFIG_PM_SLEEP
 static int mmc_bus_suspend(struct device *dev)
 {
 	struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +145,30 @@ static int mmc_bus_resume(struct device *dev)
 		ret = drv->resume(card);
 	return ret;
 }
+#endif
+#else
+static int mmc_bus_suspend(struct device *dev)
+{
+	struct mmc_driver *drv = to_mmc_driver(dev->driver);
+	struct mmc_card *card = mmc_dev_to_card(dev);
+	int ret = 0;
+
+	if (dev->driver && drv->suspend)
+		ret = drv->suspend(card);
+	return ret;
+}
+
+static int mmc_bus_resume(struct device *dev)
+{
+	struct mmc_driver *drv = to_mmc_driver(dev->driver);
+	struct mmc_card *card = mmc_dev_to_card(dev);
+	int ret = 0;
+
+	if (dev->driver && drv->resume)
+		ret = drv->resume(card);
+	return ret;
+}
+#endif /* CONFIG_BCM_KF_EMMC */
 
 #ifdef CONFIG_PM_RUNTIME
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc04137741ee7710d6ef5ee119add388d7..3862d652e94dca247907d0512d55893f97338cc5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,38 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 	return err;
 }
 
+#if defined(CONFIG_BCM_KF_EMMC)
+static void mmc_select_card_type(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+	u32 caps = host->caps, caps2 = host->caps2;
+	unsigned int hs_max_dtr = 0;
+	
+	if (card_type & EXT_CSD_CARD_TYPE_26)
+		hs_max_dtr = 26000000;
+	
+	if (caps & MMC_CAP_MMC_HIGHSPEED &&
+	                card_type & EXT_CSD_CARD_TYPE_52)
+		hs_max_dtr = 52000000;
+	
+	if ((caps & MMC_CAP_1_8V_DDR &&
+	                card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+	    (caps & MMC_CAP_1_2V_DDR &&
+	                card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+		hs_max_dtr = 52000000;
+	
+	if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+	                card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+	    (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+	                card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+		hs_max_dtr = 200000000;
+	
+	card->ext_csd.hs_max_dtr = hs_max_dtr;
+	card->ext_csd.card_type = card_type;
+}
+#endif /* CONFIG_BCM_KF_EMMC */
+
 /*
  * Decode extended CSD.
  */
@@ -262,7 +294,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 	}
 
 	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
-	if (card->ext_csd.rev > 6) {
+#if defined(CONFIG_BCM_KF_EMMC)
+	if (card->ext_csd.rev > 7) {
+#else      
+ 	if (card->ext_csd.rev > 6) {
+#endif /* CONFIG_BCM_KF_EMMC */
 		pr_err("%s: unrecognised EXT_CSD revision %d\n",
 			mmc_hostname(card->host), card->ext_csd.rev);
 		err = -EINVAL;
@@ -285,55 +321,60 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 			mmc_card_set_blockaddr(card);
 	}
 	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
-	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
-	case EXT_CSD_CARD_TYPE_SDR_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
-		break;
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
-		break;
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
-	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
-		card->ext_csd.hs_max_dtr = 200000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
-		break;
-	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
-	     EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
-		break;
-	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 52000000;
-		break;
-	case EXT_CSD_CARD_TYPE_26:
-		card->ext_csd.hs_max_dtr = 26000000;
-		break;
-	default:
-		/* MMC v4 spec says this cannot happen */
-		pr_warning("%s: card is mmc v4 but doesn't "
-			"support any high-speed modes.\n",
-			mmc_hostname(card->host));
-	}
+#if defined(CONFIG_BCM_KF_EMMC)
+	mmc_select_card_type(card);
+#else   
+ 	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ 	case EXT_CSD_CARD_TYPE_SDR_ALL:
+ 	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
+ 	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
+ 	case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
+ 		card->ext_csd.hs_max_dtr = 200000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
+ 		card->ext_csd.hs_max_dtr = 200000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
+ 	case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
+ 		card->ext_csd.hs_max_dtr = 200000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
+ 	     EXT_CSD_CARD_TYPE_26:
+ 		card->ext_csd.hs_max_dtr = 52000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
+ 	     EXT_CSD_CARD_TYPE_26:
+ 		card->ext_csd.hs_max_dtr = 52000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
+ 	     EXT_CSD_CARD_TYPE_26:
+ 		card->ext_csd.hs_max_dtr = 52000000;
+ 		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
+ 		break;
+ 	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
+ 		card->ext_csd.hs_max_dtr = 52000000;
+ 		break;
+ 
+ 	case EXT_CSD_CARD_TYPE_26:
+ 		card->ext_csd.hs_max_dtr = 26000000;
+ 		break;
+ 	default:
+ 		/* MMC v4 spec says this cannot happen */
+ 		pr_warning("%s: card is mmc v4 but doesn't "
+ 			"support any high-speed modes.\n",
+ 			mmc_hostname(card->host));
+ 	}
+#endif /* CONFIG_BCM_KF_EMMC */
 
 	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
 	card->ext_csd.raw_erase_timeout_mult =
@@ -745,7 +786,11 @@ static int mmc_select_powerclass(struct mmc_card *card,
  */
 static int mmc_select_hs200(struct mmc_card *card)
 {
-	int idx, err = 0;
+#if defined(CONFIG_BCM_KF_EMMC)
+	int idx, err = -EINVAL;
+#else 	
+ 	int idx, err = 0;
+#endif /* CONFIG_BCM_KF_EMMC */
 	struct mmc_host *host;
 	static unsigned ext_csd_bits[] = {
 		EXT_CSD_BUS_WIDTH_4,
@@ -760,11 +805,21 @@ static int mmc_select_hs200(struct mmc_card *card)
 
 	host = card->host;
 
+#if defined(CONFIG_BCM_KF_EMMC)
 	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
-	    host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
-		if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
-			err = mmc_set_signal_voltage(host,
-						     MMC_SIGNAL_VOLTAGE_180, 0);
+		 host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+			err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+		 host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+			err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
+#else
+ 	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+ 	    host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ 		if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
+ 			err = mmc_set_signal_voltage(host,
+ 						     MMC_SIGNAL_VOLTAGE_180, 0);
+#endif /* CONFIG_BCM_KF_EMMC */
 
 	/* If fails try again during next card power cycle */
 	if (err)
@@ -1060,6 +1115,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
 		if (max_dtr > card->ext_csd.hs_max_dtr)
 			max_dtr = card->ext_csd.hs_max_dtr;
+#if defined(CONFIG_BCM_KF_EMMC)
+		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
+			max_dtr = 52000000;
+#endif /* CONFIG_BCM_KF_EMMC */
 	} else if (max_dtr > card->csd.max_dtr) {
 		max_dtr = card->csd.max_dtr;
 	}
@@ -1071,14 +1130,24 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	 */
 	if (mmc_card_highspeed(card)) {
 		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
-			&& ((host->caps & (MMC_CAP_1_8V_DDR |
-			     MMC_CAP_UHS_DDR50))
-				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
+#if defined(CONFIG_BCM_KF_EMMC)
+			&& ((host->caps & MMC_CAP_1_8V_DDR)
+				== MMC_CAP_1_8V_DDR ))
+#else 				
+ 			&& ((host->caps & (MMC_CAP_1_8V_DDR |
+ 			     MMC_CAP_UHS_DDR50))
+ 				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
+#endif /* CONFIG_BCM_KF_EMMC */
 				ddr = MMC_1_8V_DDR_MODE;
 		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
-			&& ((host->caps & (MMC_CAP_1_2V_DDR |
-			     MMC_CAP_UHS_DDR50))
-				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
+#if defined(CONFIG_BCM_KF_EMMC)
+			&& ((host->caps & MMC_CAP_1_2V_DDR)
+				== MMC_CAP_1_2V_DDR ))
+#else 				
+ 			&& ((host->caps & (MMC_CAP_1_2V_DDR |
+ 			     MMC_CAP_UHS_DDR50))
+ 				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
+#endif /* CONFIG_BCM_KF_EMMC */
 				ddr = MMC_1_2V_DDR_MODE;
 	}
 
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344db5970c754ab6cbf0a849830d8339f..50d18fd98354139aaed99c9fac20a41477624534 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -218,6 +218,18 @@ config MMC_SDHCI_SPEAR
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
+   
+config MMC_SDHCI_BCM63xx
+	tristate "SDHCI support on Broadcom DSL/PON CPE device"
+	depends on BCM_KF_EMMC
+	depends on MMC_SDHCI && MMC_SDHCI_PLTFM
+	help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  for the EMMC block on Broadcom DSL/PON SoCs
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
 
 config MMC_SDHCI_S3C_DMA
 	bool "DMA support on S3C SDHCI"
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d0807346a82353544a0623086ce4249409..1f17865fed0366fe9ba77625c0021b5f4cb59756 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -54,6 +54,16 @@ obj-$(CONFIG_MMC_SDHCI_TEGRA)		+= sdhci-tegra.o
 obj-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_EMMC)
+obj-$(CONFIG_MMC_SDHCI_BCM63xx)		+= sdhci-bcm63xx.o
+endif # BCM_KF
+
 ifeq ($(CONFIG_CB710_DEBUG),y)
 	CFLAGS-cb710-mmc	+= -DDEBUG
 endif
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_EMMC)
+ifeq ($(CONFIG_MMC_SDHCI_BCM63xx),y)
+	EXTRA_CFLAGS += -I$(INC_BRCMBOARDPARMS_PATH)/$(BRCM_BOARD) -I$(SRCBASE)/include -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+endif
+endif # BCM_KF
diff --git a/drivers/mmc/host/sdhci-bcm63xx.c b/drivers/mmc/host/sdhci-bcm63xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..f505b93100a3a50977451ecf721ba7c76202e027
--- /dev/null
+++ b/drivers/mmc/host/sdhci-bcm63xx.c
@@ -0,0 +1,111 @@
+#if defined(CONFIG_BCM_KF_EMMC)
+/**************************************************************
+ * sdhci-bcm63xx.c Support for SDHCI on Broadcom DSL/PON CPE SoC's
+ *
+ * Author: Farhan Ali <fali@broadcom.com>
+ * Based on sdhci-brcmstb.c
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * <:label-BRCM:2014:DUAL/GPL:standard
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed
+ * to you under the terms of the GNU General Public License version 2
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+ * with the following added to such license:
+ *
+ *    As a special exception, the copyright holders of this software give
+ *    you permission to link this software with independent modules, and
+ *    to copy and distribute the resulting executable under terms of your
+ *    choice, provided that you also meet, for each linked independent
+ *    module, the terms and conditions of the license of that module.
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications
+ *    of the software.
+ *
+ * Not withstanding the above, under no circumstances may you combine
+ * this software in any way with any other Broadcom software provided
+ * under a license other than the GPL, without Broadcom's express prior
+ * written consent.
+ *
+ * :>
+ *
+ ************************************************************/
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <bcm_map_part.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+   
+static struct sdhci_pltfm_data sdhci_bcm63xx_pdata = {
+	/* Quirks and ops defined here will be passed to sdhci_host structure */
+};
+
+static int sdhci_bcm63xx_probe(struct platform_device *pdev)
+{
+	int res = 0;
+	struct sdhci_host *host;
+	struct sdhci_pltfm_host *pltfm_host;
+
+	/* Force straps to enable emmc signals - must prevent platform device register if not using emmc */
+	AHBSS_CTRL->ahbss_ctrl_cfg |= FORCE_EMMC_BOOT_STRAP;
+	
+	/* Check if we are in normal mode, if not then force us in normal mode */
+	while( EMMC_BOOT->emmc_boot_status & EMMC_BOOT_MODE_MASK )
+	{
+		EMMC_BOOT->emmc_boot_main_ctl &= ~EMMC_BOOT_ENABLE;		
+	}
+	
+	host = sdhci_pltfm_init(pdev, &sdhci_bcm63xx_pdata);
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+	
+	/* Get pltfm host */
+	pltfm_host = sdhci_priv(host);
+
+	/* Set Caps */
+	host->mmc->caps  |= MMC_CAP_NONREMOVABLE;
+	host->mmc->caps  |= MMC_CAP_8_BIT_DATA;
+	host->mmc->caps  |= MMC_CAP_1_8V_DDR;
+	
+	/* Disable HS200 - Controller can only output 100Mhz clock */
+	host->mmc->caps2 &= ~(MMC_CAP2_HS200);
+	
+	/* Disable 1.2V modes - Controller doesn't support 1.2V signalling */
+	host->mmc->caps  &= ~(MMC_CAP_1_2V_DDR);
+	host->mmc->caps2 &= ~(MMC_CAP2_HS200_1_2V_SDR);
+
+	res = sdhci_add_host(host);
+	if (res)
+		sdhci_pltfm_free(pdev);
+	
+	return res;
+}
+
+static int sdhci_bcm63xx_remove(struct platform_device *pdev)
+{
+	int res;
+	res = sdhci_pltfm_unregister(pdev);
+	return res;
+}
+
+static struct platform_driver sdhci_bcm63xx_driver = {
+	.driver		= {
+		.name	= "sdhci-bcm63xx",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sdhci_bcm63xx_probe,
+	.remove		= sdhci_bcm63xx_remove,
+};
+
+module_platform_driver(sdhci_bcm63xx_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Broadcom DSL/PON CPE devices");
+MODULE_AUTHOR("Farhan Ali <fali@broadcom.com>");
+MODULE_LICENSE("GPL v2");
+#endif /* CONFIG_BCM_KF_EMMC */
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 27143e042af5b2dfec767a1d1cf61762fcf2734b..68eb8da88ffb66c39d133fd09165198eee1900de 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -328,4 +328,8 @@ source "drivers/mtd/lpddr/Kconfig"
 
 source "drivers/mtd/ubi/Kconfig"
 
+if BCM_KF_MTD_BCMNAND
+source "drivers/mtd/brcmnand/Kconfig"
+endif
+
 endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index f90135429dc7568e6446d9ac350837ed2ba648fc..42dbe9dbe2bda37bf18771cf166d7e7875265084 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -32,4 +32,7 @@ inftl-objs		:= inftlcore.o inftlmount.o
 
 obj-y		+= chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MTD_BCMNAND)
+obj-$(CONFIG_MTD_BRCMNAND)	+= brcmnand/
+endif #BCM_KF # defined(CONFIG_BCM_KF_MTD_BCMNAND)
 obj-$(CONFIG_MTD_UBI)		+= ubi/
diff --git a/drivers/mtd/brcmnand/Kconfig b/drivers/mtd/brcmnand/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..ff100930b551a5bcca3dac8ab5e1724990ada99d
--- /dev/null
+++ b/drivers/mtd/brcmnand/Kconfig
@@ -0,0 +1,74 @@
+#
+# linux/drivers/mtd/brcmnand/Kconfig
+#
+
+config MTD_BRCMNAND
+	tristate "Broadcom NAND controller support"
+	depends on BCM_KF_MTD_BCMNAND
+	select BRCMNAND_MTD_EXTENSION
+	default n
+	help
+	  Say Y to enable the onchip NAND controller.
+
+config BRCMNAND_MTD_EXTENSION
+	bool "Enable Broadcom NAND controller extension"
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND
+	default y
+	help
+	  Say Y to enable Broadcom NAND extension in order to support
+	  27Byte Spare Area and 8KB page NAND flashes.
+
+config MTD_BRCMNAND_VERIFY_WRITE
+	bool "Verify Broadcom NAND page writes"
+	default n
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND
+	help
+	  This adds an extra check when data is written to the flash. The
+	  Broadcom NAND flash device internally checks only bits transitioning
+	  from 1 to 0. There is a rare possibility that even though the
+	  device thinks the write was successful, a bit could have been
+	  flipped accidentally due to device wear or something else.
+
+config MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+	bool "Refresh a block on a one bit correctable ECC error"
+	default n
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND
+	help
+	  If there is a 1-bit correctable error detected during NAND flash
+	  read, the Broadcom NAND flash driver can refresh the corresponding
+	  NAND flash block.  Refreshing implies a sequence of
+	  read->erase->write. Refreshing the block drastically reduces the
+	  probability of occurrence of a similar (correctable) error.
+
+	  Default is N because this is normally handled by UBI.
+
+config MTD_BRCMNAND_EDU
+	bool "Enable Broadcom NAND DMA (EDU)"
+	default y
+	select MTD_BRCMNAND_USE_ISR
+	select MTD_BRCMNAND_ISR_QUEUE
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND && BRCM_HAS_EDU
+	help
+	  Say Y to enable the EBI DMA unit for NAND flash transfers.
+	  Say N to use PIO transfers.
+
+config BRCMNAND_MAJOR_VERS
+	int "Broadcom NAND Major Version"
+    default 2
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND
+	help
+      NAND controller major version.
+
+config BRCMNAND_MINOR_VERS
+	int "Broadcom NAND Minor Version"
+    default 1
+	depends on BCM_KF_MTD_BCMNAND
+	depends on MTD_BRCMNAND
+	help
+      NAND controller minor version.
+
diff --git a/drivers/mtd/brcmnand/Makefile b/drivers/mtd/brcmnand/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..481e922d81e08ad59221b0187512372094edcda4
--- /dev/null
+++ b/drivers/mtd/brcmnand/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Broadcom NAND MTD
+#
+
+obj-$(CONFIG_MTD_BRCMNAND)		+=  bcm63xx-nand.o brcmnand_base.o brcmnand_bbt.o
+## obj-$(CONFIG_MTD_BRCMNAND_EDU)		+= edu.o
+obj-$(CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING)	+= brcmnand_cet.o
+EXTRA_CFLAGS	+= -I $(TOPDIR)/include/asm-generic -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+
diff --git a/drivers/mtd/brcmnand/bcm63xx-nand.c b/drivers/mtd/brcmnand/bcm63xx-nand.c
new file mode 100644
index 0000000000000000000000000000000000000000..4b8df307b8c433351559dc1617adfa0d7eed65cc
--- /dev/null
+++ b/drivers/mtd/brcmnand/bcm63xx-nand.c
@@ -0,0 +1,761 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+ *
+ *  drivers/mtd/brcmnand/bcm7xxx-nand.c
+ *
+    <:copyright-BRCM:2011:DUAL/GPL:standard
+    
+       Copyright (c) 2011 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+
+    File: bcm7xxx-nand.c
+
+    Description: 
+    This is a device driver for the Broadcom NAND flash for bcm97xxx boards.
+when    who what
+-----   --- ----
+051011  tht codings derived from OneNand generic.c implementation.
+
+ * THIS DRIVER WAS PORTED FROM THE 2.6.18-7.2 KERNEL RELEASE
+ */
+ 
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <bcm_map_part.h>
+#include <board.h>
+#include "brcmnand_priv.h"
+#include <linux/slab.h> 
+#include <flash_api.h>
+
+#define PRINTK(...)
+//#define PRINTK printk
+
+#define DRIVER_NAME     "brcmnand"
+#define DRIVER_INFO     "Broadcom NAND controller"
+
+extern bool kerSysIsRootfsSet(void);
+
+static int __devinit brcmnanddrv_probe(struct platform_device *pdev);
+static int __devexit brcmnanddrv_remove(struct platform_device *pdev);
+
+static struct mtd_partition bcm63XX_nand_parts[] = 
+{
+    {name: "rootfs",        offset: 0, size: 0},
+    {name: "rootfs_update", offset: 0, size: 0},
+    {name: "data",          offset: 0, size: 0},
+    {name: "nvram",         offset: 0, size: 0},
+    {name: "image",         offset: 0, size: 0},
+    {name: "image_update",  offset: 0, size: 0},
+    {name: "rootfs_update_data", offset: 0, size: 0},
+    {name: "dummy2",        offset: 0, size: 0},
+    {name: "dummy3",        offset: 0, size: 0},
+    {name: "dummy4",        offset: 0, size: 0},
+    {name: "dummy5",        offset: 0, size: 0},
+    {name: "dummy6",        offset: 0, size: 0},
+    {name: NULL,            offset: 0, size: 0}
+};
+
+#if 0
+static char* misc_mtd_partition_names[BCM_MAX_EXTRA_PARTITIONS] =
+{
+	"misc1",
+	"misc2",
+	"misc3",
+	NULL,
+};
+#endif
+
+static struct platform_driver brcmnand_platform_driver =
+{
+    .probe      = brcmnanddrv_probe,
+    .remove     = __devexit_p(brcmnanddrv_remove),
+    .driver     =
+     {
+        .name   = DRIVER_NAME,
+     },
+};
+
+static struct resource brcmnand_resources[] =
+{
+    [0] = {
+            .name   = DRIVER_NAME,
+            .start  = BPHYSADDR(BCHP_NAND_REG_START),
+            .end    = BPHYSADDR(BCHP_NAND_REG_END) + 3,
+            .flags  = IORESOURCE_MEM,
+          },
+};
+
+static struct brcmnand_info
+{
+    struct mtd_info mtd;
+    struct brcmnand_chip brcmnand;
+    int nr_parts;
+    struct mtd_partition* parts;
+} *gNandInfo[NUM_NAND_CS];
+
+int gNandCS[NAND_MAX_CS];
+/* Number of NAND chips, only applicable to v1.0+ NAND controller */
+int gNumNand = 0;
+int gClearBBT = 0;
+char gClearCET = 0;
+uint32_t gNandTiming1[NAND_MAX_CS], gNandTiming2[NAND_MAX_CS];
+uint32_t gAccControl[NAND_MAX_CS], gNandConfig[NAND_MAX_CS];
+
+static unsigned long t1[NAND_MAX_CS] = {0};
+static int nt1 = 0;
+static unsigned long t2[NAND_MAX_CS] = {0};
+static int nt2 = 0;
+static unsigned long acc[NAND_MAX_CS] = {0};
+static int nacc = 0;
+static unsigned long nandcfg[NAND_MAX_CS] = {0};
+static int ncfg = 0;
+static void* gPageBuffer = NULL;
+
+#if 0
+static int __devinit 
+is_split_partition (struct mtd_info* mtd, unsigned long offset, unsigned long size, unsigned long *split_offset)
+{
+    uint8_t buf[0x100];
+    size_t retlen;
+    int split_found = 0;
+
+    /* Search RootFS partition for split marker.
+     * Marker is located in the last 0x100 bytes of the last BootFS Erase Block
+     * If marker is found, we have separate Boot and Root Partitions.
+     */
+    for (*split_offset = offset + mtd->erasesize; *split_offset <= offset + size; *split_offset += mtd->erasesize)
+    {
+        if (mtd->_block_isbad(mtd, *split_offset - mtd->erasesize)) {
+            continue;
+        }
+        mtd->_read(mtd, *split_offset - 0x100, 0x100, &retlen, buf);
+
+        if (!strncmp (BCM_BCMFS_TAG, buf, strlen (BCM_BCMFS_TAG))) {
+            if (!strncmp (BCM_BCMFS_TYPE_UBIFS, &buf[strlen (BCM_BCMFS_TAG)], strlen (BCM_BCMFS_TYPE_UBIFS)))
+            {
+                printk("***** Found UBIFS Marker at 0x%08lx\n", *split_offset - 0x100); 
+                split_found = 1;
+                break;
+            }
+        }
+    }
+
+    return split_found;
+}
+#endif
+
+static void __devinit 
+brcmnanddrv_setup_mtd_partitions(struct brcmnand_info* nandinfo)
+{
+    int boot_from_nand;
+#if 0
+    int i=0, part_num=0;
+    uint64_t extra=0, extra_single_part_size=0;
+#endif
+
+    if (flash_get_flash_type() == FLASH_IFC_NAND)
+        boot_from_nand = 1;
+    else
+        boot_from_nand = 0;
+
+    if( boot_from_nand == 0 )
+    {
+        nandinfo->nr_parts = 1;
+        nandinfo->parts = bcm63XX_nand_parts;
+
+        bcm63XX_nand_parts[0].name = "data";
+        bcm63XX_nand_parts[0].offset = 0;
+        if( device_size(&(nandinfo->mtd)) < NAND_BBT_THRESHOLD_KB )
+        {
+            bcm63XX_nand_parts[0].size =
+                device_size(&(nandinfo->mtd)) - (NAND_BBT_SMALL_SIZE_KB*1024);
+        }
+        else
+        {
+            bcm63XX_nand_parts[0].size =
+                device_size(&(nandinfo->mtd)) - (NAND_BBT_BIG_SIZE_KB*1024);
+        }
+        bcm63XX_nand_parts[0].ecclayout = nandinfo->mtd.ecclayout;
+
+        PRINTK("Part[0] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[0].name,
+            bcm63XX_nand_parts[0].size, bcm63XX_nand_parts[0].offset);
+    }
+    else
+    {
+#if 1
+        static NVRAM_DATA nvram;
+        struct mtd_info* mtd = &nandinfo->mtd;
+        unsigned long rootfs_ofs, update_ofs, high_bank_ofs, bank_size;
+        unsigned long num_blks, bank_blks;
+
+        /* The CFE boot loader saved the offset of the partition from which
+         * the Linux image was loaded.
+         */
+        kerSysBlParmsGetInt(NAND_RFS_OFS_NAME, (int *) &rootfs_ofs);
+        kerSysNvRamGet((char *)&nvram, sizeof(nvram), 0);
+        PRINTK("rootfs_ofs=0x%8.8lx\n", rootfs_ofs);
+
+        nandinfo->nr_parts = 7;
+        nandinfo->parts = bcm63XX_nand_parts;
+
+        num_blks = nandinfo->mtd.size >> 17; /* 2^17 = 128k */
+        switch (num_blks) {
+            case 1024: bank_blks = 491; break;
+            case 2048: bank_blks = 1003; break;
+            default: bank_blks = 0;
+        }
+
+        if (!bank_blks || nandinfo->mtd.erasesize != 128*1024) {
+            printk("Unknown flash size (%lu/%lu)!\n",
+                   (unsigned long)nandinfo->mtd.size,
+                   (unsigned long)nandinfo->mtd.erasesize);
+            return;
+        }
+
+        bank_size = bank_blks*128*1024;
+        high_bank_ofs = (bank_blks + 1) * 128;
+
+ 	if (rootfs_ofs == 1*128 || rootfs_ofs == high_bank_ofs) {
+
+            /*== Old JFFS2 banked flash layout used ==*/
+
+            update_ofs = (rootfs_ofs == 1*128) ? high_bank_ofs : 1*128;
+
+            /* Boot-sector + nvram */
+            bcm63XX_nand_parts[3].offset = 0*1024;
+            bcm63XX_nand_parts[3].size = 128*1024;
+            bcm63XX_nand_parts[3].ecclayout = mtd->ecclayout;
+
+            /* rootfs */
+            bcm63XX_nand_parts[0].offset = rootfs_ofs*1024;
+            bcm63XX_nand_parts[0].size = bank_size;
+            bcm63XX_nand_parts[0].ecclayout = mtd->ecclayout;
+
+            /* rootfs_update */
+            bcm63XX_nand_parts[1].offset = update_ofs*1024;
+            bcm63XX_nand_parts[1].size = bank_size;
+            bcm63XX_nand_parts[1].ecclayout = mtd->ecclayout;
+
+            /* data (legacy, not used) */
+            bcm63XX_nand_parts[2].offset = 984*128*1024;
+            bcm63XX_nand_parts[2].size = 32*128*1024;
+            bcm63XX_nand_parts[2].ecclayout = mtd->ecclayout;
+
+            /* image (overlapping rootfs) */
+            bcm63XX_nand_parts[4].offset = rootfs_ofs*1024;
+            bcm63XX_nand_parts[4].size = bank_size;
+            bcm63XX_nand_parts[4].ecclayout = mtd->ecclayout;
+
+            /* image_update (overlapping rootfs_update) */
+            bcm63XX_nand_parts[5].offset = update_ofs*1024;;
+            bcm63XX_nand_parts[5].size = bank_size;
+            bcm63XX_nand_parts[5].ecclayout = mtd->ecclayout;
+
+            /* rootfs_update_data (legacy, not used) */
+            bcm63XX_nand_parts[6].offset = 737*128*1024;;
+            bcm63XX_nand_parts[6].size = 246*128*1024;
+            bcm63XX_nand_parts[6].ecclayout = mtd->ecclayout;
+
+            if (kerSysIsRootfsSet() == false) {
+                kerSysSetBootParm("root=", "mtd:rootfs");
+                kerSysSetBootParm("rootfstype=", "jffs2");
+                if (rootfs_ofs == 1*128)
+                    kerSysSetBootParm("phantomBootParm=", "bank=low");
+                else
+                    kerSysSetBootParm("phantomBootParm=", "bank=high");
+            }
+
+        } else {
+
+            /*== New UBI volume based flash layout used ==*/
+
+            /* Boot-sector (old nvram) */
+            bcm63XX_nand_parts[3].name = "nvram";
+            bcm63XX_nand_parts[3].offset = 0*1024;
+            bcm63XX_nand_parts[3].size = 128*1024;
+            bcm63XX_nand_parts[3].ecclayout = mtd->ecclayout;
+
+            /* New nvram partition */
+            bcm63XX_nand_parts[2].name = "nvram2";
+            bcm63XX_nand_parts[2].offset = 1*128*1024;
+            bcm63XX_nand_parts[2].size = 3*128*1024;
+            bcm63XX_nand_parts[2].ecclayout = mtd->ecclayout;
+
+            /* Kernel 0 partition */
+            bcm63XX_nand_parts[0].name = "kernel_0";
+            bcm63XX_nand_parts[0].offset = 4*128*1024;
+            bcm63XX_nand_parts[0].size = 40*128*1024;
+            bcm63XX_nand_parts[0].ecclayout = mtd->ecclayout;
+
+            /* Kernel 1 partition */
+            bcm63XX_nand_parts[1].name = "kernel_1";
+            bcm63XX_nand_parts[1].offset = 44*128*1024;
+            bcm63XX_nand_parts[1].size = 40*128*1024;
+            bcm63XX_nand_parts[1].ecclayout = mtd->ecclayout;
+
+            /* Merged partition */
+            bcm63XX_nand_parts[4].name = "ubi";
+            bcm63XX_nand_parts[4].offset = 84*128*1024;
+            bcm63XX_nand_parts[4].size = (num_blks - (84+8))*128*1024;
+            bcm63XX_nand_parts[4].ecclayout = mtd->ecclayout;
+
+            /* Old lay-out rootfs 0 partition */
+            bcm63XX_nand_parts[5].name = "mtd_lo";
+            bcm63XX_nand_parts[5].offset = 84*128*1024;;
+            bcm63XX_nand_parts[5].size =
+                (high_bank_ofs*1024) - bcm63XX_nand_parts[5].offset;
+            bcm63XX_nand_parts[5].ecclayout = mtd->ecclayout;
+
+            /* Old lay-out rootfs 1 + data + unused partition */
+            bcm63XX_nand_parts[6].name = "mtd_hi";
+            bcm63XX_nand_parts[6].offset = high_bank_ofs*1024;
+            bcm63XX_nand_parts[6].size =
+                (num_blks - 8)*128*1024 - bcm63XX_nand_parts[6].offset;
+            bcm63XX_nand_parts[6].ecclayout = mtd->ecclayout;
+
+            /* Last 8 erase blocks reserved for bad block tables */
+
+            if (kerSysIsRootfsSet() == false) {
+                if (nvram.iVersion == 0x04) {
+                    /* Using merged ubi partition */
+                    kerSysSetBootParm("ubi.mtd", "4");
+                    if ((rootfs_ofs*1024) == bcm63XX_nand_parts[0].offset)
+                        kerSysSetBootParm("root=", "ubi:rootfs_0");
+                    else
+                        kerSysSetBootParm("root=", "ubi:rootfs_1");
+                } else {
+                    /* First boot after upgrade from jffs2 (using low bank only) */
+                    kerSysSetBootParm("ubi.mtd", "5");
+                    kerSysSetBootParm("root=", "ubi:rootfs_0");
+                }
+                kerSysSetBootParm("rootfstype=", "ubifs");
+            }
+        }
+
+        PRINTK("Part[0] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[0].name,
+            bcm63XX_nand_parts[0].size, bcm63XX_nand_parts[0].offset);
+        PRINTK("Part[1] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[1].name,
+            bcm63XX_nand_parts[1].size, bcm63XX_nand_parts[1].offset);
+        PRINTK("Part[2] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[2].name,
+            bcm63XX_nand_parts[2].size, bcm63XX_nand_parts[2].offset);
+        PRINTK("Part[3] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[3].name,
+            bcm63XX_nand_parts[3].size, bcm63XX_nand_parts[3].offset);
+        PRINTK("Part[4] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[4].name,
+            bcm63XX_nand_parts[4].size, bcm63XX_nand_parts[4].offset);
+        PRINTK("Part[5] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[5].name,
+            bcm63XX_nand_parts[5].size, bcm63XX_nand_parts[5].offset);
+        PRINTK("Part[6] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[6].name,
+            bcm63XX_nand_parts[6].size, bcm63XX_nand_parts[6].offset);
+#else
+        static NVRAM_DATA nvram;
+        struct mtd_info* mtd = &nandinfo->mtd;
+        unsigned long rootfs_ofs;
+        int rootfs, rootfs_update;
+        unsigned long  split_offset;
+
+        kerSysBlParmsGetInt(NAND_RFS_OFS_NAME, (int *) &rootfs_ofs);
+        kerSysNvRamGet((char *)&nvram, sizeof(nvram), 0);
+        nandinfo->nr_parts = 7;
+        nandinfo->parts = bcm63XX_nand_parts;
+
+        /* Root FS.  The CFE RAM boot loader saved the rootfs offset that the
+         * Linux image was loaded from.
+         */
+        PRINTK("rootfs_ofs=0x%8.8lx, part1ofs=0x%8.8lx, part2ofs=0x%8.8lx\n",
+	       rootfs_ofs, (unsigned long)nvram.ulNandPartOfsKb[NP_ROOTFS_1],
+	       (unsigned long)nvram.ulNandPartOfsKb[NP_ROOTFS_2]);
+        if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_1] )
+        {
+            rootfs = NP_ROOTFS_1;
+            rootfs_update = NP_ROOTFS_2;
+        }
+        else
+        {
+            if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_2] )
+            {
+                rootfs = NP_ROOTFS_2;
+                rootfs_update = NP_ROOTFS_1;
+            }
+            else
+            {
+                /* Backward compatibility with old cferam. */
+                extern unsigned char _text;
+                unsigned long rootfs_ofs = *(unsigned long *) (&_text - 4);
+
+                if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_1] )
+                {
+                    rootfs = NP_ROOTFS_1;
+                    rootfs_update = NP_ROOTFS_2;
+                }
+                else
+                {
+                    rootfs = NP_ROOTFS_2;
+                    rootfs_update = NP_ROOTFS_1;
+                }
+            }
+        }
+
+        /* RootFS partition */
+        bcm63XX_nand_parts[0].offset = nvram.ulNandPartOfsKb[rootfs]*1024;
+	bcm63XX_nand_parts[0].size = nvram.ulNandPartSizeKb[rootfs]*1024;
+        bcm63XX_nand_parts[0].ecclayout = mtd->ecclayout;
+
+        /* This partition is used for flashing images */
+        bcm63XX_nand_parts[4].offset = bcm63XX_nand_parts[0].offset;
+        bcm63XX_nand_parts[4].size = bcm63XX_nand_parts[0].size;
+        bcm63XX_nand_parts[4].ecclayout = mtd->ecclayout;
+
+        if (is_split_partition (mtd, bcm63XX_nand_parts[0].offset, bcm63XX_nand_parts[0].size, &split_offset))
+        {
+            /* RootFS partition */
+            bcm63XX_nand_parts[0].offset = split_offset;
+            bcm63XX_nand_parts[0].size -= (split_offset - nvram.ulNandPartOfsKb[rootfs]*1024);
+
+            /* BootFS partition */
+            bcm63XX_nand_parts[nandinfo->nr_parts].name = "bootfs";
+            bcm63XX_nand_parts[nandinfo->nr_parts].offset = nvram.ulNandPartOfsKb[rootfs]*1024;
+            bcm63XX_nand_parts[nandinfo->nr_parts].size = split_offset - nvram.ulNandPartOfsKb[rootfs]*1024;
+            bcm63XX_nand_parts[nandinfo->nr_parts].ecclayout = mtd->ecclayout;
+
+            if (kerSysIsRootfsSet() == false) {
+                kerSysSetBootParm("ubi.mtd", "0");
+                kerSysSetBootParm("root=", "ubi:rootfs_ubifs");
+                kerSysSetBootParm("rootfstype=", "ubifs");
+            }
+        }
+        else {
+            if (kerSysIsRootfsSet() == false) {
+                kerSysSetBootParm("root=", "mtd:rootfs");
+                kerSysSetBootParm("rootfstype=", "jffs2");
+            }
+        }
+        nandinfo->nr_parts++;
+
+        /* RootFS_update partition */
+        bcm63XX_nand_parts[1].offset = nvram.ulNandPartOfsKb[rootfs_update]*1024;
+	bcm63XX_nand_parts[1].size = nvram.ulNandPartSizeKb[rootfs_update]*1024;
+        bcm63XX_nand_parts[1].ecclayout = mtd->ecclayout;
+
+        /* This partition is used for flashing images */
+        bcm63XX_nand_parts[5].offset = bcm63XX_nand_parts[1].offset;
+        bcm63XX_nand_parts[5].size = bcm63XX_nand_parts[1].size;
+        bcm63XX_nand_parts[5].ecclayout = mtd->ecclayout;
+
+        if (is_split_partition (mtd, bcm63XX_nand_parts[1].offset, bcm63XX_nand_parts[1].size, &split_offset))
+        {
+            /* rootfs_update partition */
+            bcm63XX_nand_parts[1].offset = split_offset;
+            bcm63XX_nand_parts[1].size -= (split_offset - nvram.ulNandPartOfsKb[rootfs_update]*1024);
+
+            /* bootfs_update partition */
+            bcm63XX_nand_parts[nandinfo->nr_parts].name = "bootfs_update";
+            bcm63XX_nand_parts[nandinfo->nr_parts].offset = nvram.ulNandPartOfsKb[rootfs_update]*1024;
+            bcm63XX_nand_parts[nandinfo->nr_parts].size = split_offset - nvram.ulNandPartOfsKb[rootfs_update]*1024;
+            bcm63XX_nand_parts[nandinfo->nr_parts].ecclayout = mtd->ecclayout;
+        }
+        nandinfo->nr_parts++;
+
+        /* Data (psi, scratch pad) */
+        bcm63XX_nand_parts[2].offset = nvram.ulNandPartOfsKb[NP_DATA] * 1024;
+        bcm63XX_nand_parts[2].size = nvram.ulNandPartSizeKb[NP_DATA] * 1024;
+        bcm63XX_nand_parts[2].ecclayout = mtd->ecclayout;
+
+        part_num=nandinfo->nr_parts;
+        i=BCM_MAX_EXTRA_PARTITIONS-2; // skip DATA partition
+        while(i >= 0) {
+            if(nvram.part_info[i].size != 0xffff) {
+                // sz_bits (top two bits of size): 0b00 -- MB, 0b01 -- GB, 0b10 and 0b11 -- reserved
+                switch((nvram.part_info[i].size & 0xc000)>>14) {
+                    case 0:
+                        extra_single_part_size=1<<20;//1024*1024;
+                        break;
+                    case 1:
+                        extra_single_part_size=1<<30;//1024*1024*1024;
+                        break;
+                    default:
+                        extra_single_part_size=0;
+                        break;
+                }
+                extra_single_part_size=(nvram.part_info[i].size&0x3fff)*extra_single_part_size;
+            if((extra_single_part_size&(~((uint64_t)mtd->erasesize-1))) != extra_single_part_size)
+                extra_single_part_size=(extra_single_part_size+mtd->erasesize);
+            extra_single_part_size =  (extra_single_part_size) & (~((uint64_t)mtd->erasesize-1));
+            if(extra_single_part_size >  mtd->erasesize) {
+                extra+=extra_single_part_size;
+                bcm63XX_nand_parts[part_num].name = misc_mtd_partition_names[i];
+                bcm63XX_nand_parts[part_num].offset = (nvram.ulNandPartOfsKb[NP_DATA] * 1024) - extra;
+                bcm63XX_nand_parts[part_num].size = extra_single_part_size;
+                bcm63XX_nand_parts[part_num].ecclayout = mtd->ecclayout;
+                nandinfo->nr_parts++;
+                part_num++;
+                }
+            }
+            i--;
+        }
+
+        /* Boot and NVRAM data */
+        bcm63XX_nand_parts[3].offset = nvram.ulNandPartOfsKb[NP_BOOT] * 1024;
+        bcm63XX_nand_parts[3].size = nvram.ulNandPartSizeKb[NP_BOOT] * 1024;
+        bcm63XX_nand_parts[3].ecclayout = mtd->ecclayout;
+
+        /* rootfs_update_data */
+        bcm63XX_nand_parts[6].offset = nvram.ulNandPartOfsKb[rootfs_update]*1024 + ((nvram.ulNandPartSizeKb[rootfs_update]*1024/2)/mtd->erasesize)*mtd->erasesize;
+        bcm63XX_nand_parts[6].size  = nvram.ulNandPartSizeKb[rootfs_update]*1024 - ((nvram.ulNandPartSizeKb[rootfs_update]*1024/2)/mtd->erasesize)*mtd->erasesize;
+        bcm63XX_nand_parts[6].ecclayout = mtd->ecclayout;
+
+        PRINTK("Part[0] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[0].name,
+            bcm63XX_nand_parts[0].size, bcm63XX_nand_parts[0].offset);
+        PRINTK("Part[1] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[1].name,
+            bcm63XX_nand_parts[1].size, bcm63XX_nand_parts[1].offset);
+        PRINTK("Part[2] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[2].name,
+            bcm63XX_nand_parts[2].size, bcm63XX_nand_parts[2].offset);
+        PRINTK("Part[3] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[3].name,
+            bcm63XX_nand_parts[3].size, bcm63XX_nand_parts[3].offset);
+        PRINTK("Part[4] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[4].name,
+            bcm63XX_nand_parts[4].size, bcm63XX_nand_parts[4].offset);
+        PRINTK("Part[5] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[5].name,
+            bcm63XX_nand_parts[5].size, bcm63XX_nand_parts[5].offset);
+        PRINTK("Part[6] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[6].name,
+            bcm63XX_nand_parts[6].size, bcm63XX_nand_parts[6].offset);
+#endif
+    }
+}
+
+
+/* Probe one Broadcom NAND controller instance: allocate the (shared) page
+ * buffer and controller/device state, scan the chip, build the partition
+ * table and register the MTD device.  Returns 0 on success or a negative
+ * errno (-ENOMEM on allocation failure, -ENXIO if the chip scan fails).
+ */
+static int __devinit brcmnanddrv_probe(struct platform_device *pdev)
+{
+    static int csi = 0; // Index into dev/nandInfo array
+    int cs = 0;  // Chip Select
+    int err = 0;
+    struct brcmnand_info* info = NULL;
+    static struct brcmnand_ctrl* ctrl = (struct brcmnand_ctrl*) 0;
+
+    /* gPageBuffer is allocated once and kept across probes; only allocate
+     * when it is still NULL.
+     */
+    if(!gPageBuffer &&
+       (gPageBuffer = kmalloc(sizeof(struct nand_buffers),GFP_KERNEL)) == NULL)
+    {
+        err = -ENOMEM;
+    }
+    else
+    {
+        if( (ctrl = kmalloc(sizeof(struct brcmnand_ctrl), GFP_KERNEL)) != NULL)
+        {
+            memset(ctrl, 0, sizeof(struct brcmnand_ctrl));
+            ctrl->state = FL_READY;
+            init_waitqueue_head(&ctrl->wq);
+            spin_lock_init(&ctrl->chip_lock);
+
+            if((info=kmalloc(sizeof(struct brcmnand_info),GFP_KERNEL)) != NULL)
+            {
+                gNandInfo[csi] = info;
+                memset(info, 0, sizeof(struct brcmnand_info));
+                info->brcmnand.ctrl = ctrl;
+                info->brcmnand.ctrl->numchips = gNumNand = 1;
+                info->brcmnand.csi = csi;
+
+                /* For now all devices share the same buffer */
+                info->brcmnand.ctrl->buffers =
+                    (struct nand_buffers*) gPageBuffer;
+
+                info->brcmnand.ctrl->numchips = gNumNand; 
+                info->brcmnand.chip_shift = 0; // Only 1 chip
+                info->brcmnand.priv = &info->mtd;
+                info->mtd.name = dev_name(&pdev->dev);
+                info->mtd.priv = &info->brcmnand;
+                info->mtd.owner = THIS_MODULE;
+
+                /* Enable the following for a flash based bad block table */
+                info->brcmnand.options |= NAND_BBT_USE_FLASH;
+
+                /* Each chip now will have its own BBT (per mtd handle) */
+                if (brcmnand_scan(&info->mtd, cs, gNumNand) == 0)
+                {
+                    PRINTK("Master size=%08llx\n", info->mtd.size); 
+                    brcmnanddrv_setup_mtd_partitions(info);
+                    mtd_device_register(&info->mtd, info->parts, info->nr_parts);
+                    dev_set_drvdata(&pdev->dev, info);
+                }
+                else
+                    err = -ENXIO;
+
+            }
+            else
+                err = -ENOMEM;
+
+        }
+        else
+            err = -ENOMEM;
+    }
+
+    /* Unwind all allocations on any failure.
+     * NOTE(review): gNandInfo[csi] is left pointing at the freed 'info'
+     * here — confirm no caller dereferences it after a failed probe.
+     */
+    if( err )
+    {
+        if( gPageBuffer )
+        {
+            kfree(gPageBuffer);
+            gPageBuffer = NULL;
+        }
+
+        if( ctrl )
+        {
+            kfree(ctrl);
+            ctrl = NULL;
+        }
+
+        if( info )
+        {
+            kfree(info);
+            info = NULL;
+        }
+    }
+
+    return( err );
+}
+
+/* Tear down one controller instance: unregister the MTD device, release
+ * the NAND driver state and free the buffers allocated by probe.
+ * Always returns 0.
+ */
+static int __devexit brcmnanddrv_remove(struct platform_device *pdev)
+{
+    struct brcmnand_info *info = dev_get_drvdata(&pdev->dev);
+
+    dev_set_drvdata(&pdev->dev, NULL);
+
+    if (info)
+    {
+        mtd_device_unregister(&info->mtd);
+
+        brcmnand_release(&info->mtd);
+        kfree(gPageBuffer);
+        /* Clear the cached pointer: probe only allocates the shared page
+         * buffer when gPageBuffer is NULL, so leaving the stale pointer
+         * here would make a subsequent probe reuse freed memory.
+         */
+        gPageBuffer = NULL;
+        kfree(info);
+    }
+
+    return 0;
+}
+
+/* Module init: bail out unless the board boots from NAND, parse the
+ * boot-loader NAND command (rescan/showbbt/eraseall/...), copy the module
+ * parameters (timings, access control, NAND config) into the per-chip-select
+ * globals, then register the platform device/driver pair and claim the
+ * controller's I/O region.  Returns 0 on success or a negative errno.
+ */
+static int __init brcmnanddrv_init(void)
+{
+    int ret = 0;
+    int csi;
+    int ncsi;
+    char cmd[32] = "\0";
+    struct platform_device *pdev;
+
+    if (flash_get_flash_type() != FLASH_IFC_NAND)
+        return -ENODEV;
+
+    /* Optional one-shot maintenance command handed over by the boot loader */
+    kerSysBlParmsGetStr(NAND_COMMAND_NAME, cmd, sizeof(cmd));
+    PRINTK("%s: brcmnanddrv_init - NANDCMD='%s'\n", __FUNCTION__, cmd);
+
+    if (cmd[0])
+    {
+        if (strcmp(cmd, "rescan") == 0)
+            gClearBBT = 1;
+        else if (strcmp(cmd, "showbbt") == 0)
+            gClearBBT = 2;
+        else if (strcmp(cmd, "eraseall") == 0)
+            gClearBBT = 8;
+        else if (strcmp(cmd, "erase") == 0)
+            gClearBBT = 7;
+        else if (strcmp(cmd, "clearbbt") == 0)
+            gClearBBT = 9;
+        else if (strcmp(cmd, "showcet") == 0)
+            gClearCET = 1;
+        else if (strcmp(cmd, "resetcet") == 0)
+            gClearCET = 2;
+        else if (strcmp(cmd, "disablecet") == 0)
+            gClearCET = 3;
+        else
+            printk(KERN_WARNING "%s: unknown command '%s'\n",
+                __FUNCTION__, cmd);
+    }
+    
+    /* Reset all per-chip-select overrides before applying module params */
+    for (csi=0; csi<NAND_MAX_CS; csi++)
+    {
+        gNandTiming1[csi] = 0;
+        gNandTiming2[csi] = 0;
+        gAccControl[csi] = 0;
+        gNandConfig[csi] = 0;
+    }
+
+    if (nacc == 1)
+        PRINTK("%s: nacc=%d, gAccControl[0]=%08lx, gNandConfig[0]=%08lx\n", \
+            __FUNCTION__, nacc, acc[0], nandcfg[0]);
+
+    if (nacc>1)
+        PRINTK("%s: nacc=%d, gAccControl[1]=%08lx, gNandConfig[1]=%08lx\n", \
+            __FUNCTION__, nacc, acc[1], nandcfg[1]);
+
+    for (csi=0; csi<nacc; csi++)
+        gAccControl[csi] = acc[csi];
+
+    for (csi=0; csi<ncfg; csi++)
+        gNandConfig[csi] = nandcfg[csi];
+
+    /* Timing arrays may have different lengths; walk the longer one and
+     * only copy entries that were actually supplied.
+     */
+    ncsi = max(nt1, nt2);
+    for (csi=0; csi<ncsi; csi++)
+    {
+        if (nt1 && csi < nt1)
+            gNandTiming1[csi] = t1[csi];
+
+        if (nt2 && csi < nt2)
+            gNandTiming2[csi] = t2[csi];
+        
+    }
+
+    printk (KERN_INFO DRIVER_INFO " (BrcmNand Controller)\n");
+    if( (pdev = platform_device_alloc(DRIVER_NAME, 0)) != NULL )
+    {
+        /* NOTE(review): the return of platform_device_add() is not checked,
+         * and the reference is dropped immediately after a successful add —
+         * confirm the platform core's reference keeps pdev alive here.
+         */
+        platform_device_add(pdev);
+        platform_device_put(pdev);
+        ret = platform_driver_register(&brcmnand_platform_driver);
+        if (ret >= 0)
+            request_resource(&iomem_resource, &brcmnand_resources[0]);
+        else
+            printk("brcmnanddrv_init: driver_register failed, err=%d\n", ret);
+    }
+    else
+        ret = -ENODEV;
+
+    return ret;
+}
+
+/* Module unload: release the controller I/O region claimed in init and
+ * unregister the platform driver.
+ */
+static void __exit brcmnanddrv_exit(void)
+{
+    release_resource(&brcmnand_resources[0]);
+    platform_driver_unregister(&brcmnand_platform_driver);
+}
+
+
+module_init(brcmnanddrv_init);
+module_exit(brcmnanddrv_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ton Truong <ttruong@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NAND flash driver");
+
+#endif //CONFIG_BCM_KF_MTD_BCMNAND
diff --git a/drivers/mtd/brcmnand/brcmnand_base.c b/drivers/mtd/brcmnand/brcmnand_base.c
new file mode 100644
index 0000000000000000000000000000000000000000..734fda934d951d45458451e2cb07ce6340a74f83
--- /dev/null
+++ b/drivers/mtd/brcmnand/brcmnand_base.c
@@ -0,0 +1,11481 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+ *  drivers/mtd/brcmnand/brcmnand_base.c
+ *
+<:copyright-BRCM:2002:GPL/GPL:standard
+
+   Copyright (c) 2002 Broadcom Corporation
+   All Rights Reserved
+    
+ This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/byteorder/generic.h>
+#include <linux/reboot.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/io.h>
+#include <asm/bug.h>
+#include <asm/delay.h>
+#include <linux/mtd/mtd64.h>
+#include <asm-generic/gcclib.h>
+#include <linux/slab.h>
+#include "bcm_map_part.h"
+#include "board.h"
+#include "shared_utils.h"
+
+#if defined(CONFIG_BCM_KF_NAND)
+#define NAND_COMPLEX_OOB_WRITE	0x00400000
+
+int g_nand_nop = 1;
+#endif
+
+#define DEBUG(...) do { } while(0)
+
+//#include "bbm.h"
+
+#include "brcmnand_priv.h"
+
+#define PRINTK(...) do {} while(0)
+//#define PRINTK printk
+//static char brcmNandMsg[1024];
+
+//#define DEBUG_HW_ECC
+
+//#define BRCMNAND_READ_VERIFY
+#undef BRCMNAND_READ_VERIFY
+
+//#ifdef CONFIG_MTD_BRCMNAND_VERIFY_WRITE
+//#define BRCMNAND_WRITE_VERIFY
+//#endif
+#undef BRCMNAND_WRITE_VERIFY
+
+//#define DEBUG_ISR
+#undef DEBUG_ISR
+#if defined( DEBUG_ISR )  || defined(BRCMNAND_READ_VERIFY) \
+	|| defined(BRCMNAND_WRITE_VERIFY)
+#if defined(DEBUG_ISR )  || defined(BRCMNAND_READ_VERIFY)
+#define EDU_DEBUG_4
+#endif
+#if defined(DEBUG_ISR )  || defined(BRCMNAND_WRITE_VERIFY)
+#define EDU_DEBUG_5
+#endif
+#endif
+
+
+#if defined( CONFIG_MTI_24K ) || defined( CONFIG_MTI_34K ) || defined( CONFIG_MTD_BRCMNAND_EDU )
+	#define PLATFORM_IOFLUSH_WAR()	__sync()
+#else
+	#define PLATFORM_IOFLUSH_WAR()	
+#endif
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+#include "edu.h"
+
+// Prototypes, also define whether polling or isr mode
+#include "eduproto.h"
+#endif // #ifdef CONFIG_MTD_BRCMNAND_EDU
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+// Block0
+#define BCHP_NAND_ACC_CONTROL_0_BCH_4		(BRCMNAND_ECC_BCH_4 << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT)
+
+// Block n > 0
+#define BCHP_NAND_ACC_CONTROL_N_BCH_4		(BRCMNAND_ECC_BCH_4 << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT)
+#endif
+
+int gdebug=0;
+extern int edu_debug;
+
+
+// Whether we should clear the BBT to fix a previous error.
+/* This will eventually be on the command line, to allow a user to 
+ * clean the flash
+ */
+extern int gClearBBT;
+
+/* Number of NAND chips, only applicable to v1.0+ NAND controller */
+extern int gNumNandCS;
+
+/* The Chip Select [0..7] for the NAND chips from gNumNand above, only applicable to v1.0+ NAND controller */
+extern int gNandCS[];
+extern uint32_t gNandConfig[];
+extern uint32_t gAccControl[];
+
+// If wr_preempt_en is enabled, need to disable IRQ during NAND I/O
+int wr_preempt_en = 0;
+
+// Last known good ECC sector offset (512B sector that does not generate ECC error).  
+// used in HIF_INTR2 WAR.
+loff_t gLastKnownGoodEcc;
+
+#define DRIVER_NAME	"brcmnand"
+
+#define HW_AUTOOOB_LAYOUT_SIZE		32 /* should be enough */
+
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+/* Avoid infinite recursion between brcmnand_refresh_blk() and brcmnand_read_ecc() */
+static atomic_t inrefresh = ATOMIC_INIT(0); 
+static int brcmnand_refresh_blk(struct mtd_info *, loff_t);
+static int brcmnand_erase_nolock(struct mtd_info *, struct erase_info *, int);
+#endif
+
+/*
+ * ID options
+ */
+#define BRCMNAND_ID_HAS_BYTE3		0x00000001
+#define BRCMNAND_ID_HAS_BYTE4		0x00000002
+#define BRCMNAND_ID_HAS_BYTE5		0x00000004
+#define BRCMNAND_ID_HYNIX_LEGACY	0x00010000
+
+// TYPE2
+#define BRCMNAND_ID_HAS_BYTE4_T2		0x00000008
+#define BRCMNAND_ID_HAS_BYTE5_T2		0x00000010
+#define BRCMNAND_ID_HAS_BYTE6_T2		0x00000020
+
+#define BRCMNAND_ID_EXT_BYTES \
+	(BRCMNAND_ID_HAS_BYTE3|BRCMNAND_ID_HAS_BYTE4|BRCMNAND_ID_HAS_BYTE5)
+
+#define BRCMNAND_ID_EXT_BYTES_TYPE2 \
+	(BRCMNAND_ID_HAS_BYTE3|BRCMNAND_ID_HAS_BYTE4_T2|\
+	BRCMNAND_ID_HAS_BYTE5_T2|BRCMNAND_ID_HAS_BYTE6_T2)
+
+
+// MICRON M60A is similar to Type 1 with a few exceptions.
+#define BRCMNAND_ID_HAS_MICRON_M60A	0x00020000
+#define BRCMNAND_ID_EXT_MICRON_M60A	BRCMNAND_ID_EXT_BYTES
+
+// MICRON M61A ID encoding is a totally different (and dying beast, temporarily until ONFI)
+#define BRCMNAND_ID_HAS_MICRON_M61A	0x00040000
+
+#define BRCMNAND_ID_EXT_MICRON_M61A (BRCMNAND_ID_HAS_MICRON_M61A)
+
+#define BRCMNAND_ID_HAS_MICRON_M68A	0x00080000
+#define BRCMNAND_ID_EXT_MICRON_M68A \
+	(BRCMNAND_ID_HAS_MICRON_M60A|BRCMNAND_ID_HAS_MICRON_M68A)
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+
+#define ONFI_RDPARAM_SIGNATURE_OFS	  0	
+#define ONFI_NBR_PARAM_PAGE_OFS		 14
+#define ONFI_RDPARAM_PAGESIZE_OFS	 80
+#define ONFI_RDPARAM_OOBSIZE_OFS		 84
+#define ONFI_RDPARAM_ECC_LEVEL_OFS	112
+#define ONFI_NBR_BITS_PER_CELL_OFS	102
+
+/* 
+ * The following def is for a dev with 3 replica of the param page
+ * Need to be adjusted according based on the actual nbr of param pages.
+ */
+ 
+#define ONFI_EXTPARAM_OFS				768
+#define ONFI_EXTPARAM_SIG1_OFS		768
+#define ONFI_EXTPARAM_SIG2_OFS		772
+#define ONFI_EXTPARAM_EXT_ECC_OFS	800
+#define ONFI_EXTPARAM_CODEWORK_OFS	801
+
+
+
+#define ONFI_SIGNATURE 		0x4F4E4649	/* "ONFI" */
+#define ONFI_EXTPARAM_SIG	0x45505053	/* "EPPS" */
+
+#endif
+
+typedef struct brcmnand_chip_Id {	/* Describes one supported NAND part in the brcmnand_chips[] table */
+    	uint8 mafId, chipId;	/* Manufacturer ID and device ID (ID bytes 1 and 2) */
+	uint8 chipId345[3];  /* ID bytes 3,4,5: resolve ambiguity when chipId alone is not unique */
+	const char* chipIdStr;	/* Human-readable part name, used for logging */
+	uint32 eccLevel; /* Only for Samsung Type 2 */
+	uint32 sectorSize; /* Only for Samsung Type 2 */
+	uint32 nbrBlocks; // Only for devices that do not encode Size into ID string.
+	uint32 options;	// NAND_* option flags, e.g. NAND_BBT_USE_FLASH
+	uint32 idOptions;	// BRCMNAND_ID_* flags: which extended ID bytes the chip has
+	uint32 timing1, timing2; // Specify a non-zero value to override the default timings.
+	int nop;				// Number of partial writes per page
+	unsigned int ctrlVersion; // Required controller version if different than 0
+} brcmnand_chip_Id;
+
+/*
+ * List of supported chip
+ */
+static brcmnand_chip_Id brcmnand_chips[] = {
+	{	/* 0a */
+		.chipId = SAMSUNG_K9F1G08U0E,
+		.chipId345 = {0x00, 0x95, 0x41},
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9F1G08U0E",
+		.options = NAND_BBT_USE_FLASH | NAND_COMPLEX_OOB_WRITE,      /* Use BBT on flash */
+			//| NAND_COMPLEX_OOB_WRITE  /* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0,
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,
+	},
+
+	{	/* 0b */
+		.chipId = SAMSUNG_K9F1G08U0A,
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9F1G08U0A/B/C/D",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = 0,
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.timing1 = 0, //00070000,
+		.timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0, /* THT Verified on data-sheet 7/10/08: Allows 4 on main and 4 on OOB */
+	},
+
+	{	/* 1 */
+		.chipId = ST_NAND512W3A,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST ST_NAND512W3A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, //0x6474555f, 
+		.timing2 = 0, //0x00000fc7,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+	{	/* 2 */
+		.chipId = ST_NAND256W3A,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST ST_NAND256W3A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, //0x6474555f, 
+		.timing2 = 0, //0x00000fc7,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+#if 0 // EOL
+	{	/* 4 */
+		.chipId = HYNIX_HY27UF081G2M,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "HYNIX HY27UF081G2M",
+		.options = NAND_BBT_USE_FLASH 
+			,
+	},
+#endif
+	/* This is the new version of HYNIX_HY27UF081G2M which is EOL.
+	 * Both use the same DevID
+	 */
+	{	/* 3 */
+		.chipId = HYNIX_HY27UF081G2A,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "Hynix HY27UF081G2A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+#if 0
+/* Obsoleted by the new Micron flashes */
+	{	/* 4 */
+		.chipId = MICRON_MT29F2G08AAB,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON_MT29F2G08AAB",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+/* This is just the 16 bit version of the above?
+	{
+		.chipId = MICRON_MT29F2G16AAB,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON_MT29F2G16AAB",
+		.options = NAND_BBT_USE_FLASH 
+			,
+	}
+*/
+#endif
+	{	/* 5 */
+		.chipId = SAMSUNG_K9F2G08U0A,
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9F2G08U0A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
+	},
+
+#if 0
+/* 
+ * SW3556-862, SWLINUX-1459
+ * Samsung replaced this SLC part with a new SLC part, different block size and page size but re-use the same ID
+ * Side effect: The old flash part can no longer be supported.
+ */
+	{	/* 6 */
+		.chipId = SAMSUNG_K9K8G08U0A,
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9K8G08U0A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
+	},
+#else
+	{	/* 6 Same old ID 0xD3, new part, so the old #define macro is kept, but IDstr is changed to reflect new part number */
+		.chipId = SAMSUNG_K9K8G08U0A,
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9F8G08U0M",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = BRCMNAND_ID_EXT_BYTES, /* New Samsung SLC has all 5 ID bytes defined */
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
+	},
+#endif
+
+
+	{	/* 7 */
+		.chipId = HYNIX_HY27UF082G2A,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "Hynix HY27UF082G2A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+#if 0
+/* EOL replaced by the following entry, with reduced NOP */
+
+	{	/* 8 */
+		.chipId = HYNIX_HY27UF084G2M,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "Hynix HY27UF084G2M",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+#endif
+
+	{	/* 8 */
+		.chipId = HYNIX_HY27U4G8F2D,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "Hynix HY27U4G8F2D",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = BRCMNAND_ID_EXT_BYTES|BRCMNAND_ID_HYNIX_LEGACY,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 9 */
+		.chipId = SPANSION_S30ML512P_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML512P_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 10 */
+		.chipId = SPANSION_S30ML512P_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML512P_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 11 */
+		.chipId = SPANSION_S30ML256P_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML256P_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 12 */
+		.chipId = SPANSION_S30ML256P_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML256P_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 13 */
+		.chipId = SPANSION_S30ML128P_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML128P_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 14 */
+		.chipId = SPANSION_S30ML128P_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION S30ML128P_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 15 */
+		.chipId = SPANSION_S30ML01GP_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML01GP_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 16 */
+		.chipId = SPANSION_S30ML01GP_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML01GP_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 17 */
+		.chipId = SPANSION_S30ML02GP_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML02GP_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 18 */
+		.chipId = SPANSION_S30ML02GP_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML02GP_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 19 */
+		.chipId = SPANSION_S30ML04GP_08,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML04GP_08",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 20 */
+		.chipId = SPANSION_S30ML04GP_16,
+		.mafId = FLASHTYPE_SPANSION,
+		.chipIdStr = "SPANSION_S30ML04GP_16",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	{	/* 21 */
+		.chipId = ST_NAND128W3A,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND128W3A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=8,
+		.ctrlVersion = 0,
+	},
+
+	/* The following 6 ST chips only allow 4 writes per page, and require version 2.1 (4) of the controller or later */
+	{	/* 22 */
+		.chipId = ST_NAND01GW3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND01GW3B2B",
+		.nbrBlocks = 1024, /* size=128MB, bsize=128K */
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+
+#if 0
+//R version = 1.8V
+	{	/* 23 */ 
+		.chipId = ST_NAND01GR3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND01GR3B2B",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+
+	{	/* 24 */ 
+		.chipId = ST_NAND02GR3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND02GR3B2C",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+#endif
+	{	/* 25 */ 
+		.chipId = ST_NAND02GW3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND02GW3B2C",
+		.nbrBlocks = 2048, /* size=256MB, bsize=128K */
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+	
+	{	/* 26 */ 
+		.chipId = ST_NAND04GW3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND04GW3B2B",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+	{	/* 27 */ 
+		.chipId = ST_NAND08GW3B,
+		.mafId = FLASHTYPE_ST,
+		.chipIdStr = "ST NAND08GW3B2A",
+		.options = NAND_BBT_USE_FLASH,
+		.idOptions = 0,
+		.timing1 = 0, .timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
+	},
+		
+	{	/* 28a */
+		.chipId = SAMSUNG_K9LBG08U0M,
+		.chipId345 = {0x55, 0xB6, 0x78},
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9LBG08U0M",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 28b */
+		.chipId = SAMSUNG_K9LBG08U0D,
+		.chipId345 = {0xD5, 0x29, 0x38},
+		.nbrBlocks = 8192,
+		//.eccLevel = 8, ,  Will decode from ID string
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9LBG08UD",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 28c */
+		.chipId = SAMSUNG_K9LBG08U0E,
+		.chipId345 = {0xC5, 0x72, 0x54},  /* C5h, 72h, 54h, 42h */
+		.nbrBlocks = 4096, /* 4GB flash */
+		//.eccLevel = 24 per 1KB,  Will decode from ID string
+		.mafId = FLASHTYPE_SAMSUNG,
+		.chipIdStr = "Samsung K9LBG08UD",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_5_0, 
+	},
+
+	{	/* 29a */
+		.chipId = SAMSUNG_K9GAG08U0D,
+		.chipId345 = {0x94, 0x29, 0x34},
+		.mafId = FLASHTYPE_SAMSUNG,
+		.nbrBlocks = 4096,
+		//.eccLevel = 8 ,  Will decode from ID string
+		.chipIdStr = "Samsung K9GAG08U0D",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 29b */
+		.chipId = SAMSUNG_K9GAG08U0E,
+		.chipId345 = {0x84, 0x72, 0x50},  /* 84h, 72h, 50h, 42h */
+		.mafId = FLASHTYPE_SAMSUNG,
+		.nbrBlocks = 2048,
+		//.eccLevel = 24 per 1KB,  Will decode from ID string
+		.chipIdStr = "Samsung K9GAG08U0E",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_5_0, 
+	},
+
+	{	/* 30 */
+		.chipId = HYNIX_HY27UT088G2A,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "HYNIX_HY27UT088G2A",
+		.options = NAND_BBT_USE_FLASH|NAND_SCAN_BI_3RD_PAGE, /* BBT on flash + BI on (last-2) page */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 31 */  
+		.chipId = HYNIX_HY27UAG8T2M,
+		.mafId = FLASHTYPE_HYNIX,
+		.chipIdStr = "HYNIX_HY27UAG8T2M",
+		.options = NAND_BBT_USE_FLASH|NAND_SCAN_BI_3RD_PAGE, /* BBT on flash + BI on (last-2) page */
+				//| NAND_COMPLEX_OOB_WRITE	/* Write data together with OOB for write_oob */
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=1,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 32 */  
+		.chipId = TOSHIBA_TC58NVG0S3ETA00,
+		.mafId = FLASHTYPE_TOSHIBA,
+		.chipIdStr = "TOSHIBA TC58NVG0S3ETA00",
+		.options = NAND_BBT_USE_FLASH, 
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.eccLevel=1,
+		.nbrBlocks = 1024,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0, 
+	},
+
+	{	/* 33 */  
+		.chipId = TOSHIBA_TC58NVG1S3ETAI5,
+		.mafId = FLASHTYPE_TOSHIBA,
+		.chipIdStr = "TOSHIBA TC58NVG1S3ETAI5",
+		.options = NAND_BBT_USE_FLASH, 
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.eccLevel=1,
+		.nbrBlocks = 2048,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0, 
+	},
+
+	{	/* 34 */  
+		.chipId = TOSHIBA_TC58NVG3S0ETA00,
+		.mafId = FLASHTYPE_TOSHIBA,
+		.chipIdStr = "TOSHIBA TC58NVG3S0ETA00",
+		.options = NAND_BBT_USE_FLASH, 
+		.idOptions = BRCMNAND_ID_EXT_BYTES,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.eccLevel=4,
+		.nbrBlocks = 4096,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 35 */
+		.chipId = MICRON_MT29F1G08ABA,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON MT29F1G08ABA",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M68A,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 36 */
+		.chipId = MICRON_MT29F2G08ABA,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON MT29F2G08ABA",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M68A, /* 69A actually */
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 37 */
+		.chipId = MICRON_MT29F4G08ABA,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON MT29F4G08ABA",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M60A,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+	{	/* 38 */
+		.chipId = MICRON_MT29F8G08ABA,
+		.mafId = FLASHTYPE_MICRON,
+		.chipIdStr = "MICRON MT29F8G08ABA",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M61A,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
+	},
+
+#if 0
+/* New Chip ID scheme in place and working, but as of 2631-2.5 these do not work yet, for some unknown reason */
+
+	{	/* 37 */
+		.mafId = FLASHTYPE_MICRON,
+		.chipId = MICRON_MT29F16G08ABA,
+		.chipId345 = {0x00, 0x26, 0x89},
+		.chipIdStr = "MICRON MT29F16G08ABA SLC",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M61A,
+		.timing1 = 0xFFFFFFFF, 
+		.timing2 = 0xFFFFFFFF,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,  /* Require BCH-8 only */
+	},
+
+	{	/* 38 */
+		.mafId = FLASHTYPE_MICRON,
+		.chipId = MICRON_MT29F16G08CBA,
+		.chipId345 = {0x04, 0x46, 0x85},
+		.chipIdStr = "MICRON MT29F16G08CBA MLC",
+		.options = NAND_BBT_USE_FLASH, 		/* Use BBT on flash */
+		.idOptions = BRCMNAND_ID_EXT_MICRON_M61A,
+		.timing1 = 0, 
+		.timing2 = 0,
+		.nop=4,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_3,  /* Require BCH-12 */
+	},
+#endif
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+
+	{	/* ONFI ENTRY */
+		.chipId = 0xFF,			
+		.mafId = 0xFF,			
+		.chipIdStr = "ONFI NAND CHIP",
+		.options = NAND_BBT_USE_FLASH,
+		.timing1 = 0, .timing2 = 0,
+		.ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_4_0,  /* ONFI capable NAND controllers */		
+	},
+#endif
+
+	{	/* LAST DUMMY ENTRY */
+		.chipId = 0,
+		.mafId = 0,
+		.chipIdStr = "UNSUPPORTED NAND CHIP",
+		.options = NAND_BBT_USE_FLASH,
+		.timing1 = 0, .timing2 = 0,
+		.ctrlVersion = 0,  		
+	}
+};
+
+
+
+// Max chips: subtract one to account for the last dummy entry
+#define BRCMNAND_MAX_CHIPS (ARRAY_SIZE(brcmnand_chips) - 1)
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+
+#define BRCMNAND_ONFI_IDX (BRCMNAND_MAX_CHIPS - 1)
+#endif
+
+#include <mtd/brcmnand_oob.h> /* BRCMNAND controller defined OOB */
+
+static unsigned char ffchars[BRCMNAND_FCACHE_SIZE];
+
+//static unsigned char eccmask[128]; // Will be initialized during probe
+
+
+/*
+ * Byte offsets (relative to BCHP_NAND_REVISION) at which the NAND
+ * controller decodes no register ("holes").  Callers such as
+ * print_nand_ctrl_regs() consult inRegisterHoles() and skip these
+ * offsets instead of reading them.  Which offsets are holes depends on
+ * the controller revision, hence the preprocessor conditionals.
+ */
+static uint32_t brcmnand_registerHoles[] = {
+
+	// 3.2 and earlier
+	0x1c,
+	0x44, 0x4c, 
+	0x5c, 
+	0x88, 0x8c, 
+	0xb8, 0xbc, 
+#if CONFIG_MTD_BRCMNAND_VERSION >=  CONFIG_MTD_BRCMNAND_VERS_3_3
+	0xc4, 0xc8, 0xcc,	
+#ifndef BCHP_NAND_ACC_CONTROL_CS3
+	0xf0, 0xf4, 0xf8,0xfc,
+#endif
+  #if CONFIG_MTD_BRCMNAND_VERSION >=  CONFIG_MTD_BRCMNAND_VERS_3_4
+  	0x100, 0x104, 0x108, 0x10c,
+  #endif
+	0x110, 0x114, 0x118, 0x11c, 
+	0x120, 0x124, 0x128, 0x12c, 
+#endif
+};
+
+static int brcmnand_wait(struct mtd_info *mtd, int state, uint32_t* pStatus);
+
+
+// Is there a register at the location
+/*
+ * Test whether @reg lands in a register hole (an offset where the
+ * controller has no register).  Returns 1 if so, 0 otherwise.
+ */
+static int inRegisterHoles(uint32_t reg)
+{
+	/* 7420c0 and later map the block at 0x0044xxxx rather than
+	 * 0x0000xxxx, so compare offsets relative to BCHP_NAND_REVISION. */
+	uint32_t off = reg - BCHP_NAND_REVISION;
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(brcmnand_registerHoles); idx++) {
+		if (brcmnand_registerHoles[idx] == off)
+			return 1;	/* inside a hole */
+	}
+	return 0;	/* a real register */
+}
+
+
+/*
+ * Read a NAND controller register.
+ * @nandCtrlReg: absolute BCHP register offset, expected in the range
+ *               [BCHP_NAND_REVISION, BCHP_NAND_LAST_REG] and word-aligned.
+ * Returns the register value, or 0 (silently — the diagnostic printk is
+ * disabled below) when the offset is out of range or misaligned.
+ */
+static uint32_t brcmnand_ctrl_read(uint32_t nandCtrlReg) 
+{
+	/* Translate the BCHP offset into this chip's register window */
+	uint32_t pReg = (BRCMNAND_CTRL_REGS 
+		+ nandCtrlReg - BCHP_NAND_REVISION);
+
+	if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_LAST_REG ||
+		(nandCtrlReg & 0x3) != 0) {
+		//printk("brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
+        return 0;
+	}
+if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, 
+(unsigned int) nandCtrlReg, (unsigned int)BDEV_RD(pReg));
+
+	return (uint32_t) BDEV_RD(pReg);
+}
+
+
+/*
+ * Write @val to NAND controller register @nandCtrlReg (absolute BCHP
+ * offset).  Invalid offsets (out of range or not word-aligned) are
+ * logged and the write is dropped, mirroring brcmnand_ctrl_read()
+ * which returns 0 for the same condition.
+ *
+ * Fixes: the original error message was mislabeled "brcmnand_ctrl_read"
+ * (copy-paste), and the invalid write was still performed after the
+ * error was flagged.
+ */
+static void brcmnand_ctrl_write(uint32_t nandCtrlReg, uint32_t val) 
+{
+	/* Translate the BCHP offset into this chip's register window */
+	uint32_t pReg = (uint32_t) (BRCMNAND_CTRL_REGS + nandCtrlReg - BCHP_NAND_REVISION);
+
+	if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_LAST_REG ||
+		(nandCtrlReg & 0x3) != 0) {
+		printk( "%s: Invalid register value %08x\n", __FUNCTION__, nandCtrlReg);
+		return;	/* do not write through an out-of-window address */
+	}
+	
+	BDEV_WR(pReg, val);
+	
+if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, nandCtrlReg, val);
+}
+
+
+/*
+ * chip: BRCM NAND handle
+ * offset: offset from start of mtd, not necessarily the same as offset from chip.
+ * cmdEndAddr: 1 for CMD_END_ADDRESS, 0 for CMD_ADDRESS
+ * 
+ * Returns the real ldw of the address w.r.t. the chip.
+ */
+
+#if 0 // CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+/* 
+ * Old codes assume all CSes have the same flash
+ * Here offset is the offset from CS0.
+ */
+static uint32_t brcmnand_ctrl_writeAddr(struct brcmnand_chip* chip, loff_t offset, int cmdEndAddr) 
+{
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+	uint32_t pAddr = offset + chip->pbase;
+	uint32_t ldw = 0;
+
+	chip->ctrl_write(cmdEndAddr? BCHP_NAND_CMD_END_ADDRESS: BCHP_NAND_CMD_ADDRESS, pAddr);
+
+#else
+	uint32_t udw, ldw, cs;
+	DIunion chipOffset;
+	
+//char msg[24];
+
+
+	// cs is the index into chip->ctrl->CS[]
+	cs = (uint32_t) (offset >> chip->chip_shift);
+	// chipOffset is offset into the current CS
+
+	chipOffset.ll = offset & (chip->chipSize - 1);
+
+	if (cs >= chip->ctrl->numchips) {
+		printk(KERN_ERR "%s: Offset=%0llx outside of chip range cs=%d, chip->ctrl->CS[cs]=%d\n", 
+			__FUNCTION__,  offset, cs, chip->ctrl->CS[cs]);
+		BUG();
+		return 0;
+	}
+
+if (gdebug) printk("CS=%d, chip->ctrl->CS[cs]=%d\n", cs, chip->ctrl->CS[cs]);
+	// ldw is lower 32 bit of chipOffset, need to add pbase when on CS0 and XOR is ON.
+	if (!chip->xor_disable[cs]) {
+		ldw = chipOffset.s.low + chip->pbase;
+	}
+	else {
+		ldw = chipOffset.s.low;
+	}
+	
+	udw = chipOffset.s.high | (chip->ctrl->CS[cs] << 16);
+
+if (gdebug > 3) printk("%s: offset=%0llx  cs=%d ldw = %08x, udw = %08x\n", __FUNCTION__, offset, cs,  ldw, udw);
+	chip->ctrl_write(cmdEndAddr? BCHP_NAND_CMD_END_ADDRESS: BCHP_NAND_CMD_ADDRESS, ldw);
+	chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, udw);
+
+
+#endif
+	return (ldw); //(ldw ^ 0x1FC00000);
+}
+
+#else
+/* 
+ * Controller v3.3 or later allows heterogenous flashes
+ * Here offset is the offset from the start of the flash (CSn), as each flash has its own mtd handle
+ */
+
+static uint32_t brcmnand_ctrl_writeAddr(struct brcmnand_chip* chip, loff_t offset, int cmdEndAddr) 
+{
+	uint32_t udw, ldw, cs;
+	DIunion chipOffset;
+
+	/* Offset within this chip; chipSize is a power of two */
+	chipOffset.ll = offset & (chip->chipSize - 1);
+	/* Physical chip-select wired for this mtd handle */
+	cs = chip->ctrl->CS[chip->csi];
+//if (gdebug) printk("CS=%d, chip->ctrl->CS[cs]=%d\n", cs, chip->ctrl->CS[chip->csi]);
+	// ldw is lower 32 bit of chipOffset, need to add pbase when on CS0 and XOR is ON.
+	if (!chip->xor_disable) {
+		ldw = chipOffset.s.low + chip->pbase;
+	}
+	else {
+		ldw = chipOffset.s.low;
+	}
+	
+	/* Upper address word carries the chip-select in bits 16 and up */
+	udw = chipOffset.s.high | (cs << 16);
+
+if (gdebug > 3) printk("%s: offset=%0llx  cs=%d ldw = %08x, udw = %08x\n", __FUNCTION__, offset, cs,  ldw, udw);
+	chip->ctrl_write(cmdEndAddr? BCHP_NAND_CMD_END_ADDRESS: BCHP_NAND_CMD_ADDRESS, ldw);
+	chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, udw);
+
+	return (ldw); 
+}
+
+#endif
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+/*
+ * Workaround until threshold register is replicated for each CS
+ */
+/*
+ * Program the correctable-error reporting threshold to 75% of this
+ * chip's ECC level.  Skipped when ECC is off or Hamming, in which case
+ * the hardware default is kept.  The threshold register is shared by
+ * all chip selects on these controller revisions (see note above).
+ */
+static void
+brcmnand_reset_corr_threshold(struct brcmnand_chip* chip)
+{
+	/* Per-CS counter used only to rate-limit the informational printks */
+	static int once[NUM_NAND_CS];
+	
+	if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+		uint32_t corr_threshold = brcmnand_ctrl_read(BCHP_NAND_CORR_STAT_THRESHOLD)&BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK;
+		uint32_t seventyfivepc;
+
+		seventyfivepc = (chip->ecclevel*3)/4;
+		if (!once[chip->csi]) {
+			once[chip->csi]=1;
+			printk(KERN_INFO "%s: default CORR ERR threshold  %d bits for CS%1d\n", 
+				__FUNCTION__, corr_threshold, chip->ctrl->CS[chip->csi]);
+PRINTK("ECC level threshold default value is %d bits for CS%1d\n", corr_threshold, chip->ctrl->CS[chip->csi]);
+		}
+		if (seventyfivepc != corr_threshold) {
+			if ((once[chip->csi])++ < 2) {
+				printk(KERN_INFO "%s: CORR ERR threshold changed to %d bits for CS%1d\n", 
+					__FUNCTION__, seventyfivepc, chip->ctrl->CS[chip->csi]);
+			}
+			/* Shift the new value into its field, then merge back the
+			 * other bits of the register (read-modify-write). */
+			seventyfivepc <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
+                        seventyfivepc |= (brcmnand_ctrl_read(BCHP_NAND_CORR_STAT_THRESHOLD)&~BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK);
+			brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, seventyfivepc);
+		}
+	}
+}
+
+#else
+#define brcmnand_reset_corr_threshold(chip)
+#endif
+
+/*
+ * Disable ECC, and return the original ACC register (for restore)
+ */
+/*
+ * Turn off read-path ECC checking on chip-select @cs.
+ * Returns the previous ACC_CONTROL value so the caller can restore it
+ * later via brcmnand_restore_ecc().
+ */
+uint32_t brcmnand_disable_read_ecc(int cs)
+{
+	uint32_t saved, masked;
+
+	saved = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+	/* Pre-7.0 controllers gate block-0 ECC with a separate enable bit */
+	masked = saved & ~(BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK | BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK);
+#else
+	masked = saved & ~(BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK);
+#endif
+	brcmnand_ctrl_write(bchp_nand_acc_control(cs), masked);
+
+	return saved;
+}
+
+
+/*
+ * Undo brcmnand_disable_read_ecc(): write back the ACC_CONTROL value
+ * @orig_acc0 that it returned, for chip-select @cs.
+ */
+void brcmnand_restore_ecc(int cs, uint32_t orig_acc0) 
+{
+	brcmnand_ctrl_write(bchp_nand_acc_control(cs), orig_acc0);
+}
+	
+	// Restore acc
+
+#if 0
+/* Dont delete, may be useful for debugging */
+
+static void __maybe_unused print_diagnostics(struct brcmnand_chip* chip)
+{
+	uint32_t nand_acc_control = brcmnand_ctrl_read(BCHP_NAND_ACC_CONTROL);
+	uint32_t nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+	uint32_t nand_config = brcmnand_ctrl_read(BCHP_NAND_CONFIG);
+	uint32_t flash_id = brcmnand_ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
+	uint32_t pageAddr = brcmnand_ctrl_read(BCHP_NAND_PROGRAM_PAGE_ADDR);
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	uint32_t pageAddrExt = brcmnand_ctrl_read(BCHP_NAND_PROGRAM_PAGE_EXT_ADDR);
+#endif
+
+	
+	//unsigned long nand_timing1 = brcmnand_ctrl_read(BCHP_NAND_TIMING_1);
+	//unsigned long nand_timing2 = brcmnand_ctrl_read(BCHP_NAND_TIMING_2);
+
+	printk(KERN_INFO "NAND_SELECT=%08x ACC_CONTROL=%08x, \tNAND_CONFIG=%08x, FLASH_ID=%08x\n", 
+		nand_select, nand_acc_control, nand_config, flash_id);
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	printk("PAGE_EXT_ADDR=%08x\n", pageAddrExt);
+#endif
+	if (chip->ctrl->CS[0] == 0) {
+		uint32_t ebiCSBase0 = BDEV_RD(BCHP_EBI_CS_BASE_0);
+		printk(KERN_INFO "PAGE_ADDR=%08x, \tCS0_BASE=%08x\n", pageAddr, ebiCSBase0);
+	}
+	else {
+		uint32_t csNandBaseN = BDEV_RD(BCHP_EBI_CS_BASE_0 + 8*chip->ctrl->CS[0]);
+
+		printk(KERN_INFO "PAGE_ADDR=%08x, \tCS%-d_BASE=%08x\n", pageAddr, chip->ctrl->CS[0], csNandBaseN);
+		printk(KERN_INFO "pbase=%08lx, vbase=%p\n", chip->pbase, chip->vbase);
+	}
+}	
+#endif
+
+/*
+ * Print the per-CS configuration registers (ACC_CONTROL, CONFIG,
+ * TIMING_1/2) plus the flash device ID for the chip behind @mtd.
+ * NOTE: this is not a pure dump — it issues an OP_DEVICE_ID_READ
+ * command to the controller to refresh FLASH_DEVICE_ID, waiting for
+ * controller-ready before and after.
+ */
+static void print_config_regs(struct mtd_info* mtd) 
+{
+	struct brcmnand_chip * chip = mtd->priv;
+
+	unsigned int cs = chip->ctrl->CS[chip->csi];
+	unsigned long nand_acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+	unsigned long nand_config = brcmnand_ctrl_read(bchp_nand_config(cs));
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_1
+	unsigned long nand_config_ext = brcmnand_ctrl_read(BCHP_NAND_CONFIG_EXT);
+#endif
+	unsigned long flash_id; // = brcmnand_ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
+	unsigned long nand_timing1 = brcmnand_ctrl_read(bchp_nand_timing1(cs));
+	unsigned long nand_timing2 = brcmnand_ctrl_read(bchp_nand_timing2(cs));
+	uint32_t status;
+	
+	/* 
+	 * Set CS before reading ID, same as in brcmnand_read_id
+	 */
+
+	/* Wait for CTRL_Ready */
+	brcmnand_wait(mtd, BRCMNAND_FL_READY, &status);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	/* Older version do not have EXT_ADDR registers */
+	chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+	chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, cs << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+#endif  // Set EXT address if version >= 1.0
+
+	/* Send the command for reading device ID from controller */
+	chip->ctrl_write(BCHP_NAND_CMD_START, OP_DEVICE_ID_READ);
+	
+	/* Wait for CTRL_Ready */
+	brcmnand_wait(mtd, BRCMNAND_FL_READY, &status);
+
+
+	flash_id = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_1	
+	printk(KERN_INFO "\nFound NAND on CS%1d: ACC=%08lx, cfg=%08lx, cfg_ext=%08lx, flashId=%08lx, tim1=%08lx, tim2=%08lx\n", 
+	       cs, nand_acc_control, nand_config, nand_config_ext, flash_id, nand_timing1, nand_timing2);	
+#else
+	printk(KERN_INFO "\nFound NAND on CS%1d: ACC=%08lx, cfg=%08lx, flashId=%08lx, tim1=%08lx, tim2=%08lx\n", 
+		cs, nand_acc_control, nand_config, flash_id, nand_timing1, nand_timing2);	
+#endif
+}
+
+#define NUM_NAND_REGS 	(1+((BCHP_NAND_LAST_REG -BCHP_NAND_REVISION)/4))
+
+/*
+ * Dump every NAND controller register, four values per output line,
+ * each line prefixed with its starting BCHP offset.  Offsets listed in
+ * brcmnand_registerHoles[] are printed as 0 rather than read.
+ */
+static void __maybe_unused print_nand_ctrl_regs(void)
+{
+	int i;
+	
+/* Avoid garbled output */
+int saveDebug = gdebug;
+gdebug = 0;
+
+	for (i=0; i<NUM_NAND_REGS; i++) {
+		uint32_t reg = (uint32_t) (BCHP_NAND_REVISION+(i*4));
+		uint32_t regval; 
+		//uint32_t regoff = reg - BCHP_NAND_REVISION; // i*4
+		
+		if ((i % 4) == 0) {
+			printk("\n%08x:", reg);
+		}
+
+		if (inRegisterHoles(reg)) {
+			regval = 0;	/* no register decodes here */
+		}
+		else {
+			regval = (uint32_t) brcmnand_ctrl_read(reg);
+		}
+		printk("  %08x", regval);
+	}
+gdebug = saveDebug;	/* restore debug verbosity */
+}
+
+/*
+ * Debug dump of the NAND command/status registers plus the HIF_INTR2
+ * CPU interrupt status/mask.  Compiled to a no-op unless
+ * CONFIG_MTD_BRCMNAND_EDU is set (the HIF_INTR2 registers only exist
+ * on EDU-capable chips).
+ */
+void print_NandCtrl_Status(void)
+{
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+	uint32_t nand_cmd_addr = brcmnand_ctrl_read(BCHP_NAND_CMD_ADDRESS);
+	uint32_t nand_cmd_start = brcmnand_ctrl_read(BCHP_NAND_CMD_START);
+	uint32_t nand_intfc_stat = brcmnand_ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+
+	uint32_t hif_intr2_status = (uint32_t) BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+	uint32_t hif_intr2_mask = (uint32_t) BDEV_RD(BCHP_HIF_INTR2_CPU_MASK_STATUS);
+	
+	printk("\nNandCtrl_Status: CMD_ADDR=%08x, CMD_START=%08x, INTFC_STATUS=%08x, HIF_INTR2_ST=%08x, HF_MSK=%08x\n", 
+		nand_cmd_addr, nand_cmd_start, nand_intfc_stat, hif_intr2_status, hif_intr2_mask);	
+#endif
+}
+
+#if 1
+/*
+ * Hex-dump an OOB buffer: 16 bytes per line, an extra space every 4
+ * bytes, terminated by a newline.  A NULL @buf prints "NULL" (with no
+ * trailing newline) and returns.
+ */
+void print_oobbuf(const unsigned char* buf, int len)
+{
+	int idx;
+
+	if (!buf) {
+		printk("NULL");
+		return;
+	}
+	for (idx = 0; idx < len; idx++) {
+		if (idx % 16 == 0 && idx != 0)
+			printk("\n");
+		else if (idx % 4 == 0)
+			printk(" ");
+		printk("%02x", buf[idx]);
+	}
+	printk("\n");
+}
+
+/*
+ * Hex-dump a data buffer: 32 bytes per line, each line prefixed with
+ * its offset, an extra space every 4 bytes, terminated by a newline.
+ */
+void print_databuf(const unsigned char* buf, int len)
+{
+	int idx;
+
+	for (idx = 0; idx < len; idx++) {
+		if (idx % 32 == 0)
+			printk("\n%04x: ", idx);
+		else if (idx % 4 == 0)
+			printk(" ");
+		printk("%02x", buf[idx]);
+	}
+	printk("\n");
+}
+
+/* Dump the controller's four spare-area (OOB) read registers. */
+void print_oobreg(struct brcmnand_chip* chip) {
+	int i;
+
+	printk("OOB Register:");
+	for (i = 0; i < 4; i++) {
+		printk("%08x ",  chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+	}
+	printk("\n");
+}
+#endif
+
+/*
+ * BRCMNAND controller always copies the data in 4 byte chunk, and in Big Endian mode
+ * from and to the flash.
+ * This routine assumes that dest and src are 4 byte aligned, and that len is a multiple of 4
+ (Restriction removed)
+
+ * TBD: 4/28/06: Remove restriction on count=512B, but do restrict the read from within a 512B section.
+ * Change brcmnand_memcpy32 to be 2 functions, one to-flash, and one from-flash,
+ * enforcing reading from/writing to flash on a 4B boundary, but relaxing on the buffer being on 4 byte boundary.
+ */
+
+
+/*
+ * Copy @len bytes from the controller's flash cache window into @dest,
+ * one 32-bit word at a time (the controller serves data in 4-byte
+ * chunks).  The flash side and @len must be 4-byte aligned; @dest may
+ * be unaligned, in which case each word is staged through a local and
+ * copied out byte-by-byte.  Returns 0 on success, -EINVAL on an
+ * alignment violation.
+ */
+static int brcmnand_from_flash_memcpy32(struct brcmnand_chip* chip, void* dest, loff_t offset, int len)
+{
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+	u_char* flash = chip->vbase + offset;
+#else
+	/* v1.0+: the cache window is at vbase; offset is not used here */
+	volatile uint32_t* flash = (volatile uint32_t*) chip->vbase;
+#endif
+	volatile uint32_t* pucDest = (volatile uint32_t*) dest; 
+	volatile uint32_t* pucFlash = (volatile uint32_t*) flash; 
+	int i;
+
+#if 0
+	if (unlikely(((unsigned int) dest) & 0x3)) {
+		printk(KERN_ERR "brcmnand_memcpy32 dest=%p not DW aligned\n", dest);
+		return -EINVAL;
+	}
+#endif
+	if (unlikely(((unsigned int) flash) & 0x3)) {
+		printk(KERN_ERR "brcmnand_memcpy32 src=%p not DW aligned\n", flash);
+		return -EINVAL;
+	}
+	if (unlikely(len & 0x3)) {
+		printk(KERN_ERR "brcmnand_memcpy32 len=%d not DW aligned\n", len);
+		return -EINVAL;
+	}
+
+	/* THT: 12/04/08.  memcpy plays havoc with the NAND controller logic 
+	 * We removed the dest alignment test above, so the code below takes care of it
+	 */
+	if (unlikely(((unsigned long) dest) & 0x3)) {
+		for (i=0; i< (len>>2); i++) {
+			// Flash is guaranteed to be DW aligned.  This forces the NAND controller
+			// to read 1-DW at a time, w/o peep-hole optimization allowed.
+			volatile uint32_t tmp = pucFlash[i];
+			u8* pSrc = (u8*) &tmp;
+			u8* pDest = (u8*) &pucDest[i];
+			pDest[0] = pSrc[0];
+			pDest[1] = pSrc[1];
+			pDest[2] = pSrc[2];
+			pDest[3] = pSrc[3];
+		}
+	}
+	else {
+		/* Both sides word-aligned: straight word copy */
+		for (i=0; i< (len>>2); i++) {
+			pucDest[i] = pucFlash[i];
+		}
+	}
+
+	return 0;
+}
+
+
+/*
+ * Write to flash 512 bytes at a time.
+ *
+ * Can't just do a simple memcpy, since the HW NAND controller logic do the filtering
+ * (i.e. ECC correction) on the fly 4 bytes at a time
+ * This routine also takes care of alignment.
+ */
+/*
+ * Copy @len bytes from @src into the controller's flash cache window,
+ * one 32-bit word at a time (the hardware applies ECC on the fly in
+ * 4-byte chunks).  The flash side and @len must be 4-byte aligned
+ * (BUG() otherwise); @src may be unaligned, in which case each word is
+ * assembled byte-by-byte with endian-aware packing.  Always returns 0.
+ */
+static int brcmnand_to_flash_memcpy32(struct brcmnand_chip* chip, loff_t offset, const void* src, int len)
+{
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+	u_char* flash = chip->vbase + offset;
+#else
+	/* v1.0+: the cache window is at vbase; offset is not used here */
+	u_char* flash = chip->vbase;
+#endif
+	int i;
+	volatile uint32_t* pDest = (volatile uint32_t*) flash;
+	volatile uint32_t* pSrc = (volatile uint32_t*) src;
+
+
+	if (unlikely((unsigned int) flash & 0x3)) {
+		printk(KERN_ERR "brcmnand_to_flash_memcpy32 dest=%p not DW aligned\n", flash);
+		BUG();
+	}
+
+	if (unlikely(len & 0x3)) {
+		printk(KERN_ERR "brcmnand_to_flash_memcpy32 len=%d not DW aligned\n", len);
+		BUG();
+	}
+
+if (gdebug) printk("%s: flash=%p, len=%d, src=%p\n", __FUNCTION__, flash, len, src);
+	
+
+	/*
+	 * THT: 12/08/08.  memcpy plays havoc with the NAND controller logic 
+	 * We removed the src alignment test, so the code below takes care of it
+	 */
+	if (unlikely((unsigned long) pSrc & 0x3)) {
+		for (i=0; i< (len>>2); i++) {
+			/* Pack 4 bytes into one word, honoring CPU endianness */
+			u8 *tmp = (u8 *) &pSrc[i];
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+			pDest[i] = ((uint32_t)(tmp[3] << 24) | (uint32_t)(tmp[2] << 16)
+				| (uint32_t)(tmp[1] << 8) | (uint32_t)(tmp[0] << 0));
+
+#else 
+			pDest[i] = ((uint32_t)(tmp[0] << 24) | (uint32_t)(tmp[1] << 16)
+				| (uint32_t)(tmp[2] << 8) | (uint32_t)(tmp[3] << 0));
+#endif
+		}
+	} else {
+		/* Both sides word-aligned: straight word copy */
+		for (i=0; i< (len>>2); i++) {
+			pDest[i] = pSrc[i];
+		}
+	}
+
+	return 0;
+}
+
+//#define uint8_t unsigned char
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+/*
+ * Returns     0: No errors
+ *             1: Correctable error
+ *            -1: Uncorrectable error
+ */
+/*
+ * Inspect EDU/controller interrupt state after a read and clear the
+ * latched ECC error status.
+ * @this:  chip handle
+ * @state: current FSM state; only BRCMNAND_FL_READING is examined
+ * @intr:  latched HIF_INTR2 CPU status bits for this transfer
+ *
+ * NOTE(review): the comment block above this function says the return
+ * is 0/1/-1, but the code initializes err to 1 for "no error", returns
+ * 0 for the non-read case, and otherwise returns BRCMEDU_* /
+ * BRCMNAND_* codes — confirm which contract callers rely on.
+ */
+static int brcmnand_EDU_verify_ecc(struct brcmnand_chip* this, int state, uint32_t intr)
+{
+    int err = 1;       //  1 is no error, 2 is ECC correctable, 3 is EDU ECC correctable, -2 is ECC non-corr, -3 is EDU ECC non-corr
+    //uint32_t intr;
+    uint32_t status = 0;
+
+if (gdebug > 3 ) {
+printk("-->%s\n", __FUNCTION__);}
+
+    /* Only make sense on read */
+    if (state != BRCMNAND_FL_READING) 
+        return 0;
+
+    //intr = EDU_volatileRead(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS);
+    
+
+    // Maybe an EDU BUG?
+    if ((intr & BCHP_HIF_INTR2_CPU_STATUS_EDU_ERR_INTR_MASK) != 0x00000000)
+    { 
+        //Check EDU_ERR_STATUS:
+        status = EDU_volatileRead(EDU_ERR_STATUS);
+//printk("EDU_ERR_STATUS=%08x\n", status);
+        if((status & EDU_ERR_STATUS_NandECCuncor) != 0x00000000)
+        {
+            // EDU saw a NAND ECC UNCORR error
+            err = BRCMEDU_UNCORRECTABLE_ECC_ERROR;
+if (gdebug > 3 ) printk("EDU_ERR_STATUS=%08x UNCORRECTABLE\n", status);
+        }
+
+        if((status & EDU_ERR_STATUS_NandECCcor) != 0x00000000)
+        {
+            err = BRCMEDU_CORRECTABLE_ECC_ERROR;
+if (gdebug > 3 ) printk("EDU_ERR_STATUS=%08x CORRECTABLE\n", status);
+        }  
+		
+	  if((status & EDU_ERR_STATUS_NandRBUS) != 0x00000000)
+        {
+            err = BRCMEDU_MEM_BUS_ERROR;
+if (gdebug > 3 ) printk("EDU_ERR_STATUS=%08x BUS ERROR\n", status);
+		return err; /* Return right away for retry */
+        }
+    }
+
+    /*
+      * Clear error status on Controller side, but before doing that, we need
+      * to make sure the controller is done with previous op.
+      */
+
+	  /*
+	   * Wait for Controller Ready Status bit, which indicates data and OOB are ready for Read
+	   */
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+	if (!(intr & HIF_INTR2_CTRL_READY)) {
+		uint32_t rd_data;
+
+	
+	 	rd_data = ISR_cache_is_valid();
+
+		if (rd_data == 0) {
+	  	/* timed out */
+printk("%s: rd_data=0 TIMEOUT\n", __FUNCTION__);
+			err = BRCMNAND_TIMED_OUT;
+			return err;
+	  	}
+	}
+#endif
+
+
+    if ((intr & BCHP_HIF_INTR2_CPU_STATUS_NAND_CORR_INTR_MASK) != 0x00000000)
+    {
+
+        // Clear the latched correctable-error address registers
+        this->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+        this->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+       
+        //err = BRCMNAND_CORRECTABLE_ECC_ERROR;
+
+        // Clear the interrupt for next time
+        EDU_volatileWrite(BCHP_HIF_INTR2_CPU_CLEAR, BCHP_HIF_INTR2_CPU_CLEAR_NAND_CORR_INTR_MASK); 
+    }
+
+    if ((intr & BCHP_HIF_INTR2_CPU_STATUS_NAND_UNC_INTR_MASK) != 0x00000000) 
+    {
+        // Clear the latched uncorrectable-error address registers
+        this->ctrl_write(BCHP_NAND_ECC_UNC_EXT_ADDR, 0);
+        this->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+
+        /*
+         * If the block was just erased, and has not yet been written to, this will be flagged,
+         * so this could be a false alarm
+         */
+
+        //err = BRCMNAND_UNCORRECTABLE_ECC_ERROR;
+
+        // Clear the interrupt for next time
+        EDU_volatileWrite(BCHP_HIF_INTR2_CPU_CLEAR, BCHP_HIF_INTR2_CPU_CLEAR_NAND_UNC_INTR_MASK); 
+    }
+if (gdebug > 3 ) {
+printk("<-- %s err = %d\n", __FUNCTION__, err);}
+    return err;
+}
+
+#endif
+
+/* The BCHP_HIF_INTR2_xxx registers don't exist on DSL chips so the old way of
+ * verifying ECC is used.
+ */
+#if !defined(CONFIG_BCM_KF_NAND)
+/* 
+ * SWLINUX-1584: Use HIF status register to check for errors.
+ * In the past we rely on the fact that the registers 
+ * 	BCHP_NAND_ECC_CORR_EXT_ADDR/BCHP_NAND_ECC_UNC_EXT_ADDR
+ * are not zeroes, but the indicators are ambiguous when the address is 0
+ * 
+ * Notes: 2618 still use the old way, because we are reluctant to change codes that
+ * are already in production.  In 2618 this is only called when address==0
+ */
+#define HIF_INTR2_ERR_MASK (\
+	BCHP_HIF_INTR2_CPU_STATUS_NAND_CORR_INTR_MASK |\
+	BCHP_HIF_INTR2_CPU_STATUS_NAND_UNC_INTR_MASK)
+	
+/*
+ * Check the ECC outcome of the preceding controller read by sampling the
+ * HIF_INTR2 CPU status register, acknowledging (clearing) any error bits
+ * found via the CPU_CLEAR register.
+ *
+ * chip:    NAND chip handle (not referenced in this variant)
+ * state:   controller FSM state; anything other than BRCMNAND_FL_READING
+ *          is a no-op returning BRCMNAND_SUCCESS
+ * notUsed: placeholder so the signature matches the EDU verify_ecc variant
+ *
+ * Returns	 0: BRCMNAND_SUCCESS:	No errors
+ *			 1: Correctable error
+ *			-1: Uncorrectable error
+ */
+static int brcmnand_ctrl_verify_ecc(struct brcmnand_chip* chip, int state, uint32_t notUsed)
+{
+	uint32_t intr_status = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+
+if (gdebug > 3 ) {
+printk("%s: intr_status = %08x\n", __FUNCTION__, intr_status); }	 
+
+	/* Only make sense on read */
+	if (state != BRCMNAND_FL_READING) 
+		return BRCMNAND_SUCCESS;
+
+	/* Uncorrectable is checked first so it wins if both UNC and CORR are set */
+	if (intr_status & BCHP_HIF_INTR2_CPU_STATUS_NAND_UNC_INTR_MASK) {
+		// Clear Status Mask for sector 0 workaround
+		BDEV_WR(BCHP_HIF_INTR2_CPU_CLEAR, 
+			HIF_INTR2_ERR_MASK|BCHP_HIF_INTR2_CPU_STATUS_NAND_CTLRDY_INTR_MASK);
+#if 0 /* Already cleared with cpu-clear */
+		intr_status &= ~HIF_INTR2_ERR_MASK;
+		BDEV_WR(BCHP_HIF_INTR2_CPU_STATUS, intr_status);    
+#endif
+		return BRCMNAND_UNCORRECTABLE_ECC_ERROR;
+	}
+
+	else  if (intr_status & BCHP_HIF_INTR2_CPU_STATUS_NAND_CORR_INTR_MASK) {
+		BDEV_WR(BCHP_HIF_INTR2_CPU_CLEAR, 
+			HIF_INTR2_ERR_MASK|BCHP_HIF_INTR2_CPU_STATUS_NAND_CTLRDY_INTR_MASK);
+#if 0 /* Already cleared with cpu-clear */
+		intr_status &= ~HIF_INTR2_ERR_MASK;
+		BDEV_WR(BCHP_HIF_INTR2_CPU_STATUS, intr_status);    
+#endif
+		return BRCMNAND_CORRECTABLE_ECC_ERROR;
+	}
+
+	return BRCMNAND_SUCCESS;
+}
+
+
+#else
+/* Old ways of doing it: is ambiguous when offset == 0 */
+
+/*
+ * Legacy ECC check (DSL chips): read the controller's correctable and
+ * uncorrectable ECC address registers; a non-zero address means the
+ * corresponding error occurred, and writing 0 back acknowledges it.
+ * Ambiguous when the failing address is genuinely 0 (see note above).
+ *
+ * chip:    NAND chip handle used for ctrl_read/ctrl_write register access
+ * state:   only BRCMNAND_FL_READING is acted upon; otherwise returns success
+ * notUsed: placeholder so the signature matches the other verify_ecc variants
+ *
+ * Returns	 0: BRCMNAND_SUCCESS:	No errors
+ *			 1: Correctable error
+ *			-1: Uncorrectable error
+ */
+static int brcmnand_ctrl_verify_ecc(struct brcmnand_chip* chip, int state, uint32_t notUsed)
+{
+	int err = 0;
+	uint32_t addr;
+	uint32_t extAddr = 0;
+
+if (gdebug > 3 ) {
+printk("-->%s\n", __FUNCTION__);}
+
+	/* Only make sense on read */
+	if (state != BRCMNAND_FL_READING) 
+		return BRCMNAND_SUCCESS;
+
+	addr = chip->ctrl_read(BCHP_NAND_ECC_CORR_ADDR);
+	if (addr) {
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+		extAddr = chip->ctrl_read(BCHP_NAND_ECC_CORR_EXT_ADDR);
+		// Clear it
+		chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+#endif
+
+		// Clear it
+		chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+		printk(KERN_WARNING "%s: Correctable ECC error at %08x:%08x\n", __FUNCTION__, extAddr, addr);
+		
+		/* Check to see if error occurs in Data or ECC */
+		err = BRCMNAND_CORRECTABLE_ECC_ERROR;
+	}
+
+	/* UNC check runs second so an uncorrectable error overrides err */
+	addr = chip->ctrl_read(BCHP_NAND_ECC_UNC_ADDR);
+	if (addr) {
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+		extAddr = chip->ctrl_read(BCHP_NAND_ECC_UNC_EXT_ADDR);
+		// Clear it
+		chip->ctrl_write(BCHP_NAND_ECC_UNC_EXT_ADDR, 0);
+#endif
+		chip->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+
+		/*
+		 * If the block was just erased, and have not yet been written to, this will be flagged,
+		 * so this could be a false alarm
+		 */
+
+		err = BRCMNAND_UNCORRECTABLE_ECC_ERROR;
+	}
+	return err;
+}
+
+#endif
+
+#if 0 /* Dead code: compile-time selection of the verify_ecc hook, kept for reference */
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+static int (*brcmnand_verify_ecc) (struct brcmnand_chip* chip, int state, uint32_t intr) = brcmnand_EDU_verify_ecc;
+
+#else
+static int (*brcmnand_verify_ecc) (struct brcmnand_chip* chip, int state, uint32_t intr) = brcmnand_ctrl_verify_ecc;
+#endif //#ifdef CONFIG_MTD_BRCMNAND_EDU
+#endif
+
+
+/**
+ * brcmnand_wait - [DEFAULT] wait until the command is done
+ * @param mtd		MTD device structure
+ * @param state		state to select the max. timeout value
+ * @param pStatus	out: last BCHP_NAND_INTFC_STATUS value read, set on success only
+ *
+ * Wait for command done. This applies to all BrcmNAND command
+ * Read can take up to 53, erase up to ?s and program up to 30 clk cycle ()
+ * according to general BrcmNAND specs
+ *
+ * Writes must see both CTLR_READY and FLASH_READY; every other state only
+ * needs CTLR_READY.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the ready bits never asserted.
+ */
+static int brcmnand_wait(struct mtd_info *mtd, int state, uint32_t* pStatus)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned long timeout;
+	uint32_t ready;
+	uint32_t wait_for = BRCMNAND_FL_WRITING == state
+		? BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK|BCHP_NAND_INTFC_STATUS_FLASH_READY_MASK
+		: BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK;	
+
+	/* Generous ceiling: the value below is actually 10 s, not 20 ms/3 s as older notes said */
+	timeout = jiffies + msecs_to_jiffies(10000); // THT: 3secs, for now
+	while (time_before(jiffies, timeout)) {
+		PLATFORM_IOFLUSH_WAR();
+		ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+		if ((ready & wait_for) == wait_for) {
+			*pStatus = ready;
+			return 0;
+		}
+
+		/* May only sleep when not reading, write-preempt disabled, and not in IRQ context */
+		if (state != BRCMNAND_FL_READING && (!wr_preempt_en) && !in_interrupt())
+			cond_resched();
+		else
+			udelay(100);
+		//touch_softlockup_watchdog();
+	}
+
+	/*
+	 * Get here on timeout
+	 */
+	return -ETIMEDOUT;
+}
+
+
+
+/* 
+ * Poll INTFC_STATUS until both CTLR_READY and SPARE_AREA_VALID assert,
+ * i.e. the OOB/spare bytes of the last page read are latched in the
+ * controller registers and safe to read out.
+ *
+ * mtd:   MTD device
+ * state: FSM state; only used to choose cond_resched() vs udelay() below
+ * raw:   debug-print only here — the ECC check it used to gate is #if 0'd out
+ *
+ * Returns 	 1: Success, no errors
+ * 			 0: Timeout
+ *			-1: Errors
+ */
+static int brcmnand_spare_is_valid(struct mtd_info* mtd,  int state, int raw) 
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned long timeout;
+	uint32_t ready;
+
+if (gdebug > 3 ) {
+printk("-->%s, raw=%d\n", __FUNCTION__, raw);}
+
+
+	/* Polling ceiling (3 s, see below) */
+	timeout = jiffies + msecs_to_jiffies(3000);  // 3 sec timeout for now
+	while (time_before(jiffies, timeout)) {
+		PLATFORM_IOFLUSH_WAR();
+		ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+		if (ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK && 
+		   (ready & BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK)) {
+
+
+#if 0
+// THT 6/15/09: Reading OOB would not affect ECC
+			int ecc;
+
+			if (!raw) {
+				ecc = brcmnand_ctrl_verify_ecc(chip, state, 0);
+				if (ecc < 0) {
+//printk("%s: Uncorrectable ECC error at offset %08x\n", __FUNCTION__, (unsigned long) offset);
+					return -1;
+				}
+			}
+#endif
+			return 1;
+		}
+		/* Sleep only when not reading, preempt off and not in IRQ context */
+		if (state != BRCMNAND_FL_READING && !wr_preempt_en && !in_interrupt())
+			cond_resched();
+		else
+			udelay(100);
+	}
+
+	return 0; // Timed out
+}
+
+
+
+/* 
+ * Returns: Good: >= 0
+ *		    Error:  < 0
+ *
+ * BRCMNAND_CORRECTABLE_ECC_ERROR		(1)
+ * BRCMNAND_SUCCESS					(0)
+ * BRCMNAND_UNCORRECTABLE_ECC_ERROR	(-1)
+ * BRCMNAND_FLASH_STATUS_ERROR			(-2)
+ * BRCMNAND_TIMED_OUT					(-3)
+ *
+ * Is_Valid in the sense that the data is valid in the cache.  
+ * It does not means that the data is either correct or correctable.
+ *
+ * Polls INTFC_STATUS until CTLR_READY and CACHE_VALID assert, then runs
+ * the controller-side ECC check and returns its verdict.  The offset
+ * argument is used for debug printing only.
+ */
+ 
+static int brcmnand_cache_is_valid(struct mtd_info* mtd,  int state, loff_t offset) 
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned long timeout;
+	uint32_t ready;
+
+if (gdebug > 3 ) {
+printk("%s: offset=%0llx\n", __FUNCTION__, offset);}
+
+	/* Polling ceiling (3 s, see below) */
+	timeout = jiffies + msecs_to_jiffies(3000); // 3 sec timeout for now
+	while (time_before(jiffies, timeout)) {
+		PLATFORM_IOFLUSH_WAR();
+		ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+		if ((ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK) 
+		&& (ready & BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK)) {
+			int ecc;
+
+			ecc = brcmnand_ctrl_verify_ecc(chip, state, 0);
+// Let caller handle it
+//printk("%s: Possible Uncorrectable ECC error at offset %08x\n", __FUNCTION__, (unsigned long) offset);
+if (gdebug > 3 && ecc) {
+printk("<--%s: ret = %d\n", __FUNCTION__, ecc);}
+			return ecc;
+			
+		}
+		/* Sleep only when not reading, preempt off and not in IRQ context */
+		if (state != BRCMNAND_FL_READING && (!wr_preempt_en) && !in_interrupt())
+			cond_resched();
+		else
+			udelay(100);
+
+	}
+
+if (gdebug > 3 ) {
+printk("<--%s: ret = TIMEOUT\n", __FUNCTION__);
+print_nand_ctrl_regs();
+}
+	return BRCMNAND_TIMED_OUT; // TimeOut
+}
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+/* 
+ * Returns: Good: >= 0
+ *		    Error:  < 0
+ *
+ * BRCMNAND_CORRECTABLE_ECC_ERROR		(1)
+ * BRCMNAND_SUCCESS					(0)
+ * BRCMNAND_TIMED_OUT					(-3)
+ * BRCMEDU_CORRECTABLE_ECC_ERROR        	(4)
+ * BRCMEDU_UNCORRECTABLE_ECC_ERROR      	(-4)
+ * BRCMEDU_MEM_BUS_ERROR				(-5)
+ *
+ * EDU (DMA) variant of cache_is_valid: interprets the HIF_INTR2 status
+ * captured after an EDU read.  intr_status==0 means the EDU transfer
+ * timed out; an EDU error bit defers to brcmnand_EDU_verify_ecc();
+ * otherwise waits for the OOB to become valid before declaring success.
+ * In all paths the EDU done/error/interrupt state is cleared on exit.
+ * The offset argument is currently unused.
+ */
+static int brcmnand_EDU_cache_is_valid(struct mtd_info* mtd,  int state, loff_t offset, uint32_t intr_status) 
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	int error = 0;
+	//unsigned long flags;
+	//uint32_t rd_data;
+
+if (gdebug > 3 ) {
+printk("%s: intr_status = %08x\n", __FUNCTION__, intr_status); }	 
+
+	  if (intr_status == 0) {
+	  	/* EDU_read timed out */
+printk("%s: intr_status=0 TIMEOUT\n", __FUNCTION__);
+		error = BRCMNAND_TIMED_OUT;
+		//goto out;
+	  }
+
+	else if (intr_status & HIF_INTR2_EDU_ERR)
+	{
+		error = brcmnand_EDU_verify_ecc(chip, state, intr_status);
+	}
+
+	/*
+	 * Success return, now make sure OOB area is ready to be read
+	 */
+	else {
+		//uint32_t rd_data;
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+		/*
+		 * First check that HIF_INTR2_CTRL_READY is asserted
+		 * this is to avoid busy-waiting inside brcmnand_spare_is_valid
+		 */
+
+		if (!(intr_status & HIF_INTR2_CTRL_READY)) {
+			(void) ISR_cache_is_valid(); 
+		}
+		
+#endif
+		/*
+		 * Tests show that even with HIF_INTR2_CTRL_READY asserted,
+		 * OOB may not contain correct data till INTF_STATUS assert spare-valid
+		 */
+	 	(void) brcmnand_spare_is_valid(mtd, state, 1); // Dont want to call verify_ecc
+		error = 0;
+	}
+
+//out:
+        
+        //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
+        /* Unconditionally reset EDU done/error state and ack the HIF interrupt */
+        EDU_reset_done();
+        EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000);
+        EDU_volatileWrite(BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);    
+
+	  return error;
+}
+
+#endif  // CONFIG_MTD_BRCMNAND_EDU
+
+#if 0 /* Dead code: compile-time dispatch between EDU and controller cache checks, kept for reference */
+static int brcmnand_select_cache_is_valid(struct mtd_info* mtd,  int state, loff_t offset) 
+{
+    int ret = 0;
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+    ret =   brcmnand_EDU_cache_is_valid(mtd,state,offset);  
+#else
+    ret =   brcmnand_cache_is_valid(mtd,state,offset);  
+#endif
+    return ret;
+}
+#endif
+
+
+/*
+ * Returns 1 on success,
+ *		  0 on error
+ *
+ * Wait (via brcmnand_wait) for the current program/erase op to finish.
+ * On completion, *outp_needBBT is set to bit 0 of INTFC_STATUS — the
+ * flash's own pass/fail flag — so 0 means the write completed cleanly
+ * and non-zero tells the caller to consider marking the block bad.
+ * On timeout or controller-not-ready, *outp_needBBT is left at 1.
+ */
+
+
+static int brcmnand_ctrl_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
+{
+	int err;
+	uint32_t status;
+	uint32_t flashStatus = 0;
+
+	*outp_needBBT = 1;
+	err = brcmnand_wait(mtd, BRCMNAND_FL_WRITING, &status);
+	if (!err) {
+		if (status & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK) {
+			/* Bit 0 of INTFC_STATUS = flash-reported program/erase failure */
+			flashStatus = status & 0x01;
+			if (flashStatus) {
+				printk(KERN_INFO "%s: INTF Status = %08x\n", __FUNCTION__, status);
+			}
+			*outp_needBBT = flashStatus; // 0 = write completes with no errors
+			return 1;
+		}
+		else {
+			return 0;
+		}
+	}
+	return 0;
+}
+
+
+
+
+//#define EDU_DEBUG_2
+#undef EDU_DEBUG_2
+
+// EDU_DEBUG_4: Verify on Read
+//#define EDU_DEBUG_4
+//#undef EDU_DEBUG_4
+
+// EDU_DEBUG_5: Verify on Write
+//#define EDU_DEBUG_5
+//#undef EDU_DEBUG_5
+
+#if defined( EDU_DEBUG_2 ) || defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
+/* 3548 internal buffer is 4K in size */
+//static uint32_t edu_lbuf[2048];
+static uint32_t* edu_buf32;
+static uint8_t* edu_buf;   	// Used by EDU in Debug2
+static uint8_t* ctrl_buf;	// Used by Ctrl in Debug4
+static uint32_t ctrl_oob32[4];
+static uint8_t* ctrl_oob = (uint8_t*) ctrl_oob32;
+
+#define PATTERN 0xa55a0000
+
+#define EDU_BUFSIZE_B (512)
+// One before and one after
+#define EDU_BUF32_SIZE_B (EDU_BUFSIZE_B*3)
+
+// Same as above in DW instead
+#define EDU_BUFSIZE_DW (EDU_BUFSIZE_B/4)
+#define EDU_BUF32_SIZE_DW (EDU_BUF32_SIZE_B/4)
+
+// Real buffer starts at 1/3 
+#define EDU_BUF_START_DW (EDU_BUF32_SIZE_DW/3)
+
+
+/*
+ * Debug helper: lazily allocate the 3x512B guard buffer (guard region,
+ * real 512B buffer, guard region) and fill the whole thing with the
+ * PATTERN|index watermark so verify_edu_buf() can detect EDU overruns.
+ * BUGs on allocation failure — debug-build only.
+ */
+static void init_edu_buf(void)
+{
+	/* Write pattern */
+	int i;
+
+	if (!edu_buf32) {
+		edu_buf32 = (uint32_t*) kmalloc(EDU_BUF32_SIZE_B, GFP_KERNEL);
+		if (!edu_buf32) {
+			printk("%s: Out of memory\n", __FUNCTION__);
+			BUG();
+		}
+			
+		/* Real buffer starts one EDU_BUFSIZE into the allocation */
+		edu_buf = ctrl_buf = (uint8_t*)  &edu_buf32[EDU_BUF_START_DW];
+		printk("%s: Buffer allocated at %p, %d bytes\n", __FUNCTION__, edu_buf32, EDU_BUF32_SIZE_B);
+		printk("Real buffer starts at %p\n", ctrl_buf);
+	}
+
+	for (i=0; i<EDU_BUF32_SIZE_DW; i++) {
+		edu_buf32[i] = PATTERN | i;
+	}	
+}
+
+/*
+ * Debug helper: check the guard regions before and after the real 512B
+ * buffer for any watermark word that EDU overwrote.
+ * Returns the number of corrupted DWs (0 = no overrun detected).
+ */
+static int verify_edu_buf(void) 
+{
+	int i;
+	int ret = 0;
+	
+	/* Leading guard region */
+	for (i=0; i<EDU_BUF_START_DW; i++) {
+		if (edu_buf32[i] != (PATTERN | i)) {
+			printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
+				__FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
+			ret++;
+		}
+	}
+	/* Trailing guard region */
+	for (i=EDU_BUF_START_DW+EDU_BUFSIZE_DW; i<EDU_BUF32_SIZE_DW; i++) {
+		if (edu_buf32[i] != (PATTERN | i)) {
+			printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
+				__FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
+			ret++;
+		}
+	}
+if (ret) printk("+++++++++++++++ %s: %d DW overwritten by EDU\n", __FUNCTION__, ret);
+	return ret;
+}
+
+
+static uint8_t edu_write_buf[512];
+
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+#define NUM_EDU_REGS	(1+((BCHP_EDU_ERR_STATUS-BCHP_EDU_CONFIG)/4))
+#else
+#define NUM_EDU_REGS	1
+#endif
+
+#define MAX_DUMPS		20
+
+/*
+ * Debug snapshot store: up to MAX_DUMPS captures of the NAND/EDU register
+ * files plus the controller's 512B internal cache, taken around an op for
+ * post-mortem comparison (before/after).  Single global instance below.
+ */
+typedef struct nand_dump {
+	loff_t offset;
+	uint32_t physAddr;
+	struct brcmnand_chip* chip;
+	struct register_dump_t {
+		unsigned long timestamp;
+		uint32_t nand_regs[NUM_NAND_REGS]; // NAND register dump
+		uint32_t edu_regs[NUM_EDU_REGS];	// EDU register
+		uint32_t hif_intr2;		// HIF_INTR2 Interrupt status
+		uint8_t data[512];		// NAND controller cache
+	} dump[MAX_DUMPS];
+	//uint8_t udata[512]; 	// Uncached
+} nand_dump_t; // Before and after
+nand_dump_t nandDump; 
+int numDumps = 0;
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+/*
+ * Debug helper: pretty-print snapshot #which from nandDump — the NAND
+ * register file, EDU registers, HIF_INTR2 status and the 512B cache image.
+ */
+static void print_dump_nand_regs(int which)
+{
+	int i;
+
+	printk("NAND registers snapshot #%d: TS=%0lx, offset=%0llx, PA=%08x\n", 
+		1+which, nandDump.dump[which].timestamp, nandDump.offset, nandDump.physAddr);
+	for (i=0; i<NUM_NAND_REGS; i++) {
+		if ((i % 4) == 0) {
+			printk("\n%08x:", BCHP_NAND_REVISION+(i*4));
+		}
+		printk("  %08x", nandDump.dump[which].nand_regs[i]);
+	}
+	printk("\nEDU registers:\n");
+	for (i=0; i<NUM_EDU_REGS; i++) {
+		if ((i % 4) == 0) {
+			printk("\n%08x:", BCHP_EDU_CONFIG+(i*4));
+		}
+		printk("  %08x", nandDump.dump[which].edu_regs[i]);
+	}
+	printk("\n HIF_INTR2_STATUS=%08x\n", nandDump.dump[which].hif_intr2);
+	printk("\nNAND controller Internal cache:\n");
+	print_databuf(nandDump.dump[which].data, 512);
+}
+
+/*
+ * Debug helper: record snapshot #which into nandDump — NAND registers
+ * (skipping known register holes), EDU registers, HIF_INTR2 status and a
+ * copy of the controller cache.  offset/pa/chip are only latched on the
+ * first snapshot (which == 0).
+ */
+void dump_nand_regs(struct brcmnand_chip* chip, loff_t offset, uint32_t pa, int which)
+{
+	int i;
+
+	/* We don't have the value of offset during snapshot #2 */
+	if (which == 0) {nandDump.offset = offset; nandDump.physAddr = pa;nandDump.chip = chip;}
+
+	nandDump.dump[which].timestamp = jiffies;
+	
+	for (i=0; i<NUM_NAND_REGS; i++) {
+		uint32_t reg = BCHP_NAND_REVISION+(i*4);
+		uint32_t regval;
+
+		if (inRegisterHoles(reg)) { // No NAND register at 0x281c
+			regval = 0;
+		}
+		else {
+			regval = brcmnand_ctrl_read(reg);
+		}
+ 		nandDump.dump[which].nand_regs[i] = regval;
+	}
+	for (i=0; i<NUM_EDU_REGS; i++) {
+ 		nandDump.dump[which].edu_regs[i] = EDU_volatileRead(BCHP_EDU_CONFIG + ( i*4));
+	}
+	nandDump.dump[which].hif_intr2 = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+	brcmnand_from_flash_memcpy32(nandDump.chip, &nandDump.dump[which].data[0], nandDump.offset, 512);
+}
+
+#else
+
+#define print_dump_nand_regs(...)
+
+#define dump_nand_regs(...)
+
+#endif // EDU_DEBUG_2,4,5
+#endif
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+
+/*
+ * Returns 1 on success,
+ *		  0 on error
+ *
+ * EDU (DMA) variant of write_is_complete.  Non-write ops (e.g. erase) are
+ * forwarded to the controller version.  For writes it polls/awaits the EDU
+ * done status, then cross-checks the EDU error register against the NAND
+ * INTFC_STATUS flash flag (bit 0) to decide whether the block should be
+ * marked bad (*outp_needBBT=1: flash write error) or the failure was a bus
+ * error (*outp_needBBT=0, returns 0 so the caller can retry).  EDU state
+ * is cleared on every exit path.
+ */
+
+static int brcmnand_EDU_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
+{
+	uint32_t hif_err = 0, edu_err;
+	int ret;
+	struct brcmnand_chip *chip = mtd->priv;
+
+	/* For Erase op, use the controller version */
+	if (chip->ctrl->state != BRCMNAND_FL_WRITING) {
+		return brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);  
+	}
+    
+	*outp_needBBT = 0;
+
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+  #if 0 // No need in Batch mode
+	// Unlike the Read case where we retry on everything, we either complete the write or die trying.
+	// Here we use retry only for ERESTARTSYS, relying on the fact that we write the same data 
+	// over the flash.
+	// Caution: Since this can be called from an interrupt context, we cannot call the regular brcmnand_wait()
+	// call, since those call schedule()
+	hif_err = ISR_wait_for_completion();
+	if ((hif_err == ERESTARTSYS) || (hif_err & HIF_INTR2_EBI_TIMEOUT))
+		return hif_err;
+  #endif // Batch mode
+#else
+	/* NOTE(review): in ISR batch mode hif_err stays 0 here, which falls into
+	 * the "Write timeout" branch below — presumably intended; confirm. */
+	hif_err = EDU_poll(BCHP_HIF_INTR2_CPU_STATUS, 
+		HIF_INTR2_EDU_DONE|HIF_INTR2_CTRL_READY, 
+		HIF_INTR2_EDU_ERR, 
+		HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY);
+
+#endif
+
+
+	if (hif_err != 0) // No timeout
+	{
+		uint32_t flashStatus; // = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+		int retries = 20;
+
+#if 0
+if (!(hif_err & HIF_INTR2_EDU_DONE))
+printk("hif_err=%08x\n", hif_err);
+#endif			
+		
+		/******************* BUG BUG BUG *****************
+		 * THT 01/06/09: What if EDU returns bus error?  We should not mark the block bad then.
+		 */
+
+
+		//Clear interrupt:
+		//EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
+
+		flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+		/* Just to be dead sure */
+		while (!(flashStatus & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK) && retries-- > 0) {
+			// Cant call the ctrl version, we are in ISR context
+			// ret = brcmnand_ctrl_write_is_complete(mtd, outp_needBBT); 
+			udelay(5000); // 20 retries x 5 ms = up to 100 ms total (not 100 usec as previously stated)
+			//dump_nand_regs(chip, 0, 0, numDumps++);
+			flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+		}
+
+		 //Get status:  should we check HIF_INTR2_ERR?
+		if (hif_err & HIF_INTR2_EDU_ERR)
+			edu_err = EDU_get_error_status_register();
+		else
+			edu_err = 0;
+
+		/* sanity check on last cmd status */
+		if ((edu_err & EDU_ERR_STATUS_NandWrite) && !(flashStatus & 0x1)) {
+			int cmd = chip->ctrl_read(BCHP_NAND_CMD_START);
+			printk(KERN_ERR"%s: false EDU write error status (edu_err: 0x%08X, flashStatus: 0x%08X) for NAND CMD %x  \n", 
+			          __FUNCTION__, edu_err, flashStatus, cmd);
+			/* Re-read in case the EDU error register had not settled yet */
+			edu_err = EDU_get_error_status_register();
+		}
+			
+		/* we primarily rely on NAND controller FLASH_STATUS bit 0, since EDU error may not be cleared yet */		
+		if ((edu_err & EDU_ERR_STATUS_NandWrite) && (flashStatus & 0x01)) {
+			/* // Write is complete, but not successful, flash error, will mark block bad */
+			*outp_needBBT = 1;
+			printk(KERN_ERR"%s: flash write error (edu_err: 0x%08X, flashStatus: 0x%08X)\n", 
+                  		__FUNCTION__, edu_err, flashStatus);
+              	ret = 1; // Write is complete, but not successful
+
+			goto out;
+		}
+		else if (edu_err) {
+			/* Write did not complete, bus error, will NOT mark block bad */
+			*outp_needBBT = 0;
+			printk("EDU_write_is_complete(): error 0x%08X\n", edu_err);
+			ret = 0;
+			goto out;
+		}
+
+		ret = 1; // Success    brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);  
+		goto out;
+	}
+	else { // Write timeout
+		printk("%s: Write has timed out\n", __FUNCTION__);
+		//*outp_needBBT = 1;
+		ret = 0;
+		goto out;
+	}
+
+out:
+
+	/* Always reset EDU done/error state and ack the HIF interrupt */
+	EDU_reset_done();
+	EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000);
+	EDU_volatileWrite(BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
+
+
+	//printk("EDU_write_is_complete(): error 2 hif_err: %08x\n", hif_err);
+
+	//Poll time out or did not return HIF_INTR2_EDU_DONE:
+	return ret;
+}
+
+
+static int (*brcmnand_write_is_complete) (struct mtd_info*, int*) = brcmnand_EDU_write_is_complete;
+
+#else
+static int (*brcmnand_write_is_complete) (struct mtd_info*, int*) = brcmnand_ctrl_write_is_complete;
+#endif //#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+
+
+/**
+ * brcmnand_transfer_oob - [Internal] Transfer oob from chip->oob_poi to client buffer
+ * @chip:	nand chip structure
+ * @oob:	oob destination address
+ * @ops:	oob ops structure (mode and ooboffs are honoured)
+ * @len: OOB bytes to transfer
+ *
+ * PLACE/RAW modes are a straight copy from oob_poi+ooboffs; AUTO mode
+ * gathers only the free (non-ECC) byte ranges described by the chip's
+ * ecclayout, skipping ooboffs bytes into that free space first.
+ *
+ * Returns the pointer to the OOB where next byte should be read
+ * (BUGs on an unknown ops->mode).
+ */
+uint8_t *
+brcmnand_transfer_oob(struct brcmnand_chip *chip, uint8_t *oob,
+				  struct mtd_oob_ops *ops, int len)
+{
+	//size_t len = ops->ooblen;
+
+	switch(ops->mode) {
+
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
+		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+		return oob + len;
+
+	case MTD_OPS_AUTO_OOB: {
+		struct nand_oobfree *free = chip->ecclayout->oobfree;
+		uint32_t boffs = 0, roffs = ops->ooboffs;
+		size_t bytes = 0;
+
+		for(; free->length && len; free++, len -= bytes) {
+			/* Read request not from offset 0 ? */
+			if (unlikely(roffs)) {
+				if (roffs >= free->length) {
+					roffs -= free->length;
+					continue;
+				}
+				boffs = free->offset + roffs;
+				bytes = min_t(size_t, len,
+					      (free->length - roffs));
+				roffs = 0;
+			} else {
+				bytes = min_t(size_t, len, free->length);
+				boffs = free->offset;
+			}
+#ifdef DEBUG_ISR
+printk("%s: AUTO: oob=%p, chip->oob_poi=%p, ooboffs=%d, len=%d, bytes=%d, boffs=%d\n",
+	__FUNCTION__, oob, chip->oob_poi, ops->ooboffs, len, bytes, boffs);
+#endif
+			memcpy(oob, chip->oob_poi + boffs, bytes);
+			oob += bytes;
+		}
+		return oob;
+	}
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+
+
+
+#undef DEBUG_UNCERR
+#ifdef DEBUG_UNCERR
+static uint32_t uncErrOob[7];
+static u_char uncErrData[512];
+#endif
+
+
+
+/*
+ * Diagnostic dump after a fatal/unexpected condition: print a stack trace
+ * plus the full NAND register file (register holes read as 0).  gdebug is
+ * temporarily zeroed so the dump is not interleaved with debug chatter.
+ */
+void brcmnand_post_mortem_dump(struct mtd_info* mtd, loff_t offset)
+{
+	int i;
+
+//Avoid garbled output
+int saveDebug=gdebug;
+gdebug=0;
+	
+	printk("%s at offset %llx\n", __FUNCTION__, offset);
+	dump_stack();
+	
+	printk("NAND registers snapshot \n");
+	for (i=0; i<NUM_NAND_REGS; i++) {
+		uint32_t reg = BCHP_NAND_REVISION+(i*4);
+		uint32_t regval;
+
+		if (inRegisterHoles(reg)) { // No NAND register at 0x281c
+			regval = 0;
+		}
+		else {
+			regval = brcmnand_ctrl_read(reg);
+		}
+		if ((i % 4) == 0) {
+			printk("\n%08x:", reg);
+		}
+		printk("  %08x", regval);
+	}
+
+gdebug=saveDebug;
+
+}
+
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_4
+/*
+ * Read the OOB bytes beyond 16B
+ *
+ * i:		DW index into OOB area
+ * p32:	DW pointer into OOB area
+ *
+ * Copies SPARE_AREA_READ_OFS_10.. registers into p32[i..], handling the
+ * odd tail (eccOobSize is 27 bytes, not DW-aligned) byte-by-byte.
+ * Compiled away to a no-op on controllers older than v3.4.
+ */
+static inline 
+void read_ext_spare_area(struct brcmnand_chip* chip, int i, uint32_t* p32)
+{
+	uint32_t dwoob;
+	int j;
+	int oobi;						/* Byte index into OOB area */
+	u_char* p8 = (u_char*) p32; 	/* Byte pointer into OOB area */
+	u_char* q = (u_char*) &dwoob;
+
+	/* If HW support it, copy OOB bytes beyond 16 bytes */
+
+	/* p8 and oobi index into byte-wise OOB, p32 index into DW-wise OOB */
+	oobi = i*4; 
+	
+	for (; i<8 && oobi < chip->eccOobSize; i++, oobi += 4) {
+		
+
+		/* This takes care of Endian-ness of the registers */
+		dwoob = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_10 + (i-4)*4));
+if (gdebug > 3)
+{ printk("%s: dwoob=%08x\n", __FUNCTION__, dwoob);}
+
+		/* OOB size is an odd 27 bytes */
+		if (oobi+4 <= chip->eccOobSize) {
+			p32[i]= dwoob;
+		}
+		else { /* Trailing 3 bytes, column=pgSize+24,25,26*/
+			// remember that p8 = (u_char*) &p32[0];
+			for (j=0; oobi+j < chip->eccOobSize; j++) {
+				p8[oobi+j] = q[j];
+			}
+			break; /* Out of i loop */
+		}
+	}
+}
+
+#else
+#define read_ext_spare_area(...)
+#endif
+
+
+
+/*
+ * Returns 0 on success
+ * Expect a controller read was done before hand, and that the OOB data are read into NAND registers.
+ *
+ * Workaround for the controller raising an uncorrectable-ECC error when
+ * reading an erased-but-never-written block: re-read the page with ECC
+ * disabled, inspect the OOB/ECC bytes, and if the page looks erased (or
+ * all-0xFF for Hamming) fill the caller's buffer with 0xFF and report
+ * success; otherwise return BRCMNAND_UNCORRECTABLE_ECC_ERROR.
+ *
+ * buffer:  destination for one ECC slice of data (filled with 0xFF when
+ *          the error is judged false)
+ * oobarea: optional OOB destination; a static scratch buffer is used when
+ *          NULL (NOTE(review): static oobbuf makes this non-reentrant)
+ * offset:  flash offset of the slice, used for addressing and diagnostics
+ */
+static int brcmnand_handle_false_read_ecc_unc_errors(
+		struct mtd_info* mtd, 
+		void* buffer, u_char* oobarea, loff_t offset)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	//int retries = 2;
+	static uint32_t oobbuf[8]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oobbuf[0]);
+	u_char* p8 = (u_char*) p32;
+	int ret = 0;
+
+	/* Flash chip returns errors 
+
+	|| There is a bug in the controller, where if one reads from an erased block that has NOT been written to,
+	|| this error is raised.  
+	|| (Writing to OOB area does not have any effect on this bug)
+	|| The workaround is to also look into the OOB area, to see if they are all 0xFF
+	
+	*/
+	//u_char oobbuf[16];
+	int erased, allFF;
+	int i;
+	uint32_t acc0;
+	//int valid;
+
+	/*
+	 * First disable Read ECC then re-try read OOB, because some times, the controller
+	 * just drop the op on ECC errors.
+	 */
+
+#if 1 /* Testing 1 2 3 */
+	/* Disable ECC */
+	acc0 = brcmnand_disable_read_ecc(chip->ctrl->CS[chip->csi]);
+
+	/* Re-issue the page read with ECC checking off */
+	chip->ctrl_writeAddr(chip, offset, 0);
+	PLATFORM_IOFLUSH_WAR();
+	chip->ctrl_write(BCHP_NAND_CMD_START, OP_PAGE_READ);
+
+	// Wait until cache is filled up, disabling ECC checking
+	(void) brcmnand_spare_is_valid(mtd, BRCMNAND_FL_READING, 1);
+	
+	// Restore acc
+	brcmnand_restore_ecc(chip->ctrl->CS[chip->csi], acc0);
+#endif
+
+	/* First 16 OOB bytes come from the SPARE_AREA_READ_OFS registers */
+	for (i = 0; i < 4; i++) {
+		p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+	}
+
+	read_ext_spare_area(chip, i, p32);
+	
+	if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
+		/* 
+		 * THT 9/16/10: Also guard against the case where all data bytes are 0x11 or 0x22,
+		 * in which case, this is a bonafide Uncorrectable error 
+		 *
+		 * Look at first 4 bytes from the flash, already guaranteed to be 512B aligned
+		 */
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+		uint32_t* pFirstDW = (uint32_t*) (chip->vbase + offset);
+#else
+		uint32_t* pFirstDW = (uint32_t*)  chip->vbase;
+#endif
+
+		/* Hamming ECC bytes live at OOB offsets 6..8 */
+		erased = (p8[6] == 0xff && p8[7] == 0xff && p8[8] == 0xff);
+		/* If first 4 bytes of data are not 0xFFFFFFFF, then this is a real UNC error */
+		allFF = (p8[6] == 0x00 && p8[7] == 0x00 && p8[8] == 0x00 && *pFirstDW == 0xFFFFFFFF);
+		
+if (gdebug > 3 ) 
+{printk("%s: offset=%0llx, erased=%d, allFF=%d\n", 
+__FUNCTION__, offset, erased, allFF);
+print_oobbuf(p8, 16);
+}
+	}
+	else if (chip->ecclevel >= BRCMNAND_ECC_BCH_1 && chip->ecclevel <= BRCMNAND_ECC_BCH_12) {
+		erased = 1;
+		allFF = 0; // Not sure for BCH.
+		// For BCH-n, the ECC bytes are at the end of the OOB area
+		for (i=chip->eccOobSize-chip->eccbytes; i<min(16,chip->eccOobSize); i++) {
+			erased = erased && (p8[i] == 0xff);
+			if (!erased) {
+				if (gdebug > 3 )
+					printk("p8[%d]=%02x\n", i, p8[i]); 
+				break;
+			}
+		}
+if (gdebug > 3 ) 
+{printk("%s: offset=%0llx, i=%d from %d to %d, eccOobSize=%d, eccbytes=%d, erased=%d, allFF=%d\n",
+__FUNCTION__, offset, i, chip->eccOobSize-chip->eccbytes, chip->eccOobSize,
+chip->eccOobSize, chip->eccbytes, erased, allFF);}
+	}
+	else {
+		printk("BUG: Unsupported ECC level %d\n", chip->ecclevel);
+		BUG();
+	}
+			
+	if ( erased || allFF) {
+		/* 
+		 * For the first case, the slice is an erased block, and the ECC bytes are all 0xFF,
+		 * for the 2nd, all bytes are 0xFF, so the Hamming Codes for it are all zeroes.
+		 * The current version of the BrcmNAND controller treats these as un-correctable errors.
+		 * For either case, fill data buffer with 0xff and return success.  The error has already
+		 * been cleared inside brcmnand_verify_ecc.
+		 * Both case will be handled correctly by the BrcmNand controller in later releases.
+		 */
+		p32 = (uint32_t*) buffer;
+		for (i=0; i < ECCSIZE(mtd)/4; i++) {
+			p32[i] = 0xFFFFFFFF;
+		}
+		ret = 0; // Success
+
+	}
+	else {
+		/* Real error: Disturb read returns uncorrectable errors */
+		ret = BRCMNAND_UNCORRECTABLE_ECC_ERROR;
+if (gdebug > 3 ) {printk("<-- %s: indeed uncorrectable ecc error\n", __FUNCTION__);}
+
+#ifdef DEBUG_UNCERR
+		
+		// Copy the data buffer 
+		brcmnand_from_flash_memcpy32(chip, uncErrData, offset, ECCSIZE(mtd));
+		for (i = 0; i < 4; i++) {
+			uncErrOob[i] = p32[i];
+		}
+
+		printk("%s: Uncorrectable error at offset %llx\n", __FUNCTION__, offset);
+		
+		printk("Data:\n");
+		print_databuf(uncErrData, ECCSIZE(mtd));
+		printk("Spare Area\n");
+		print_oobbuf((unsigned char*) &uncErrOob[0], 16);
+		
+		brcmnand_post_mortem_dump(mtd, offset);
+				
+#endif
+	}
+
+	return ret;
+}
+
+// THT PR50928: if wr_preempt is disabled, enable it to clear error
+/*
+ * Recovery hook for a controller timeout: if WR_PREEMPT is currently off
+ * in ACC_CONTROL and we have retried at most twice, turn it on and return
+ * 1 (caller should retry the op).  Returns 0 when no further recovery is
+ * available.
+ */
+int brcmnand_handle_ctrl_timeout(struct mtd_info* mtd, int retry)
+{
+	uint32_t acc;
+	struct brcmnand_chip* __maybe_unused chip = mtd->priv; 
+	
+	// First check to see if WR_PREEMPT is disabled
+	acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	if (retry <= 2 && 0 == (acc & BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK)) {
+		acc |= BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK;
+		brcmnand_ctrl_write(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]), acc);
+		printk("Turn on WR_PREEMPT_EN\n");
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Software Hamming ECC: compute the 3-byte ECC (ecc_code[0..2]) over one
+ * 512-byte sector (data), bit-parity scheme P1..P2048 matching the
+ * Broadcom NAND controller's Hamming layout.
+ *
+ * data:     512 bytes of sector data; if not 4-byte aligned it is first
+ *           copied into a static bounce buffer
+ * ecc_code: out, 3 ECC bytes
+ *
+ * NOTE(review): o_ecc/temp/b_din are static — this function is not
+ * reentrant and not safe for concurrent callers; confirm all call sites
+ * are serialized.
+ */
+void  brcmnand_Hamming_ecc(const uint8_t *data, uint8_t *ecc_code)
+{
+
+    int i,j;
+    static uint8_t o_ecc[24], temp[10];
+    static uint32_t b_din[128];
+    uint32_t* i_din = &b_din[0];
+    unsigned long pre_ecc;
+
+#if 0
+    // THT Use this block if there is a need for endian swapping
+    uint32_t i_din[128];
+    uint32_t* p32 = (uint32_t*) data; //  alignment guaranteed by caller.
+    
+	
+    for(i=0;i<128;i++) {
+        //i_din[i/4] = (long)(data[i+3]<<24 | data[i+2]<<16 | data[i+1]<<8 | data[i]);
+        i_din[i] = /*le32_to_cpu */(p32[i]);
+        //printk( "i_din[%d] = 0x%08.8x\n", i/4, i_din[i/4] );
+    }
+
+#else
+    	/* Use the data in place when aligned; bounce-copy otherwise */
+    	if (unlikely((unsigned int) data & 0x3)) {
+		memcpy((uint8_t*) i_din, data, 512);
+    	}
+	else  {
+		i_din = (uint32_t*) data;    
+    	}
+#endif
+
+    memset(o_ecc, 0, sizeof(o_ecc));
+
+    /* Per-DW pass: temp[0..9] accumulate the P1..P16 column parities */
+    for(i=0;i<128;i++){
+        memset(temp, 0, sizeof(temp));
+        
+        for(j=0;j<32;j++){
+            temp[0]^=((i_din[i]& 0x55555555)>>j)&0x1;
+            temp[1]^=((i_din[i]& 0xAAAAAAAA)>>j)&0x1;
+            temp[2]^=((i_din[i]& 0x33333333)>>j)&0x1;
+            temp[3]^=((i_din[i]& 0xCCCCCCCC)>>j)&0x1;
+            temp[4]^=((i_din[i]& 0x0F0F0F0F)>>j)&0x1;
+            temp[5]^=((i_din[i]& 0xF0F0F0F0)>>j)&0x1;
+            temp[6]^=((i_din[i]& 0x00FF00FF)>>j)&0x1;
+            temp[7]^=((i_din[i]& 0xFF00FF00)>>j)&0x1;
+            temp[8]^=((i_din[i]& 0x0000FFFF)>>j)&0x1;
+            temp[9]^=((i_din[i]& 0xFFFF0000)>>j)&0x1;
+        }
+
+        for(j=0;j<10;j++)
+            o_ecc[j]^=temp[j];
+            
+        //o_ecc[0]^=temp[0];//P1'
+        //o_ecc[1]^=temp[1];//P1
+        //o_ecc[2]^=temp[2];//P2'
+        //o_ecc[3]^=temp[3];//P2
+        //o_ecc[4]^=temp[4];//P4'
+        //o_ecc[5]^=temp[5];//P4
+        //o_ecc[6]^=temp[6];//P8'
+        //o_ecc[7]^=temp[7];//P8
+        //o_ecc[8]^=temp[8];//P16'
+        //o_ecc[9]^=temp[9];//P16
+        
+        /* Row parities P32..P2048 selected by bits of the DW index i */
+        if(i%2){
+            for(j=0;j<32;j++)
+                o_ecc[11]^=(i_din[i]>>j)&0x1;//P32
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[10]^=(i_din[i]>>j)&0x1;//P32'
+        }
+                
+        if((i&0x3)<2){
+            for(j=0;j<32;j++)
+                o_ecc[12]^=(i_din[i]>>j)&0x1;//P64'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[13]^=(i_din[i]>>j)&0x1;//P64
+        }
+        
+        if((i&0x7)<4){
+            for(j=0;j<32;j++)
+                o_ecc[14]^=(i_din[i]>>j)&0x1;//P128'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[15]^=(i_din[i]>>j)&0x1;//P128
+        }
+        
+        if((i&0xF)<8){
+            for(j=0;j<32;j++)
+                o_ecc[16]^=(i_din[i]>>j)&0x1;//P256'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[17]^=(i_din[i]>>j)&0x1;//P256
+        }
+        
+        if((i&0x1F)<16){
+            for(j=0;j<32;j++)
+                o_ecc[18]^=(i_din[i]>>j)&0x1;//P512'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[19]^=(i_din[i]>>j)&0x1;//P512
+        }
+        
+        if((i&0x3F)<32){
+            for(j=0;j<32;j++)
+                o_ecc[20]^=(i_din[i]>>j)&0x1;//P1024'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[21]^=(i_din[i]>>j)&0x1;//P1024
+        }
+        
+        if((i&0x7F)<64){
+            for(j=0;j<32;j++)
+                o_ecc[22]^=(i_din[i]>>j)&0x1;//P2048'
+        }
+        else{
+            for(j=0;j<32;j++)
+                o_ecc[23]^=(i_din[i]>>j)&0x1;//P2048
+        }
+        // print intermediate value
+        pre_ecc = 0;
+        for(j=23;j>=0;j--) {
+            pre_ecc = (pre_ecc << 1) | (o_ecc[j] ? 1 : 0 ); 
+        }
+//        printf( "pre_ecc[%d] = 0x%06.6x\n", i, pre_ecc );
+    }
+    //xprintf("P16':%x P16:%x P8':%x P8:%x\n",o_ecc[8],o_ecc[9],o_ecc[6],o_ecc[7]);
+    //xprintf("P1':%x P1:%x P2':%x P2:%x\n",o_ecc[0],o_ecc[1],o_ecc[2],o_ecc[3]);
+ // ecc_code[0] = ~(o_ecc[13]<<7 | o_ecc[12]<<6 | o_ecc[11]<<5 | o_ecc[10]<<4 | o_ecc[9]<<3 | o_ecc[8]<<2 | o_ecc[7]<<1 | o_ecc[6]);
+ // ecc_code[1] = ~(o_ecc[21]<<7 | o_ecc[20]<<6 | o_ecc[19]<<5 | o_ecc[18]<<4 | o_ecc[17]<<3 | o_ecc[16]<<2 | o_ecc[15]<<1 | o_ecc[14]);
+ // ecc_code[2] = ~(o_ecc[5]<<7 | o_ecc[4]<<6 | o_ecc[3]<<5 | o_ecc[2]<<4 | o_ecc[1]<<3 | o_ecc[0]<<2 | o_ecc[23]<<1 | o_ecc[22]);
+
+    /* Pack the 24 parity bits little-endian-bitwise into the 3 ECC bytes */
+    ecc_code[0] = (o_ecc[ 7]<<7 | o_ecc[ 6]<<6 | o_ecc[ 5]<<5 | o_ecc[ 4]<<4 | o_ecc[ 3]<<3 | o_ecc[ 2]<<2 | o_ecc[ 1]<<1 | o_ecc[ 0]);
+    ecc_code[1] = (o_ecc[15]<<7 | o_ecc[14]<<6 | o_ecc[13]<<5 | o_ecc[12]<<4 | o_ecc[11]<<3 | o_ecc[10]<<2 | o_ecc[ 9]<<1 | o_ecc[ 8]);
+    ecc_code[2] = (o_ecc[23]<<7 | o_ecc[22]<<6 | o_ecc[21]<<5 | o_ecc[20]<<4 | o_ecc[19]<<3 | o_ecc[18]<<2 | o_ecc[17]<<1 | o_ecc[16]);
+    // printf("BROADCOM          ECC:0x%02X 0x%02X 0x%02X \n",ecc_code[0],ecc_code[1],ecc_code[2]);
+        //xprintf("BROADCOM          ECC:0x%02X 0x%02X 0x%02X \n",test[0],test[1],test[2]);
+}
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_1_1
+/* No workaround needed, fixed in HW */
+#define brcmnand_Hamming_WAR(...) (0)
+
+#else
+
+/*
+ * Workaround for Hamming ECC when correctable error is in the ECC bytes.
+ * Returns 0 if error was in data (no action needed), 1 if error was in ECC (use uncorrected data instead)
+ */
+static int brcmnand_Hamming_WAR(struct mtd_info* mtd, loff_t offset, void* buffer,
+	u_char* inp_hwECC, u_char* inoutp_swECC)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	/* Static scratch buffer for the raw (ECC-off) re-read: not reentrant.
+	 * NOTE(review): assumes the caller holds the exclusive device lock. */
+	static uint32_t ucdata[128];
+	u_char* uncorr_data = (u_char*) ucdata;
+	uint32_t  acc0;
+	int valid;
+	//unsigned long irqflags;
+	
+	int ret = 0, retries=2;
+	
+	/* Disable ECC */
+	acc0 = brcmnand_disable_read_ecc(chip->ctrl->CS[chip->csi]);
+
+	/* Re-issue the page read with ECC disabled, retrying on controller
+	 * timeout (up to 3 attempts: retries counts 2..0). */
+	while (retries >= 0) {
+		if (wr_preempt_en) {
+			//local_irq_save(irqflags);
+		}
+
+/* This register doesn't exist on DSL chips. */
+#if !defined(CONFIG_BCM_KF_NAND)
+		// Mask Interrupt 
+		BDEV_WR(BCHP_HIF_INTR2_CPU_MASK_SET, HIF_INTR2_ERR_MASK);
+		// Clear Status Mask for sector 0 workaround
+		BDEV_WR(BCHP_HIF_INTR2_CPU_CLEAR, 
+			HIF_INTR2_ERR_MASK|BCHP_HIF_INTR2_CPU_STATUS_NAND_CTLRDY_INTR_MASK);
+#endif
+
+#if 0
+		/* Already cleared with cpu-clear */
+		intr_status = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+		intr_status &= ~(HIF_INTR2_ERR_MASK);
+		BDEV_WR(BCHP_HIF_INTR2_CPU_STATUS, intr_status);
+#endif
+		
+		chip->ctrl_writeAddr(chip, offset, 0);
+		PLATFORM_IOFLUSH_WAR();
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PAGE_READ);
+		
+		// Wait until cache is filled up
+		valid = brcmnand_cache_is_valid(mtd, BRCMNAND_FL_READING, offset);
+
+		if (wr_preempt_en) {
+			//local_irq_restore(irqflags);
+		}	
+
+		if (valid ==  BRCMNAND_TIMED_OUT) {
+			//Read has timed out 
+			ret = -ETIMEDOUT;
+			retries--;
+			// THT PR50928: if wr_preempt is disabled, enable it to clear error
+			wr_preempt_en = brcmnand_handle_ctrl_timeout(mtd, retries);
+			continue;  /* Retry */
+		}
+		else {
+			ret = 0;
+			break;
+		}
+	}
+
+	/* All retries timed out: give up and return -ETIMEDOUT (still in ret). */
+	if (retries < 0) {
+		goto restore_ecc;
+	}
+
+	// Reread the uncorrected buffer.
+	brcmnand_from_flash_memcpy32(chip, uncorr_data, offset, ECCSIZE(mtd));
+
+	// Calculate Hamming Codes
+	brcmnand_Hamming_ecc(uncorr_data, inoutp_swECC);
+
+	// Compare ecc0 against ECC from HW
+	if ((inoutp_swECC[0] == inp_hwECC[0] && inoutp_swECC[1] == inp_hwECC[1] && 
+		inoutp_swECC[2] == inp_hwECC[2])
+		|| (inoutp_swECC[0] == 0x0 && inoutp_swECC[1] == 0x0 && inoutp_swECC[2] == 0x0 &&
+		     inp_hwECC[0] == 0xff && inp_hwECC[1] == 0xff && inp_hwECC[2] == 0xff)) {
+		// Error was in data bytes, correction made by HW is good, 
+		// or block was erased and no data written to it yet,
+		// send corrected data.
+		// printk("CORR error was handled properly by HW\n");
+		ret = 0;
+	}
+	else { // Error was in ECC, send uncorrected data
+		memcpy(buffer, uncorr_data, 512);
+	
+		ret = 1;
+		printk("CORR error was handled by SW at offset %0llx, HW=%02x%02x%02x, SW=%02x%02x%02x\n", 
+			offset, inp_hwECC[0], inp_hwECC[1], inp_hwECC[2],
+			inoutp_swECC[0], inoutp_swECC[1], inoutp_swECC[2]);
+	}
+
+restore_ecc:
+	// Restore acc
+	brcmnand_restore_ecc(chip->ctrl->CS[chip->csi], acc0);
+	return ret;
+}
+#endif
+
+
+
+/**
+ * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
+ * Assuming brcmnand_get_device() has been called to obtain exclusive lock
+ * @param mtd		MTD data structure
+ * @param oobarea	Spare area, pass NULL if not interested
+ * @param buffer	the databuffer to put/get data, pass NULL if only spare area is wanted.
+ * @param offset	offset to read from or write to, must be 512B aligned.
+ *
+ * Caller is responsible to pass a buffer that is
+ * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
+ * (2) 4-byte aligned.
+ *
+ * Read the cache area into buffer.  The size of the cache is mtd->eccsize and is always 512B.
+ */
+
+//****************************************
+int in_verify;
+//****************************************
+
+static int brcmnand_ctrl_posted_read_cache(struct mtd_info* mtd, 
+		void* buffer, u_char* oobarea, loff_t offset)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	/* 512B ECC-sector boundary; a non-aligned offset is rejected below. */
+	loff_t sliceOffset = offset & (~ (ECCSIZE(mtd) - 1));
+	int i, ret = 0;
+	static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+	u_char* __maybe_unused p8 = (u_char*) p32;
+
+	//unsigned long irqflags;	
+	int retries = 5, done=0;
+	int valid = 0;
+
+
+if (gdebug > 3 ) 
+{printk("%s: offset=%0llx, oobarea=%p\n", __FUNCTION__, offset, oobarea);}
+
+
+	if (unlikely(offset - sliceOffset)) {
+		printk(KERN_ERR "%s: offset %08x is not cache aligned, sliceOffset=%08lx, CacheSize=%d\n", 
+			__FUNCTION__, (unsigned int) offset, (unsigned long) sliceOffset, ECCSIZE(mtd));
+		return -EINVAL;
+	}
+
+	/* Issue the page read and classify the controller's result; retry on
+	 * timeout (up to 5 attempts) — all other outcomes terminate the loop. */
+	while (retries > 0 && !done) {
+/* This register doesn't exist on DSL chips. */
+#if !defined(CONFIG_BCM_KF_NAND)
+		uint32_t intr_status;  
+		
+		if (wr_preempt_en) {
+			//local_irq_save(irqflags);
+		}
+
+		// Mask Interrupt 
+		BDEV_WR(BCHP_HIF_INTR2_CPU_MASK_SET, HIF_INTR2_ERR_MASK);
+		// Clear Status Mask for sector 0 workaround
+		BDEV_WR(BCHP_HIF_INTR2_CPU_CLEAR, 
+			HIF_INTR2_ERR_MASK|BCHP_HIF_INTR2_CPU_STATUS_NAND_CTLRDY_INTR_MASK);
+if (gdebug > 3) {
+intr_status = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+printk("%s: before intr_status=%08x\n", __FUNCTION__, intr_status);
+}
+#endif
+
+#if 0 /* Already done by cpu-clear */
+		intr_status = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+		intr_status &= ~(HIF_INTR2_ERR_MASK);
+		BDEV_WR(BCHP_HIF_INTR2_CPU_STATUS, intr_status);
+#endif
+		
+		chip->ctrl_writeAddr(chip, sliceOffset, 0);
+		PLATFORM_IOFLUSH_WAR();
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PAGE_READ);
+
+		// Wait until cache is filled up
+		valid = brcmnand_cache_is_valid(mtd, BRCMNAND_FL_READING, offset);
+
+		if (wr_preempt_en) {
+			//local_irq_restore(irqflags);
+		}
+
+		switch (valid) {
+
+		case BRCMNAND_SUCCESS: /* Success, no errors */
+			// Remember last good 512B-sector read.  Needed for HIF_INTR2 war.
+			//if (0 == gLastKnownGoodEcc)
+				gLastKnownGoodEcc = offset;
+			
+			/* FALLTHROUGH */
+
+		case BRCMNAND_CORRECTABLE_ECC_ERROR: 
+			if (buffer) {
+				brcmnand_from_flash_memcpy32(chip, buffer, offset, ECCSIZE(mtd));
+			}
+
+#ifndef DEBUG_HW_ECC
+			if (oobarea || (ret == BRCMNAND_CORRECTABLE_ECC_ERROR)) 
+#endif
+			{
+				/* Copy the spare (OOB) area out of the controller registers. */
+				PLATFORM_IOFLUSH_WAR();
+				for (i = 0; i < 4; i++) {
+					p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+				}
+
+				read_ext_spare_area(chip, i, p32);
+
+if (gdebug > 3) 
+{printk("%s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); print_oobbuf(oobarea, chip->eccOobSize);}
+			}
+
+ #ifndef DEBUG_HW_ECC // Comment out for debugging
+
+			/* Make sure error was not in ECC bytes */
+			if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR && 
+				chip->ecclevel == BRCMNAND_ECC_HAMMING) 
+ #endif
+
+			{
+				
+				char ecc0[3]; // SW ECC, manually calculated
+				
+				/* Hamming WAR returns 1 when the "correctable" error was
+				 * actually in the ECC bytes; patch the OOB with the SW value. */
+				if (brcmnand_Hamming_WAR(mtd, offset, buffer, &p8[6], &ecc0[0])) {
+					/* Error was in ECC, update it from calculated value */
+					if (oobarea) {
+						oobarea[6] = ecc0[0];
+						oobarea[7] = ecc0[1];
+						oobarea[8] = ecc0[2];
+					}
+				}
+				
+			}
+
+			
+			// SWLINUX-1495:		
+			if (valid == BRCMNAND_CORRECTABLE_ECC_ERROR) 
+				ret = BRCMNAND_CORRECTABLE_ECC_ERROR;
+			else
+				ret = 0;
+		 
+			done = 1;
+			break;
+			
+		case BRCMNAND_UNCORRECTABLE_ECC_ERROR:
+			ret = brcmnand_handle_false_read_ecc_unc_errors(mtd, buffer, oobarea, offset);
+			done = 1;
+			break;
+			
+		case BRCMNAND_FLASH_STATUS_ERROR:
+			printk(KERN_ERR "brcmnand_cache_is_valid returns 0\n");
+			ret = -EBADMSG;
+			done = 1;
+			break;		
+			
+		case BRCMNAND_TIMED_OUT:
+			//Read has timed out 
+			ret = -ETIMEDOUT;
+			if (!wr_preempt_en) {
+				retries--;
+				// THT PR50928: if wr_preempt is disabled, enable it to clear error
+				wr_preempt_en = brcmnand_handle_ctrl_timeout(mtd, retries);
+				continue;  /* Retry */
+			}
+			else {
+				done = 1;
+				break;
+			}
+
+		default:
+			BUG_ON(1);
+			/* Should never gets here */
+			ret = -EFAULT;
+			done = 1;
+		}
+	
+	}
+
+	/* If the timeout handler enabled write-preempt, turn it back off. */
+	if (wr_preempt_en) {
+		uint32_t acc;
+		
+		acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	
+		acc &= ~BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK;
+		brcmnand_ctrl_write(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]), acc);
+	}
+
+	
+if (gdebug > 3 ) {
+printk("<-- %s: offset=%0llx\n", __FUNCTION__, offset);
+print_databuf(buffer, 32);
+}
+
+/* Optional build-time read-verify: recompute the Hamming code in SW and
+ * cross-check it against the ECC bytes read from the OOB. */
+#if defined( EDU_DEBUG ) || defined (BRCMNAND_READ_VERIFY )
+//if (in_verify <=0) 
+if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
+u_char edu_sw_ecc[4];
+
+	brcmnand_Hamming_ecc(buffer, edu_sw_ecc);
+
+if ((p8[6] != edu_sw_ecc[0] || p8[7] != edu_sw_ecc[1] || p8[8] != edu_sw_ecc[2])
+	&& !(p8[6]==0xff && p8[7]==0xff && p8[8]==0xff &&
+		edu_sw_ecc[0]==0x0 && edu_sw_ecc[1]==0x0 && edu_sw_ecc[2]==0x0)
+) {
+	 printk("!!!!!!!!! %s: offset=%0llx ECC=%02x%02x%02x, OOB:",
+in_verify < 0 ? "WR" : "RD",
+offset, edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
+	 print_oobbuf(p8, 16);
+	 BUG();
+}
+}
+#endif
+
+//gdebug=0;
+
+	return ret;
+}
+
+
+/*
+ * Clear the controller cache by reading at a location we don't normally read
+ */
+static void __maybe_unused debug_clear_ctrl_cache(struct mtd_info* mtd)
+{
+	/* clear the internal cache by writing a new address */
+	struct brcmnand_chip* chip = mtd->priv;
+	loff_t offset = chip->chipSize-chip->blockSize; // Start of BBT region
+	//uint32_t intr_status;  
+
+/* This register doesn't exist on DSL chips. */
+#if !defined(CONFIG_BCM_KF_NAND)
+	// Mask Interrupt 
+	BDEV_WR(BCHP_HIF_INTR2_CPU_MASK_SET, HIF_INTR2_ERR_MASK);
+	// Clear Status Mask for sector 0 workaround
+	BDEV_WR(BCHP_HIF_INTR2_CPU_CLEAR, 
+		HIF_INTR2_ERR_MASK|BCHP_HIF_INTR2_CPU_STATUS_NAND_CTLRDY_INTR_MASK);
+#endif
+
+#if 0
+	/* Already cleared with cpu-clear */
+	intr_status = BDEV_RD(BCHP_HIF_INTR2_CPU_STATUS);
+	intr_status &= ~(HIF_INTR2_ERR_MASK);
+	BDEV_WR(BCHP_HIF_INTR2_CPU_STATUS, intr_status);
+#endif
+
+	/* Issue a page read at the BBT location purely for its side effect of
+	 * replacing the controller's cached sector; the data is discarded. */
+	chip->ctrl_writeAddr(chip, offset, 0); 
+	PLATFORM_IOFLUSH_WAR();
+	chip->ctrl_write(BCHP_NAND_CMD_START, OP_PAGE_READ);
+
+	// Wait until cache is filled up
+	(void) brcmnand_cache_is_valid(mtd, BRCMNAND_FL_READING, offset);
+}
+	
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+
+extern int EDU_buffer_OK(volatile void* addr, int command);
+
+
+#if 1
+uint32_t debug_buf32[512];
+static u_char* ver_buf = (u_char*) &debug_buf32[0];
+static u_char ver_oob[16];
+
+
+
+	
+/*
+ * Debug-only cross-check of an EDU read: re-read the same sector through the
+ * controller path and compare both the OOB bytes and the SW-computed Hamming
+ * code of the EDU data buffer.  BUG()s on any mismatch.
+ */
+static inline void debug_EDU_read(struct mtd_info* mtd, 
+        void* edu_buffer, u_char* edu_oob, loff_t offset, uint32_t intr_status, 
+        uint32_t edu_status, u_char* edu_sw_ecc)
+{
+	int ret;
+	struct brcmnand_chip* chip = mtd->priv;
+
+/* NOTE(review): in_verify stays 1 on the early return below — confirm intended. */
+in_verify = 1;	
+
+	if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+		// Nothing to verify for now
+		return;
+	}
+	
+	/* First off, clear the controller internal cache by writing a new address */
+	debug_clear_ctrl_cache(mtd);
+	
+	/* Now do the actual verification read */
+	ret = brcmnand_ctrl_posted_read_cache(mtd, ver_buf, ver_oob, offset);
+in_verify = 0;
+
+	if (ret) {
+		printk("************** %s: int_posted_read failed at %0llx\n", __FUNCTION__, offset);
+	}
+	/* Verify OOB area */
+	if (edu_oob) {
+		if (0 != memcmp(edu_oob, ver_oob, 16)) {
+			printk("$$$$$$$$$$$$$$$ %s: offset=%0llx, EDU_ECC=%02x%02x%02x, int_ECC=%02x%02x%02x\n",
+				__FUNCTION__, offset,
+				edu_oob[6], edu_oob[7], edu_oob[8], ver_oob[6], ver_oob[7], ver_oob[8]);
+			printk("EDU_oob:"); print_oobbuf(edu_oob, 16);
+			printk("int_oob:"); print_oobbuf(ver_oob, 16);
+			BUG();
+		}
+	}
+
+	/* Verify Data area */
+	brcmnand_Hamming_ecc(edu_buffer, edu_sw_ecc);
+
+	if (ver_oob[6] != edu_sw_ecc[0] || ver_oob[7] != edu_sw_ecc[1] || 
+		ver_oob[8] != edu_sw_ecc[2]) {
+			
+		if (ver_oob[6] == 0xff && ver_oob[7] == 0xff && ver_oob[8] == 0xff 
+			&& edu_sw_ecc[0] == 0 && edu_sw_ecc[1] == 0 && edu_sw_ecc[2] == 0)
+			; // Its OK. block was just erased, so posted_read returns all 0xFF while Hamming computes to all zeroes.
+		else {
+			printk("@@@@@@@@@@@@@@@ %s: offset=%0llx, INTR_status=%08x, EDU_status=%08x, int_ECC=%02x%02x%02x, EDU_ECC=%02x%02x%02x\n",
+				__FUNCTION__, offset, intr_status, edu_status,
+				ver_oob[6], ver_oob[7], ver_oob[8], edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
+
+			printk("------------ EDU Buffer:\n");
+			print_databuf(edu_buffer, 512);
+			printk("\n\n++++++++++++INT Buffer:\n");
+			print_databuf(ver_buf, 512);
+			BUG();
+		}
+	}
+}
+#endif
+
+
+#ifdef EDU_DEBUG_4
+/*
+ * EDU_DEBUG_4 verification: re-read the sector via the controller path into
+ * ctrl_buf/ctrl_oob and byte-compare against the EDU result; BUG() on mismatch.
+ * Always returns 0 (BUG()s instead of returning an error).
+ */
+int edu_read_verify(struct mtd_info *mtd, char* buffer, char* oobarea, loff_t offset)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+/* NOTE(review): ctrlret is assigned below but never checked. */
+int ctrlret;
+
+PRINTK("%s: buffer=%08x, ctrlbuf=%08x, oobarea=%08x, ctrl_oob=%08x, offset=%08llx\n", __FUNCTION__, 
+	buffer, ctrl_buf, oobarea, ctrl_oob, offset);
+
+
+
+	ctrlret = brcmnand_ctrl_posted_read_cache(mtd, ctrl_buf, ctrl_oob, offset);
+	//verify_edu_buf();
+	// Compare buffer returned from EDU and Ctrl reads:
+	if (0 != memcmp(ctrl_buf, buffer, 512)) {
+printk("$$$$$$$$$$$$ EDU Read: offset=%08llx\n", offset);
+print_databuf(buffer, 512);
+printk("------------ Ctrl Read: \n");
+print_databuf(ctrl_buf, 512);
+		BUG();
+	}
+	if (oobarea) 
+	{
+		if (0 != memcmp(p32, ctrl_oob, 16)) {
+printk("########## Ctrl OOB:\n");
+print_oobbuf(ctrl_oob, 16);
+printk("------------ EDU OOB: \n");
+print_oobbuf(p32, 16);
+/* Which one is correct?  Since the data buffers agree, use Hamming codes */
+			if (chip->ecclevel == BRCMNAND_ECC_HAMMING) 
+			{
+				unsigned char ecc1[3]; // SW ECC, manually calculated
+				brcmnand_Hamming_WAR(mtd, offset, buffer, &ctrl_oob[6], &ecc1[0]);
+				printk("Hamming ECC=%02x%02x%02x\n", ecc1[0], ecc1[1], ecc1[2]);
+			}
+			BUG();
+		}
+	}
+	return 0;
+}
+#endif // Verify EDU on Read
+
+
+/*
+ * Read completion after EDU_Read is called.
+ * In ISR mode, this routine is run in interrupt context
+ */
+int
+brcmnand_edu_read_comp_intr(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	uint32_t intfc_status;
+	int i;
+	static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+	int retries=20;
+	
+	/* This path is for error-free completions only; errors go through
+	 * brcmnand_edu_read_completion() instead. */
+	if (intr_status & HIF_INTR2_EDU_ERR) {
+		printk("%s: Should not call me with EDU ERR\n", __FUNCTION__);
+		BUG();
+	}
+	/* Busy-poll (up to 20 x 5us) for the controller-ready flag before
+	 * touching the spare-area registers. */
+	intfc_status = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+	while (!(intfc_status & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK) && retries > 0) {
+		retries--;
+		udelay(5); // NAND guaranteed to finish read within 90us, this should be plenty of time
+		intfc_status = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+	}
+	if (retries <= 0) {
+		printk("%s: Impossible, HIF_INTR2_CTRL_READY already asserted, intr_status=%08x, offset=%llx\n", 
+			__FUNCTION__, intr_status, offset);
+		//BUG();		Should assert here, but don't want to crash.  HW guy guaranteed that it is set!!!!
+	}
+
+	// Remember last good sector read.  Needed for HIF_INTR2 workaround.
+	gLastKnownGoodEcc = offset;
+  	if (oobarea) 
+	{
+		PLATFORM_IOFLUSH_WAR();
+		for (i = 0; i < 4; i++) {
+			p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+		}
+
+		read_ext_spare_area(chip, i, p32);
+		
+if (gdebug > 3) 
+{printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf((u_char*) &p32[0], chip->eccOobSize);}
+	}      
+
+	return 0;       
+}
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+
+/*
+ * Read WAR after EDU_Read is called, and EDU returns errors.
+ * 
+ * This routine can only be called in process context (for controller version < 3.3)
+ * For controllers vers 3.3 or later, the other routine is called instead.
+ */
+int
+brcmnand_edu_read_completion(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	uint32_t edu_err_status;
+	static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+	u_char* p8 = (u_char*) p32;
+	int ecc;
+	int ret = 0, i;
+
+
+	/* This (pre-3.3) variant may retry via the blocking controller path,
+	 * so it must not run in interrupt context. */
+	if (in_interrupt()) {
+		printk(KERN_ERR "%s cannot be run in interrupt context\n", __FUNCTION__);
+		BUG();
+	}
+
+
+	if (intr_status & HIF_INTR2_EDU_ERR) {
+		if (wr_preempt_en) {
+			//local_irq_restore(irqflags);
+		}
+		edu_err_status = EDU_volatileRead(EDU_ERR_STATUS);
+
+		// Attemp to clear it, but has no effect, (VLSI PR2389) but we still do it for completeness: 	
+		EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000);
+		EDU_volatileWrite(BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
+
+
+/**** WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR */
+		/* Do a dummy read on a known good ECC sector to clear error */
+		if (edu_err_status) {
+			static uint8_t myBuf2[512+31];
+			// EDU aligned
+			uint8_t* tmpBuf = (uint8_t*)  ((((unsigned int) &myBuf2[0]) + 31) & (~31));
+			
+			// We start from the BBT, since these would (hopefully) always be good sectors.
+			loff_t tmpOffset = chip->chipSize - 512;
+
+			// First make sure that there is a last known good sector
+			while (gLastKnownGoodEcc == 0 && tmpOffset >= 0) {
+				ret = brcmnand_ctrl_posted_read_cache(mtd, tmpBuf, NULL, tmpOffset);
+				tmpOffset -= 512;
+			}
+			if (tmpOffset >= 0) {
+				uint32_t __maybe_unused lkgs;
+				// Clear the error condition
+				//(void) brcmnand_EDU_posted_read_cache(mtd, tmpBuf, NULL, gLastKnownGoodEcc);
+
+
+				 // Use Register Array
+				// EDU_ldw = BCHP_PHYSICAL_OFFSET + BCHP_NAND_FLASH_CACHEi_ARRAY_BASE;
+#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+				// Reset EDU
+				ISR_push_request(mtd, tmpBuf, NULL, tmpOffset);
+#else
+				lkgs =  chip->ctrl_writeAddr(chip, gLastKnownGoodEcc, 0);
+				PLATFORM_IOFLUSH_WAR(); 
+				intr_status = EDU_read(buffer, lkgs);
+#endif
+
+				/* Re-do the original read through the controller path now
+				 * that the EDU error state has been flushed. */
+				ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, p8, offset);
+
+				return ret;
+			}
+			// else there can be no workaround possible, use controller read
+			else {
+				return brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
+			}
+		}
+/**** ENDWAR ENDWAR ENDWAR ENDWAR */
+
+
+	}
+		
+	/*
+	 * Wait for Controller ready, which indicates the OOB and buffer are ready to be read.
+	 */
+	ecc = brcmnand_EDU_cache_is_valid(mtd, BRCMNAND_FL_READING,  offset, intr_status);
+
+	if (wr_preempt_en) {
+		//local_irq_restore(irqflags);
+	}
+
+if (gdebug > 3) printk("brcmnand_EDU_cache_is_valid returns ecc=%d\n", ecc);
+
+	switch (ecc) {
+	case BRCMNAND_TIMED_OUT:
+		//Read has timed out 
+/* THT: Here we don't retry using EDU, but use ctrl_read instead */
+PRINTK("++++++++++++++++ %s: EDU_read timed out, trying non-EDU read at offset %0llx\n", 
+__FUNCTION__, offset);
+		ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
+	 	goto out;
+		
+	case BRCMEDU_MEM_BUS_ERROR: /* Not enough bandwidth, or bus hung */
+		/* Retry using int */
+PRINTK("++++++++++++++++++++++++ %s: EDU_read returns %08x, trying non-EDU read\n", __FUNCTION__, ret);
+		ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
+ 		goto out;
+
+	case BRCMNAND_SUCCESS: /* Success, no errors */
+		// Remember last good sector read.  Needed for HIF_INTR2 workaround.
+		//if (0 == gLastKnownGoodEcc)
+		gLastKnownGoodEcc = offset;
+      		if (oobarea) 
+		{
+			PLATFORM_IOFLUSH_WAR();
+			for (i = 0; i < 4; i++) {
+				p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+			}
+
+			read_ext_spare_area(chip, i, p32);
+			
+if (gdebug > 3) 
+{printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); 
+print_oobbuf((u_char*) &p32[0], chip->eccOobSize);}
+		}      
+		ret = 0;            // Success!
+		break;
+
+	case BRCMEDU_CORRECTABLE_ECC_ERROR:
+		/* FALLTHRU */                
+      case BRCMNAND_CORRECTABLE_ECC_ERROR:
+
+printk("+++++++++++++++ CORRECTABLE_ECC: offset=%0llx  ++++++++++++++++++++\n", offset);
+		// Have to manually copy.  EDU drops the buffer on error - even correctable errors
+		if (buffer) {
+			brcmnand_from_flash_memcpy32(chip, buffer, offset, ECCSIZE(mtd));
+		}
+
+		/* Relies on CTRL_READY set */
+		{
+			PLATFORM_IOFLUSH_WAR();
+			for (i = 0; i < 4; i++) {
+				p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+			}
+
+			read_ext_spare_area(chip, i, p32);
+			
+if (gdebug > 3) {printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); 
+print_oobbuf(oobarea, chip->eccOobSize);}
+		}
+
+#ifndef DEBUG_HW_ECC // Comment out for debugging
+		/* Make sure error was not in ECC bytes */
+		if (chip->ecclevel == BRCMNAND_ECC_HAMMING) 
+#endif
+
+		{
+
+			unsigned char ecc0[3]; // SW ECC, manually calculated
+			if (brcmnand_Hamming_WAR(mtd, offset, buffer, &p8[6], &ecc0[0])) {
+				/* Error was in ECC, update it from calculated value */
+				if (oobarea) {
+					oobarea[6] = ecc0[0];
+					oobarea[7] = ecc0[1];
+					oobarea[8] = ecc0[2];
+				}
+			}
+		}
+		ret = 0;
+
+		/* Report back to UBI, so that it can initiate a refresh */
+		(mtd->ecc_stats.corrected)++;      	
+		break;
+
+	case BRCMEDU_UNCORRECTABLE_ECC_ERROR:
+	case BRCMNAND_UNCORRECTABLE_ECC_ERROR:
+		{
+			int valid;
+		
+
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) ********************\n", offset);
+			/*
+			 * THT: Since EDU does not handle OOB area, unlike the UNC ERR case of the ctrl read,
+			 * we have to explicitly read the OOB, before calling the WAR routine.
+			 */
+			chip->ctrl_writeAddr(chip, offset, 0);
+			chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
+
+			// Wait until spare area is filled up
+
+			valid = brcmnand_spare_is_valid(mtd, BRCMNAND_FL_READING, 1);
+			if (valid > 0) {
+				ret = brcmnand_handle_false_read_ecc_unc_errors(mtd, buffer, oobarea, offset);
+			}
+			else if (valid == 0) {
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) valid==0 ********************\n", offset);
+				ret = -ETIMEDOUT;; /* NOTE(review): stray double semicolon — harmless */
+			}
+			else { // < 0: UNCOR Error
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) valid!=0 ********************\n", offset);
+				ret = -EBADMSG;
+			}
+		}
+		break;
+		
+	case BRCMNAND_FLASH_STATUS_ERROR:
+		printk(KERN_ERR "brcmnand_cache_is_valid returns 0\n");
+		ret = -EBADMSG;
+		break;		
+
+	default:
+		BUG_ON(1);
+		/* Should never gets here */
+		ret = -EFAULT;
+		
+	}
+
+	/* If the timeout handler enabled write-preempt, turn it back off. */
+	if (wr_preempt_en) {
+		uint32_t acc;
+		
+		acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	
+		acc &= ~BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK;
+		brcmnand_ctrl_write(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]), acc);
+	}
+    
+out:
+
+
+//gdebug=0;
+    return ret;
+}
+
+#else
+
+/*
+ * Read WAR after EDU_Read is called, and EDU returns errors.
+ * 
+ * For controllers vers 3.3 or later only, and can run in Interrupt context.
+ */
+int
+brcmnand_edu_read_completion(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	uint32_t edu_err_status;
+	static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+	u_char* __maybe_unused p8 = (u_char*) p32;
+	int ecc;
+	int ret = 0, i;
+
+	/* Unlike the pre-3.3 variant, this version only latches/clears the EDU
+	 * error status here; timeouts and bus errors are returned to the ISR
+	 * for a process-context retry instead of being retried inline. */
+	if (intr_status & HIF_INTR2_EDU_ERR) {
+		
+		edu_err_status = EDU_volatileRead(EDU_ERR_STATUS);
+
+		EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000);
+		EDU_volatileWrite(BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
+
+	}
+		
+	/*
+	 * Wait for Controller ready, which indicates the OOB and buffer are ready to be read.
+	 */
+	ecc = brcmnand_EDU_cache_is_valid(mtd, BRCMNAND_FL_READING,  offset, intr_status);
+
+if (gdebug > 3) printk("brcmnand_EDU_cache_is_valid returns ecc=%d\n", ecc);
+
+	switch (ecc) {
+	case BRCMNAND_TIMED_OUT:
+		//Read has timed out 
+
+		ret = BRCMNAND_TIMED_OUT; // Let ISR handles it with process context retry
+		
+	 	goto out;
+		
+	case BRCMEDU_MEM_BUS_ERROR: /* Not enough bandwidth, or bus hung */
+		
+		/* Retry using int */
+PRINTK("++++++++++++++++++++++++ %s: EDU_read returns %08x, trying non-EDU read\n", __FUNCTION__, ret);
+		ret = BRCMEDU_MEM_BUS_ERROR; // Let ISR handles it with process context retry
+ 		goto out;
+
+	case BRCMNAND_SUCCESS: /* Success, no errors */
+		// Remember last good sector read.  Needed for HIF_INTR2 workaround.
+		//if (0 == gLastKnownGoodEcc)
+		gLastKnownGoodEcc = offset;
+      		if (oobarea) 
+		{
+			PLATFORM_IOFLUSH_WAR();
+			for (i = 0; i < 4; i++) {
+				p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+			}
+
+			read_ext_spare_area(chip, i, p32);
+			
+if (gdebug > 3) {
+printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); 
+print_oobbuf((u_char*) &p32[0], chip->eccOobSize);}
+		}      
+		ret = 0;            // Success!
+		break;
+
+	case BRCMEDU_CORRECTABLE_ECC_ERROR:
+		/* FALLTHRU */                
+      case BRCMNAND_CORRECTABLE_ECC_ERROR:
+
+PRINTK("+++++++++++++++ CORRECTABLE_ECC: offset=%0llx  ++++++++++++++++++++\n", offset);
+		// Have to manually copy.  EDU drops the buffer on error - even correctable errors
+		if (buffer) {
+			brcmnand_from_flash_memcpy32(chip, buffer, offset, ECCSIZE(mtd));
+		}
+
+		/* Relies on CTRL_READY set */
+		{
+			PLATFORM_IOFLUSH_WAR();
+			for (i = 0; i < 4; i++) {
+				p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
+			}
+
+			read_ext_spare_area(chip, i, p32);
+			
+if (gdebug > 3) 
+{printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); 
+print_oobbuf(oobarea, chip->eccOobSize);}
+		}
+
+#ifndef DEBUG_HW_ECC // Comment out for debugging
+		/* Make sure error was not in ECC bytes */
+		if (chip->ecclevel == BRCMNAND_ECC_HAMMING) 
+#endif
+
+		{
+
+			unsigned char ecc0[3]; // SW ECC, manually calculated
+			if (brcmnand_Hamming_WAR(mtd, offset, buffer, &p8[6], &ecc0[0])) {
+				/* Error was in ECC, update it from calculated value */
+				if (oobarea) {
+					oobarea[6] = ecc0[0];
+					oobarea[7] = ecc0[1];
+					oobarea[8] = ecc0[2];
+				}
+			}
+		}
+
+		/* Report back to UBI, so that it can initiate a refresh */
+		(mtd->ecc_stats.corrected)++;     
+		ret = 0;
+		break;
+
+	case BRCMEDU_UNCORRECTABLE_ECC_ERROR:
+	case BRCMNAND_UNCORRECTABLE_ECC_ERROR:
+		{
+			int valid;
+		
+
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) ********************\n", offset);
+			/*
+			 * THT: Since EDU does not handle OOB area, unlike the UNC ERR case of the ctrl read,
+			 * we have to explicitly read the OOB, before calling the WAR routine.
+			 */
+			chip->ctrl_writeAddr(chip, offset, 0);
+			chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
+
+			// Wait until spare area is filled up
+
+			valid = brcmnand_spare_is_valid(mtd, BRCMNAND_FL_READING, 1);
+			if (valid > 0) {
+				ret = brcmnand_handle_false_read_ecc_unc_errors(mtd, buffer, oobarea, offset);
+			}
+			else if (valid == 0) {
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) valid==0 ********************\n", offset);
+				ret = -ETIMEDOUT;; /* NOTE(review): stray double semicolon — harmless */
+			}
+			else { // < 0: UNCOR Error
+PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) valid!=0 ********************\n", offset);
+				ret = -EBADMSG;
+			}
+		}
+		break;
+		
+	case BRCMNAND_FLASH_STATUS_ERROR:
+		printk(KERN_ERR "brcmnand_cache_is_valid returns 0\n");
+		ret = -EBADMSG;
+		break;		
+
+	default:
+		BUG_ON(1);
+		/* Should never gets here */
+		ret = -EFAULT;
+		
+	}
+
+	/* If the timeout handler enabled write-preempt, turn it back off. */
+	if (wr_preempt_en) {
+		uint32_t acc;
+		
+		acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	
+		acc &= ~BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK;
+		brcmnand_ctrl_write(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]), acc);
+	}
+    
+out:
+
+
+//gdebug=0;
+    return ret;
+}
+
+
+#endif // Controller < 3.3, need Read WAR
+
+  #ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+/**
+ * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
+ * Assuming brcmnand_get_device() has been called to obtain exclusive lock
+ * @param mtd        MTD data structure
+ * @param oobarea    Spare area, pass NULL if not interested
+ * @param buffer    the databuffer to put/get data, pass NULL if only spare area is wanted.
+ * @param offset    offset to read from or write to, must be 512B aligned.
+ * @param raw: Ignore BBT bytes when raw = 1 (note: no such parameter appears in the signature below; stale doc?)
+ *
+ * Caller is responsible to pass a buffer that is
+ * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
+ * (2) 4-byte aligned.
+ *
+ * Read the cache area into buffer.  The size of the cache is mtd->eccsize and is always 512B.
+ */
+
+
+static int brcmnand_EDU_posted_read_cache(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset)
+{
+
+	//int ecc;
+
+	struct brcmnand_chip* chip = mtd->priv;
+	loff_t sliceOffset = offset & (~ (ECCSIZE(mtd) - 1));
+	int i, ret = 0;
+	//static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
+	//uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
+	//u_char* p8 = (u_char*) p32;
+	uint32_t EDU_ldw;
+	uint32_t intr_status;
+	unsigned long irqflags; /* NOTE(review): unused (irq save calls are commented out) */
+	int retries = 5;
+	
+/* NOTE(review): save_debug and edu_status are declared but never used here. */
+int save_debug;
+uint32_t edu_status;
+
+#ifdef EDU_DEBUG_2
+u_char* save_buf = buffer;
+#endif
+
+//if((offset >= (0x3a8148 & ~(0x1FF))) && (offset < ((0x3a8298+0x1F) & ~(0x1FF)))) gdebug=4;
+//gdebug = 4;
+if (gdebug > 3) {
+printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);}
+
+#if 0 //def EDU_DEBUG_4
+printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);
+#endif
+
+
+	if (unlikely(offset - sliceOffset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
+                __FUNCTION__, offset, sliceOffset, ECCSIZE(mtd));
+		ret = -EINVAL;
+		return (ret);
+	}
+
+//#if 0 // Testing 1 2 3
+	if (unlikely(!EDU_buffer_OK(buffer, EDU_READ))) 
+//#endif
+	{
+if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
+		/* EDU does not work on non-aligned buffers */
+		ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
+		return (ret);
+	}
+
+	if (wr_preempt_en) {
+		// local_irq_save(irqflags);
+	}
+
+#if defined( EDU_DEBUG_2 )
+	init_edu_buf();
+
+	buffer = edu_buf;
+
+#elif defined( EDU_DEBUG_4 )
+	init_edu_buf();
+	
+#endif
+
+	/* Kick off the EDU DMA read; retry (up to 5 times) when the transfer is
+	 * interrupted (ERESTARTSYS) or the EBI bus times out. */
+	intr_status = 0;
+	do {
+
+		EDU_ldw =  chip->ctrl_writeAddr(chip, sliceOffset, 0);
+		PLATFORM_IOFLUSH_WAR(); 
+
+		if (intr_status & HIF_INTR2_EBI_TIMEOUT) {
+			EDU_volatileWrite(BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
+		}
+		intr_status = EDU_read(buffer, EDU_ldw);
+		
+	} while (retries-- > 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ));
+
+
+	/* Completion routine classifies the result and applies the ECC WARs. */
+	ret = brcmnand_edu_read_completion(mtd, buffer, oobarea, offset, intr_status);
+
+//gdebug=0;
+    return ret;
+}
+
+
+
+/* Non-queue EDU build: dispatch posted_read_cache to the EDU (DMA) implementation */
+static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
+		void*, u_char*, loff_t) = brcmnand_EDU_posted_read_cache;
+  
+  #else /* Queue Mode */
+/* Queue-mode EDU build: dispatch posted_read_cache to the controller (PIO) implementation */
+static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
+		void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
+  #endif
+
+#else 
+/* No EDU configured: posted_read_cache always uses the controller (PIO) implementation */
+static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
+		void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
+#endif
+
+/**
+ * brcmnand_posted_read_oob - [BrcmNAND Interface] Read the spare area
+ * @param mtd		MTD data structure
+ * @param oobarea	Spare area, pass NULL if not interested
+ * @param offset	offset to read from or write to
+ * @param raw		Ignore BBT bytes when raw = 1
+ *
+ * This is a little bit faster than a full posted read, making this command useful for improving
+ * the performance of BBT management.
+ * The 512B flash cache is invalidated.
+ *
+ * Read the spare area into oobarea.  The size of the cache slice is ECCSIZE(mtd) and is
+ * always 512B, for this version of the BrcmNAND controller.
+ */
+static int brcmnand_posted_read_oob(struct mtd_info* mtd, 
+		u_char* oobarea, loff_t offset, int raw)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	/* Offset rounded down to the 512B ECC-slice boundary; used to verify alignment */
+	loff_t sliceOffset = offset & (~(ECCSIZE(mtd) - 1));
+	int i, ret = 0, valid, done = 0;
+	int retries = 5;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+	uint32_t acc1 = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	//unsigned long irqflags;
+	
+//char msg[20];
+
+	/* Throw-away data buffer for the full-page-read fallback paths below */
+	static uint8_t myBuf2[512+31]; // Place holder only.
+	static uint8_t* myBuf = NULL;
+
+	/*
+	 * Force alignment on 32B boundary
+	 */
+	if (!myBuf) {
+		myBuf = (uint8_t*)  ((((unsigned int) &myBuf2[0]) + 31) & (~31));
+	}
+	
+  #if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_3_0
+  	// Revert to cache read if acc is enabled
+	if (acc1 & BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK) {
+		// PR2516.  Not a very good WAR, but the affected chips (3548A0,7443A0) have been EOL'ed
+		return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
+	}
+
+  #else /* 3.1 or later */
+ 	// If BCH codes, force full page read to activate ECC correction on OOB bytes.
+ 	// relies on the fact that brcmnand_disable_read_ecc() turns off both bllk0 and blkn bits
+	if ((acc1 & BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK) &&
+	     chip->ecclevel != BRCMNAND_ECC_HAMMING && 
+	     chip->ecclevel != BRCMNAND_ECC_DISABLE) 
+	{
+		return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
+	}
+  #endif
+#endif
+
+if (gdebug > 3 ) PRINTK("->%s: offset=%0llx\n", __FUNCTION__, offset);
+if (gdebug > 3 ) PRINTK("->%s: sliceOffset=%0llx\n", __FUNCTION__, sliceOffset);
+if (gdebug > 3 ) PRINTK("eccsize = %d\n", ECCSIZE(mtd));
+
+if (gdebug > 3 ) {
+printk("-->%s: offset=%0llx\n", __FUNCTION__,  offset); }
+
+	/* Issue OP_SPARE_AREA_READ and poll for a valid spare area, retrying on
+	 * ECC error (-EBADMSG) or controller timeout, up to 'retries' times. */
+	while (retries > 0 && !done) {
+		if (unlikely(sliceOffset - offset)) {
+			printk(KERN_ERR "%s: offset %0llx is not cache aligned\n", 
+				__FUNCTION__, offset);
+			return -EINVAL;
+		}
+
+		if (wr_preempt_en) {
+			//local_irq_save(irqflags);
+		}
+
+		chip->ctrl_writeAddr(chip, sliceOffset, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
+
+		// Wait until spare area is filled up
+
+		valid = brcmnand_spare_is_valid(mtd, BRCMNAND_FL_READING, raw);
+		if (wr_preempt_en) {
+			//local_irq_restore(irqflags);
+		}
+		switch (valid) {
+		case 1:
+			/* Spare area valid: copy the 16B (plus any extended spare) out */
+			if (oobarea) {
+				uint32_t* p32 = (uint32_t*) oobarea;
+				
+				for (i = 0; i < 4; i++) {
+					p32[i] = be32_to_cpu(chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + (i<<2)));
+				}
+
+				read_ext_spare_area(chip, i, p32);
+				
+if (gdebug > 3) {
+printk("%s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); 
+print_oobbuf(oobarea, chip->eccOobSize);}
+
+			}
+			
+			ret = 0;
+			done = 1;
+			break;
+
+		case -1:
+			ret = -EBADMSG;
+//if (gdebug > 3 )
+	{PRINTK("%s: ret = -EBADMSG\n", __FUNCTION__);}
+			/* brcmnand_spare_is_valid also clears the error bit, so just retry it */
+
+			retries--;
+			break;
+			
+		case 0:
+			//Read has timed out 
+			ret = -ETIMEDOUT;
+{PRINTK("%s: ret = -ETIMEDOUT\n", __FUNCTION__);}
+			retries--;
+			// THT PR50928: if wr_preempt is disabled, enable it to clear error
+			wr_preempt_en = brcmnand_handle_ctrl_timeout(mtd, retries);
+			continue;  /* Retry */
+			
+		default:
+			BUG_ON(1);
+			/* NOTREACHED */
+			ret = -EINVAL;
+			done = 1;
+			break; /* Should never gets here */
+		}
+
+	}	
+	/* If write-preempt was turned on (possibly by the timeout handler above),
+	 * turn it back off in ACC_CONTROL before returning. */
+	if (wr_preempt_en) {
+		uint32_t acc;
+		
+		acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	
+		acc &= ~BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK;
+		brcmnand_ctrl_write(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]), acc);
+	}	
+
+//if (gdebug > 3 ) 
+if (0) // == (offset & (mtd->erasesize-1))) 
+{
+PRINTK("<--%s: offset=%08x\n", __FUNCTION__, (uint32_t) offset); 
+print_oobbuf(oobarea, 16);}
+	return ret;
+}
+
+
+//#ifdef CONFIG_MTD_BRCMNAND_EDU
+
+//#define EDU_DEBUG_3
+#undef EDU_DEBUG_3
+
+#if 0 //defined( EDU_DEBUG_3 ) || defined( EDU_DEBUG_5 ) || defined(BRCMNAND_WRITE_VERIFY )
+
+
+/*
+ * Returns 0 on no errors.
+ * THis should never be called, because partial writes may screw up the verify-read.
+ */
+/* NOTE: compiled out by the enclosing '#if 0' above; debug-only read-back verify. */
+static int edu_write_verify(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	static uint8_t sw_ecc[4];
+	static uint32_t read_oob[4];
+	static uint8_t write_oob[16];
+	uint8_t* oobpoi = (uint8_t*) &read_oob[0];
+	int ret = 0;
+
+	// Dump the register, done immediately after EDU_Write returns
+	// dump_nand_regs(chip, offset);
+
+	/* BCH path: read back 512B + OOB via the controller and compare with what was written */
+	if ( chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+		// Read back the data, but first clear the internal cache first.
+		debug_clear_ctrl_cache(mtd);
+
+		ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
+		if (ret) {
+			printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
+			goto out;
+		}
+		if (0 != memcmp(buffer, edu_write_buf, 512)) {
+			printk("+++++++++++++++++++++++ %s: WRITE buffer differ with READ-Back buffer\n",
+			__FUNCTION__);
+			ret = (-1);
+			goto out;
+		}
+		if (oobarea) { /* For BCH, the ECC is at the end */
+			// Number of bytes to compare (with ECC bytes taken out)
+			int numFree = min(16, chip->eccOobSize - chip->eccbytes);
+			
+			if (memcmp(oobarea, oobpoi, numFree)) {
+				printk("+++++++++++++++++++++++ %s: BCH-%-d OOB comp failed, numFree=%d\n", 
+					__FUNCTION__, chip->ecclevel, numFree);
+				printk("In OOB:\n"); print_oobbuf(oobarea, 16);
+				printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
+				ret = (-2);
+				goto out;
+			}
+		}
+		return 0;
+	}
+	
+	// Calculate the ECC
+	// brcmnand_Hamming_ecc(buffer, sw_ecc);
+
+	// Read back the data, but first clear the internal cache first.
+	debug_clear_ctrl_cache(mtd);
+
+in_verify = -1;		
+	ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
+in_verify = 0;
+
+	if (ret) {
+		printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
+		goto out;
+	}
+
+#if 0
+	if (sw_ecc[0] != oobpoi[6] || sw_ecc[1] != oobpoi[7] || sw_ecc[2] != oobpoi[8]) {
+printk("+++++++++++++++++++++++ %s: SWECC=%02x%02x%02x ReadOOB=%02x%02x%02x, buffer=%p, offset=%0llx\n",
+			__FUNCTION__, 
+			sw_ecc[0], sw_ecc[1], sw_ecc[2], oobpoi[6], oobpoi[7], oobpoi[8], buffer, offset);
+		
+		ret = (-1);
+		goto out;
+	}
+#endif
+
+	// Verify the OOB if not NULL
+	/* Hamming: bytes 6..8 hold the ECC, so compare OOB around them (0..5 and 9..15) */
+	if (oobarea) {
+		//memcpy(write_oob, oobarea, 16);
+		//write_oob[6] = sw_ecc[0];
+		//write_oob[7] = sw_ecc[1];
+		//write_oob[8] = sw_ecc[2];
+		if (memcmp(oobarea, oobpoi, 6) || memcmp(&oobarea[9], &oobpoi[9],7)) {
+			printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
+			printk("In OOB:\n"); print_oobbuf(oobarea, 16);
+			printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
+			ret = (-2);
+			goto out;
+		}
+	}
+
+out:
+/* On any mismatch, dump the differing offset, saved register snapshots and both buffers */
+if (ret) {
+	int i, j, k;
+	uint8_t* writeBuf = (uint8_t*) buffer;
+//for (i=0; i<2; i++) 
+{
+// Let user land completes its run to avoid garbled printout
+//schedule();
+for (j=0; j<512; j++) {
+	if (writeBuf[j] != edu_write_buf[j]) {
+		printk("Buffers differ at offset %04x\n", j);
+		break;
+	}
+}
+printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
+printk("\n");
+printk("\n");
+printk("\n");
+printk("\n");
+for (k=0; k<numDumps; k++) {
+printk("\n");
+printk("\n");
+printk("$$$$$$$$$$$$$$$$$ Register dump snapshot #%d:\n", k+1);
+print_dump_nand_regs(k);
+printk("\n");
+}
+printk("\n");
+printk("\n");
+printk("EDU_write 99, ret=%d, offset=%0llx, buffer=%p\n", ret, offset, buffer);
+printk("Write buffer:\n"); print_databuf(buffer, 512);
+if (oobarea) { printk("Write OOB: "); print_oobbuf(oobarea, 512); }
+printk("Read back buffer:\n"); print_databuf(edu_write_buf, 512);
+if (oobarea) { printk("Read OOB: "); print_oobbuf(write_oob, 512); }
+
+//printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
+//print_dump_nand_regs();
+}
+}
+	return ret;
+}
+
+
+#else
+#define edu_write_verify(...) (0)
+#endif
+
+
+/**
+ * brcmnand_ctrl_posted_write_cache - [BrcmNAND Interface] Write a buffer to the flash cache
+ * Assuming brcmnand_get_device() has been called to obtain exclusive lock
+ *
+ * @param mtd		MTD data structure
+ * @param buffer		the databuffer to put/get data
+ * @param oobarea	Spare area, pass NULL if not interested
+ * @param offset	offset to write to, and must be 512B aligned
+ *
+ * Write to the cache area TBD 4/26/06
+ */
+static int brcmnand_ctrl_posted_write_cache(struct mtd_info *mtd,
+		const void* buffer, const u_char* oobarea, loff_t offset)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	/* Offset rounded down to the 512B ECC-slice boundary; used to verify alignment */
+	loff_t sliceOffset = offset & (~ (ECCSIZE(mtd) - 1));
+	uint32_t* p32;
+	int i, needBBT=0;
+	int ret;
+
+	//char msg[20];
+
+
+if (gdebug > 3 ) {
+printk("--> %s: offset=%0llx\n", __FUNCTION__, offset);
+print_databuf(buffer, 32);}
+
+	if (unlikely(sliceOffset - offset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned\n", 
+			__FUNCTION__, offset);
+
+		ret =  -EINVAL;
+		goto out;
+	}
+	chip->ctrl_writeAddr(chip, sliceOffset, 0);
+
+
+	/* Copy the 512B data slice into the controller's flash cache (PIO) */
+	if (buffer) {
+if (gdebug > 3 ) {print_databuf(buffer, 32);}
+		brcmnand_to_flash_memcpy32(chip, offset, buffer, ECCSIZE(mtd));
+	}
+#if defined(CONFIG_BCM_KF_NAND)
+	/* Must write data when NAND_COMPLEX_OOB_WRITE */
+	else if (chip->options & NAND_COMPLEX_OOB_WRITE) {
+		brcmnand_to_flash_memcpy32(chip, offset, ffchars, ECCSIZE(mtd));
+	}
+#endif
+
+
+//printk("30\n");
+	if (oobarea) {
+		p32 = (uint32_t*) oobarea;
+if (gdebug > 3) {printk("%s: oob=\n", __FUNCTION__); print_oobbuf(oobarea, 16);}
+	}
+	else {
+		// Fill with 0xFF if don't want to change OOB
+		p32 = (uint32_t*) &ffchars[0];
+	}
+
+//printk("40\n");
+	/* Load the 16B spare area into the controller's spare-area registers */
+	for (i = 0; i < 4; i++) {
+		chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4, cpu_to_be32(p32[i]));
+	}
+
+	PLATFORM_IOFLUSH_WAR();
+	chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_PAGE);
+//printk("50\n");
+
+	// Wait until flash is ready
+	if (brcmnand_ctrl_write_is_complete(mtd, &needBBT)) {
+		if (!needBBT) {
+			ret = 0;
+			goto out;
+		}
+	
+		else { // Need BBT
+			printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+//printk("80 block mark bad\n");
+			// SWLINUX-1495: Let UBI do it on returning -EIO
+			ret = -EIO;
+			chip->block_markbad(mtd, offset);
+			goto out;
+		}
+	}
+	//Write has timed out or read found bad block. TBD: Find out which is which
+	printk(KERN_INFO "%s: Timeout\n", __FUNCTION__);
+	ret = -ETIMEDOUT;
+
+out:
+//printk("99\n");
+
+	return ret;
+}
+
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+   #ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+
+   /*
+    * Performs WAR for queue-write. Currently, it is always called with needBBT=1
+    * Runs in process context.
+    * Return 0 on success, error codes on errors.
+    */
+int
+brcmnand_edu_write_war(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, 
+        int needBBT)
+{
+	/* Fix: this declaration had been commented out as "__maybe_unused", but
+	 * 'chip' IS used below for chip->block_markbad() in the needBBT path,
+	 * leaving the identifier undeclared.  Restore it. */
+	struct brcmnand_chip* chip = mtd->priv;
+	int ret = 0;
+
+
+	/* Caller guarantees the controller signalled ready; anything else is a driver bug */
+	if (!(intr_status & HIF_INTR2_CTRL_READY)) {
+		printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
+		BUG();
+	}
+
+	if (!needBBT) 
+	{
+		ret = 0;
+	}
+	else
+	{ // Need BBT
+		printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+
+		// SWLINUX-1495: Let UBI do it on returning -EIO
+		ret = -EIO;
+		chip->block_markbad(mtd, offset);
+	}
+
+#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
+//gdebug = 0;
+ 	if (0 == ret) {
+		if (edu_write_verify(mtd, buffer, oobarea, offset)) {
+			BUG();
+		}
+ 	}
+
+#endif
+	return ret;
+}
+
+// When buffer is not aligned as per EDU requirement, use controller-write
+/* Queue-mode EDU build: posted_write_cache uses the controller (PIO) implementation */
+static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
+		const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache; 
+
+  #else //#ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+
+/*
+ * Write completion after EDU_Read is called.
+ * Non-Queue mode
+ */
+/*
+ * Finish a non-queue-mode EDU write: wait for completion (unless the ISR path
+ * already did), unmap the DMA buffer, and mark the block bad on flash status error.
+ * Returns 0 on success, -EIO on flash status error, -ETIMEDOUT on timeout.
+ */
+static int
+brcmnand_edu_write_completion(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, uint32_t physAddr)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	int comp;
+	int needBBT;
+	int ret;
+
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+	/* NOTE(review): in this branch 'comp' and 'needBBT' are never assigned before
+	 * being read below — looks like a latent uninitialized-use; confirm against
+	 * the ISR-mode build. */
+	if (!(intr_status & HIF_INTR2_CTRL_READY)) {
+		printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
+		BUG();
+	}
+#else
+	// Wait until flash is ready.  
+	// Be careful here.  Since this can be called in interrupt context,
+	// we cannot call sleep or schedule()
+	comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
+
+	// Already done in interrupt handler
+	(void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
+#endif
+
+	if (comp) 
+	{
+		if (!needBBT) 
+		{
+			ret = 0;
+			goto out;
+		}
+		else
+		{ // Need BBT
+			printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+			// SWLINUX-1495: Let UBI do it on returning -EIO
+			ret = -EIO;
+			chip->block_markbad(mtd, offset);
+			
+			goto out;
+		}
+	}
+
+	//Write has timed out or read found bad block. TBD: Find out which is which
+	printk(KERN_INFO "%s: Timeout at offset %0llx\n", __FUNCTION__, offset);
+	// Marking bad block
+	if (needBBT) {
+		printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+		// SWLINUX-1495: Let UBI do it on returning -EIO
+		ret = -EIO;
+		chip->block_markbad(mtd, offset);
+			
+		goto out;
+	}		
+	ret = -ETIMEDOUT;
+
+out:
+
+#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
+//gdebug = 0;
+ 	if (0 == ret) {
+		if (edu_write_verify(mtd, buffer, oobarea, offset)) {
+			BUG();
+		}
+ 	}
+
+#endif
+	return ret;
+}
+
+
+/**
+ * brcmnand_EDU_posted_write_cache - [BrcmNAND Interface] Write a buffer to the flash cache via EDU
+ * Assuming brcmnand_get_device() has been called to obtain exclusive lock
+ *
+ * @param mtd        MTD data structure
+ * @param buffer    the databuffer to put/get data
+ * @param oobarea    Spare area, pass NULL if not interested
+ * @param offset    offset to write to, and must be 512B aligned
+ *
+ * Write to the cache area TBD 4/26/06
+ */
+static int brcmnand_EDU_posted_write_cache(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset)
+{
+	uint32_t* p32;
+	int i; 
+	int ret;
+	int comp = 0;
+
+	struct brcmnand_chip* chip = mtd->priv;    
+	int needBBT=0;
+	/* Offset rounded down to the 512B ECC-slice boundary; used to verify alignment */
+	loff_t sliceOffset = offset & (~ (ECCSIZE(mtd) - 1));
+	uint32_t EDU_ldw;
+	int retries = 5;	// retry EDU_write on ERESTARTSYS or EBI-timeout status
+	uint32_t physAddr;
+
+#ifdef WR_BADBLOCK_SIMULATION
+	unsigned long tmp = (unsigned long) offset;
+	DIunion wrFailLocationOffset;
+#endif
+
+//gdebug = 4;
+
+// printk("%s\n", __FUNCTION__);
+// printk("EDU10\n");
+	if (unlikely(sliceOffset - offset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned\n", 
+			__FUNCTION__, offset);
+
+		ret =  -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(!EDU_buffer_OK(buffer, EDU_WRITE))) {
+		// EDU requires the buffer to be DW-aligned
+PRINTK("%s: Buffer %p not suitable for EDU at %0llx, trying ctrl read op\n", __FUNCTION__, buffer, offset);
+		ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
+		goto out;
+	}
+
+	ret = ERESTARTSYS;	// force at least one pass through the retry loop
+	do {
+		EDU_ldw = chip->ctrl_writeAddr(chip, sliceOffset, 0);
+
+// printk("EDU20\n");
+
+		if (oobarea) {
+			p32 = (uint32_t*) oobarea;
+if (gdebug) {printk("%s: oob=\n", __FUNCTION__); print_oobbuf(oobarea, 16);}
+		}
+		else {
+			// Fill with 0xFF if don't want to change OOB
+			p32 = (uint32_t*) &ffchars[0];
+		}
+
+// printk("EDU40\n");
+		/* Load the 16B spare area into the controller's spare-area registers */
+		for (i = 0; i < 4; i++) {
+			chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4, cpu_to_be32(p32[i]));
+		}
+
+		PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
+
+
+		/* Clear a pending EBI timeout left over from the previous attempt */
+		if (ret & HIF_INTR2_EBI_TIMEOUT) {
+			EDU_volatileWrite(BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
+		}
+		ret = EDU_write(buffer, EDU_ldw, &physAddr);
+
+		if (ret) {
+			// Nothing we can do, because, unlike read op, where we can just call the traditional read,
+			// here we may need to erase the flash first before we can write again.
+//printk("EDU_write returns %d, trying ctrl write \n", ret);
+//			ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
+			goto out;
+		}
+	
+//printk("EDU50\n");
+
+		// Wait until flash is ready
+		comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
+
+		(void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
+	}while (retries-- > 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT)));
+
+	/* Retries exhausted: fall back to the controller (PIO) write path */
+	if (retries <= 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT))) { 
+printk("%s: brcmnand_EDU_write_is_complete timeout, intr_status=%08x\n", __FUNCTION__, ret);
+		ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
+		goto out;
+	}
+
+
+
+	if (comp) 
+	{
+		if (!needBBT) 
+		{
+			ret = 0;
+			goto out;
+		}
+		else
+		{ // Need BBT
+			printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+
+			// SWLINUX-1495: Let UBI do it on returning -EIO
+			ret = -EIO;
+			chip->block_markbad(mtd, offset);
+			goto out;
+		}
+	}
+
+	//Write has timed out or read found bad block. TBD: Find out which is which
+	printk(KERN_INFO "%s: Timeout at offset %0llx\n", __FUNCTION__, offset);
+	// Marking bad block
+	if (needBBT) {
+		printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+
+		// SWLINUX-1495: Let UBI do it on returning -EIO
+		ret = -EIO;
+		chip->block_markbad(mtd, offset);
+		goto out;
+	}		
+	ret = -ETIMEDOUT;
+
+out:
+
+
+#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
+//gdebug = 0;
+ 	if (0 == ret) {
+		if (edu_write_verify(mtd, buffer, oobarea, offset)) {
+			BUG();
+		}
+ 	}
+
+#endif
+
+    return ret;
+}
+
+/* Non-queue EDU build: posted_write_cache dispatches to the EDU (DMA) implementation */
+static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
+		const void*, const u_char*, loff_t) = brcmnand_EDU_posted_write_cache; 
+  #endif
+
+#else /* No EDU */
+/* No EDU configured: posted_write_cache always uses the controller (PIO) implementation */
+static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
+		const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
+
+#endif  //  EDU PRESENT
+
+
+
+/**
+ * brcmnand_posted_write_oob - [BrcmNAND Interface] Write the spare area
+ * @param mtd		MTD data structure
+ * @param oobarea	Spare area, pass NULL if not interested.  Must be able to 
+ *					hold mtd->oobsize (16) bytes.
+ * @param offset	offset to write to, and must be 512B aligned
+ *
+ */
+static int brcmnand_posted_write_oob(struct mtd_info *mtd,
+		const u_char* oobarea, loff_t offset, int isFromMarkBadBlock)
+{
+	struct brcmnand_chip* chip = mtd->priv;
+	loff_t sliceOffset = offset & (~ (ECCSIZE(mtd) - 1));
+	uint32_t* p32;
+	int i, needBBT=0;
+
+#if defined(CONFIG_BCM_KF_NAND)
+	/* Set when ACC_CONTROL says partial-page writes are disabled (forces full-page path) */
+	uint32_t partial_page_wr_dis;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+	uint32_t acc;
+
+	acc = chip->ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));
+	partial_page_wr_dis = !(acc & BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK);
+#else
+	partial_page_wr_dis = 0;
+#endif
+#endif
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx\n", __FUNCTION__,  offset);
+print_oobbuf(oobarea, 16);
+}
+	
+
+	/* NOTE(review): unlike the sibling read/write helpers this only logs a
+	 * misaligned offset and then continues — confirm the missing 'return -EINVAL'
+	 * is intentional. */
+	if (unlikely(sliceOffset - offset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned\n", 
+			__FUNCTION__, offset);
+	}
+
+	chip->ctrl_writeAddr(chip, sliceOffset, 0);
+
+	// assert oobarea here
+	BUG_ON(!oobarea);	
+	p32 = (uint32_t*) oobarea;
+
+#if defined(CONFIG_BCM_KF_NAND)
+	/* Must write data when NAND_COMPLEX_OOB_WRITE option is set.  Write 0xFFs
+	 * to data and ECC locations.
+	 */
+	if ((chip->options & NAND_COMPLEX_OOB_WRITE) || partial_page_wr_dis) {
+		u_char* p8 = (u_char*) p32;
+		struct nand_ecclayout *oobinfo = chip->ecclayout;
+
+		brcmnand_to_flash_memcpy32(chip, offset, ffchars, ECCSIZE(mtd));
+		for (i = 0; i < oobinfo->eccbytes; i++) {
+			p8[oobinfo->eccpos[i]] = 0xff;
+		}
+	}
+#endif
+
+	/* Load the 16B spare area into the controller's spare-area registers */
+	for (i = 0; i < 4; i++) {
+		chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4,  cpu_to_be32 (p32[i]));
+	}
+
+	PLATFORM_IOFLUSH_WAR();
+#if defined(CONFIG_BCM_KF_NAND)
+	if ((chip->options & NAND_COMPLEX_OOB_WRITE) || partial_page_wr_dis) {
+
+		/* Disable ECC so 0xFFs are stored in the ECC offsets. Doing
+		 * this allows the next page write to store the ECC correctly.
+		 * If the ECC is not disabled here, then a ECC value will be
+		 * stored at the ECC offsets.  This will cause the ECC value
+		 * on the next write to be stored incorrectly.
+		 */
+		uint32_t acc = chip->ctrl_read(BCHP_NAND_ACC_CONTROL);
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+		chip->ctrl_write(BCHP_NAND_ACC_CONTROL,
+			(acc & ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK |
+			BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK)));
+#else
+		chip->ctrl_write(BCHP_NAND_ACC_CONTROL,
+			(acc & ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK)));
+#endif
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_PAGE);
+
+		// Wait until flash is ready
+		if (brcmnand_ctrl_write_is_complete(mtd, &needBBT)) {
+			/* Restore the original ACC_CONTROL (ECC level) before returning */
+			chip->ctrl_write(BCHP_NAND_ACC_CONTROL, acc);
+			return 0;
+		}
+
+		chip->ctrl_write(BCHP_NAND_ACC_CONTROL, acc);
+	}
+	else 
+	{
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_SPARE_AREA);
+
+		// Wait until flash is ready
+		if (brcmnand_ctrl_write_is_complete(mtd, &needBBT)) {
+			return 0;
+		}
+	}
+#else
+#if 0
+	if (chip->options & NAND_COMPLEX_OOB_WRITE) {
+//printk("****** Workaround, using OP_PROGRAM_PAGE instead of OP_PROGRAM_SPARE_AREA\n");
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_PAGE);
+	}
+	else 
+#endif
+	{
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_SPARE_AREA);
+	}
+
+	// Wait until flash is ready
+	if (brcmnand_ctrl_write_is_complete(mtd, &needBBT)) {
+		return 0;
+	}
+#endif /* CONFIG_BCM_KF_NAND */
+
+    
+	if (needBBT){
+
+		int ret;
+		
+		printk(KERN_WARNING "%s: Flash Status Error @%0llx\n", __FUNCTION__,  offset);
+
+		// SWLINUX-1495: Let UBI do it on returning -EIO
+		ret = -EIO;
+        
+		/* Avoid recursion when called from block_markbad itself */
+		if (!isFromMarkBadBlock)
+            chip->block_markbad(mtd, offset);
+        
+		return (ret);
+	}
+
+	return -ETIMEDOUT;
+	
+}
+
+
+
+/**
+ * brcmnand_get_device - [GENERIC] Get chip for selected access
+ * @param mtd		MTD device structure
+ * @param new_state	the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int brcmnand_get_device(struct mtd_info *mtd, int new_state)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+
+	if (chip) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		/*
+		 * Grab the lock and see if the device is available
+		 */
+		while (1) {
+			spin_lock(&chip->ctrl->chip_lock);
+
+			/* Device free: claim it by recording the requested state */
+			if (chip->ctrl->state == BRCMNAND_FL_READY) {
+				chip->ctrl->state = new_state;
+				spin_unlock(&chip->ctrl->chip_lock);
+				break;
+			}
+			/* Suspend requests don't wait; succeed only if already suspended */
+			if (new_state == BRCMNAND_FL_PM_SUSPENDED) {
+				spin_unlock(&chip->ctrl->chip_lock);
+				return (chip->ctrl->state == BRCMNAND_FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+			}
+			/* Busy: sleep on the controller waitqueue until woken by release */
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->ctrl->wq, &wait);
+			spin_unlock(&chip->ctrl->chip_lock);
+			/* Don't schedule in interrupt context or when write-preempt is active */
+			if (!wr_preempt_en && !in_interrupt())
+				schedule();
+			remove_wait_queue(&chip->ctrl->wq, &wait);
+		}
+
+		return 0;
+	}
+	else
+		return -EINVAL;
+}
+
+#if 0
+/* No longer used */
+/* NOTE: compiled out by the enclosing '#if 0'; kept for reference only.
+ * Looks up the global chip handle and locks it in XIP state; returns NULL on failure. */
+static struct brcmnand_chip* 
+brcmnand_get_device_exclusive(void)
+{
+	struct brcmnand_chip * chip = (struct brcmnand_chip*) get_brcmnand_handle();
+	struct mtd_info *mtd; 
+	int ret;
+
+	mtd = (struct mtd_info*) chip->priv;
+
+	if (mtd) {
+		ret = brcmnand_get_device(mtd, BRCMNAND_FL_XIP);
+	}
+	else 
+		ret = -1;
+	if (0 == ret)
+		return chip;
+	else
+		return ((struct brcmnand_chip *) 0);
+}
+
+
+#endif
+
+/**
+ * brcmnand_release_device - [GENERIC] release chip
+ * @param mtd		MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void brcmnand_release_device(struct mtd_info *mtd)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+
+	/* Release the chip */
+	spin_lock(&chip->ctrl->chip_lock);
+	/* Mark the controller free and wake any waiters blocked in brcmnand_get_device() */
+	chip->ctrl->state = BRCMNAND_FL_READY;
+	wake_up(&chip->ctrl->wq);
+	spin_unlock(&chip->ctrl->chip_lock);
+}
+
+
+
+/**
+ * brcmnand_read_page - [REPLACEABLE] hardware ecc based page read function
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure.  The OOB buf is stored here on return
+ * @buf:	buffer to store read data
+ *
+ * Not for syndrome calculating ecc controllers which need a special oob layout
+ */
+static int 
+brcmnand_read_page(struct mtd_info *mtd,
+				uint8_t *outp_buf, uint8_t* outp_oob, uint64_t page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int dataRead = 0;	// running byte offset into outp_buf
+	int oobRead = 0;	// running byte offset into outp_oob
+	int ret = 0;
+	uint64_t offset = ((uint64_t) page) << chip->page_shift;
+	int corrected = 0; // Only update stats once per page
+	int uncorrected = 0; // Only update stats once per page
+
+if (gdebug > 3 ) {
+printk("-->%s, page=%0llx\n", __FUNCTION__, page);}
+
+	chip->pagebuf = page;
+
+	/* Read the page one 512B ECC step at a time, accumulating data and OOB */
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+		ret = brcmnand_posted_read_cache(mtd, &outp_buf[dataRead], 
+					outp_oob ? &outp_oob[oobRead] : NULL, 
+					offset + dataRead);
+if (gdebug>3 && ret) printk("%s 1: calling brcmnand_posted_read_cache returns %d\n",
+__FUNCTION__, ret);
+		/* ECC outcomes update MTD stats but are not fatal to the read */
+		if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR) {
+			if ( !corrected) {
+				(mtd->ecc_stats.corrected)++;
+				corrected = 1;
+			}
+			ret = 0;
+		} 
+		else if (ret == BRCMNAND_UNCORRECTABLE_ECC_ERROR) {
+			if ( !uncorrected) {
+				(mtd->ecc_stats.failed)++;
+				uncorrected = 1;
+			}
+			ret = 0;
+		} 
+		else if (ret < 0) {
+			printk(KERN_ERR "%s: 3: brcmnand_posted_read_cache failed at offset=%0llx, ret=%d\n", 
+				__FUNCTION__, offset + dataRead, ret);
+			return ret;
+		}
+		dataRead += chip->eccsize;
+		oobRead += chip->eccOobSize;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+/* Whole-block (data + OOB) scratch buffer; allocated on first use in
+ * brcmnand_refresh_blk() and re-used across calls (see "re-use for EDU" notes). */
+static uint8_t * gblk_buf = NULL;
+#endif
+
+
+/**
+ * brcmnand_read_page_oob - [REPLACEABLE] hardware ecc based page OOB read function
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure.  The OOB buf is stored in the oob_poi ptr on return
+ *
+ * Not for syndrome calculating ecc controllers which need a special oob layout
+ */
+static int 
+brcmnand_read_page_oob(struct mtd_info *mtd, 
+				uint8_t* outp_oob, uint64_t  page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int dataRead = 0;	// running flash-offset increment (eccsize per step)
+	int oobRead = 0;	// running byte offset into outp_oob
+	int corrected = 0; // Only update stats once per page
+	int uncorrected = 0; // Only update stats once per page
+	int ret = 0;
+	uint64_t offset = page << chip->page_shift;
+
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx\n", __FUNCTION__, offset);}
+
+	chip->pagebuf = page;
+
+	/* Read only the OOB of each 512B ECC step across the page (raw=1) */
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+//gdebug=4;
+		ret = brcmnand_posted_read_oob(mtd, &outp_oob[oobRead], 
+					offset + dataRead, 1);
+//gdebug=0;
+if (gdebug>3 && ret) printk("%s 2: calling brcmnand_posted_read_oob returns %d\n",
+__FUNCTION__, ret);
+		/* ECC outcomes update MTD stats but are not fatal to the read */
+		if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR) {
+			if ( !corrected) {
+				(mtd->ecc_stats.corrected)++;
+				corrected = 1;
+			}
+			ret = 0;
+		}
+		else if (ret == BRCMNAND_UNCORRECTABLE_ECC_ERROR) {
+			if ( !uncorrected) {
+				(mtd->ecc_stats.failed)++;
+				uncorrected = 1;
+			}
+			ret = 0;
+		}
+		else if (ret < 0) {
+			printk(KERN_ERR "%s: 3: posted read oob failed at offset=%0llx, ret=%d\n", 
+				__FUNCTION__, offset + dataRead, ret);
+			return ret;
+		}
+		dataRead += chip->eccsize;
+		oobRead += chip->eccOobSize;
+	}
+
+if (gdebug>3 && ret) printk("%s returns %d\n",
+__FUNCTION__, ret);
+
+if (gdebug > 3 ) {
+printk("<--%s offset=%0llx, ret=%d\n", __FUNCTION__, offset, ret);
+print_oobbuf(outp_oob, mtd->oobsize); }
+	return ret;
+}
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+/*
+ * brcmnand_refresh_blk - rewrite an erase block in place to scrub a
+ * correctable ECC error.
+ * @mtd:  MTD device structure
+ * @from: any offset inside the block that reported the correctable error
+ *
+ * Reads the entire block (data + OOB) into a temporary buffer, erases the
+ * block, then writes the contents back, skipping pages whose ECC bytes and
+ * OOB words are still all-0xFF (i.e. never programmed).  Returns 0 on
+ * success, -1 on allocation/read failure, -ENOMEM if the erase_info
+ * allocation fails, or the erase/write status code.
+ *
+ * NOTE(review): when CONFIG_MTD_BRCMNAND_EDU is set, the block buffer
+ * (gblk_buf) is allocated once and intentionally kept for reuse, so the
+ * error paths below do not free it (see the "re-use for EDU" comments).
+ */
+static int brcmnand_refresh_blk(struct mtd_info *mtd, loff_t from)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	int i, j, k, numpages, ret, count = 0, nonecccount = 0;
+	uint8_t *blk_buf;	/* Store one block of data (including OOB) */
+	unsigned int  pg_idx, oob_idx;
+	uint64_t realpage;
+	struct erase_info *instr;
+	//int gdebug = 1; 
+	struct nand_ecclayout *oobinfo;
+	uint8_t *oobptr;
+	uint32_t *oobptr32;
+	loff_t blkbegin;
+	unsigned int block_size;
+
+
+	/* Clear the controller's correctable-error address registers */
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+#endif
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+
+	DEBUG(MTD_DEBUG_LEVEL3, "Inside %s: from=%0llx\n", __FUNCTION__, from);
+	printk(KERN_INFO "%s: Performing block refresh for correctable ECC error at %0llx\n",
+		__FUNCTION__, from);
+	pg_idx = 0;
+	oob_idx = mtd->writesize;
+	numpages = mtd->erasesize/mtd->writesize;
+	block_size = (1 << chip->erase_shift);
+	/* Round down to the start of the enclosing erase block */
+	blkbegin = (from & (~(mtd->erasesize-1)));
+	realpage = blkbegin >> chip->page_shift;
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+	/* Lazily allocate a single shared buffer; kept for later refreshes */
+	if (!gblk_buf) {
+		gblk_buf = BRCMNAND_malloc(numpages*(mtd->writesize + mtd->oobsize));
+	}
+	blk_buf = gblk_buf;
+
+#else
+	blk_buf = (uint8_t *) BRCMNAND_malloc(numpages*(mtd->writesize + mtd->oobsize));
+#endif
+
+	if (unlikely(blk_buf == NULL)) {
+		printk(KERN_ERR "%s: buffer allocation failed\n", __FUNCTION__);
+		return -1;
+	}
+
+	memset(blk_buf, 0xff, numpages*(mtd->writesize + mtd->oobsize));
+
+	if (unlikely(gdebug > 0)) {
+		printk("---> %s: from = %0llx, numpages = %d, realpage = %0llx\n",\
+				__FUNCTION__,  from, numpages, realpage);
+		printk("     Locking flash for read ... \n");
+	}
+
+	/* Read an entire block */
+	brcmnand_get_device(mtd, BRCMNAND_FL_READING);
+	for (i = 0; i < numpages; i++) {
+		ret = chip->read_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
+		if (ret < 0) {
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+			BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+			brcmnand_release_device(mtd);
+			return -1;
+		}
+		//printk("DEBUG -> Reading %d realpage = %x %x ret = %d oob = %x\n", i, realpage, *(blk_buf+pg_idx), ret, *(blk_buf + oob_idx));
+		//print_oobbuf(blk_buf+oob_idx, mtd->oobsize);
+		/* Each page occupies writesize+oobsize bytes in blk_buf */
+		pg_idx += mtd->writesize + mtd->oobsize;
+		oob_idx += mtd->oobsize + mtd->writesize;
+		realpage++;
+	}
+	if (unlikely(gdebug > 0)) {
+		printk("---> %s:  Read -> erase\n", __FUNCTION__);
+	}
+	chip->ctrl->state = BRCMNAND_FL_ERASING;
+
+	/* Erase the block */
+	instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+	if (instr == NULL) {
+		printk(KERN_WARNING "kmalloc for erase_info failed\n");
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+		BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+		brcmnand_release_device(mtd);
+		return -ENOMEM;
+	}
+	memset(instr, 0, sizeof(struct erase_info));
+	instr->mtd = mtd;
+	instr->addr = blkbegin;
+	instr->len = mtd->erasesize;
+	if (unlikely(gdebug > 0)) {
+		printk("DEBUG -> erasing %0llx, %0llx %d\n",instr->addr, instr->len, chip->ctrl->state);
+	}
+	/* Device lock already held: use the no-lock erase variant */
+	ret = brcmnand_erase_nolock(mtd, instr, 0);
+	if (ret) {
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+		BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+		kfree(instr);
+		brcmnand_release_device(mtd);
+		printk(KERN_WARNING " %s Erase failed %d\n", __FUNCTION__, ret);
+		return ret;
+	}
+	kfree(instr);
+
+	/* Write the entire block */
+	pg_idx = 0;
+	oob_idx = mtd->writesize;
+	realpage = blkbegin >> chip->page_shift;
+	if (unlikely(gdebug > 0)) {
+		printk("---> %s: Erase -> write ... %d\n", __FUNCTION__, chip->ctrl->state);
+	}
+	oobinfo = chip->ecclayout;
+	chip->ctrl->state = BRCMNAND_FL_WRITING;
+	for (i = 0; i < numpages; i++) {
+		/* Avoid writing empty pages */
+		count = 0;
+		nonecccount = 0;
+		oobptr = (uint8_t *) (blk_buf + oob_idx);
+		oobptr32 = (uint32_t *) (blk_buf + oob_idx);
+		/* count = number of erased (0xFF) ECC bytes in this page's OOB */
+		for (j = 0; j < oobinfo->eccbytes; j++) {
+			if (oobptr[oobinfo->eccpos[j]] == 0xff) { count++; }
+		}
+		/* nonecccount = number of fully-erased 32-bit OOB words */
+		for (k = 0; k < mtd->oobsize/4; k++) {
+			if (oobptr32[k] == 0xffffffff) { nonecccount++; }
+		}
+		/* Skip this page if ECC is 0xff */
+		if (count == j && nonecccount == k) {
+			pg_idx += mtd->writesize + mtd->oobsize;
+			oob_idx += mtd->oobsize + mtd->writesize;
+			realpage++;
+			continue;
+		}
+		/* Skip this page, but write the OOB */
+		if (count == j && nonecccount != k) {
+			ret = chip->write_page_oob(mtd, blk_buf + oob_idx, realpage, 0);
+			if (ret) {
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+				BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+				brcmnand_release_device(mtd);
+				return ret;
+			}
+			pg_idx += mtd->writesize + mtd->oobsize;
+			oob_idx += mtd->oobsize + mtd->writesize;
+			realpage++;
+			continue;
+		}
+		/* Blank out the stored ECC bytes; HW regenerates them on write */
+		for (j = 0; j < oobinfo->eccbytes; j++) {
+			oobptr[oobinfo->eccpos[j]] = 0xff;
+		}
+		ret = chip->write_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
+		if (ret) {
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+			BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+			brcmnand_release_device(mtd);
+			return ret; 
+		}
+		pg_idx += mtd->writesize + mtd->oobsize;
+		oob_idx += mtd->oobsize + mtd->writesize;
+		realpage++;
+	}
+	brcmnand_release_device(mtd);
+#ifndef CONFIG_MTD_BRCMNAND_EDU
+	BRCMNAND_free(blk_buf);
+// #else re-use for EDU
+#endif
+	printk(KERN_INFO "%s: block refresh success\n", __FUNCTION__);
+
+	return 0;
+}
+#endif
+
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+/*
+ * EDU ISR Implementation
+ */
+
+ 
+/*
+ * Submit the read op, then return immediately, without waiting for completion.
+ * Assuming queue lock held (with interrupt disable).
+ *
+ * Maps the request buffer for DMA, programs the EDU status/length registers,
+ * enables the HIF_INTR2 interrupt when this is the first active job, and
+ * issues the EDU_READ command.  Completion is signalled via the ISR.
+ *
+ * NOTE(review): unlike EDU_submit_write(), the dma_map_single() return value
+ * is not checked here - confirm a mapping failure is impossible on this path.
+ */
+static void 
+EDU_submit_read(eduIsrNode_t* req)
+{
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
+	uint32_t edu_status;
+	
+	// THT: TBD: Need to adjust for cache line size here, especially on 7420.
+	req->physAddr = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
+
+ 	spin_lock(&req->lock);
+
+ 	req->edu_ldw =  chip->ctrl_writeAddr(chip, req->offset, 0);
+	PLATFORM_IOFLUSH_WAR(); 
+
+	//req->cmd = EDU_READ;
+	req->opComplete = ISR_OP_SUBMITTED;
+	req->status = 0;
+
+	// We must also wait for Ctlr_Ready, otherwise the OOB is not correct, since we read the OOB bytes off the controller
+
+	req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
+	req->expect = HIF_INTR2_EDU_DONE;
+	// On error we also want Ctrlr-Ready because for COR ERR, the Hamming WAR depends on the OOB bytes.
+	req->error = HIF_INTR2_EDU_ERR;
+	req->intr = HIF_INTR2_EDU_DONE_MASK;
+	req->expired = jiffies + 3*HZ;
+
+	edu_status = EDU_volatileRead(EDU_STATUS);
+	// Enable HIF_INTR2 only when we submit the first job in double buffering scheme
+	if (0 == (edu_status & BCHP_EDU_STATUS_Active_MASK)) {
+		ISR_enable_irq(req);
+	}
+
+        //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
+       EDU_reset_done();
+
+       EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000);
+        
+	EDU_volatileWrite(EDU_LENGTH, EDU_LENGTH_VALUE);
+
+	EDU_waitForNoPendingAndActiveBit();
+
+	EDU_issue_command(req->physAddr , req->edu_ldw, EDU_READ);
+
+	spin_unlock(&req->lock);
+	return;
+
+} 
+
+/*
+ * EDU_submit_write - submit one EDU DMA write job and return immediately.
+ * @req: queued ISR request node (buffer, offset, OOB pointer, lock)
+ *
+ * Maps the buffer for DMA, preloads the controller's spare-area registers
+ * with either the caller's OOB bytes or 0xFF filler, arms the HIF_INTR2
+ * interrupt, and issues the EDU_WRITE command.  Returns 0 on success or
+ * -1 if the DMA mapping failed.  Completion is signalled via the ISR.
+ */
+int EDU_submit_write(eduIsrNode_t* req)
+{
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
+	uint32_t* p32;
+	int i;
+
+	spin_lock(&req->lock);
+	// EDU is not a PCI device
+	// THT: TBD: Need to adjust for cache line size here, especially on 7420.
+	req->physAddr  = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
+
+	if (!(req->physAddr)) {
+		spin_unlock(&req->lock);
+		return (-1);
+	}
+
+
+	req->edu_ldw = chip->ctrl_writeAddr(chip, req->offset, 0);
+
+
+	if (req->oobarea) {
+		p32 = (uint32_t*) req->oobarea;
+if (gdebug) {printk("%s: oob=\n", __FUNCTION__); print_oobbuf(req->oobarea, 16);}
+	}
+	else {
+		// Fill with 0xFF if don't want to change OOB
+		p32 = (uint32_t*) &ffchars[0];
+	}
+
+// printk("EDU40\n");
+	/* Preload 16 bytes of spare-area data into the controller registers */
+	for (i = 0; i < 4; i++) {
+		chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4, cpu_to_be32(p32[i]));
+	}
+
+	PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
+	
+	/*
+	 * Enable L2 Interrupt
+	 */
+	//req->cmd = EDU_WRITE;
+	req->opComplete = ISR_OP_SUBMITTED;
+	req->status = 0;
+	
+	/* On write we wait for both DMA done|error and Flash Status */
+	req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
+	req->expect = HIF_INTR2_EDU_DONE;
+	req->error = HIF_INTR2_EDU_ERR;
+	req->intr = HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY;
+
+	
+	ISR_enable_irq(req);
+
+	//EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000); 
+	EDU_reset_done();
+	EDU_volatileWrite(EDU_ERR_STATUS, 0x00000000); 
+
+	EDU_volatileWrite(EDU_LENGTH, EDU_LENGTH_VALUE);
+
+	EDU_issue_command(req->physAddr, req->edu_ldw, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
+	spin_unlock(&req->lock);
+	return 0;
+}
+
+
+/*
+ * Submit the first entry that is in queued state,
+ * assuming queue lock has been held by caller.
+ * 
+ * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
+ * Return the number of job submitted (either 1 or zero), as we don't support doublebuffering yet.
+ *
+ * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
+ * we can't really do double-buffering without losing the returned status of the previous read-op.
+ */
+int
+brcmnand_isr_submit_job(void)
+{
+	uint32_t edu_pending;
+	eduIsrNode_t* req;
+	//struct list_head* node;
+	int numReq = 0;
+
+//printk("-->%s\n", __FUNCTION__);
+//ISR_print_queue();
+
+	/* Walk the global job queue looking for the first QUEUED entry */
+	list_for_each_entry(req, &gJobQ.jobQ, list) {
+		//req = container_of(node, eduIsrNode_t, list);
+		switch (req->opComplete) {
+		case ISR_OP_QUEUED:
+			edu_pending = EDU_volatileRead(EDU_STATUS); 
+			/* Only submit when EDU has no pending command slot in use */
+			if (!(BCHP_EDU_STATUS_Pending_MASK & edu_pending)) {
+				if (gJobQ.cmd == EDU_READ) {
+					EDU_submit_read(req);
+				}
+				else if (gJobQ.cmd == EDU_WRITE) {
+					EDU_submit_write(req);
+				}
+				else {
+					printk("%s: Invalid op\n", __FUNCTION__);
+					BUG();
+				}
+				numReq++;
+#ifdef EDU_DOUBLE_BUFFER_READ
+				if (/*doubleBuffering &&*/ numReq < 2) {
+					continue;
+				}
+#endif
+			}
+PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
+			return numReq; 
+			
+		case ISR_OP_COMPLETED:
+		case ISR_OP_SUBMITTED:
+		case ISR_OP_NEED_WAR:
+		case ISR_OP_TIMEDOUT:
+			/* next entry */
+			continue;
+		case ISR_OP_COMP_WITH_ERROR:
+			break;
+		}
+	}
+PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
+	return numReq;
+}
+
+/*
+ * Queue the entire page, then wait for completion
+ *
+ * brcmnand_isr_read_page - read one page via the EDU/ISR job queue.
+ * @mtd:      MTD device structure
+ * @outp_buf: destination data buffer (must satisfy EDU's 32B alignment)
+ * @outp_oob: destination OOB buffer, or NULL
+ * @page:     page number to read
+ *
+ * Queues one 512B-sector read request per ECC step, kick-starts the first
+ * job (the ISR submits the rest), then blocks until the queue drains.
+ * Falls back to the synchronous brcmnand_read_page() when the buffer is
+ * not EDU-compatible.  Returns 0 or a negative error from the queue wait.
+ */
+static int
+brcmnand_isr_read_page(struct mtd_info *mtd,
+				uint8_t *outp_buf, uint8_t* outp_oob, uint64_t page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int dataRead = 0;
+	int oobRead = 0;
+	int ret = 0;
+	uint64_t offset = ((uint64_t) page) << chip->page_shift;
+	int submitted = 0;
+	unsigned long flags;
+
+//if (1/* (int) offset <= 0x2000 /*gdebug > 3 */) {
+//printk("-->%s, offset=%08x\n", __FUNCTION__, (uint32_t) offset);}
+if (gdebug > 3 ) {
+printk("-->%s, page=%0llx, buffer=%p\n", __FUNCTION__, page, outp_buf);}
+
+
+#if 0 // No need to check, we are aligned on a page
+	if (unlikely(offset - sliceOffset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
+                __FUNCTION__, offset, sliceOffset, ECCSIZE(mtd));
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
+
+
+	if (unlikely(!EDU_buffer_OK(outp_buf, EDU_READ))) 
+	{
+if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
+		/* EDU does not work on non-aligned buffers */
+		ret = brcmnand_read_page(mtd, outp_buf, outp_oob, page);
+		return (ret);
+	}
+
+	chip->pagebuf = page;
+
+	spin_lock_irqsave(&gJobQ.lock, flags);
+	if (!list_empty(&gJobQ.jobQ)) {
+		printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
+//ISR_print_queue();
+		BUG();
+	}
+	gJobQ.cmd = EDU_READ;
+	gJobQ.needWakeUp = 0;
+	
+	/* Queue one request per 512B ECC step of the page */
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+		eduIsrNode_t* req;
+		/*
+		 * Queue the 512B sector read, then read the EDU pending bit, 
+		 * and issue read command, if EDU is available for read.
+		 */
+		req = ISR_queue_read_request(mtd, &outp_buf[dataRead], 
+					outp_oob ? &outp_oob[oobRead] : NULL, 
+					offset + dataRead);
+				
+		dataRead += chip->eccsize;
+		oobRead += chip->eccOobSize;
+	}
+	//BUG_ON(submitted != 1);
+	
+	
+
+	/* Kick start it.  The ISR will submit the next job */
+	if (!submitted) {
+		submitted = brcmnand_isr_submit_job();
+	}
+	
+	/* Drop the lock while sleeping; re-take it to re-check emptiness */
+	while (!list_empty(&gJobQ.jobQ)) {
+		spin_unlock_irqrestore(&gJobQ.lock, flags);
+		ret = ISR_wait_for_queue_completion();
+		spin_lock_irqsave(&gJobQ.lock, flags);
+	}
+	spin_unlock_irqrestore(&gJobQ.lock, flags);
+	return ret;
+}
+
+
+/*
+ * Queue several pages for small page SLC, then wait for completion,
+ * assuming that 
+ * (1) offset is aligned on a 512B boundary
+ * (2) that outp_buf is aligned on a 32B boundary.
+ * (3) Not in raw mode
+ * This routine only works when ECC-size = Page-Size (Small SLC flashes), and relies on the fact
+ * that the internal buffer can hold several data+OOB buffers for several small pages at once.
+ *
+ * The OOB are read into chip->ctrl->buffers->OOB.
+ * The Queue Size and chip->ctrl->buffers->oob are chosen such that the OOB
+ * will all fit inside the buffers.
+ * After a batch of jobs is completed, the OOB is then copied to the output OOB parameter.
+ * To keep it simple stupid, this routine cannot handle Raw mode Read.
+ *
+ * Arguments:
+ * @mtd: 		MTD handle
+ * @outp_buf		Data buffer, passed from file system driver
+ * @inoutpp_oob	Address of OOB buffer, passed INOUT from file system driver
+ * @startPage	page 0 of batch
+ * @numPages	nbr of pages in batch
+ * @ops			MTD ops from file system driver.  We only look at the OOB mode (raw vs auto vs inplace)
+ *
+ * Returns 0 on success or a negative error from the queue wait.
+ *
+ * NOTE(review): ooblen is only assigned for MTD_OPS_PLACE_OOB and
+ * MTD_OPS_AUTO_OOB (RAW hits BUG()); any other mode would leave it
+ * uninitialized - confirm callers can never pass another mode.
+ */
+static int
+brcmnand_isr_read_pages(struct mtd_info *mtd,
+				uint8_t *outp_buf, uint8_t** inoutpp_oob, uint64_t startPage, int numPages,
+				struct mtd_oob_ops *ops)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int dataRead = 0;
+	int oobRead = 0;
+	int ret = 0;
+	uint64_t offset = ((uint64_t) startPage) << chip->page_shift;
+	int submitted = 0;
+	unsigned long flags;
+	int page;
+	u_char* oob = inoutpp_oob ? *inoutpp_oob : NULL;
+	u_char* oobpoi = NULL;
+	u_char* buf = outp_buf;
+	int ooblen;
+
+
+	/* Paranoia */
+	if (chip->pageSize != chip->eccsize) {
+		printk("%s: Can only be called on small page flash\n", __FUNCTION__);
+		BUG();
+	}
+
+	if (ops->mode == MTD_OPS_RAW) {
+		printk("%s: Can only be called when not in RAW mode\n", __FUNCTION__);
+		BUG();
+	}
+	else if (ops->mode == MTD_OPS_PLACE_OOB) {
+		ooblen = mtd->oobsize;
+	}
+	else if (ops->mode == MTD_OPS_AUTO_OOB) {
+		ooblen = mtd->ecclayout->oobavail;
+	}
+#ifdef DEBUG_ISR
+printk("-->%s: mtd=%p, buf=%p, &oob=%p, oob=%p\n", __FUNCTION__, 
+mtd, outp_buf, inoutpp_oob, inoutpp_oob? *inoutpp_oob: NULL);
+#endif	
+
+	spin_lock_irqsave(&gJobQ.lock, flags);
+	if (!list_empty(&gJobQ.jobQ)) {
+		printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
+//ISR_print_queue();
+		BUG();
+	}
+	gJobQ.cmd = EDU_READ;
+	gJobQ.needWakeUp = 0;
+
+	if (inoutpp_oob && *inoutpp_oob) {
+		// In batch mode, read OOB into internal OOB buffer first.
+		// This pointer will be advanced because oob_transfer depends on it.
+		chip->oob_poi= BRCMNAND_OOBBUF(chip->ctrl->buffers);
+		oobpoi = chip->oob_poi; // This pointer remains fixed
+	}
+//gdebug=4;	
+	/* Queue one read request per page of the batch */
+	for (page = 0; page < numPages && ret == 0; page++) {
+		eduIsrNode_t* req;
+
+		req = ISR_queue_read_request(mtd, buf, 
+					(inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL, 
+					offset + dataRead);
+				
+		dataRead += chip->eccsize;
+		oobRead += chip->eccOobSize;
+		buf += chip->eccsize;
+	}
+//gdebug=0;
+	//BUG_ON(submitted != 1);
+	
+	/* Kick start it.  The ISR will submit the next job */
+	if (!submitted) {
+		submitted = brcmnand_isr_submit_job();
+	}
+	
+	/* Drop the lock while sleeping; re-take it to re-check emptiness */
+	while (!list_empty(&gJobQ.jobQ)) {
+		spin_unlock_irqrestore(&gJobQ.lock, flags);
+		ret = ISR_wait_for_queue_completion();
+		spin_lock_irqsave(&gJobQ.lock, flags);
+	}
+	spin_unlock_irqrestore(&gJobQ.lock, flags);
+
+	if (ret) {
+		/* Abort, and return error to file system */
+		return ret;
+	}
+
+
+	/* Format OOB, from chip->OOB buffers */
+	
+	buf = outp_buf;
+	oob = (inoutpp_oob && *inoutpp_oob) ? *inoutpp_oob : NULL;
+	dataRead = 0;
+	oobRead = 0;
+PRINTK("%s: B4 transfer OOB: buf=%p, chip->buffers=%p, offset=%08llx\n",
+__FUNCTION__, buf, chip->ctrl->buffers, offset + dataRead);
+
+	// Reset oob_poi to beginning of OOB buffer.  
+	// This will get advanced, cuz brcmnand_transfer_oob depends on it.
+	chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+	// oobpoi pointer does not change in for loop
+	oobpoi = chip->oob_poi; 
+
+	/* Second pass: copy per-page OOB from the internal buffer to the caller */
+	for (page=0; page < numPages && ret == 0; page++) {
+		u_char* newoob = NULL;
+
+#ifdef EDU_DEBUG_4 /* Read verify */
+		ret = edu_read_verify(mtd, buf, 
+				(inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL, 
+				offset + dataRead);
+	
+		if (ret) BUG();
+#endif
+
+		if (unlikely(inoutpp_oob && *inoutpp_oob)) {
+			newoob = brcmnand_transfer_oob(chip, oob, ops, ooblen);
+			chip->oob_poi += chip->eccOobSize;
+			oob = newoob;
+			// oobpoi stays the same
+		}
+
+		dataRead += chip->eccsize;
+		oobRead += chip->eccOobSize;
+		buf += chip->eccsize;
+
+	} /* for */
+
+	/* Advance the caller's OOB pointer past what we consumed */
+	if (unlikely(inoutpp_oob && *inoutpp_oob)) {
+		*inoutpp_oob = oob;
+	}
+
+PRINTK("<-- %s\n", __FUNCTION__);
+	
+	return 0;
+}
+
+
+/**
+ * brcmnand_isr_read_page_oob - [REPLACEABLE] hardware ecc based page read function
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure.  The OOB buf is stored in the oob_poi ptr on return
+ *
+ * Not for syndrome calculating ecc controllers which need a special oob layout
+ *
+ * For BCH codes the OOB area is covered by ECC, so a full-page read (into
+ * the internal data buffer) is required to get corrected OOB bytes; for
+ * Hamming (or ECC disabled) the plain OOB-only read suffices.
+ */
+static int 
+brcmnand_isr_read_page_oob(struct mtd_info *mtd, 
+				uint8_t* outp_oob, uint64_t  page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+
+	/*
+	 * if BCH codes, use full page read to activate ECC on OOB area
+	 */
+	if (chip->ecclevel != BRCMNAND_ECC_HAMMING && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
+		return brcmnand_isr_read_page(mtd, chip->ctrl->buffers->databuf, outp_oob, page);
+	}
+	
+	else {
+		return brcmnand_read_page_oob(mtd, outp_oob, page);
+	}
+}
+
+
+
+
+#endif
+
+
+/**
+ * brcmnand_do_read_ops - [Internal] Read data with ECC
+ *
+ * @mtd:	MTD device structure
+ * @from:	offset to read from
+ * @ops:		oob ops structure
+ * @raw:		read raw data format when TRUE
+ *
+ * Internal function. Called with chip held.
+ *
+ * Returns 0 on success, -EBADMSG if new uncorrectable errors occurred,
+ * -EUCLEAN if only correctable errors occurred, or a negative error code.
+ * ops->retlen is updated with the number of bytes actually read.
+ */
+
+//#define EDU_DEBUG_1
+#undef EDU_DEBUG_1
+
+#ifdef EDU_DEBUG_1
+//static uint32_t debug_oob[32];
+static char* debug_sig = "brcmnandTesting";
+
+static struct nand_buffers debug_dbuf;
+//static uint8_t debug_dbuf = (uint8_t*) debug_databuf;
+
+#endif
+static int brcmnand_do_read_ops(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	unsigned int bytes, col;
+	uint64_t realpage;
+	int aligned;
+	struct brcmnand_chip *chip = mtd->priv;
+	struct mtd_ecc_stats stats;
+	//int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+	//int sndcmd = 1;
+	int ret = 0;
+	uint32_t readlen = ops->len;
+	uint8_t *bufpoi, *oob, *buf;
+	int __maybe_unused numPages;
+	int __maybe_unused buffer_aligned = 0;
+	int ooblen;
+
+
+	/* Per-page OOB byte budget depends on the OOB placement mode */
+	if (ops->mode == MTD_OPS_AUTO_OOB) {
+		ooblen = mtd->ecclayout->oobavail;
+	}
+	else  {
+		ooblen = mtd->oobsize;
+	}
+//int nonBatch = 0;
+
+	/* Remember the current CORR error count */
+	stats = mtd->ecc_stats;
+
+	// THT: BrcmNAND controller treats multiple chip as one logical chip.
+	//chipnr = (int)(from >> chip->chip_shift);
+	//chip->select_chip(mtd, chipnr);
+
+	realpage = (uint64_t) from >> chip->page_shift;
+	//page = realpage & chip->pagemask;
+
+	/* Byte offset of 'from' within its page (0 when page-aligned) */
+	col = mtd64_ll_low(from & (mtd->writesize - 1));
+	
+#ifndef EDU_DEBUG_1
+/* Debugging 12/27/08 */
+	chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+#else
+	chip->oob_poi = BRCMNAND_OOBBUF(&debug_dbuf);
+
+#endif
+
+	buf = ops->datbuf;
+	oob = ops->oobbuf;
+
+#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+	/*
+	 * Group several pages for submission for small page NAND
+	 */
+	if (chip->pageSize == chip->eccsize && ops->mode != MTD_OPS_RAW) {
+		while(1) {
+//nonBatch = 0;
+			bytes = min(mtd->writesize - col, readlen);
+			// (1) Writing partial or full page
+			aligned = (bytes == mtd->writesize);
+
+			// If writing full page, use user buffer, otherwise, internal buffer
+			bufpoi = aligned ? buf : chip->ctrl->buffers->databuf;
+			
+			// (2) Buffer satisfies 32B alignment required by EDU?
+			buffer_aligned = EDU_buffer_OK(bufpoi, EDU_READ);
+
+			// (3) Batch mode if writing more than 1 pages.
+			numPages = min(MAX_JOB_QUEUE_SIZE, (int)readlen>>chip->page_shift);
+
+			// Only do Batch mode if all 3 conditions are satisfied.
+			if (!aligned || !buffer_aligned || numPages <= 1) {
+				/* Submit 1 page at a time */
+
+				numPages = 1; // We count partial page read
+				ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);				
+
+				if (ret < 0)
+					break;
+
+				/* Transfer not aligned data */
+				if (!aligned) {
+					chip->pagebuf = realpage;
+					memcpy(buf, &bufpoi[col], bytes);
+				}
+				buf += bytes;
+
+				if (unlikely(oob)) {
+					/* if (ops->mode != MTD_OPS_RAW) */
+					oob = brcmnand_transfer_oob(chip, oob, ops, ooblen);
+					
+				}
+
+			}
+			else {
+				/* 
+				  * Batch job possible, all 3 conditions are met
+				  * bufpoi = Data buffer from FS driver
+				  * oob = OOB buffer from FS driver
+				  */	
+				bytes = numPages*mtd->writesize;
+
+				ret = brcmnand_isr_read_pages(mtd, bufpoi, oob? &oob : NULL, realpage, numPages, ops);
+
+				if (ret < 0)
+					break;
+
+				buf += bytes; /* Advance Read pointer */
+
+			}
+
+
+			readlen -= bytes;
+
+			if (!readlen)
+				break;
+
+			/* For subsequent reads align to page boundary. */
+			col = 0;
+			/* Increment page address */
+			realpage += numPages;
+		}
+		goto out;	
+	}
+	else 
+#endif
+	{
+		/* Non-batch path: one page per iteration */
+		while(1) {
+			bytes = min(mtd->writesize - col, readlen);
+			aligned = (bytes == mtd->writesize);
+			
+			bufpoi = aligned ? buf : chip->ctrl->buffers->databuf;
+
+			ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);
+
+			if (ret < 0)
+				break;
+
+			/* Transfer not aligned data */
+			if (!aligned) {
+				chip->pagebuf = realpage;
+				memcpy(buf, &bufpoi[col], bytes);
+			}
+
+			buf += bytes;
+
+			if (unlikely(oob)) {
+				/* Raw mode does data:oob:data:oob */
+				if (ops->mode != MTD_OPS_RAW)
+					oob = brcmnand_transfer_oob(chip, oob, ops, ooblen);
+				else {
+					buf = brcmnand_transfer_oob(chip, buf, ops, ooblen);
+				}
+			}
+
+
+			readlen -= bytes;
+
+			if (!readlen)
+				break;
+
+			/* For subsequent reads align to page boundary. */
+			col = 0;
+			/* Increment page address */
+			realpage++;
+
+		}
+	}
+	
+out: __maybe_unused
+//gdebug=0;
+
+	ops->retlen = ops->len - (size_t) readlen;
+
+
+	if (ret)
+		return ret;
+
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
+	/* Correctable errors occurred: report -EUCLEAN so upper layers can scrub */
+	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+
+
+/**
+ * brcmnand_read - [MTD Interface] MTD compability function for nand_do_read_ecc
+ * @mtd:	MTD device structure
+ * @from:	offset to read from
+ * @len:	number of bytes to read
+ * @retlen:	pointer to variable to store the number of read bytes
+ * @buf:	the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read
+ *
+ * On a correctable (-EUCLEAN) result, optionally refreshes the affected
+ * block and updates the correctable-error table (CET); 'inrefresh' gates
+ * re-entrancy so only one refresh runs at a time.
+ */
+static int brcmnand_read(struct mtd_info *mtd, loff_t from, size_t len,
+		     size_t *retlen, uint8_t *buf)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	int ret;
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+	int status;
+#endif
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from=%0llx\n", __FUNCTION__, from);
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx, len=%08x\n", __FUNCTION__, from, len);}
+
+	/* Do not allow reads past end of device */
+
+	if (unlikely((from + len) > device_size(mtd)))
+		return -EINVAL;
+	
+	if (!len)
+		return 0;
+
+	brcmnand_get_device(mtd, BRCMNAND_FL_READING);
+
+	chip->ops.mode = MTD_OPS_AUTO_OOB;
+	chip->ops.len = len;
+	chip->ops.datbuf = buf;
+	chip->ops.oobbuf = NULL;
+
+	brcmnand_reset_corr_threshold(chip);
+	
+	ret = brcmnand_do_read_ops(mtd, from, &chip->ops);
+
+	*retlen = chip->ops.retlen;
+
+	brcmnand_release_device(mtd);
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+        /* use atomic_inc_return instead of two separate atomic_read and atomic_inc calls because
+        there is a race condition between these two calls if it is preempted after the first call but
+        right before the second atomic call */
+	if (unlikely(ret == -EUCLEAN)) {
+		if (atomic_inc_return(&inrefresh) == 1) {
+			if (brcmnand_refresh_blk(mtd, from) == 0) { 
+				ret = 0; 
+			}
+			if (likely(chip->cet)) {
+				if (likely(chip->cet->flags != BRCMNAND_CET_DISABLED)) {
+					if (brcmnand_cet_update(mtd, from, &status) == 0) {
+
+/*
+ * PR57272: Provide workaround for BCH-n ECC HW bug when # error bits >= 4 
+ * We will not mark a block bad when the a correctable error already happened on the same page
+ */
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_4
+						ret = 0;
+#else
+						if (status) {
+							ret = -EUCLEAN;
+						} else {
+							ret = 0;
+						}
+#endif
+					}
+					if (gdebug > 3) {
+						printk(KERN_INFO "DEBUG -> %s ret = %d, status = %d\n", __FUNCTION__, ret, status);
+					}
+				}
+			}
+		}
+		atomic_dec(&inrefresh);
+	}
+#endif
+	return ret;
+}
+
+
+
+/**
+ * brcmnand_do_read_oob - [Intern] BRCMNAND read out-of-band
+ * @mtd:	MTD device structure
+ * @from:	offset to read from
+ * @ops:	oob operations description structure
+ *
+ * BRCMNAND read out-of-band data from the spare area
+ *
+ * Reads whole-page OOB areas starting at the page containing 'from' until
+ * ops->ooblen bytes have been transferred.  Returns 0 or the first error
+ * from the per-page OOB read; ops->oobretlen reports progress.
+ */
+static int brcmnand_do_read_oob(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	int realpage = 1;
+	struct brcmnand_chip *chip = mtd->priv;
+	//int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+	int toBeReadlen = ops->ooblen;
+	int readlen = 0;
+	int len; /* Number of OOB bytes to read each page */
+	uint8_t *buf = ops->oobbuf;
+	int ret = 0;
+	
+if (gdebug > 3 ) 
+{printk("-->%s, offset=%0llx, buf=%p, len=%d, ooblen=%d\n", __FUNCTION__, from, buf, toBeReadlen, ops->ooblen);}
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+	      __FUNCTION__, (unsigned long long)from, toBeReadlen);
+
+	//chipnr = (int)(from >> chip->chip_shift);
+	//chip->select_chip(mtd, chipnr);
+
+	if (ops->mode == MTD_OPS_AUTO_OOB)
+		len = chip->ecclayout->oobavail;
+	else
+		len = mtd->oobsize;
+
+	if (unlikely(ops->ooboffs >= len)) {
+		DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
+			"Attempt to start read outside oob\n");
+		return -EINVAL;
+	}
+
+	/* Do not allow reads past end of device */
+	/*
+	 * NOTE(review): 'readlen' is still 0 at this point, so this bounds
+	 * check is effectively a no-op for the requested length - it looks
+	 * like it should use toBeReadlen (ops->ooblen).  Confirm and fix
+	 * upstream.
+	 */
+	if (unlikely(from >= mtd->size ||
+		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+					(from >> chip->page_shift)) * len)) {
+		DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
+			"Attempt read beyond end of device\n");
+		return -EINVAL;
+	}
+
+
+	/* Shift to get page */
+	realpage = (int)(from >> chip->page_shift);
+	//page = realpage & chip->pagemask;
+
+	chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+	brcmnand_reset_corr_threshold(chip);
+
+	/* One page's OOB per iteration; 'len' bytes are consumed each pass */
+	while (toBeReadlen > 0) {
+		ret = chip->read_page_oob(mtd, chip->oob_poi, realpage);
+		if (ret) { // Abnormal return
+			ops->oobretlen = readlen;
+			return ret;
+		}
+		
+		buf = brcmnand_transfer_oob(chip, buf, ops, len);
+
+		toBeReadlen -= len;
+		readlen += len;
+
+		/* Increment page address */
+		realpage++;
+
+	}
+
+	ops->oobretlen = ops->ooblen;
+	return ret;
+}
+
+
+/**
+ * brcmnand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd:	MTD device structure
+ * @from:	offset to read from
+ * @ops:	oob operation description structure
+ *
+ * NAND read data and/or out-of-band data
+ *
+ * Dispatches to the OOB-only path when ops->datbuf is NULL, otherwise to
+ * the data+OOB path, holding the device lock for the duration.
+ */
+static int brcmnand_read_oob(struct mtd_info *mtd, loff_t from,
+			 struct mtd_oob_ops *ops)
+{
+//	struct brcmnand_chip *chip = mtd->priv;
+	int ret = -ENOTSUPP;
+	//int raw;
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%lx len=%x, databuf=%p\n", __FUNCTION__, 
+(unsigned long)from, (unsigned)ops->len, ops->datbuf);}
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from=%0llx\n", __FUNCTION__, from);
+
+	ops->retlen = 0;
+
+	/* Do not allow reads past end of device */
+
+	brcmnand_get_device(mtd, BRCMNAND_FL_READING);
+
+#if 0
+	switch(ops->mode) {
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+		raw = 0;
+		break;
+
+	case MTD_OPS_RAW:
+		raw = 1;
+		break;
+
+	default:
+		goto out;
+	}
+#endif
+
+	if (!ops->datbuf) {
+		/* OOB-only read */
+		ret = brcmnand_do_read_oob(mtd, from, ops);
+	} else {
+		/* Data (+ optional OOB) read; bounds-check the data length */
+		if (unlikely((from + ops->len) > device_size(mtd)))
+		{
+			DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end of device\n", __FUNCTION__);
+			ret = -EINVAL;
+		} else {
+			ret = brcmnand_do_read_ops(mtd, from, ops);
+		}
+	}
+
+
+// out:
+	brcmnand_release_device(mtd);
+if (gdebug > 3 ) {printk("<-- %s: ret=%d\n", __FUNCTION__, ret);}
+	return ret;
+}
+
+
+
+
+
+#ifdef CONFIG_MTD_BRCMNAND_VERIFY_WRITE
+
+#if 0
+/*
+ * Returns 0 on success, 
+ *
+ * NOTE(review): this whole function is compiled out (#if 0).  If ever
+ * re-enabled it will not compile as-is: 'chip' is only declared in a
+ * commented-out line, 'len' in the inner #if 0 region is used
+ * uninitialized, and the oobsel/autoplace interface predates the current
+ * mtd_oob_ops API.  Kept for reference only.
+ */
+static int brcmnand_verify_pageoob_priv(struct mtd_info *mtd, loff_t offset, 
+	const u_char* fsbuf, int fslen, u_char* oob_buf, int ooblen, struct nand_oobinfo* oobsel, 
+	int autoplace, int raw)
+{
+	//struct brcmnand_chip * chip = mtd->priv;
+	int ret = 0;
+	int complen;
+
+	
+	if (autoplace) {
+
+		complen = min_t(int, ooblen, fslen);
+
+		/* We may have read more from the OOB area, so just compare the min of the 2 */
+		if (complen == fslen) {
+			ret = memcmp(fsbuf, oob_buf, complen);
+			if (ret) {
+{
+printk("Autoplace Comparison failed at %08x, ooblen=%d fslen=%d left=\n", 
+	__ll_low(offset), ooblen, fslen);
+print_oobbuf(fsbuf, fslen);
+printk("\nRight=\n"); print_oobbuf(oob_buf, ooblen);
+dump_stack();
+}
+				goto comparison_failed;
+			}
+		}
+		else {
+printk("%s: OOB comparison failed, ooblen=%d is less than fslen=%d\n", 
+		__FUNCTION__, ooblen, fslen);
+			return  -EBADMSG;
+		}
+	}
+	else { // No autoplace.  Skip over non-freebytes
+
+		/* 
+		 * THT:
+		 * WIth YAFFS1, the FS codes overwrite an already written chunks quite a lot
+		 * (without erasing it first, that is!!!!!)
+		 * For those write accesses, it does not make sense to check the write ops
+		 * because they are going to fail every time
+		 */
+		
+
+#if 0
+		int i, len; 
+		
+		for (i = 0; oobsel->oobfree[i][1] && i < ARRAY_SIZE(oobsel->oobfree); i++) {
+			int from = oobsel->oobfree[i][0];
+			int num = oobsel->oobfree[i][1];
+			int len1 = num;
+
+			if (num == 0) break; // End of oobsel
+			
+			if ((from+num) > fslen) len1 = fslen-from;
+			ret = memcmp(&fsbuf[from], &oob_buf[from], len1);
+			if (ret) {
+				printk(KERN_ERR "%s: comparison at offset=%08x, i=%d from=%d failed., num=%d\n", 
+					__FUNCTION__, i, __ll_low(offset), from, num); 
+if (gdebug > 3) 
+{
+printk("No autoplace Comparison failed at %08x, ooblen=%d fslen=%d left=\n", 
+	__ll_low(offset), ooblen, fslen);
+print_oobbuf(&fsbuf[0], fslen);
+printk("\nRight=\n"); print_oobbuf(&oob_buf[0], ooblen);
+dump_stack();
+}
+				goto comparison_failed;
+			}
+			if ((from+num) >= fslen) break;
+			len += num;
+		}
+#endif
+	}
+	return ret;
+
+
+comparison_failed:
+	{
+		//unsigned long nand_timing1 = brcmnand_ctrl_read(BCHP_NAND_TIMING_1);
+		//unsigned long nand_timing2 = brcmnand_ctrl_read(BCHP_NAND_TIMING_2);
+		//u_char raw_oob[NAND_MAX_OOBSIZE];
+		//int retlen;
+		//struct nand_oobinfo noauto_oobsel;
+
+		printk("Comparison Failed\n");
+		print_diagnostics(chip);
+		
+		//noauto_oobsel = *oobsel;
+		//noauto_oobsel.useecc = MTD_NANDECC_PLACEONLY;
+		//brcmnand_read_pageoob(mtd, offset, raw_oob, &retlen, &noauto_oobsel, 0, raw);
+//if (gdebug) { printk("oob="); print_oobbuf(raw_oob, retlen);}
+//printk("<-- %s: comparison failed\n", __FUNCTION__);
+
+	
+		return -EBADMSG;
+	}
+}
+#endif
+
+
+/**
+ * brcmnand_verify_page - [GENERIC] verify the chip contents after a write
+ * @param mtd		MTD device structure
+ * @param dbuf		the databuffer to verify 
+ * @param dlen		the length of the data buffer, and should be equal to mtd->writesize
+ * @param oobbuf		the length of the file system OOB data and should be exactly
+ *                             chip->oobavail (for autoplace) or mtd->oobsize otherwise
+ *					bytes to verify. (ignored for Hamming)
+ * @param ooblen
+ *
+ * Returns 0 on success, 1 on errors.
+ * Assumes that lock on.  Munges the internal data and OOB buffers.
+ *
+ * Reads the page back and, for Hamming ECC only, recomputes the software
+ * ECC over each 512B step and compares it against the HW ECC stored in
+ * the OOB (bytes 6-8 of each step), plus the non-ECC OOB bytes when an
+ * input OOB buffer is supplied.  BCH pages are verified by the read alone.
+ */
+//#define MYDEBUG
+static u_char verify_buf[NAND_MAX_PAGESIZE+512];
+static u_char v_oob_buf [NAND_MAX_OOBSIZE];
+static int brcmnand_verify_page(struct mtd_info *mtd, loff_t addr, 
+		const u_char *dbuf, int dlen, 
+		const u_char* inp_oob, int ooblen
+		)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	
+	int ret = 0; // Matched
+	//int ooblen=0, datalen=0;
+	//int complen;
+	u_char* oobbuf = v_oob_buf;
+	uint64_t page;
+	int eccstep;
+	// Align Vbuf on 512B
+	u_char* vbuf = (u_char*) ( ((unsigned long) verify_buf + chip->eccsize-1) 
+		& ~( chip->eccsize-1));
+
+if (gdebug > 3) printk("-->%s: addr=%0llx\n", __FUNCTION__, addr);
+
+	/* 
+	 * Only do it for Hamming codes because
+	 * (1) We can't do it for BCH until we can read the full OOB area for BCH-8
+	 * (2) OOB area is included in ECC calculation for BCH, so no need to check it
+	 *      separately.
+	 */
+
+
+#if 1
+	page = ((uint64_t) addr) >> chip->page_shift;
+	// Must read entire page
+	ret = chip->read_page(mtd, vbuf, oobbuf, page);
+	if (ret) {
+		printk(KERN_ERR "%s: brcmnand_read_page at %08x failed ret=%d\n", 
+			__FUNCTION__, (unsigned int) addr, ret);
+		brcmnand_post_mortem_dump(mtd, addr);
+		return ret;
+	}
+
+#endif
+
+	if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+		return ret; // We won't verify the OOB if not Hamming
+	}
+
+	/* 
+	 * If there are no Input Buffer, there is nothing to verify.
+	 * Reading the page should be enough.
+	 */
+	if (!dbuf || dlen <= 0)
+		return 0;
+	
+	for (eccstep=0; eccstep < chip->eccsteps; eccstep++) {
+		int pageOffset = eccstep*chip->eccsize;
+		int oobOffset = eccstep*chip->eccOobSize;
+		u_char sw_ecc[4];  // SW ECC
+		u_char* oobp = &oobbuf[oobOffset]; // returned from read op, contains HW ECC.
+
+		brcmnand_Hamming_ecc(&dbuf[pageOffset], sw_ecc);
+
+		if (sw_ecc[0] != oobp[6] || sw_ecc[1] != oobp[7] || sw_ecc[2] != oobp[8]) {
+			/* An erased step (HW=0xFFFFFF) with SW ECC of all-zero is acceptable */
+			if (oobp[6] == 0xff && oobp[7] == 0xff && oobp[8] == 0xff 
+				&& sw_ecc[0] == 0 && sw_ecc[1] == 0 && sw_ecc[2] == 0) 
+				; // OK
+			else {
+				printk("%s: Verification failed at %0llx.  HW ECC=%02x%02x%02x, SW ECC=%02x%02x%02x\n",
+					__FUNCTION__, addr,
+					oobp[6], oobp[7], oobp[8], sw_ecc[0], sw_ecc[1], sw_ecc[2]);
+				ret = 1;
+				break;
+			}
+		}
+
+		// Verify the OOB if not NULL
+		if (inp_oob) {
+			/* Compare OOB bytes around the 3 ECC bytes (6-8): 0-5 and 9-15 */
+			if (memcmp(&inp_oob[oobOffset], oobp, 6) || memcmp(&inp_oob[oobOffset+9], &oobp[9],7)) {
+				printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
+				printk("In OOB:\n"); print_oobbuf(&inp_oob[oobOffset], 16);
+				printk("\nVerify OOB:\n"); print_oobbuf(oobp, 16);
+				ret = (-2);
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+#if 1
+
+#define brcmnand_verify_pageoob(...)		(0)
+
+#else
+
+/**
+ * brcmnand_verify_pageoob - [GENERIC] verify the chip contents after a write
+ * @param mtd		MTD device structure
+ * @param addr		flash offset of the page whose OOB is verified
+ * @param fsbuf		the file system OOB data 
+ * @param fslen		the length of the file system buffer
+ * @param oobsel		Specify how to write the OOB data
+ * @param autoplace	Specify how to write the OOB data
+ * @param raw		Ignore the Bad Block Indicator when true
+ *
+ * Assumes that lock on.  Munges the OOB internal buffer.
+ *
+ * NOTE(review): this definition sits in the disabled (#if 0) branch above and
+ * is currently compiled out; the active build uses the no-op macro instead.
+ */
+static int brcmnand_verify_pageoob(struct mtd_info *mtd, loff_t addr, const u_char* fsbuf, int fslen,
+		struct nand_oobinfo *oobsel, int autoplace, int raw)
+{
+//	struct brcmnand_chip * chip = mtd->priv;
+	//u_char* data_buf = chip->data_buf;
+	u_char oob_buf[NAND_MAX_OOBSIZE]; // = chip->oob_buf;
+	int ret = 0;
+	//int complen;
+	//char tmpfsbuf[NAND_MAX_OOBSIZE]; // Max oob size we support.
+	int ooblen = 0;
+
+if(gdebug) printk("-->%s addr=%08x, fslen=%d, autoplace=%d, raw=%d\n", __FUNCTION__, __ll_low(addr),
+	fslen, autoplace, raw);
+
+	// Must read entire page
+	ret = brcmnand_read_pageoob(mtd, addr, oob_buf, &ooblen, oobsel, autoplace, raw);
+
+	if (ret) {
+		printk(KERN_ERR "%s: brcmnand_read_page at %08x failed ret=%d\n",
+			__FUNCTION__, (unsigned int) addr, ret);
+		return ret;
+	}
+
+if(gdebug) printk("%s: Calling verify_pageoob_priv(addr=%08x, fslen=%d, ooblen=%d\n", 
+	__FUNCTION__, __ll_low(addr), fslen, ooblen);
+	/* Compare the read-back OOB against what the caller wrote. */
+	ret = brcmnand_verify_pageoob_priv(mtd, addr, fsbuf, fslen, oob_buf, ooblen, oobsel, autoplace, raw);
+
+	return ret;
+}
+
+#endif
+
+#else
+#define brcmnand_verify_page(...)	(0)
+#define brcmnand_verify_pageoob(...)		(0)
+//#define brcmnand_verify_oob(...)		(0)
+#endif
+
+
+
+/**
+ * brcmnand_write_page - [INTERNAL] write one page
+ * @mtd:	MTD device structure
+ * @inp_buf:	the data to write (mtd->writesize bytes)
+ * @inp_oob:	the spare area to write, or NULL for none
+ * @page:	page number to write
+ *
+ * Writes the page one ECC step (chip->eccsize data bytes plus
+ * chip->eccOobSize OOB bytes) at a time through the controller cache.
+ * Returns 0 on success or the negative error from the posted write.
+ * Assumes the caller holds the device lock.
+ */
+static int 
+brcmnand_write_page(struct mtd_info *mtd,
+			   const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int dataWritten = 0;
+	int oobWritten = 0;
+	int ret = 0;
+	uint64_t offset = page << chip->page_shift;
+
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx\n", __FUNCTION__, offset);}
+
+	/* Record which page the chip buffer now corresponds to. */
+	chip->pagebuf = page;
+
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+		ret = brcmnand_posted_write_cache(mtd, &inp_buf[dataWritten], 
+					inp_oob ? &inp_oob[oobWritten]  : NULL, 
+					offset + dataWritten);
+		
+		if (ret < 0) {
+			printk(KERN_ERR "%s: brcmnand_posted_write_cache failed at offset=%0llx, ret=%d\n", 
+				__FUNCTION__, offset + dataWritten, ret);
+			// TBD: Return the the number of bytes written at block boundary.
+			dataWritten = 0;
+			return ret;
+		}
+		dataWritten += chip->eccsize;
+		oobWritten += chip->eccOobSize;
+	}
+
+	// TBD
+#ifdef BRCMNAND_WRITE_VERIFY
+	/* Optional compile-time read-back verification; BUG() on mismatch. */
+if (0 == ret) {
+int vret;
+//gdebug = 0;
+	vret = brcmnand_verify_page(mtd, offset, inp_buf, mtd->writesize, inp_oob, chip->eccOobSize);
+//gdebug=save_debug;
+	if (vret) BUG();
+}
+#endif
+
+
+	return ret;
+}
+
+#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+
+/*
+ * Queue the entire page, then wait for completion
+ *
+ * ISR/EDU variant of brcmnand_write_page(): queues one write request per
+ * ECC step, kick-starts the EDU engine, then sleeps until the job queue
+ * drains.  Falls back to the PIO path when the buffer is not usable by EDU.
+ * Returns 0 on success or the error from the queue-completion wait.
+ */
+static int
+brcmnand_isr_write_page(struct mtd_info *mtd,
+			   const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int dataWritten = 0;
+	int oobWritten = 0;
+	int ret = 0;
+	uint64_t offset = page << chip->page_shift;
+
+	int submitted = 0;
+	unsigned long flags;
+
+if (gdebug > 3 ) {
+printk("-->%s, page=%0llx\n", __FUNCTION__, page);}
+
+
+#if 0 // No need to check, we are aligned on a page
+	if (unlikely(offset - sliceOffset)) {
+		printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
+                __FUNCTION__, offset, sliceOffset, ECCSIZE(mtd));
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
+
+
+	if (unlikely(!EDU_buffer_OK((volatile void *)inp_buf, EDU_WRITE))) 
+	{
+if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
+		/* EDU does not work on non-aligned buffers */
+		ret = brcmnand_write_page(mtd, inp_buf, inp_oob, page);
+		return (ret);
+	}
+
+	chip->pagebuf = page;
+
+	spin_lock_irqsave(&gJobQ.lock, flags);
+	if (!list_empty(&gJobQ.jobQ)) {
+		/* NOTE(review): message says "read" but this is the write path. */
+		printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
+		BUG();
+	}
+	gJobQ.cmd = EDU_WRITE;
+	gJobQ.needWakeUp = 0;
+
+
+	/* Queue one request per ECC step; the ISR chains the rest. */
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+		eduIsrNode_t* req;
+		/*
+		 * Queue the 512B sector read, then read the EDU pending bit, 
+		 * and issue read command, if EDU is available for read.
+		 */
+		req = ISR_queue_write_request(mtd, &inp_buf[dataWritten], 
+					inp_oob ? &inp_oob[oobWritten]  : NULL, 
+					offset + dataWritten);
+		
+		dataWritten += chip->eccsize;
+		oobWritten += chip->eccOobSize;
+	}
+	
+	
+	/*
+	 * Kick start it.  The ISR will submit the next job
+	 */
+	if (!submitted) {
+		submitted = brcmnand_isr_submit_job();
+	}
+	
+	/* Drop the spinlock while sleeping on completion; re-check the queue. */
+	while (!list_empty(&gJobQ.jobQ)) {
+		spin_unlock_irqrestore(&gJobQ.lock, flags);
+		ret = ISR_wait_for_queue_completion();
+		if (ret) {
+			dataWritten = 0;
+		}
+		spin_lock_irqsave(&gJobQ.lock, flags);
+	}
+	spin_unlock_irqrestore(&gJobQ.lock, flags);
+	return ret;
+
+}
+
+/*
+ * Queue the several pages, then wait for completion
+ * For 512B page sizes only.
+ *
+ * Batch variant of brcmnand_isr_write_page(): queues numPages page writes
+ * as one EDU job, starts the engine, and waits for the queue to drain.
+ * The caller guarantees the buffer is EDU capable (see #if 0 block below).
+ * Returns 0 on success or the error from the queue-completion wait.
+ */
+static int
+brcmnand_isr_write_pages(struct mtd_info *mtd,
+			   const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t startPage, int numPages)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int dataWritten = 0;
+	int oobWritten = 0;
+	int ret = 0;
+	uint64_t offset = startPage << chip->page_shift;
+	int page;
+
+	int submitted = 0;
+	unsigned long flags;
+
+#if 0
+ /* Already checked by caller */
+	if (unlikely(!EDU_buffer_OK(inp_buf, EDU_WRITE))) 
+	{
+if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
+		/* EDU does not work on non-aligned buffers */
+		ret = brcmnand_write_page(mtd, inp_buf, inp_oob, startPage);
+		return (ret);
+	}
+#endif
+	/* Paranoia */
+	if (chip->pageSize != chip->eccsize) {
+		printk("%s: Can only be called on small page flash\n", __FUNCTION__);
+		BUG();
+	}
+
+	spin_lock_irqsave(&gJobQ.lock, flags);
+	if (!list_empty(&gJobQ.jobQ)) {
+		/* NOTE(review): message says "read" but this is the write path. */
+		printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
+		BUG();
+	}
+	gJobQ.cmd = EDU_WRITE;
+	gJobQ.needWakeUp = 0;
+
+//gdebug=4;
+	/* Queue one request per 512B page; the ISR chains the rest. */
+	for (page = 0; page < numPages && ret == 0; page++) {
+		eduIsrNode_t* req;
+		/*
+		 * Queue the 512B sector read, then read the EDU pending bit, 
+		 * and issue read command, if EDU is available for read.
+		 */
+
+		req = ISR_queue_write_request(mtd, &inp_buf[dataWritten], 
+					inp_oob ? &inp_oob[oobWritten]  : NULL, 
+					offset + dataWritten);
+		
+		dataWritten += chip->eccsize;
+		oobWritten += chip->eccOobSize;
+	}
+//gdebug=0;	
+	
+	
+	/*
+	 * Kick start it.  The ISR will submit the next job
+	 * We do it here, in order to avoid having to obtain the queue lock
+	 * inside the ISR, in preparation for an RCU implementation.
+	 */
+	if (!submitted) {
+		submitted = brcmnand_isr_submit_job();
+	}
+	
+	/* Drop the spinlock while sleeping on completion; re-check the queue. */
+	while (!list_empty(&gJobQ.jobQ)) {
+		spin_unlock_irqrestore(&gJobQ.lock, flags);
+		ret = ISR_wait_for_queue_completion();
+		if (ret) {
+			dataWritten = 0;
+		}
+		spin_lock_irqsave(&gJobQ.lock, flags);
+	}
+	spin_unlock_irqrestore(&gJobQ.lock, flags);
+
+
+#ifdef EDU_DEBUG_5
+/* Debug build: read back and verify every page just written. */
+{int ret1;
+/* Verify */
+	dataWritten = 0;
+	oobWritten = 0;
+	for (page = 0; page < numPages && ret == 0; page++) {
+		ret1 = edu_write_verify(mtd, &inp_buf[dataWritten], 
+					inp_oob ? &inp_oob[oobWritten]  : NULL, 
+					offset + dataWritten);
+		if (ret1) BUG();
+		dataWritten += chip->eccsize;
+		oobWritten += chip->eccOobSize;
+		
+	}
+}
+#endif
+	return ret;
+
+}
+
+
+#endif
+
+
+
+/**
+ * brcmnand_fill_oob - [Internal] Transfer client buffer to oob
+ * @chip:	nand chip structure
+ * @oob:	current read position within the client's ops->oobbuf
+ * @ops:	oob ops structure (mode, ooboffs, ooblen, oobbuf)
+ *
+ * Copies the next chunk of client OOB bytes into chip->oob_poi according to
+ * ops->mode: raw/place modes copy straight at ops->ooboffs; auto mode
+ * scatters the bytes into the free slots of the ECC layout.
+ *
+ * Returns the pointer to the OOB where next byte should be written to,
+ * or ERR_PTR(-EINVAL) when the auto-place request exceeds the available
+ * OOB space — callers must check the result with IS_ERR().
+ */
+uint8_t *
+brcmnand_fill_oob(struct brcmnand_chip *chip, uint8_t *oob, struct mtd_oob_ops *ops)
+{
+	// Already written in previous passes, relying on oob being intialized to ops->oobbuf
+	size_t writtenLen = oob - ops->oobbuf; 
+	size_t len = ops->ooblen - writtenLen;
+
+	
+	switch(ops->mode) {
+
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
+		/* NOTE(review): no bounds check of ooboffs+len vs oobsize here;
+		 * callers are trusted to pass a valid range -- confirm. */
+		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+		return oob + len;
+
+	case MTD_OPS_AUTO_OOB: {
+		struct nand_oobfree *free = chip->ecclayout->oobfree;
+		uint32_t boffs = 0, woffs = ops->ooboffs;
+		size_t bytes = 0;
+		int oobavail = chip->ecclayout->oobavail;
+
+		if ((ops->ooboffs + ops->ooblen) > oobavail)
+			return ERR_PTR(-EINVAL);
+
+		memset(chip->oob_poi + ops->ooboffs, 0xff, oobavail - ops->ooboffs);
+
+		/* Walk the free-slot list, first skipping woffs bytes. */
+		for(; free->length && len; free++, len -= bytes) {
+			/* Write request not from offset 0 ? */
+			if (unlikely(woffs)) {
+				if (woffs >= free->length) {
+					woffs -= free->length;
+					continue;
+				}
+				boffs = free->offset + woffs;
+				bytes = min_t(size_t, len,
+					      (free->length - woffs));
+				woffs = 0;
+			} else {
+				bytes = min_t(size_t, len, free->length);
+				boffs = free->offset;
+			}
+			memcpy(chip->oob_poi + boffs, oob, bytes);
+			oob += bytes;
+		}
+		return oob;
+	}
+	default:
+		BUG();
+	}
+	/* Not reached: BUG() above does not return. */
+	return ERR_PTR(-EINVAL);
+}
+
+
+#define NOTALIGNED(x) ((int) (x & (mtd->writesize-1)) != 0)
+
+/**
+ * brcmnand_do_write_ops - [Internal] BRCMNAND write with ECC
+ * @mtd:	MTD device structure
+ * @to:		offset to write to (must be page aligned)
+ * @ops:	oob operations description structure
+ *
+ * BRCMNAND write with ECC.  Writes ops->len data bytes and, when an OOB
+ * buffer is supplied, the matching spare bytes for each page.  On the ISR
+ * queue build, small-page writes with an EDU-capable buffer are batched.
+ * Returns 0 on success or a negative error; ops->retlen / ops->oobretlen
+ * report how much was written.  Caller must hold the device lock.
+ */
+static int brcmnand_do_write_ops(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops)
+{
+	uint64_t realpage;
+	int blockmask;
+	struct brcmnand_chip *chip = mtd->priv;
+	uint32_t writelen = ops->len;
+	uint8_t *oob = ops->oobbuf; //brcmnand_fill_oob relies on this
+	uint8_t *buf = ops->datbuf;
+	int bytes = mtd->writesize;
+	int ret = 0;
+	int numPages; 
+	int buffer_aligned = 0;
+
+#if defined(CONFIG_BCM_KF_NAND)
+	/* Read-modify-write: when no client OOB was supplied but the ECC mode
+	 * requires a full OOB write, pre-read the existing OOB of the first
+	 * page and substitute it as the OOB source for this request.
+	 * NOTE(review): the same pre-read OOB is reused for every page of a
+	 * multi-page request -- confirm that is intended. */
+	uint8_t oobarea[512];
+	int read_oob = 0;
+	if( !oob &&
+        ((chip->options & NAND_COMPLEX_OOB_WRITE) != 0 || 
+	     (chip->ecclevel >= BRCMNAND_ECC_BCH_1 &&
+          chip->ecclevel <= BRCMNAND_ECC_BCH_12)) )
+	{
+		read_oob = 1;
+		oob = (uint8_t *) (((uint32_t) oobarea + 0x0f) & ~0x0f);
+		brcmnand_read_page_oob(mtd, oob, to >> chip->page_shift); 
+		ops->mode = MTD_OPS_PLACE_OOB;
+		ops->ooboffs = 0;
+		ops->ooblen = chip->eccsteps * chip->eccOobSize;
+		ops->oobbuf = oob;
+	}
+#endif
+
+DEBUG(MTD_DEBUG_LEVEL3, "-->%s, offset=%0llx\n", __FUNCTION__, to);
+
+	ops->retlen = 0;
+
+	/* reject writes, which are not page aligned */
+	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+		printk(KERN_NOTICE "nand_write: "
+		       "Attempt to write not page aligned data\n");
+		return -EINVAL;
+	}
+
+	if (!writelen)
+		return 0;
+
+/* BrcmNAND multi-chips are treated as one logical chip *
+	chipnr = (int)(to >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+*/
+
+
+
+	realpage = to >> chip->page_shift;
+	//page = realpage & chip->pagemask;
+	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+
+	/* Invalidate the page cache, when we write to the cached page */
+	if ((chip->pagebuf !=  -1LL) && 
+		(to <= (chip->pagebuf << chip->page_shift)) &&
+	    	((to + ops->len) > (chip->pagebuf << chip->page_shift) )) 
+	{
+		chip->pagebuf = -1LL;
+	}
+
+	/* THT: Provide buffer for brcmnand_fill_oob */
+	if (unlikely(oob)) {
+		chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+	}
+	else {
+		chip->oob_poi = NULL;
+	}
+
+#ifdef  CONFIG_MTD_BRCMNAND_ISR_QUEUE
+	/* Buffer must be aligned for EDU */
+	buffer_aligned = EDU_buffer_OK(buf, EDU_WRITE);
+
+#else /* Dont care */
+	buffer_aligned = 0;
+#endif
+
+	while(1) {
+
+#ifdef  CONFIG_MTD_BRCMNAND_ISR_QUEUE
+		/*
+		 * Group several pages for submission for small page NAND
+		 */
+		numPages = min(MAX_JOB_QUEUE_SIZE, (int)writelen>>chip->page_shift);
+
+		// If Batch mode		
+		if (buffer_aligned && numPages > 1 && chip->pageSize == chip->eccsize) {
+			int j;
+
+			/* Submit min(queueSize, len/512B) at a time */
+			//numPages = min(MAX_JOB_QUEUE_SIZE, writelen>>chip->page_shift);			
+			bytes = chip->eccsize*numPages;
+
+			if (unlikely(oob)) {
+				//u_char* newoob;
+				/* Pack one OOB slice per queued page into oob_poi. */
+				for (j=0; j<numPages; j++) {
+					oob = brcmnand_fill_oob(chip, oob, ops);
+					if (IS_ERR(oob))
+						return PTR_ERR(oob);
+					/* THT: oob now points to where to read next, 
+					 * chip->oob_poi contains the OOB to be written
+					 */
+					/* In batch mode, we advance the OOB pointer to the next OOB slot 
+					 * using chip->oob_poi
+					 */
+					chip->oob_poi += chip->eccOobSize;
+				}
+				// Reset chip->oob_poi to beginning of OOB buffer for submission.
+				chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+			}
+			
+			ret = brcmnand_isr_write_pages(mtd, buf, chip->oob_poi, realpage, numPages);
+			if (ret) {
+				ops->retlen = 0;
+				return ret;
+			}
+
+		}
+		
+		else /* Else submit one page at a time */
+
+#endif
+		/* Submit one page at a time */
+		{ 
+			numPages = 1;
+			bytes = mtd->writesize;
+			
+			if (unlikely(oob)) {
+				chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+				oob = brcmnand_fill_oob(chip, oob, ops);
+				if (IS_ERR(oob))
+					return PTR_ERR(oob);
+				/* THT: oob now points to where to read next, 
+				 * chip->oob_poi contains the OOB to be written
+				 */
+			}
+
+			ret = chip->write_page(mtd, buf, chip->oob_poi, realpage);
+
+		}
+
+		if (ret)
+			break;
+
+		writelen -= bytes;
+		if (!writelen)
+			break;
+
+		buf += bytes;
+		realpage += numPages;
+	}
+
+#if defined(CONFIG_BCM_KF_NAND)
+	/* Undo the temporary OOB substitution performed above. */
+	if( read_oob )
+	{
+		ops->ooboffs = 0;
+		ops->ooblen = 0;
+		ops->oobbuf = NULL;
+	}
+#endif
+
+	ops->retlen = ops->len - writelen;
+	if (unlikely(oob))
+		ops->oobretlen = ops->ooblen;
+	DEBUG(MTD_DEBUG_LEVEL3, "<-- %s\n", __FUNCTION__);
+	return ret;
+}
+
+
+/**
+ * brcmnand_write - [MTD Interface] brcmNAND write with ECC
+ * @mtd:	MTD device structure
+ * @to:		offset to write to
+ * @len:	number of bytes to write
+ * @retlen:	pointer to variable to store the number of written bytes
+ * @buf:	the data to write
+ *
+ * BRCMNAND write with ECC.  Returns 0 on success or a negative error;
+ * *retlen reports the number of bytes actually written.
+ */
+static int brcmnand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			  size_t *retlen, const uint8_t *buf)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	int ret;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to=%0llx\n", __FUNCTION__, to);
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx\n", __FUNCTION__, to);}
+
+	/* Refuse to start a program cycle when power is failing. */
+	if( kerSysIsDyingGaspTriggered() )
+	{
+		printk("system is losing power, abort nand write offset %lx len %x,\n", (unsigned long)to, len);
+		return -EINVAL;
+	}
+
+
+	/* Do not allow writes past end of device */
+	if (unlikely((to + len) > device_size(mtd))) {
+  		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write beyond end of device\n",
+			__FUNCTION__);
+		printk("brcmnand_write Attempt to write beyond end of device to 0x%x, len 0x%x, size of device 0x%x\n", (int)to, len, (int)device_size(mtd) );
+		/* Reject the request, matching brcmnand_write_oob(), instead of
+		 * falling through and programming past the end of the device. */
+		return -EINVAL;
+	}	
+	if (!len)
+		return 0;
+	
+	brcmnand_get_device(mtd, BRCMNAND_FL_WRITING);
+
+	/* Reuse the per-chip ops struct; safe while holding the device lock. */
+	chip->ops.len = len;
+	chip->ops.datbuf = (uint8_t *)buf;
+	chip->ops.oobbuf = NULL;
+
+	ret = brcmnand_do_write_ops(mtd, to, &chip->ops);
+
+	*retlen = chip->ops.retlen;
+
+	brcmnand_release_device(mtd);
+	return ret;
+}
+
+
+/**
+ * brcmnand_write_page_oob - [INTERNAL] write the OOB (spare) area of one page
+ * @mtd:	MTD device structure
+ * @inp_oob:	OOB buffer to write (chip->eccsteps * chip->eccOobSize bytes)
+ * @page:	page number to write
+ * @isFromMarkBadBlock: passed through to the posted OOB write (bad-block marking)
+ *
+ * Writes the spare area only, one ECC step at a time.  Returns 0 on
+ * success or the negative error from the posted write.
+ */
+static int brcmnand_write_page_oob(struct mtd_info *mtd, 
+			   const uint8_t* inp_oob, uint64_t page, int isFromMarkBadBlock)
+{
+	struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
+	int eccstep;
+	int oobWritten = 0;
+	int ret = 0;
+	uint64_t offset = page << chip->page_shift;
+
+	/* Record which page the chip buffer now corresponds to. */
+	chip->pagebuf = page;
+
+	for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
+		ret = brcmnand_posted_write_oob(mtd,  &inp_oob[oobWritten] , 
+					offset, isFromMarkBadBlock);
+//gdebug=0;		
+		if (ret < 0) {
+			printk(KERN_ERR "%s: brcmnand_posted_write_oob failed at offset=%0llx, ret=%d\n", 
+				__FUNCTION__, offset, ret);
+			return ret;
+		}
+		offset = offset + chip->eccsize;
+		oobWritten += chip->eccOobSize;
+	}
+
+	// TBD
+	/* brcmnand_verify_pageoob is currently a no-op macro (always 0). */
+	ret = brcmnand_verify_pageoob();
+
+if (gdebug > 3 ) {
+printk("<--%s offset=%0llx\n", __FUNCTION__,  page << chip->page_shift);
+print_oobbuf(inp_oob, mtd->oobsize);}
+	return ret;
+}
+
+
+/**
+ * brcmnand_do_write_oob - [Internal] BrcmNAND write out-of-band
+ * @mtd:	MTD device structure
+ * @to:		offset to write to (selects the target page)
+ * @ops:	oob operation description structure
+ *
+ * BrcmNAND write out-of-band, no data.  Formats the client OOB through
+ * brcmnand_fill_oob() and writes the spare area of the page containing @to.
+ * Returns 0 on success or a negative error.  Caller holds the device lock.
+ */
+static int 
+brcmnand_do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+#if !defined(CONFIG_BCM_KF_NAND)
+	int numPages;
+#endif
+	int page;
+	int status = 0;
+	struct brcmnand_chip *chip = mtd->priv;
+	u_char* oob = ops->oobbuf;	/* fixed: stray double semicolon removed */
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", __FUNCTION__,
+	      (unsigned int)to, (int)ops->len);
+if (gdebug > 3 ) {
+printk("-->%s, to=%08x, len=%d\n", __FUNCTION__, (uint32_t) to, (int)ops->len);}
+
+	/* Do not allow write past end of page */
+	if ((ops->ooboffs + ops->len) > mtd->oobsize) {
+		DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
+		      "Attempt to write past end of page\n");
+		return -EINVAL;
+	}
+
+/* BrcmNAND treats multiple chips as a single logical chip
+	chipnr = (int)(to >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+*/
+
+	/* Shift to get page */
+	page = to >> chip->page_shift;
+
+	/* Invalidate the page cache, if we write to the cached page */
+	if ((int64_t) page == chip->pagebuf)
+		chip->pagebuf = -1LL;
+	
+/* The #else case executes in an infinite loop. */
+#if defined(CONFIG_BCM_KF_NAND)
+	if (unlikely(oob)) {
+		/* Format the client OOB into the controller's OOB buffer. */
+		chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+		memset(chip->oob_poi, 0xff, mtd->oobsize);
+		oob = brcmnand_fill_oob(chip, oob, ops);
+		if (IS_ERR(oob))
+			return PTR_ERR(oob);
+		/* THT: oob now points to where to read next, 
+		 * chip->oob_poi contains the OOB to be written
+		 */
+	}
+
+	status = chip->write_page_oob(mtd, chip->oob_poi, page, 0);
+#else
+	while(1) {
+		/* Submit one page at a time */
+		 
+		numPages = 1;
+		
+		if (unlikely(oob)) {
+			chip->oob_poi = BRCMNAND_OOBBUF(chip->ctrl->buffers);
+			memset(chip->oob_poi, 0xff, mtd->oobsize);
+			oob = brcmnand_fill_oob(chip, oob, ops);
+			if (IS_ERR(oob))
+				return PTR_ERR(oob);
+			/* THT: oob now points to where to read next, 
+			 * chip->oob_poi contains the OOB to be written
+			 */
+		}
+
+		status |= chip->write_page_oob(mtd, chip->oob_poi, page, 0);
+
+		if (status)
+			break;
+		
+		page += numPages;
+	}// Write 1 page OOB
+#endif
+	
+	if (status)
+		return status;
+
+	ops->oobretlen = ops->ooblen;
+
+	return 0;
+}
+
+/**
+ * brcmnand_write_oob - [MTD Interface] BrcmNAND write data and/or out-of-band
+ * @mtd:	MTD device structure
+ * @to:		offset to write to
+ * @ops:	oob operation description structure
+ *
+ * Dispatches to brcmnand_do_write_oob() (OOB only) or
+ * brcmnand_do_write_ops() (data + OOB).  Returns 0 on success or a
+ * negative error; ops->retlen/oobretlen report what was written.
+ */
+static int 
+brcmnand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	int ret = -ENOTSUPP;
+#if defined(CONFIG_BCM_KF_NAND)
+	/* NOP=1 parts allow a single program per page: report success on an
+	 * OOB-only write instead of consuming the one program cycle. */
+	if ((chip->nop == 1) && ops->ooblen && !ops->len) // quit if writing OOB only to NOP=1 parallel NAND device
+	{
+		ops->retlen = 0;
+		ops->oobretlen = ops->ooblen;
+		return(0);
+	}
+#endif
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to=%0llx\n", __FUNCTION__, to);
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%0llx, len=%08x\n", __FUNCTION__,  to, (int) ops->len);}
+
+	/* Refuse to start a program cycle when power is failing. */
+	if( kerSysIsDyingGaspTriggered() )
+	{
+		printk("system is losing power, abort nand write oob offset %lx\n", (unsigned long)to);
+		return -EINVAL;
+	}
+
+	ops->retlen = 0;
+#if defined(CONFIG_BCM_KF_NAND)
+	/* OOB-only request: use ooblen for the bounds check below. */
+ 	if (!ops->datbuf)
+	{
+		ops->len = ops->ooblen;
+        }
+#endif
+
+	/* Do not allow writes past end of device */
+
+	if (unlikely((to + ops->len) > device_size(mtd))) 
+	{
+		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write beyond end of device\n",
+			__FUNCTION__);
+		printk("brcmnand_write_oob Attempt to write beyond end of device to 0x%x, len 0x%x, size of device 0x%x\n", (int)to, ops->len, (int)device_size(mtd) );
+		return -EINVAL;
+	}
+
+	brcmnand_get_device(mtd, BRCMNAND_FL_WRITING);
+
+
+	if (!ops->datbuf)
+		ret = brcmnand_do_write_oob(mtd, to, ops);
+	else
+		ret = brcmnand_do_write_ops(mtd, to, ops);
+
+#if 0
+	if (unlikely(ops->mode == MTD_OPS_RAW))
+		chip->ecc.write_page = write_page;
+#endif
+
+ //out:
+	brcmnand_release_device(mtd);
+	return ret;
+}
+
+/**
+ * brcmnand_writev - [MTD Interface] compatibility function for brcmnand_writev_ecc
+ * @param mtd		MTD device structure
+ * @param vecs		the iovectors to write
+ * @param count		number of vectors
+ * @param to		offset to write to (must be page aligned)
+ * @param retlen	pointer to variable to store the number of written bytes
+ *
+ * BrcmNAND write with kvec.  Coalesces iovec fragments into full pages
+ * (using an internal bounce buffer for partial fragments) and writes them
+ * one page at a time.  Returns 0 on success or a negative error.
+ */
+static int brcmnand_writev(struct mtd_info *mtd, const struct kvec *vecs,
+	unsigned long count, loff_t to, size_t *retlen)
+{
+	int i, len, total_len, ret = -EIO, written = 0,  buflen;
+	uint32_t page;
+	int numpages = 0;
+	struct brcmnand_chip * chip = mtd->priv;
+	//int	ppblock = (1 << (chip->phys_erase_shift - chip->page_shift));
+	u_char *bufstart = NULL;
+	//u_char tmp_oob[NAND_MAX_OOBSIZE];
+	u_char *data_buf;
+
+
+if (gdebug > 3 ) {
+printk("-->%s, offset=%08x\n", __FUNCTION__, (uint32_t) to);}
+
+	/* Refuse to start a program cycle when power is failing. */
+	if( kerSysIsDyingGaspTriggered() )
+		return -EINVAL;
+
+
+	/* Preset written len for early exit */
+	*retlen = 0;
+
+	/* Calculate total length of data */
+	total_len = 0;
+	for (i = 0; i < count; i++)
+		total_len += vecs[i].iov_len;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i, count = %ld, eccbuf=%p, total_len=%d\n", 
+		__FUNCTION__, (unsigned int) to, (unsigned int) total_len, count, NULL, total_len);
+
+	/* Do not allow write past end of the device */
+
+
+	if (unlikely((to + total_len) > device_size(mtd)))
+	{
+		DEBUG(MTD_DEBUG_LEVEL0, "brcmnand_writev_ecc: Attempted write past end of device\n");
+		return -EINVAL;
+	}
+
+	/* Reject writes, which are not page aligned */
+        if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(total_len))) {
+                DEBUG(MTD_DEBUG_LEVEL0, "brcmnand_writev_ecc: Attempt to write data not aligned to page\n");
+                return -EINVAL;
+        }
+
+	/* Grab the lock and see if the device is available */
+	brcmnand_get_device(mtd, BRCMNAND_FL_WRITING);
+
+	/* Setup start page, we know that to is aligned on page boundary */
+	/* Fixed: was "to > chip->page_shift", a boolean comparison that made
+	 * every writev start at page 0/1 instead of the page containing @to. */
+	page = to >> chip->page_shift;
+
+	data_buf = BRCMNAND_malloc(sizeof(u_char)*mtd->writesize);
+	if (unlikely(data_buf == NULL)) {
+		printk(KERN_ERR "%s: vmalloc failed\n", __FUNCTION__);
+		/* Release the device lock taken above before bailing out. */
+		brcmnand_release_device(mtd);
+		return -ENOMEM;
+	}
+	/* Loop until all keve's data has been written */
+	len = 0; 		// How many data from current iovec has been written
+	written = 0;	// How many bytes have been written so far in all
+	buflen = 0;	// How many bytes from the buffer has been copied to.
+	while (count) {
+		/* If the given tuple is >= pagesize then
+		 * write it out from the iov
+		 */
+		// THT: We must also account for the partial buffer left over from previous iovec
+		if ((buflen + vecs->iov_len - len) >= mtd->writesize) {
+			/* Calc number of pages we can write
+			 * out of this iov in one go */
+			numpages = (buflen + vecs->iov_len - len) >> chip->page_shift;
+
+
+			//oob = 0;
+			for (i = 0; i < numpages; i++) {
+				if (0 == buflen) { // If we start a new page
+					bufstart = &((u_char *)vecs->iov_base)[len];
+				}
+				else { // Reuse existing partial buffer, partial refill to end of page
+					memcpy(&bufstart[buflen], &((u_char *)vecs->iov_base)[len], mtd->writesize - buflen);
+				}
+
+				ret = chip->write_page (mtd, bufstart, NULL, page);
+				bufstart = NULL;
+
+				if (ret) {
+					printk("%s: brcmnand_write_page failed, ret=%d\n", __FUNCTION__, ret);
+					goto out;
+				}
+				len += mtd->writesize - buflen;
+				buflen = 0;
+				//oob += oobretlen;
+				page++;
+				written += mtd->writesize;
+			}
+			/* Check, if we have to switch to the next tuple */
+			if (len >= (int) vecs->iov_len) {
+				vecs++;
+				len = 0;
+				count--;
+			}
+		} else { // (vecs->iov_len - len) <  mtd->writesize)
+			/*
+			 * We must use the internal buffer, read data out of each
+			 * tuple until we have a full page to write
+			 */
+			
+
+			/*
+			 * THT: Changed to use memcpy which is more efficient than byte copying, does not work yet
+			 *  Here we know that 0 < vecs->iov_len - len < mtd->writesize, and len is not necessarily 0
+			 */
+			// While we have iovec to write and a partial buffer to fill
+			while (count && (buflen < mtd->writesize)) {
+				
+				// Start new buffer?
+				if (0 == buflen) {
+					bufstart = data_buf;
+				}
+				if (vecs->iov_base != NULL && (vecs->iov_len - len) > 0) {
+					// We fill up to the page
+					int fillLen = min_t(int, vecs->iov_len - len, mtd->writesize - buflen);
+					
+					memcpy(&data_buf[buflen], &((u_char*) vecs->iov_base)[len], fillLen);
+					buflen += fillLen;
+					len += fillLen;
+				}
+				/* Check, if we have to switch to the next tuple */
+				if (len >= (int) vecs->iov_len) {
+					vecs++;
+					len = 0;
+					count--;
+				}
+
+			}
+			// Write out a full page if we have enough, otherwise loop back to the top
+			if (buflen == mtd->writesize) {
+				
+				numpages = 1;
+				
+				ret = chip->write_page (mtd, bufstart, NULL, page);
+				if (ret) {
+					printk("%s: brcmnand_write_page failed, ret=%d\n", __FUNCTION__, ret);
+					goto out;
+				}
+				page++;
+				written += mtd->writesize;
+				
+				bufstart = NULL;
+				buflen = 0;
+			}
+		}
+
+		/* All done ? */
+		if (!count) {
+			if (buflen) { // Partial page left un-written.  Imposible, as we check for totalLen being multiple of pageSize above.
+				printk("%s: %d bytes left unwritten with writev_ecc at offset %0llx\n", 
+					__FUNCTION__, buflen, ((uint64_t) page) << chip->page_shift);
+				BUG();
+			}
+			break;
+		}
+
+	}
+	ret = 0;
+out:
+	/* Deselect and wake up anyone waiting on the device */
+	brcmnand_release_device(mtd);
+
+	BRCMNAND_free(data_buf);
+	*retlen = written;
+//if (*retlen <= 0) printk("%s returns retlen=%d, ret=%d, startAddr=%08x\n", __FUNCTION__, *retlen, ret, startAddr);
+//printk("<-- %s: retlen=%d\n", __FUNCTION__, *retlen);
+	return ret;
+}
+
+#if 0
+/**
+ * brcmnand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd:	MTD device structure
+ * @ofs:	offset from device start
+ * @getchip:	0, if the chip is already selected
+ *
+ * Check, if the block is bad.
+ *
+ * NOTE(review): compiled out (#if 0).  If ever re-enabled, the 16-bit
+ * buswidth branch below casts a byte VALUE to a pointer and dereferences
+ * it ("*(uint16*) (oob[...])"); it presumably should take the address
+ * (&oob[...]) and use uint16_t -- confirm before enabling.
+ */
+static int brcmnand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+	int res = 0, ret = 0;
+	uint32_t page;
+	struct brcmnand_chip *chip = mtd->priv;
+	u16 bad;
+	uint8_t oob[NAND_MAX_OOBSIZE];
+	//uint8_t* saved_poi;
+
+	if (getchip) {
+		page = __ll_RightShift(ofs, chip->page_shift);
+
+#if 0
+		chipnr = (int)(ofs >> chip->chip_shift);
+#endif
+
+		brcmnand_get_device(mtd, BRCMNAND_FL_READING);
+
+#if 0
+		/* Select the NAND device */
+		chip->select_chip(mtd, chipnr);
+#endif
+	} 
+	page = __ll_RightShift(ofs, chip->page_shift);
+
+	ret = chip->read_page_oob(mtd, oob, page);
+	if (ret) {
+		return 1;
+	}
+
+	if (chip->options & NAND_BUSWIDTH_16) {
+		bad = (u16) cpu_to_le16(*(uint16*) (oob[chip->badblockpos]));
+		if (chip->badblockpos & 0x1)
+			bad >>= 8;
+		if ((bad & 0xFF) != 0xff)
+			res = 1;
+	} else {
+		if (oob[chip->badblockpos] != 0xff)
+			res = 1;
+	}
+
+	if (getchip)
+		brcmnand_release_device(mtd);
+
+	return res;
+}
+#endif
+
+
+/**
+ * brcmnand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @param mtd		MTD device structure
+ * @param ofs		offset from device start
+ * @param getchip	0, if the chip is already selected
+ * @param allowbbt	1, if its allowed to access the bbt area
+ *
+ * Check, if the block is bad.  Consults the in-memory bad-block table when
+ * it has been initialized, otherwise falls back to scanning the raw OOB
+ * marker.  Returns non-zero when the block is bad.
+ */
+static int brcmnand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	int isBad;
+
+#if defined(CONFIG_BCM_KF_NAND)
+	/* The first erase block is never reported bad on this build. */
+	if (ofs < mtd->erasesize)
+		return 0;
+#endif
+
+	if (getchip)
+		brcmnand_get_device(mtd, BRCMNAND_FL_READING);
+
+	/* BBT lookup when available, raw OOB scan otherwise. */
+	isBad = chip->isbad_bbt ? chip->isbad_bbt(mtd, ofs, allowbbt)
+				: brcmnand_isbad_raw(mtd, ofs);
+
+	if (getchip)
+		brcmnand_release_device(mtd);
+
+	return isBad;
+}
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+/**
+ * brcmnand_erase_nolock - [Private] erase block(s)
+ * @param mtd		MTD device structure
+ * @param instr		erase instruction
+ * @allowbbt		allow erase of BBT
+ *
+ * Erase one ore more blocks
+ * ** FIXME ** This code does not work for multiple chips that span an address space > 4GB
+ * Similar to BBT, except does not use locks and no alignment checks
+ * Assumes lock held by caller
+ *
+ * NOTE(review): per-block failures only skip to the next block, and
+ * instr->state is unconditionally set to MTD_ERASE_DONE at the end, so the
+ * function returns 0 even when some blocks failed -- confirm intent.
+ */
+static int brcmnand_erase_nolock(struct mtd_info *mtd, struct erase_info *instr, int allowbbt)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned int block_size;
+	loff_t addr;
+	int len;
+	int ret = 0;
+	int needBBT;
+	
+	block_size = (1 << chip->erase_shift);
+	instr->fail_addr = 0xffffffffffffffffULL;
+
+	/* Clear ECC registers */
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+	chip->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+	chip->ctrl_write(BCHP_NAND_ECC_UNC_EXT_ADDR, 0);
+#endif
+
+	/* Loop throught the pages */
+	len = instr->len;
+	addr = instr->addr;
+	instr->state = MTD_ERASING;
+
+	while (len) {
+		/* Check if we have a bad block, we do not erase bad blocks */
+		if (brcmnand_block_checkbad(mtd, addr, 0, allowbbt)) {
+			printk (KERN_ERR "%s: attempt to erase a bad block at addr 0x%08x\n", __FUNCTION__, (unsigned int) addr);
+			instr->state = MTD_ERASE_FAILED;
+			goto erase_one_block;
+		}
+		/* Issue the block-erase command for this block. */
+		chip->ctrl_writeAddr(chip, addr, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_BLOCK_ERASE);
+
+		/* Wait until flash is ready */
+		ret = brcmnand_ctrl_write_is_complete(mtd, &needBBT);
+
+		/* Check, if it is write protected: TBD */
+		if (needBBT ) {
+			if ( !allowbbt) {
+				printk(KERN_ERR "brcmnand_erase: Failed erase, block %d, flash status=%08x\n", 
+						(unsigned int) (addr >> chip->erase_shift), needBBT);
+				instr->state = MTD_ERASE_FAILED;
+				instr->fail_addr = addr;
+				printk(KERN_WARNING "%s: Marking bad block @%08x\n", __FUNCTION__, (unsigned int) addr);
+				(void) chip->block_markbad(mtd, addr);
+				goto erase_one_block;
+			}
+		}
+erase_one_block:
+		/* Advance to the next erase block regardless of outcome. */
+		len -= block_size;
+		addr = addr + block_size;
+	}
+
+	instr->state = MTD_ERASE_DONE;
+	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+	/* Do call back function */
+	if (!ret) {
+		mtd_erase_callback(instr);
+	}
+
+	return ret;
+}
+#endif
+
+
+
+/**
+ * brcmnand_erase_bbt - [Private] erase block(s)
+ * @param mtd		MTD device structure
+ * @param instr		erase instruction
+ * @param allowbbt	allow erase of the BBT area itself
+ * @param doNotUseBBT	Do not look up in BBT (NOTE(review): parameter is currently unused in this function)
+ *
+ * Erase one or more blocks
+ * ** FIXME ** This code does not work for multiple chips that span an address space > 4GB
+ */
+static int 
+brcmnand_erase_bbt(struct mtd_info *mtd, struct erase_info *instr, int allowbbt, int doNotUseBBT)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned int block_size;
+	loff_t addr;
+	int len;
+	int ret = 0;
+	int needBBT;
+
+
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: start = %0llx, len = %08x\n", __FUNCTION__, 
+		instr->addr, (unsigned int) instr->len);
+//PRINTK( "%s: start = 0x%08x, len = %08x\n", __FUNCTION__, (unsigned int) instr->addr, (unsigned int) instr->len);
+
+	block_size = (1 << chip->erase_shift);
+
+
+	/* Start address must align on block boundary */
+	if (unlikely(instr->addr & (block_size - 1))) 
+	{
+		DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __FUNCTION__);
+//if (gdebug > 3 ) 
+	{printk( "%s: Unaligned address\n", __FUNCTION__);}
+		return -EINVAL;
+	}
+
+	/* Length must align on block boundary */
+	if (unlikely(instr->len & (block_size - 1))) 
+	{
+		DEBUG(MTD_DEBUG_LEVEL0, 
+"%s: Length not block aligned, len=%08x, blocksize=%08x, chip->blkSize=%08x, chip->erase=%d\n",
+		__FUNCTION__, (unsigned int)instr->len, (unsigned int)block_size,
+		(unsigned int)chip->blockSize, chip->erase_shift);
+PRINTK(  
+"%s: Length not block aligned, len=%08x, blocksize=%08x, chip->blkSize=%08x, chip->erase=%d\n",
+		__FUNCTION__, (unsigned int)instr->len, (unsigned int)block_size,
+		(unsigned int)chip->blockSize, chip->erase_shift);
+		return -EINVAL;
+	}
+
+	/* Do not allow erase past end of device */
+	if (unlikely((instr->len + instr->addr) > device_size(mtd)))
+	{
+
+		DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", __FUNCTION__);
+//if (gdebug > 3 ) 
+	{printk(KERN_WARNING "%s: Erase past end of device, instr_addr=%016llx, instr->len=%08x, mtd->size=%16llx\n", 
+	__FUNCTION__, (unsigned long long)instr->addr,
+	(unsigned int)instr->len, device_size(mtd));}
+
+		return -EINVAL;
+	}
+
+
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
+	/*
+	 * Clear ECC registers 
+	 */
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+	chip->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+	
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+	chip->ctrl_write(BCHP_NAND_ECC_UNC_EXT_ADDR, 0);
+#endif
+
+	/* Loop through the blocks, one erase per iteration */
+	len = instr->len;
+	addr = instr->addr;
+	instr->state = MTD_ERASING;
+
+	while (len) {
+
+
+/* THT: We cannot call brcmnand_block_checkbad which just look at the BBT,
+// since this code is also called when we create the BBT.
+// We must look at the actual bits, or have a flag to tell the driver
+// to read the BI directly from the OOB, bypassing the BBT
+ */
+		/* Check if we have a bad block, we do not erase bad blocks */
+		if (brcmnand_block_checkbad(mtd, addr, 0, allowbbt)) {
+			printk (KERN_ERR "%s: attempt to erase a bad block at addr 0x%08x\n", __FUNCTION__, (unsigned int) addr);
+			// THT I believe we should allow the erase to go on to the next block in this case.
+			instr->state = MTD_ERASE_FAILED;
+//dump_stack();
+			goto erase_exit;
+		}
+
+		//chip->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+
+		chip->ctrl_writeAddr(chip, addr, 0);
+	
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_BLOCK_ERASE);
+
+		// Wait until flash is ready
+		ret = brcmnand_ctrl_write_is_complete(mtd, &needBBT);
+		
+		/* Check, if it is write protected: TBD */
+		if (needBBT ) {
+			if ( !allowbbt) {
+				printk(KERN_ERR "brcmnand_erase: Failed erase, block %d, flash status=%08x\n", 
+					(unsigned int) (addr >> chip->erase_shift), needBBT);
+				
+				instr->state = MTD_ERASE_FAILED;
+				instr->fail_addr = addr;
+
+				printk(KERN_WARNING "%s: Marking bad block @%08x\n", __FUNCTION__, (unsigned int) addr);
+				(void) chip->block_markbad(mtd, addr);
+				goto erase_exit;
+			}
+		}
+		len -= block_size;
+		addr = addr + block_size;
+	}
+
+	instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+	/* Do call back function */
+	if (!ret) {
+		mtd_erase_callback(instr);
+	}
+
+	DEBUG(MTD_DEBUG_LEVEL0, "<--%s\n", __FUNCTION__);
+	return ret;
+}
+
+
+/**
+ * brcmnand_erase - [MTD Interface] erase block(s)
+ * @param mtd		MTD device structure
+ * @param instr		erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int brcmnand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	int ret = 0;
+	unsigned int block_size;
+	int allowbbt = 0;	/* never allow BBT erase through this public entry point */
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+	loff_t addr;
+	int len;
+
+	/* save the instr len and addr first, because the JFFS2 caller frees instr when instr->callback is called */
+	len = instr->len;
+	addr = instr->addr;
+
+
+#endif
+
+	if( kerSysIsDyingGaspTriggered() )
+	{
+		printk("system is losing power, abort nand erase offset %lx len %x,\n", (unsigned long) instr->addr, (int)instr->len);
+		return -EINVAL;
+	}
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s addr=%08lx, len=%d\n", __FUNCTION__,
+		(unsigned long) instr->addr, (int)instr->len);
+
+	/* Grab the lock and see if the device is available */
+	brcmnand_get_device(mtd, BRCMNAND_FL_ERASING);
+
+	block_size = (1 << chip->erase_shift);
+
+	ret = brcmnand_erase_bbt(mtd, instr, allowbbt, 0); // Do not allow erase of BBT, and use BBT
+
+	/* Deselect and wake up anyone waiting on the device */
+	brcmnand_release_device(mtd);
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+	/* Notify the correctable-error table (CET) about each erased block */
+	if (chip->cet) {
+		if (chip->cet->flags != BRCMNAND_CET_DISABLED && 
+				chip->cet->flags != BRCMNAND_CET_LAZY && allowbbt != 1) {
+			while (len) {
+				/* Skip if bad block */
+				if (brcmnand_block_checkbad(mtd, addr, 0, allowbbt)) {
+					printk (KERN_ERR "%s: attempt to erase a bad block at addr 0x%08x\n", __FUNCTION__, (unsigned int) addr);
+					len -= block_size;
+					addr = addr + block_size;
+					continue;
+				}
+				if(brcmnand_cet_erasecallback(mtd, addr) < 0) {
+					printk(KERN_INFO "Error in CET erase callback, disabling CET\n");
+					chip->cet->flags = BRCMNAND_CET_DISABLED;
+				}
+				len -= block_size;
+				addr = addr + block_size;
+			}
+		}
+	}
+#endif
+	return ret;
+}
+
+/**
+ * brcmnand_sync - [MTD Interface] sync
+ * @param mtd		MTD device structure
+ *
+ * Sync is implemented as a wait-for-ready: take the device lock, flush I/O, release.
+ */
+static void brcmnand_sync(struct mtd_info *mtd)
+{
+	DEBUG(MTD_DEBUG_LEVEL3, "brcmnand_sync: called\n");
+
+	/* Grab the lock and see if the device is available */
+	brcmnand_get_device(mtd, BRCMNAND_FL_SYNCING);
+
+	PLATFORM_IOFLUSH_WAR();
+
+	/* Release it and go back */
+	brcmnand_release_device(mtd);
+}
+
+
+/**
+ * brcmnand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @param mtd		MTD device structure
+ * @param ofs		offset relative to mtd start
+ *
+ * Check whether the block is bad
+ */
+static int brcmnand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	//struct brcmnand_chip * chip = mtd->priv;
+	
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s ofs=%0llx\n", __FUNCTION__, ofs);
+	
+	/* Check for invalid offset */
+	if (ofs > device_size(mtd))	/* NOTE(review): ofs == device_size also lies past the end — should this be >= ? */
+	{
+		return -EINVAL;
+	}
+
+	return brcmnand_block_checkbad(mtd, ofs, 1, 0);
+}
+
+/**
+ * brcmnand_default_block_markbad - [DEFAULT] mark a block bad
+ * @param mtd		MTD device structure
+ * @param ofs		offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int brcmnand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	//struct bbm_info *bbm = chip->bbm;
+	// THT: 3/20/07: We restrict ourselves to only support x8.  
+	// Revisit this for x16.
+	u_char bbmarker[1] = {0};  // CFE and BBS uses 0x0F, Linux by default uses 0
+								//so we can use this to mark the difference
+	u_char buf[NAND_MAX_OOBSIZE];
+	//size_t retlen;
+	uint32_t block, page;
+	int dir;	/* page-stepping direction: +1 for SLC, -1 for MLC */
+	uint64_t ulofs;
+	int ret;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s ofs=%0llx\n", __FUNCTION__,  ofs);
+//printk("-->%s ofs=%0llx\n", __FUNCTION__,  ofs);
+
+	// Page align offset
+	ulofs = ((uint64_t) ofs) & (~ chip->page_mask);
+	
+
+	/* Get block number.  Block is guaranteed to be < 2^32 */
+	block = (uint32_t) (ulofs >> chip->erase_shift);
+
+	// Block align offset
+	ulofs = ((uint64_t) block) << chip->erase_shift;
+
+	if (!NAND_IS_MLC(chip)) { // SLC chip, mark first and 2nd page as bad.
+printk(KERN_INFO "Mark SLC flash as bad at offset %0llx, badblockpos=%d\n", ofs, chip->badblockpos);
+		page = block << (chip->erase_shift - chip->page_shift);
+		dir = 1;
+	}
+	else { // MLC chip, mark last and previous page as bad.
+printk(KERN_INFO "Mark MLC flash as bad at offset %0llx\n", ofs);
+		page = ((block+1) << (chip->erase_shift - chip->page_shift)) - 1;
+		dir = -1;
+	}
+      if (chip->bbt) {	/* record the block as bad in the in-memory BBT (2 bits per block) */
+                chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 
+      	}
+
+	memcpy(buf, ffchars, sizeof(buf));
+	memcpy(&buf[chip->badblockpos], bbmarker, sizeof(bbmarker));
+
+	// Lock already held by caller, so cant call mtd->write_oob directly
+	ret = chip->write_page_oob(mtd, buf, page, 1);
+	if (ret) {
+		printk(KERN_INFO "Mark bad page %d failed with retval=%d\n", page, ret);
+	}
+
+	// Mark 2nd page as bad, ignoring the result of the previous write
+	page += dir;
+	// Lock already held by caller, so cant call mtd->write_oob directly
+DEBUG(MTD_DEBUG_LEVEL3, "%s Calling chip->write_page(page=%08x)\n", __FUNCTION__, page);
+	ret = chip->write_page_oob(mtd, buf, page, 1);
+	if (ret) {
+		printk(KERN_INFO "Mark bad page %d failed with retval=%d\n", page, ret);
+	}
+
+	/*
+	 * According to the HW guy, even if the write fails, the controller have written 
+	 * a 0 pattern that certainly would have written a non 0xFF value into the BI marker.
+	 *
+	 * Ignoring ret.  Even if we fail to write the BI bytes, just ignore it, 
+	 * and mark the block as bad in the BBT
+	 */
+DEBUG(MTD_DEBUG_LEVEL3, "%s Calling brcmnand_update_bbt(ulofs=%0llx))\n", __FUNCTION__, ulofs);
+	(void) brcmnand_update_bbt(mtd, ulofs);
+	//if (!ret)
+	mtd->ecc_stats.badblocks++;
+	return ret;
+}
+
+/**
+ * brcmnand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @param mtd		MTD device structure
+ * @param ofs		offset relative to mtd start
+ *
+ * Mark the block as bad; returns 0 if the block was already bad.
+ */
+static int brcmnand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	int ret;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s ofs=%08x\n", __FUNCTION__, (unsigned int) ofs);
+	
+	if( kerSysIsDyingGaspTriggered() )	/* refuse flash writes while power is failing */
+		return -EINVAL;
+
+	ret = brcmnand_block_isbad(mtd, ofs);
+	if (ret) {
+		/* If it was bad already, return success and do nothing */
+		if (ret > 0)
+			return 0;
+		return ret;	/* propagate error (e.g. -EINVAL for bad offset) */
+	}
+
+	return chip->block_markbad(mtd, ofs);
+}
+
+/**
+ * brcmnand_unlock - [MTD Interface] Unlock block(s)
+ * @param mtd		MTD device structure
+ * @param llofs		offset relative to mtd start
+ * @param len		number of bytes to unlock
+ *
+ * Unlock one or more blocks.  Currently a no-op: the implementation is
+ * compiled out via #if 0 (only Samsung small-page flash needed it).
+ */
+static int brcmnand_unlock(struct mtd_info *mtd, loff_t llofs, uint64_t len)
+{
+
+#if 0
+// Only Samsung Small flash uses this.
+
+	struct brcmnand_chip * chip = mtd->priv;
+	int status;
+	uint64_t blkAddr, ofs = (uint64_t) llofs;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s llofs=%08x, len=%d\n", __FUNCTION__,
+ 		(unsigned long) llofs, (int) len);
+
+
+
+	/* Block lock scheme */
+	for (blkAddr = ofs; blkAddr <  (ofs + len); blkAddr = blkAddr + chip->blockSize) {
+
+		/* The following 2 commands share the same CMD_EXT_ADDR, as the block never cross a CS boundary */
+		chip->ctrl_writeAddr(chip, blkAddr, 0); 
+		/* Set end block address */
+		chip->ctrl_writeAddr(chip, blkAddr + chip->blockSize - 1, 1);
+		/* Write unlock command */
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_BLOCKS_UNLOCK);
+
+
+		/* There's no return value */
+		chip->wait(mtd, BRCMNAND_FL_UNLOCKING, &status);
+
+		if (status & 0x0f)  
+			printk(KERN_ERR "block = %0llx, wp status = 0x%x\n", blkAddr, status);
+
+		/* Check lock status */
+		chip->ctrl_writeAddr(chip, blkAddr, 0); 
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_READ_BLOCKS_LOCK_STATUS);
+		status = chip->ctrl_read(BCHP_NAND_BLOCK_LOCK_STATUS);
+		//status = chip->read_word(chip->base + ONENAND_REG_WP_STATUS);
+
+	}
+#endif
+	return 0;
+}
+
+
+/**
+ * brcmnand_print_device_info - Print device ID
+ * @param chipId	database entry describing the chip (mfg/chip ID, name)
+ * @param mtd		MTD device structure (supplies chip size and chip-select)
+ *
+ * Print device ID and the controller configuration registers
+ */
+static void brcmnand_print_device_info(brcmnand_chip_Id* chipId, struct mtd_info* mtd) 
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	int cs = chip->ctrl->CS[chip->csi];
+
+	printk(KERN_INFO "BrcmNAND mfg %x %x %s %dMB on CS%d\n",
+                chipId->mafId, chipId->chipId, chipId->chipIdStr,\
+	       	mtd64_ll_low(chip->chipSize >> 20), cs);
+
+	print_config_regs(mtd);
+
+}
+
+/*
+ * Calculate the bit fields FUL_ADR_BYTES, COL_ADR_BYTES and BLK_ADR_BYTES
+ * without which, Micron flashes - which do not have traditional decode-ID opcode 90H-00H -
+ * would not work.
+ *
+ * @chip: Structure containing the page size, block size, and device size.
+ * @nand_config: nand_config register with page size, block size, device size already encoded.
+ *
+ * returns the updated nand_config register.
+ */
+uint32_t
+brcmnand_compute_adr_bytes(struct brcmnand_chip* chip, uint32_t nand_config)
+{
+	
+	uint32_t nbrPages;
+	uint32_t fulAdrBytes, colAdrBytes, blkAdrBytes, nbrPagesShift;
+
+	colAdrBytes = 2;	/* column (in-page) address always uses 2 address cycles */
+
+PRINTK("-->%s, chip->chipSize=%llx\n", __FUNCTION__, chip->chipSize);
+	
+	nbrPages = (uint32_t) (chip->chipSize >> chip->page_shift);
+	nbrPagesShift = ffs(nbrPages)-1; /* = power of 2*/
+	blkAdrBytes =  (nbrPagesShift+7)/8;	/* round page-address bits up to whole bytes */
+	
+	fulAdrBytes = colAdrBytes + blkAdrBytes;
+
+	nand_config &= ~(BCHP_NAND_CONFIG_FUL_ADR_BYTES_MASK 
+					| BCHP_NAND_CONFIG_COL_ADR_BYTES_MASK
+					|BCHP_NAND_CONFIG_BLK_ADR_BYTES_MASK);
+	nand_config |= (fulAdrBytes << BCHP_NAND_CONFIG_FUL_ADR_BYTES_SHIFT)
+					| (colAdrBytes << BCHP_NAND_CONFIG_COL_ADR_BYTES_SHIFT)
+					| (blkAdrBytes << BCHP_NAND_CONFIG_BLK_ADR_BYTES_SHIFT);
+PRINTK("%s: nbrPages=%x, blkAdrBytes=%d, colAdrBytes=%d, nand_config=%08x\n",
+	__FUNCTION__, nbrPages, blkAdrBytes, colAdrBytes, nand_config);
+	return nand_config;
+}
+
+/*
+ * Layout of the NAND_CONFIG register, decoded below:
+ * bit 31: 	1 = OTP read-only
+ * 	v2.1 and earlier: 30: 		Page Size: 0 = PG_SIZE_512, 1 = PG_SIZE_2KB version 
+ * 28-29: 	Block size: 3=512K, 1 = 128K, 0 = 16K, 2 = 8K
+ * 24-27:	Device_Size
+ *			0:	4MB
+ *			1:	8MB
+ *			2: 	16MB
+ *			3:	32MB
+ *			4:	64MB
+ *			5:	128MB
+ *			6: 	256MB
+ *			7:	512MB
+ *			8:	1GB
+ *			9:	2GB
+ *			10:	4GB  << Hit limit of MTD struct here.
+ *			11:	8GB
+ *			12:	16GB
+ *			13:	32GB
+ *			14:	64GB
+ *			15:	128GB
+ * 23:		Dev_Width 0 = Byte8, 1 = Word16
+ *   v2.1 and earlier:22-19: 	Reserved
+ *   v2.2 and later:  21:20	page Size
+ * 18:16:	Full Address Bytes
+ * 15		Reserved
+ * 14:12	Col_Adr_Bytes
+ * 11:		Reserved
+ * 10-08	Blk_Adr_Bytes
+ * 07-00	Reserved
+ */
+ 
+void
+brcmnand_decode_config(struct brcmnand_chip* chip, uint32_t nand_config)
+{
+	unsigned int chipSizeShift;
+	unsigned int blk_size_cfg;
+	unsigned int page_size_cfg;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_1
+	/* v7.1+: block/page size moved to the CONFIG_EXT register */
+	uint32_t nand_config_ext = brcmnand_ctrl_read(BCHP_NAND_CONFIG_EXT);
+
+        blk_size_cfg = (nand_config_ext & BCHP_NAND_CONFIG_BLOCK_SIZE_MASK) 
+	    >> BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT;
+        page_size_cfg = (nand_config_ext & BCHP_NAND_CONFIG_PAGE_SIZE_MASK) 
+	    >> BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT;
+#else
+        blk_size_cfg = (nand_config & BCHP_NAND_CONFIG_BLOCK_SIZE_MASK) 
+	    >> BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT;
+        page_size_cfg = (nand_config & BCHP_NAND_CONFIG_PAGE_SIZE_MASK) 
+	    >> BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT;
+#endif
+	//chip->chipSize = (nand_config & 0x07000000) >> (24 - 20);
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_2_2
+	// Version 2.1 or earlier: 2-bit block-size field, bits 28:29
+	switch (blk_size_cfg) {
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB:
+			chip->blockSize = 512 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB:
+			chip->blockSize = 8 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB:	
+			chip->blockSize = 128 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB:
+			chip->blockSize = 16 << 10;
+			break;
+	}
+#else
+	// Version 2.2 or later: 3-bit block-size field, bits 28:30
+	if (chip->blockSize != (1<<20)) {
+		switch (blk_size_cfg) {
+  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_1
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8192KB:
+			chip->blockSize = 8192 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_4096KB:
+			chip->blockSize = 4096 << 10;
+			break;
+  #endif
+  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB:
+			chip->blockSize = 1024 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_2048KB:
+			chip->blockSize = 2048 << 10;
+			break;
+  #endif
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB:
+			chip->blockSize = 256 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB:
+			chip->blockSize = 512 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB:
+			chip->blockSize = 8 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB:	
+			chip->blockSize = 128 << 10;
+			break;
+		case BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB:
+			chip->blockSize = 16 << 10;
+			break;
+		}
+	}
+	/* 
+	 * 1MB block size:
+	 * Nothing to do, we have already recorded it
+	 */
+#endif
+		
+	
+	chip->erase_shift = ffs(chip->blockSize) - 1;
+printk("Block size=%08x, erase shift=%d\n", chip->blockSize, chip->erase_shift);
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_2_2
+	// Version 2.1 or earlier: page size in bit 30
+	switch(page_size_cfg) {
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512:
+			chip->pageSize= 512;
+			break;
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB:
+			chip->pageSize = 2048;
+			break;
+	}
+	
+#else
+	// Version 2.2 or later: page size in bits 20:21
+	switch(page_size_cfg) {
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512:
+			chip->pageSize= 512;
+			break;
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB:
+			chip->pageSize = 2048;
+			break;
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB:
+			chip->pageSize = 4096;
+			break;
+  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_0
+		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB:
+			chip->pageSize = 8192;
+			break;
+  #elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_4
+  		case BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB:
+			{
+				uint32_t ctrlVersion = brcmnand_ctrl_read(BCHP_NAND_REVISION);
+				
+				/* Only if the controller supports it: */
+				chip->pageSize = 8192;
+				if (!(ctrlVersion & BCHP_NAND_REVISION_8KB_PAGE_SUPPORT_MASK)) {
+					printk(KERN_ERR "Un-supported page size 8KB\n");
+					BUG();
+				}
+  			}
+			break;
+  #else /* Version 3.3 or earlier */
+  		case 3:
+			printk(KERN_ERR "Un-supported page size\n");
+			chip->pageSize = 0; // Let it crash
+			BUG();
+			break;
+  #endif
+			
+	}
+#endif
+
+	chip->page_shift = ffs(chip->pageSize) - 1;
+	chip->page_mask = (1 << chip->page_shift) - 1;
+
+	chipSizeShift = (nand_config & BCHP_NAND_CONFIG_DEVICE_SIZE_MASK) 
+		>> BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT;
+
+	chip->chipSize = 4ULL << (20 + chipSizeShift);	/* encoded as 4MB << field (see table above) */
+
+	chip->busWidth = 1 + ((nand_config & BCHP_NAND_CONFIG_DEVICE_WIDTH_MASK) 
+		>> BCHP_NAND_CONFIG_DEVICE_WIDTH_SHIFT);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_1
+	printk(KERN_INFO "NAND Config: Reg=%08x, Config Ext: Reg=%08x, chipSize=%d MB, blockSize=%dK, erase_shift=%x\n",
+	        nand_config, nand_config_ext, mtd64_ll_low(chip->chipSize >> 20), chip->blockSize >> 10, chip->erase_shift);
+#else
+	printk(KERN_INFO "NAND Config: Reg=%08x, chipSize=%d MB, blockSize=%dK, erase_shift=%x\n",
+		nand_config, mtd64_ll_low(chip->chipSize >> 20), chip->blockSize >> 10, chip->erase_shift);
+#endif
+	printk(KERN_INFO "busWidth=%d, pageSize=%dB, page_shift=%d, page_mask=%08x\n", 
+		chip->busWidth, chip->pageSize, chip->page_shift , chip->page_mask);
+
+}
+
+/*
+ * Adjust timing pattern if specified in chip ID list
+ * Dummy entries are accepted, but no adjustments are made for them.
+ */
+static void brcmnand_adjust_timings(struct brcmnand_chip *this, brcmnand_chip_Id* chip)
+{
+	int csi = this->csi;
+	int __maybe_unused cs = this->ctrl->CS[this->csi];
+	
+	unsigned long nand_timing1 = this->ctrl_read(bchp_nand_timing1(cs));
+	unsigned long nand_timing1_b4;
+	unsigned long nand_timing2 = this->ctrl_read(bchp_nand_timing2(cs));
+	unsigned long nand_timing2_b4;
+	extern uint32_t gNandTiming1[];
+	extern uint32_t gNandTiming2[];
+	
+	
+
+	/*
+	 * Override database values with kernel command line values
+	 */
+	 if (0 != gNandTiming1[csi] || 0 != gNandTiming2[csi]) {
+		if (0 != gNandTiming1[csi] ) {
+			chip->timing1 = gNandTiming1[csi] ;
+			//this->ctrl_write(BCHP_NAND_TIMING_1, gNandTiming1);
+		}
+		if (0 != gNandTiming2[csi]) {
+			chip->timing2 = gNandTiming2[csi] ;
+			//this->ctrl_write(BCHP_NAND_TIMING_2, gNandTiming2);
+		}
+		//return;
+	 }
+	
+	// Adjust NAND timing1: each non-zero field in chip->timing1 overrides the register field
+	if (chip->timing1) {
+		nand_timing1_b4 = nand_timing1;
+
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tWP_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tWP_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tWP_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tWH_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tWH_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tWH_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tRP_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tRP_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tRP_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tREH_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tREH_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tREH_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tCS_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tCS_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tCS_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tCLH_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tCLH_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tCLH_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tALH_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tALH_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tALH_MASK);  
+		}
+		if (chip->timing1 & BCHP_NAND_TIMING_1_tADL_MASK) {
+			nand_timing1 &= ~BCHP_NAND_TIMING_1_tADL_MASK;
+			nand_timing1 |= (chip->timing1 & BCHP_NAND_TIMING_1_tADL_MASK);  
+		}
+
+
+		this->ctrl_write(bchp_nand_timing1(cs), nand_timing1);
+
+if (gdebug > 3 ) {printk("Adjust timing1: Was %08lx, changed to %08lx\n", nand_timing1_b4, nand_timing1);}
+	}
+	else {
+printk("timing1 not adjusted: %08lx\n", nand_timing1);
+	}
+
+	// Adjust NAND timing2 the same way
+	if (chip->timing2) {
+		nand_timing2_b4 = nand_timing2;
+
+		if (chip->timing2 & BCHP_NAND_TIMING_2_tWB_MASK) {
+			nand_timing2 &= ~BCHP_NAND_TIMING_2_tWB_MASK;
+			nand_timing2 |= (chip->timing2 & BCHP_NAND_TIMING_2_tWB_MASK);  
+		}
+		if (chip->timing2 & BCHP_NAND_TIMING_2_tWHR_MASK) {
+			nand_timing2 &= ~BCHP_NAND_TIMING_2_tWHR_MASK;
+			nand_timing2 |= (chip->timing2 & BCHP_NAND_TIMING_2_tWHR_MASK);  
+		}
+		if (chip->timing2 & BCHP_NAND_TIMING_2_tREAD_MASK) {
+			nand_timing2 &= ~BCHP_NAND_TIMING_2_tREAD_MASK;
+			nand_timing2 |= (chip->timing2 & BCHP_NAND_TIMING_2_tREAD_MASK);  
+		}
+
+		this->ctrl_write(bchp_nand_timing2(cs), nand_timing2);
+
+if (gdebug > 3 ) {printk("Adjust timing2: Was %08lx, changed to %08lx\n", nand_timing2_b4, nand_timing2);}
+	}
+	else {
+printk("timing2 not adjusted: %08lx\n", nand_timing2);
+	}
+}
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_2_2
+static int
+is_ecc_strong(int registerEcc, int requiredEcc)
+{
+	if (registerEcc == BRCMNAND_ECC_HAMMING)
+		registerEcc = 1;	/* compare Hamming on the 1-bit-correction scale */
+	
+	else if (registerEcc == BRCMNAND_ECC_DISABLE)
+		return 1; // ECC disabled in the register: device-internal ECC assumed strong enough
+		
+	if (requiredEcc == BRCMNAND_ECC_HAMMING)
+		requiredEcc = 1;
+	
+	return (registerEcc >= requiredEcc);	/* non-zero if register ECC level meets the requirement */
+}
+
+
+
+static void
+brcmnand_set_acccontrol(struct brcmnand_chip * chip , unsigned int chipSelect, 
+	uint32_t pageSize, uint16_t oobSizePerPage, int reqEcc, int codeWorkSize, int nbrBitsPerCell)
+{
+	int actualReqEcc = reqEcc;
+	int eccLevel;
+	uint32_t b1Ksector = 0;	/* 1 = use 1KB ECC sectors instead of 512B */
+	uint32_t acc0, acc;
+	int oobPerSector = oobSizePerPage/(pageSize/512);
+	uint32_t cellinfo;
+
+
+PRINTK("-->%s(oob=%d, ecc=%d, cw=%d)\n", __FUNCTION__, oobSizePerPage, reqEcc, codeWorkSize);
+
+	if (oobPerSector == 28)
+		oobPerSector = 27; /* Reduce Micron oobsize to match other vendors in order to share codes */
+
+	if (codeWorkSize == 1024) {
+		actualReqEcc = reqEcc/2;	/* halve: register ECC level is per 512B equivalent — TODO confirm */
+		b1Ksector = 1;
+	}
+	
+	acc = acc0 = chip->ctrl_read(bchp_nand_acc_control(chipSelect));
+	eccLevel = (acc & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK) 
+		>> BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT;
+	if (!is_ecc_strong(eccLevel, actualReqEcc)) {
+		eccLevel = actualReqEcc;	/* bump register ECC up to the chip's requirement */
+	}
+
+	acc &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		|BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_MASK
+#endif
+		|BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK);
+
+printk("eccLevel=%d, 1Ksector=%d, oob=%d\n", eccLevel, b1Ksector, oobPerSector);
+
+	acc |= eccLevel << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+	acc |= b1Ksector << BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_SHIFT;
+#endif
+	acc |= oobPerSector << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT;
+	
+	if (chipSelect == 0) {	/* CS0 has a separate set of *_0 fields on pre-7.0 controllers */
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+		acc &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+			|BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_0_MASK
+#endif
+			|BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_MASK);
+		acc |= eccLevel << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		acc |= b1Ksector << BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_0_SHIFT;
+#endif
+		acc |= oobPerSector << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_SHIFT;
+#endif
+	}
+
+	/* Clear FAST_PGM_RDIN, PARTIAL_PAGE_EN if MLC */
+	cellinfo = ffs(nbrBitsPerCell)-2;	/* NOTE(review): yields -1 when nbrBitsPerCell==3 — verify callers pass a power of two */
+
+PRINTK("cellinfo=%d\n", cellinfo);
+
+	chip->cellinfo = cellinfo << 2; /* Mask is 0x0C */
+
+printk("nbrBitsPerCell=%d, cellinfo=%d, chip->cellinfo=%08x\n", nbrBitsPerCell, cellinfo, chip->cellinfo);
+	if (NAND_IS_MLC(chip)) {
+		acc  &= ~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                         BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_MASK |
+#endif
+			 BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK);
+	}
+		
+
+	chip->ctrl_write(bchp_nand_acc_control(chipSelect), acc);
+printk("<--%s: acc b4: %08x, after: %08x\n", __FUNCTION__, acc0, acc);
+	
+}
+
+
+/*
+ * Override database values with kernel command line values (gAccControl), and
+ * set internal data structure values - when the flash ID is NOT in the database -
+ * using the values set by the CFE
+ */
+static void brcmnand_adjust_acccontrol(struct brcmnand_chip *chip, int isONFI, int inIdTable, int idTableIdx)
+{
+	int cs = chip->ctrl->CS[chip->csi];
+	unsigned long nand_acc_b4 = chip->ctrl_read(bchp_nand_acc_control(cs));
+	unsigned long nand_acc = nand_acc_b4;
+	int eccLevel;
+	int oobSize;
+	int updateInternalData = 0;
+
+PRINTK("%s: gAccControl[CS=%d]=%08x, ACC=%08lx\n", 
+	__FUNCTION__, cs, gAccControl[chip->csi], nand_acc_b4);
+
+	 if (cs != 0 && 0 != gAccControl[chip->csi]) {
+		nand_acc = gAccControl[chip->csi] ;
+	 	chip->ctrl_write(bchp_nand_acc_control(cs), nand_acc);
+		printk("NAND ACC CONTROL on CS%1d changed to %08x, from %08lx,\n", cs, 
+			chip->ctrl_read(bchp_nand_acc_control(cs)), nand_acc_b4);
+
+		updateInternalData = 1;
+	 }
+	 else if (!inIdTable && !isONFI) {
+	 	updateInternalData = 1;	/* unknown chip: take over the CFE-programmed register values */
+	 }
+	 /* Update ACC-CONTROL when not on CS0 */
+	 else if (cs != 0 && inIdTable) {
+	 	int oobSizePerPage = chip->eccOobSize*(chip->pageSize/512);
+		brcmnand_set_acccontrol(chip , cs, 
+			chip->pageSize, oobSizePerPage, chip->reqEccLevel, chip->eccSectorSize, 2+NAND_IS_MLC(chip));
+	 }
+	 
+
+	 
+	/*
+	 * updateInternalData is TRUE only when
+	 * (a) We are on CS0, and the chip is not in the database, in which case we use the values
+	 *		used by the CFE.
+	 * (b) an ACC value was passed on the command line
+	 */
+		
+		/* Update ECC level */
+	if (updateInternalData) {
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+		eccLevel = (nand_acc & BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_MASK) >> 
+			BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_SHIFT;
+		oobSize = (nand_acc & BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_MASK) >>
+			BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_SHIFT;
+
+#else
+		eccLevel = (nand_acc & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK) >> 
+			BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT;
+		oobSize = (nand_acc & BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK) >>
+			BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT;
+#endif
+
+		chip->ecclevel = eccLevel;
+		printk("ECC level changed to %d\n", eccLevel);
+		
+		chip->eccOobSize = oobSize;
+		printk("OOB size changed to %d\n", oobSize);
+		
+		/* (Disabled below) Assume MLC if both RDIN and PARTIAL_PAGE are disabled */
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+		/* this section needs to be commented out because we now turn off partial page writes
+                 * to support NOP=1 devices and this code may trigger MLC=true for an unidentified NAND device,
+                 * causing the NAND device to be accessed improperly
+		if (0 == (nand_acc & (
+                                BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_MASK |
+				BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK))) {
+			chip->cellinfo = 0x04; // MLC NAND
+			printk("Flash type changed to MLC\n");
+		}
+                */
+#endif
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		if (nand_acc & BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_MASK) {
+			chip->eccSectorSize = 1024;
+			printk("Sector size changed to 1024\n");
+		}
+#endif
+	 }
+}
+#endif
+
+
+/*
+ * brcmnand_read_id - issue a READ-ID to the controller and return the device ID.
+ * @mtd:        MTD device (mtd->priv points at the brcmnand_chip)
+ * @chipSelect: chip-select to probe; CS0 is assumed already initialized by the
+ *              bootloader, so no FLASH_RESET is issued for it
+ * @dev_id:     out parameter receiving the BCHP_NAND_FLASH_DEVICE_ID register value
+ *
+ * For CS > 0 a FLASH_RESET is issued first.  AUTO_DEVICE_ID_CONFIG is enabled
+ * when the CFE has not already set it; on controller v7.0 that bit is cleared
+ * again afterwards so the hardware does not overwrite a CFE-corrected
+ * NAND_CONFIG on later READ-ID commands.
+ */
+static void 
+brcmnand_read_id(struct mtd_info *mtd, unsigned int chipSelect, unsigned long* dev_id)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	uint32_t status;
+	uint32_t nandConfig = chip->ctrl_read(bchp_nand_config(chipSelect));
+	uint32_t csNandSelect = 0;
+	uint32_t nandSelect = 0;
+
+	if (chipSelect > 0) { // Do not re-initialize when on CS0, Bootloader already done that
+
+  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+  	/* Older version do not have EXT_ADDR registers */
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+  #endif  //
+  
+		/* Reset the flash on this CS before reading its ID */
+		chip->ctrl_write(BCHP_NAND_CMD_START, 
+			BCHP_NAND_CMD_START_OPCODE_FLASH_RESET << BCHP_NAND_CMD_START_OPCODE_SHIFT);
+
+	}
+/* Wait for CTRL_Ready */
+	brcmnand_wait(mtd, BRCMNAND_FL_READY, &status);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
+	nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+
+printk("B4: NandSelect=%08x, nandConfig=%08x, chipSelect=%d\n", nandSelect, nandConfig, chipSelect);
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	/* Older version do not have EXT_ADDR registers */
+	chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+	chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+#endif  // Set EXT address if version >= 1.0
+
+	// Has CFE initialized the register?  
+		if (0 == (nandSelect & BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK)) {
+		
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_0_1
+		csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT + chipSelect);
+
+// v1.0 does not define it
+#elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
+			csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT + chipSelect);
+
+#endif // If brcmNAND Version >= 1.0
+
+		/* Enable auto device-ID config so the READ-ID fills in NAND_CONFIG */
+		nandSelect = BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK | csNandSelect;
+		chip->ctrl_write(BCHP_NAND_CS_NAND_SELECT, nandSelect);
+	}
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_7_0
+	/*NAND controller rev7 perform auto detect again when _AUTO_DEVICE_ID is set on receiving
+          read id cmd and update the nand config reg. CFE overwrite this config register in case hw
+          auto detect is wrong(such as MXIC 512Mb MX30LF1208AA) when system boot.Clear the AUTO Dev 
+          Id bit to avoid incorrect config setting */
+	nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+	nandSelect &=~BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK; 
+	chip->ctrl_write(BCHP_NAND_CS_NAND_SELECT, nandSelect);          
+#endif
+
+	/* Send the command for reading device ID from controller */
+	chip->ctrl_write(BCHP_NAND_CMD_START, OP_DEVICE_ID_READ);
+	
+	/* Wait for CTRL_Ready */
+	brcmnand_wait(mtd, BRCMNAND_FL_READY, &status);
+			 
+#endif // if BrcmNAND Version >= 0.1
+	
+
+	*dev_id = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
+
+	printk("%s: CS%1d: dev_id=%08x\n", __FUNCTION__, chipSelect, (unsigned int) *dev_id);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
+	nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+#endif
+
+	/* Re-read config for the currently-selected CS (may differ from chipSelect) */
+	nandConfig = chip->ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi]));
+
+printk("After: NandSelect=%08x, nandConfig=%08x\n", nandSelect, nandConfig);
+}
+
+
+#if (CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0 && CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_7_0)
+/* 
+ * Type-1 ID string, called from brcmnand_probe with the following condition
+ * if ((brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE4) && 
+ *	(brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE5)) 
+ *
+ * returns the updated nand_config register value.
+ *
+ * This routine will need to set chip->chipSize and chip->page_shift in order to compute
+ * the address bytes in the NAND_CONFIG register.
+ */
+static uint32_t
+decode_ID_type1(struct brcmnand_chip * chip, 
+	unsigned char brcmnand_maf_id, unsigned char brcmnand_dev_id, uint32_t idOptions, uint32_t nbrBlocks)
+{
+	uint32_t nand_config = chip->ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi])); // returned value
+
+
+/* Read 5th ID byte if MLC type */
+//if (chip->cellinfo) 
+
+/* THT SWLINUX 1459: Some new SLCs have 5th ID byte defined, not just MLC */
+// if (brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE5)
+
+
+	unsigned long devIdExt = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID_EXT);
+	unsigned char devId5thByte = (devIdExt & 0xff000000) >> 24;
+	unsigned int nbrPlanes = 0;
+	unsigned int planeSizeMB = 0, chipSizeMB, nandConfigChipSize;
+	unsigned char devId4thdByte =  (chip->device_id  & 0xff);
+	unsigned int pageSize = 0, pageSizeBits = 0;
+	unsigned int blockSize = 0, blockSizeBits = 0;
+	//unsigned int oobSize;
+
+PRINTK("%s: mafID=%02x, devID=%02x, ID4=%02x, ID5=%02x\n", 
+	__FUNCTION__, brcmnand_maf_id, brcmnand_dev_id, 
+	devId4thdByte, devId5thByte);
+
+	// if (brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE4) 
+	
+/*---------------- 4th ID byte: page size, block size and OOB size ---------------- */
+	switch(brcmnand_maf_id) {
+	case FLASHTYPE_SAMSUNG:
+	case FLASHTYPE_HYNIX:	
+	case FLASHTYPE_TOSHIBA:
+	case FLASHTYPE_MICRON:
+		/* Samsung-style 4th-byte encoding (per the SAMSUNG_4THID_* masks) */
+		pageSize = 1024 << (devId4thdByte & SAMSUNG_4THID_PAGESIZE_MASK);
+		blockSize = (64*1024) << ((devId4thdByte & SAMSUNG_4THID_BLKSIZE_MASK) >> 4);
+		//oobSize = devId4thdByte & SAMSUNG_4THID_OOBSIZE_MASK ? 16 : 8;
+		chip->page_shift = ffs(pageSize) - 1;
+		chip->erase_shift = ffs(blockSize) - 1;
+
+		
+PRINTK("Updating Config Reg: Block & Page Size: B4: %08x, blockSize=%08x, pageSize=%d\n", 
+nand_config, blockSize, pageSize);
+		/* Update Config Register: Block Size */
+		switch(blockSize) {
+		case 512*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB;
+			break;
+		case 128*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB;
+			break;
+		case 16*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB;
+			break;
+		case 256*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB;
+			break;
+		}
+		nand_config &= ~(BCHP_NAND_CONFIG_BLOCK_SIZE_MASK);
+		nand_config |= (blockSizeBits << BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT); 
+
+		/* Update Config Register: Page Size */
+		switch(pageSize) {
+		case 512:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512;
+			break;
+		case 2048:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB;
+			break;
+		case 4096:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB;
+			break;
+		}
+		nand_config &= ~(BCHP_NAND_CONFIG_PAGE_SIZE_MASK);
+		nand_config |= (pageSizeBits << BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT); 
+		chip->ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);	
+PRINTK("Updating Config Reg: Block & Page Size: After: %08x\n", nand_config);
+		break;
+		
+	default:
+		printk(KERN_ERR "4th ID Byte: Device requiring Controller V3.0 in database, but not handled\n");
+		//BUG();
+	}
+
+
+	if (nbrBlocks > 0) {
+		/* Chip size supplied directly by the chip table as a block count */
+		chip->chipSize = ((uint64_t) nbrBlocks) << chip->erase_shift;
+		PRINTK("nbrBlocks=%d, blockSize=%d, blkShift=%d, chip Size = %llx\n", nbrBlocks, blockSize, chip->erase_shift, chip->chipSize);
+		chipSizeMB = (uint32_t) (chip->chipSize >> 20);
+	}
+	else { /* Use 5th byte plane size & nbrPlanes to compute chip size */
+/*---------------- 5th ID byte ------------------------- */
+		switch(brcmnand_maf_id) {
+		case FLASHTYPE_SAMSUNG:
+		case FLASHTYPE_HYNIX:		
+		case FLASHTYPE_TOSHIBA:
+		case FLASHTYPE_MICRON:
+	PRINTK("5th ID byte = %02x, extID = %08lx\n", devId5thByte, devIdExt);
+
+			switch(devId5thByte & SAMSUNG_5THID_NRPLANE_MASK) {
+			case SAMSUNG_5THID_NRPLANE_1:
+				nbrPlanes = 1;
+				break;
+			case SAMSUNG_5THID_NRPLANE_2:
+				nbrPlanes = 2;
+				break;
+			case SAMSUNG_5THID_NRPLANE_4:
+				nbrPlanes = 4;
+				break;
+			case SAMSUNG_5THID_NRPLANE_8:
+				nbrPlanes = 8;
+				break;
+			}
+	PRINTK("nbrPlanes = %d\n", nbrPlanes);
+		}
+
+		switch(brcmnand_maf_id) {
+		case FLASHTYPE_SAMSUNG:
+		case FLASHTYPE_MICRON:
+			if (idOptions & BRCMNAND_ID_HAS_MICRON_M68A) {
+				planeSizeMB=128;
+			}
+			else {
+				/* Samsung Plane Size
+				#define SAMSUNG_5THID_PLANESZ_64Mb	0x00
+				#define SAMSUNG_5THID_PLANESZ_128Mb	0x10
+				#define SAMSUNG_5THID_PLANESZ_256Mb	0x20
+				#define SAMSUNG_5THID_PLANESZ_512Mb	0x30
+				#define SAMSUNG_5THID_PLANESZ_1Gb	0x40
+				#define SAMSUNG_5THID_PLANESZ_2Gb	0x50
+				#define SAMSUNG_5THID_PLANESZ_4Gb	0x60
+				#define SAMSUNG_5THID_PLANESZ_8Gb	0x70
+				*/
+				// planeSize starts at (64Mb/8) = 8MB;
+				planeSizeMB = 8 << ((devId5thByte & SAMSUNG_5THID_PLANESZ_MASK) >> 4);
+			}
+	PRINTK("planSizeMB = %dMB\n", planeSizeMB);
+			break;
+
+		case FLASHTYPE_HYNIX:
+			if (idOptions & BRCMNAND_ID_HYNIX_LEGACY) {
+				// planeSize starts at (64Mb/8) = 8MB, same as Samsung
+				planeSizeMB = 8 << ((devId5thByte & HYNIX_5THID_LEG_PLANESZ_MASK) >> 4);
+			}
+			else {
+			  /* Hynix Plane Size, Type 2
+			  #define HYNIX_5THID_PLANESZ_MASK	0x70
+			  #define HYNIX_5THID_PLANESZ_512Mb	0x00
+			  #define HYNIX_5THID_PLANESZ_1Gb		0x10
+			  #define HYNIX_5THID_PLANESZ_2Gb		0x20
+			  #define HYNIX_5THID_PLANESZ_4Gb		0x30
+			  #define HYNIX_5THID_PLANESZ_8Gb		0x40
+			  #define HYNIX_5THID_PLANESZ_RSVD1	0x50
+			  #define HYNIX_5THID_PLANESZ_RSVD2	0x60
+			  #define HYNIX_5THID_PLANESZ_RSVD3	0x70
+			  */
+			  // planeSize starts at (512Mb/8) = 64MB;
+			  planeSizeMB = 64 << ((devId5thByte & SAMSUNG_5THID_PLANESZ_MASK) >> 4);
+			}
+			break;				
+
+		case FLASHTYPE_TOSHIBA:
+			/* No Plane Size defined */
+			// THT Nothing to do, size is hardcoded in chip array.
+			// planeSizeMB = 64; /* hard-coded for TC58NVG0S3ETA00 */
+			break;
+
+		/* TBD Add other mfg ID here */
+
+		} /* End 5th ID byte */
+		
+		chipSizeMB = planeSizeMB*nbrPlanes;
+		chip->chipSize = ((uint64_t) chipSizeMB) << 20;
+		PRINTK("planeSizeMB = %d, chipSizeMB=%d,0x%04x, planeSizeMask=%08x\n", planeSizeMB, chipSizeMB, chipSizeMB, devId5thByte & SAMSUNG_5THID_PLANESZ_MASK);
+	}
+	
+	/* NAND Config register starts at 4MB for chip size */
+	nandConfigChipSize = ffs(chipSizeMB >> 2) - 1;
+
+PRINTK("nandConfigChipSize = %04x\n", nandConfigChipSize);
+	/* Correct chip Size accordingly, bit 24-27 */
+	nand_config &= ~(BCHP_NAND_CONFIG_DEVICE_SIZE_MASK);
+	nand_config |= (nandConfigChipSize << BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT); 
+
+	return nand_config;
+}
+
+
+/*
+ * Type-2 ID string, called from brcmnand_probe with the following condition
+ * if ((brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_BYTES_TYPE2) == 
+ *				BRCMNAND_ID_EXT_BYTES_TYPE2) 
+ *
+ * returns the updated nand_config register value.
+ *
+ * This routine will need to set chip->chipSize and chip->page_shift in order to compute
+ * the address bytes in the NAND_CONFIG register.
+ */
+static uint32_t
+decode_ID_type2(struct brcmnand_chip * chip, 
+	unsigned char brcmnand_maf_id, unsigned char brcmnand_dev_id, uint32_t nbrBlocks, 
+	uint32* pEccLevel, uint32* pSectorSize)
+{
+	uint32_t nand_config = chip->ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi])); // returned value
+	unsigned char devId4thdByte =  (chip->device_id  & 0xff);
+	unsigned int pageSize = 0, pageSizeBits = 0;
+	unsigned int blockSize = 0, blockSizeBits = 0;
+	//unsigned int oobSize;
+	unsigned int oobSize, oobSizePerPage = 0;
+	uint32_t chipSizeMB, nandConfigChipSize;
+	uint32_t devIdExt, eccShift, reqEcc;
+	unsigned char devId5thByte;
+
+PRINTK("%s: mafID=%02x, devID=%02x, ID4=%02x\n", 
+	__FUNCTION__, brcmnand_maf_id, brcmnand_dev_id, 
+	devId4thdByte);
+
+	/*---------------- 4th ID byte: page size, block size and OOB size ---------------- */
+	switch(brcmnand_maf_id) {
+	case FLASHTYPE_SAMSUNG:
+	case FLASHTYPE_HYNIX:	
+		pageSize = 2048 << (devId4thdByte & SAMSUNG2_4THID_PAGESIZE_MASK);
+		/* **FIXME**, when Samsung use the Reserved bits */
+		blockSize = (128*1024) << ((devId4thdByte & SAMSUNG2_4THID_BLKSIZE_MASK) >> 4);
+		chip->blockSize = blockSize;
+		switch(devId4thdByte & SAMSUNG2_4THID_OOBSIZE_MASK) {
+		case SAMSUNG2_4THID_OOBSIZE_PERPAGE_128:
+			oobSizePerPage = 128;
+			break;
+			
+		case SAMSUNG2_4THID_OOBSIZE_PERPAGE_218:
+			oobSizePerPage = 218;
+			break;
+			
+		case SAMSUNG2_4THID_OOBSIZE_PERPAGE_400:  /* 16 per 512B */
+			oobSizePerPage = 400;
+			break;
+			
+		case SAMSUNG2_4THID_OOBSIZE_PERPAGE_436: /* 27.5 per 512B */
+			oobSizePerPage = 436;
+			break;
+		}
+		/* Per-512B-sector OOB size; integer division rounds down (e.g. the 27.5B/512B case) */
+		oobSize = oobSizePerPage/(pageSize/512);
+		// Record it here, but will check it when we know about the ECC level.
+		chip->eccOobSize = oobSize;
+PRINTK("oobSizePerPage=%d, eccOobSize=%d, pageSize=%u, blockSize=%u\n", 	
+	oobSizePerPage, chip->eccOobSize, pageSize, blockSize);	
+PRINTK("Updating Config Reg T2: Block & Page Size: B4: %08x\n", nand_config);
+
+		chip->page_shift = ffs(pageSize) - 1;
+
+		/* Update Config Register: Block Size */
+		switch(blockSize) {
+		case 512*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB;
+			break;
+		case 128*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB;
+			break;
+		case 16*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB;
+			break;
+		case 256*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB;
+			break;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		case 1024*1024:
+			blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB;
+			break;
+#endif
+		}
+PRINTK("%s:  blockSizeBits=%08x, NANDCONFIG B4=%08x\n", __FUNCTION__, blockSizeBits, nand_config);
+		nand_config &= ~(BCHP_NAND_CONFIG_BLOCK_SIZE_MASK);
+		nand_config |= (blockSizeBits << BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT); 
+PRINTK("%s:   NANDCONFIG After=%08x\n", __FUNCTION__,  nand_config);
+
+		/* Update Config Register: Page Size */
+		switch(pageSize) {
+		case 512:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512;
+			break;
+		case 2048:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB;
+			break;
+		case 4096:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB;
+			break;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_4
+		case 8192:
+			pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB;
+			break;
+#endif
+		}
+PRINTK("%s:  pageSizeBits=%08x, NANDCONFIG B4=%08x\n", __FUNCTION__, pageSizeBits, nand_config);
+		nand_config &= ~(BCHP_NAND_CONFIG_PAGE_SIZE_MASK);
+		nand_config |= (pageSizeBits << BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT); 
+			
+
+		break;
+		
+	default:
+		printk(KERN_ERR "4th ID Byte: Device requiring Controller V3.0 in database, but not handled\n");
+		//BUG();
+	}
+
+#if 1
+/* 
+ * Now we hard-code the flash size in the ID array, because Samsung type 2 flashes are MLC flashes,  
+ * so tend to be used on CSn, n != 0, and thus the CFE may not configure it properly
+ */
+ PRINTK("nbrBlocks=%d, blockSize=%d\n",  nbrBlocks, chip->blockSize);
+ 	chip->chipSize = ((uint64_t) nbrBlocks) * chip->blockSize;
+	chipSizeMB = (uint32_t) (chip->chipSize >> 20);
+	nandConfigChipSize = ffs(chipSizeMB >> 2) - 1;
+
+PRINTK("chipSize=%dMB, nandConfigChipSize = %04x\n", chipSizeMB, nandConfigChipSize);
+	/* Encode chip Size accordingly, bit 24-27 */
+	nand_config &= ~(BCHP_NAND_CONFIG_DEVICE_SIZE_MASK);
+	nand_config |= (nandConfigChipSize << BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT); 
+
+PRINTK("Updating Config Reg on CS%1d:  %08x\n", chip->ctrl->CS[chip->csi], nand_config);
+	chip->ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);
+
+/*---------------- 5th ID byte: ECC level and plane number ---------------- */
+	devIdExt = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID_EXT);
+	devId5thByte = (devIdExt & 0xff000000) >> 24;
+	reqEcc = (devId5thByte & SAMSUNG2_5THID_ECCLVL_MASK);
+	/* NOTE(review): reqEcc values not matched below leave *pEccLevel and
+	 * *pSectorSize unset, and the 24BIT_1KB case leaves eccShift
+	 * uninitialized for the PRINTK further down -- confirm intended. */
+	switch(reqEcc) {
+	case SAMSUNG2_5THID_ECCLVL_1BIT: /* 0x00 */
+	case SAMSUNG2_5THID_ECCLVL_2BIT: /*	 0x10 */
+	case SAMSUNG2_5THID_ECCLVL_4BIT: /*	 0x20 */
+	case SAMSUNG2_5THID_ECCLVL_8BIT: /*	 0x30 */
+	case SAMSUNG2_5THID_ECCLVL_16BIT: /* 0x40 */
+		eccShift = reqEcc >> 4;
+		*pEccLevel =  1 << eccShift;
+		*pSectorSize = 512;
+		break;
+		
+	case SAMSUNG2_5THID_ECCLVL_24BIT_1KB: /* 0x50 */
+		*pEccLevel = 24;
+		*pSectorSize = 1024;
+	}
+	
+PRINTK("Required ECC level = %ld, devIdExt=%08x, eccShift=%02x, sector Size=%ld\n", 
+	*pEccLevel, devIdExt, eccShift, *pSectorSize);
+	
+#else
+	/* For type 2, ID bytes do not yield flash Size, but CONFIG registers have that info */
+
+	chipSizeShift = (nand_config & BCHP_NAND_CONFIG_DEVICE_SIZE_MASK) >> BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT;
+	chip->chipSize = 4ULL << (20 + chipSizeShift);
+
+#endif
+
+	return nand_config;
+}
+
+
+/*
+ * Micron M61A ID string, called from brcmnand_probe for Micron M61A-family
+ * devices.  (NOTE(review): this header was copied from decode_ID_type2; the
+ * original BRCMNAND_ID_EXT_BYTES_TYPE2 condition does not apply here.)
+ *
+ * returns the updated nand_config register value.
+ *
+ * This routine will need to set chip->chipSize and chip->page_shift in order to compute
+ * the address bytes in the NAND_CONFIG register.
+ */
+/* Decode Micron M61A-style ID bytes; returns the updated NAND_CONFIG value. */
+static uint32_t
+decode_ID_M61A(struct brcmnand_chip * chip, 
+	unsigned char brcmnand_maf_id, unsigned char brcmnand_dev_id)
+{
+	uint32_t nand_config = chip->ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi])); // returned value
+	unsigned char devId4thdByte =  (chip->device_id  & 0xff);
+	unsigned int pageSize = 0, pageSizeBits = 0;
+	unsigned int blockSize = 0, blockSizeBits = 0;
+	//unsigned int oobSize;
+	unsigned int oobSize, oobSizePerPage = 0;
+	uint32_t pagesPerBlock, pagesPerBlockBits;
+	unsigned long devIdExt = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID_EXT);
+	unsigned char devId5thByte = (devIdExt & 0xff000000) >> 24;
+	unsigned int nbrPlanes = 0;
+	unsigned int chipSizeMB, nandConfigChipSize;
+	unsigned int blkPerLun, nbrBlksBits;
+
+PRINTK("%s: mafID=%02x, devID=%02x, ID4=%02x\n", 
+	__FUNCTION__, brcmnand_maf_id, brcmnand_dev_id, 
+	devId4thdByte);
+
+	/* Byte2: Voltage and size are not reliable */
+
+	/* 3rd ID byte, same as Samsung Type 1 */
+
+	/*---------------- 4th ID byte: page size, block size and OOB size ---------------- */
+	pageSize = 1024 << (devId4thdByte & SAMSUNG_4THID_PAGESIZE_MASK);
+	chip->page_shift = ffs(pageSize) - 1;
+
+	/* Block Size */
+	pagesPerBlockBits = (devId4thdByte & MICRON_M61A_4THID_PGPBLK_MASK) >> 4;
+	pagesPerBlock = 32<<pagesPerBlockBits;
+	blockSize = pagesPerBlock * pageSize;
+	
+
+	/* NOTE(review): oobSize is only assigned in the 28B case below; any other
+	 * encoding would leave it uninitialized -- confirm against the M61A ID spec. */
+	switch(devId4thdByte & MICRON_M61A_4THID_OOBSIZE_MASK) {
+	case MICRON_M61A_4THID_OOBSIZE_28B:
+		oobSize = 27; /* Use only 27 to conform to other vendors */
+		oobSizePerPage = oobSize*(pageSize/512);
+		break;
+		
+	}
+	
+	
+	// Record it here, but will check it when we know about the ECC level.
+	chip->eccOobSize = oobSize;
+PRINTK("oobSizePerPage=%d, eccOobSize=%d, pageSize=%u, blockSize=%u\n", 	
+	oobSizePerPage, chip->eccOobSize, pageSize, blockSize);	
+
+PRINTK("Updating Config Reg M61A: Block & Page Size: B4: %08x\n", nand_config);
+
+	/* Update Config Register: Block Size */
+	switch(blockSize) {
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+	case 1024*1024:
+		blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB;
+		break;
+#else
+	case 1024*1024:
+		/* For version 3.x controller, we don't have a bit defined for this */
+		/* FALLTHROUGH */
+#endif
+	case 512*1024:
+		blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB;
+		break;
+	case 128*1024:
+		blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB;
+		break;
+	case 16*1024:
+		blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB;
+		break;
+	case 256*1024:
+		blockSizeBits = BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB;
+		break;
+	
+	}
+	/* Record Block Size, since we can't rely on NAND_CONFIG */
+	chip->blockSize = blockSize;
+	
+PRINTK("%s:  blockSizeBits=%08x, NANDCONFIG B4=%08x\n", __FUNCTION__, blockSizeBits, nand_config);
+	nand_config &= ~(BCHP_NAND_CONFIG_BLOCK_SIZE_MASK);
+	nand_config |= (blockSizeBits << BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT); 
+PRINTK("%s:   NANDCONFIG After=%08x\n", __FUNCTION__,  nand_config);
+
+	/* Update Config Register: Page Size */
+	switch(pageSize) {
+	case 512:
+		pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512;
+		break;
+	case 2048:
+		pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB;
+		break;
+	case 4096:
+		pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB;
+		break;
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_4
+	case 8192:
+		pageSizeBits = BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB;
+		break;
+#endif
+	
+	}
+
+
+/*---------------- 5th ID byte ------------------------- */
+
+PRINTK("5th ID byte = %02x, extID = %08lx\n", devId5thByte, devIdExt);
+
+
+	nbrPlanes = 1 << (devId5thByte & MICRON_M61A_5THID_PLN_PER_LUN_MASK) ;
+	nbrBlksBits = (devId5thByte & MICRON_M61A_5THID_BLK_PER_LUN_MASK) >> 2;
+	blkPerLun = 1024 << nbrBlksBits;
+	
+	chipSizeMB = (blkPerLun*blockSize) >>20;
+PRINTK("chipSizeMB=%d,0x%04x, planeSizeMask=%08x\n",  chipSizeMB, chipSizeMB, devId5thByte & SAMSUNG_5THID_PLANESZ_MASK);
+	/* NAND Config register starts at 4MB for chip size */
+	nandConfigChipSize = ffs(chipSizeMB >> 2) - 1;
+
+	chip->chipSize = ((uint64_t) chipSizeMB) << 20;
+
+PRINTK("nandConfigChipSize = %04x\n", nandConfigChipSize);
+	/* Correct chip Size accordingly, bit 24-27 */
+	nand_config &= ~(BCHP_NAND_CONFIG_DEVICE_SIZE_MASK);
+	nand_config |= (nandConfigChipSize << BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT); 
+	/* NOTE(review): NAND_CONFIG is written twice -- once here with the
+	 * device-size bits, then again below after merging the page-size bits. */
+	chip->ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);				
+
+
+PRINTK("%s:  pageSizeBits=%08x, NANDCONFIG B4=%08x\n", __FUNCTION__, pageSizeBits, nand_config);
+	nand_config &= ~(BCHP_NAND_CONFIG_PAGE_SIZE_MASK);
+	nand_config |= (pageSizeBits << BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT); 
+	chip->ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);	
+PRINTK("Updating Config Reg on CS%1d: Block & Page Size: After: %08x\n", chip->ctrl->CS[chip->csi], nand_config);
+	
+	return nand_config;
+}
+#endif
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+
+/* Returns the 32bit integer at the aligned offset */
+/*
+ * brcmnand_flashcache_read - read up to 4 bytes from the controller flash
+ * cache at byte offset @ofs, copying them into @pDest when non-NULL.
+ * The requested span must not cross a 32-bit word boundary; on violation a
+ * message is printed and 0 is returned.  Returns the raw (un-swapped)
+ * register word at the aligned offset; the bytes copied to @pDest are taken
+ * from the be32_to_cpu()-converted value.
+ */
+static inline uint32_t brcmnand_flashcache_read(void* pDest, uint32_t ofs, int size) 
+{
+	uint32_t aligned_ofs = ofs & 0xFFFFFFFC;
+	volatile uint32_t* p32FCache = (volatile uint32_t*) BVIRTADDR(BCHP_NAND_FLASH_CACHEi_ARRAY_BASE);
+	uint32_t pReg = (BCHP_NAND_FLASH_CACHEi_ARRAY_BASE + aligned_ofs);
+	volatile u_char* p8FCache = (volatile u_char*) p32FCache;
+	uint32_t u32;
+	u_char* p8 = (u_char*) &u32;
+	
+	if ((size + ofs) > (aligned_ofs+4)) {
+		printk("%s: Cannot read across DW boundary ofs=%d, size=%d\n", __FUNCTION__, ofs, size);
+		return 0;
+	}
+
+	u32 = be32_to_cpu((uint32_t) BDEV_RD(pReg));
+	if (pDest) {
+		/* Copy only the requested bytes within the aligned word */
+		memcpy(pDest, &p8[ofs-aligned_ofs], size);
+	}
+	
+if (gdebug > 3) 
+{
+printk("%s: OFS=%d, EBIAddr=%08x val=%08x, p8=%02x%02x%02x%02x\n", __FUNCTION__, 
+	(unsigned int) aligned_ofs, pReg, (unsigned int)BDEV_RD(pReg), 
+	p8FCache[aligned_ofs], p8FCache[aligned_ofs+1], p8FCache[aligned_ofs+2], p8FCache[aligned_ofs+3]);
+}
+
+	return ((uint32_t) BDEV_RD(pReg));
+}
+
+
+/*
+ * debug_print_flashcache - dump the first 512 bytes of the flash cache plus a
+ * post-mortem register dump.  gdebug is temporarily cleared so the helpers
+ * called here do not emit their own debug output during the dump.
+ */
+static void __maybe_unused
+debug_print_flashcache(struct mtd_info *mtd)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	static uint32_t fbuffer[128];
+	volatile uint32_t* fcache = (volatile uint32_t*) chip->vbase;
+	int i;
+int saveDebug = gdebug;
+gdebug=0;
+
+	// Copy the data buffer 
+	/* 128 words = 512 bytes snapshot of the flash-cache contents */
+	for (i=0; i< 128; i++) {
+		fbuffer[i] = (uint32_t) (fcache[i]);
+	}
+	
+	printk("Flash Cache:\n");
+	print_databuf((u_char*) &fbuffer[0], 512);
+
+	brcmnand_post_mortem_dump(mtd, 0);
+gdebug=saveDebug;
+}
+
+#if 0 /* dead code (disabled); brcmnand_monitor_intfc() below provides similar polling */
+/*
+ * Return 0 for ready, TIMEOUT for error
+ */
+static int brcmnand_wait_for_cache_ready(struct brcmnand_chip* chip) 
+{
+	unsigned long timeout;
+	uint32_t ready;
+
+	//udelay(100000); /* 100 ms */
+	/* The 20 msec is enough */
+	timeout = jiffies + msecs_to_jiffies(3000); // 3 sec timeout for now
+	while (time_before(jiffies, timeout)) {
+		//PLATFORM_IOFLUSH_WAR();
+		ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+
+		/* Success requires BOTH controller-ready and cache-valid */
+		if ((ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK) 
+		&& (ready & BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK)) {
+			
+			return BRCMNAND_SUCCESS;
+			
+		}
+		//if ( !in_interrupt())
+		//	cond_resched();
+		//else
+		//	udelay(10000);
+	}
+
+if (gdebug > 3 ) {
+printk("<--%s: ret = TIMEOUT\n", __FUNCTION__);
+print_nand_ctrl_regs();
+}
+	return BRCMNAND_TIMED_OUT; // TimeOut
+}
+#endif
+
+/* Controller-interface states tracked by brcmnand_monitor_intfc() */
+typedef enum {
+		BRCMNAND_READY, 	/* initial state: no transition observed yet */
+		BRCMNAND_CTRL_BUSY, 	/* CTLR_READY deasserted: command in flight */
+		BRCMNAND_CTRL_READY,	/* CTLR_READY asserted after being busy */
+		BRCMNAND_CACHE_VALID	/* CACHE_VALID asserted (used as a waitfor target) */
+		} brcmnand_ctrl_state_t;
+
+/*
+ * brcmnand_monitor_intfc - poll BCHP_NAND_INTFC_STATUS until the requested
+ * condition is observed.
+ * @mtd:     MTD device
+ * @waitfor: condition to wait for (BRCMNAND_CTRL_READY or BRCMNAND_CACHE_VALID)
+ * @pStatus: out: last INTFC_STATUS value read
+ *
+ * Tracks busy->ready transitions with a small state machine; gives up after
+ * a 2000 ms jiffies-based timeout.  Returns BRCMNAND_SUCCESS on the requested
+ * condition, -ETIMEDOUT otherwise.
+ */
+static int brcmnand_monitor_intfc(struct mtd_info *mtd, brcmnand_ctrl_state_t waitfor, uint32_t* pStatus)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned long timeout;
+	uint32_t ready;
+	brcmnand_ctrl_state_t state = BRCMNAND_READY;
+	int ret =  -ETIMEDOUT;
+	//unsigned long irqflags;
+
+// Dont want printk to cause missing a transition of INTFC
+int save_debug = gdebug;
+uint32_t prev_ready;
+//gdebug = 0;
+
+	//local_irq_save(irqflags);
+	prev_ready = *pStatus = ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+	timeout = jiffies + msecs_to_jiffies(2000); // THT: 1000 msec, for now
+	while (time_before(jiffies, timeout) ) {
+		switch (state) {
+		case BRCMNAND_READY: /* Wait for ctrl-busy */
+			if (!(ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
+				state = BRCMNAND_CTRL_BUSY;
+if (save_debug) printk("%s: waitfor=%d, Got ctrl-busy, intfc=%08x\n", __FUNCTION__, waitfor, ready);
+			}
+			/* If we got cache valid, skip ctrl-busy */
+			if ((waitfor == BRCMNAND_CACHE_VALID) 
+			&& (ready & BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK)) {
+				state = BRCMNAND_CTRL_READY;
+				ret = BRCMNAND_SUCCESS;
+				goto exit_monitor;
+			}
+			break;
+		case BRCMNAND_CTRL_BUSY: /* Wait for ctrl-ready */
+			if ((waitfor == BRCMNAND_CTRL_READY) &&
+			(ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
+				state = BRCMNAND_CTRL_READY;
+				ret = BRCMNAND_SUCCESS;
+				goto exit_monitor;
+			}
+			else if ((waitfor == BRCMNAND_CACHE_VALID) 
+			&& (ready & BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK)) {
+				state = BRCMNAND_CTRL_READY;
+				ret = BRCMNAND_SUCCESS;
+				goto exit_monitor;
+			}
+			break;
+		case BRCMNAND_CTRL_READY:
+			if (waitfor == BRCMNAND_CTRL_READY) {
+				ret = BRCMNAND_SUCCESS;
+				goto exit_monitor;
+			}
+			break;
+		/* NOTE(review): this state appears unreachable -- the machine above
+		 * only assigns BRCMNAND_CTRL_BUSY / BRCMNAND_CTRL_READY. */
+		case BRCMNAND_CACHE_VALID:
+			if (waitfor == BRCMNAND_CACHE_VALID) {
+				ret = BRCMNAND_SUCCESS;
+				goto exit_monitor;
+			}
+			break;
+		}
+if (prev_ready != ready) printk("prev_ready=%08x, ready=%08x\n", prev_ready, ready);
+		prev_ready = ready;
+		*pStatus = ready = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
+	}
+
+exit_monitor:
+gdebug = save_debug;
+
+	//local_irq_restore(irqflags);
+	
+if (save_debug) printk("%s: waitfor=%d, return %d, intfc=%08x\n", __FUNCTION__, waitfor, ret, *pStatus);
+	return ret;
+}
+
+
+
+
+/*
+ * Decode flash geometry using ONFI
+ * returns 1 on success, 0 on failure
+ */
+static int
+brcmnand_ONFI_decode(struct mtd_info *mtd, unsigned int chipSelect, 
+	uint32_t* outp_pageSize, uint16_t* outp_oobSize, int* outp_reqEcc, int* outp_codeWorkSize)
+{
+	int skipDecodeID = 0; /* Returned value */
+	struct brcmnand_chip * chip = mtd->priv;
+	uint32_t u32;
+	uint8_t eccLevel;
+	uint32_t nand_config0, nand_config;
+	uint32_t acc;
+	int status, retries;
+	uint32_t nand_select;
+	int ret;
+	uint32_t timing2;
+	uint8_t nbrParamPages, nbrBitsPerCell;
+	uint32_t extParamOffset, extParamFCacheOffset;
+
+//gdebug=4;
+if (gdebug>3) printk("-->%s, chipSelect=%d\n", __FUNCTION__, chipSelect);
+
+#if 1
+	/* Skip ONFI if on CS0, Boot loader already done that */
+	if (chipSelect == 0) { // Do not re-initialize when on CS0, Bootloader already done that	
+		return 0;
+	}
+#else
+	/*
+	 * Even though we cannot boot on CS0 on 7422a0, we still need to go through the
+	 * ONFI decode procedure, in order to initialize internal data structure
+	 */
+	if (chipSelect == 0) { // Do not re-initialize when on CS0, Bootloader already done that	
+		//TBD
+		return 0;
+	}	 
+#endif
+	
+	chip->vbase = (void*) BVIRTADDR(BCHP_NAND_FLASH_CACHEi_ARRAY_BASE); 
+
+#if 1
+	if (chipSelect != 0) 
+	{
+		uint32_t nand_acc;
+		
+		if (gNandConfig[chip->csi] != 0) {
+			nand_config = gNandConfig[chip->csi];
+			chip->ctrl_write(bchp_nand_config(chipSelect), nand_config);
+
+			if (chip->csi==0) /* No NAND on CS0 */
+				chip->ctrl_write(BCHP_NAND_CONFIG, nand_config);
+		}
+
+		if (0 != gAccControl[chip->csi]) {
+			nand_acc = gAccControl[chip->csi] ;
+		 	chip->ctrl_write(bchp_nand_acc_control(chipSelect), nand_acc);
+			if (chip->csi==0)
+				chip->ctrl_write(BCHP_NAND_ACC_CONTROL, nand_acc);
+		}
+	}
+	
+#endif
+
+
+
+
+	retries = 1;
+	while (retries > 0) {
+
+PRINTK("************  Retries = %d\n", retries);
+
+#if 1
+		nand_config0 = brcmnand_ctrl_read(bchp_nand_config(chipSelect));
+
+
+#endif
+	
+
+		/* Setup READ ID and AUTOCONFIG */
+		nand_select = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 
+			BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+		nand_select |= BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK;
+		nand_select &= ~(1<<(chipSelect+BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT));
+		chip->ctrl_write(BCHP_NAND_CS_NAND_SELECT, nand_select);
+
+		//udelay(10000); /* 10 ms */
+
+
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 
+			BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+
+		chip->ctrl_write(BCHP_NAND_CMD_START, 
+			BCHP_NAND_CMD_START_OPCODE_NULL << BCHP_NAND_CMD_START_OPCODE_SHIFT);
+
+		/* Wait for controller busy then ready */
+		ret = brcmnand_monitor_intfc(mtd, BRCMNAND_CTRL_READY, &status);
+
+
+
+		// udelay(1000);  // 1 sec
+		
+		// Change timing to conform to ONFI
+		timing2 = chip->ctrl_read(bchp_nand_timing2(chipSelect));
+PRINTK("Old timing2 value=%08x\n", timing2);
+		timing2 &= ~(BCHP_NAND_TIMING_2_tWHR_MASK);
+		timing2 |= 11 << BCHP_NAND_TIMING_2_tWHR_SHIFT;
+PRINTK("New timing2 value=%08x\n", timing2);
+		chip->ctrl_write(bchp_nand_timing2(chipSelect), timing2);
+
+
+		nand_config = brcmnand_ctrl_read(bchp_nand_config(chipSelect));
+PRINTK("B4 status READ, nand_config0=%08x, nand_config1=%08x, ret=%d\n", nand_config0, nand_config, ret);	
+
+
+
+		nand_config = brcmnand_ctrl_read(bchp_nand_config(chipSelect));
+PRINTK("B4 PARAM READ, nand_config0=%08x, nand_config1=%08x, ret=%d\n", nand_config0, nand_config, ret);	
+
+		
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 
+			BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PARAMETER_READ);
+
+		/* Wait for controller busy then cache-valid */
+		ret = brcmnand_monitor_intfc(mtd, BRCMNAND_CACHE_VALID, &status);
+
+		
+		/*
+		 * Verify ONFI capability
+		 */
+		u32 = brcmnand_flashcache_read(NULL, ONFI_RDPARAM_SIGNATURE_OFS, sizeof(u32));
+	
+
+		if (u32 == ONFI_SIGNATURE) {
+			printk("%s: Found ONFI signature.  Looking for %08x found %08x, ret=%d\n",
+				__FUNCTION__, ONFI_SIGNATURE, u32, ret);
+
+			break;
+		}
+
+
+		retries--;
+
+		/* Flash Reset */
+		brcmnand_wait(mtd, BRCMNAND_FL_READING, &status);
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 
+			BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+		chip->ctrl_write(BCHP_NAND_CMD_START,  OP_FLASH_RESET);
+		
+		brcmnand_wait(mtd, BRCMNAND_FL_READING, &status);
+
+	}
+	
+
+PRINTK("ONFI sig=%08x\n", *((volatile unsigned int*) chip->vbase));
+
+
+	/*
+	 * Verify ONFI capability
+	 */
+	u32 = brcmnand_flashcache_read(NULL, ONFI_RDPARAM_SIGNATURE_OFS, sizeof(u32));
+
+
+	if (u32 != ONFI_SIGNATURE) {
+		printk("%s: Cannot find ONFI signature.  Looking for %08x found %08x\n",
+			__FUNCTION__, ONFI_SIGNATURE, u32);
+
+		// debug_print_flashcache(mtd);
+		skipDecodeID = 0;
+		goto onfi_exit;
+	}
+
+	
+	// ONFI read-parameter was successful
+	nand_config = brcmnand_ctrl_read(bchp_nand_config(chipSelect));
+	
+	if (nand_config != nand_config0) {
+		printk("Original nand_config=%08x, ONFI nand_config=%08x\n",
+			nand_config0, nand_config);
+	}
+
+	/* Page Size */
+	u32 = brcmnand_flashcache_read(outp_pageSize, ONFI_RDPARAM_PAGESIZE_OFS, sizeof(u32));
+	
+
+	/* OOB Size */
+	u32 = brcmnand_flashcache_read(outp_oobSize, ONFI_RDPARAM_OOBSIZE_OFS, sizeof(*outp_oobSize));
+	//*outp_oobSize = be16_to_cpu(*outp_oobSize);
+PRINTK("oobSize = %d, u32=%08x\n", *outp_oobSize, u32);
+
+	/* MLC or SLC */
+	u32 = brcmnand_flashcache_read(&nbrBitsPerCell, ONFI_NBR_BITS_PER_CELL_OFS, sizeof(nbrBitsPerCell));
+PRINTK("nbrBitsPerCell = %d, u32=%08x\n", nbrBitsPerCell, u32);
+
+	/* Required ECC level */
+	u32 = brcmnand_flashcache_read(&eccLevel, ONFI_RDPARAM_ECC_LEVEL_OFS, sizeof(eccLevel));
+
+PRINTK("EccLevel = [%08x], %02x, pageSize=%d, oobSize=%d\n", u32, eccLevel, *outp_pageSize, *outp_oobSize);
+
+	if (eccLevel != 0xFF) { /* Codework is 512B */
+		*outp_reqEcc = eccLevel;
+		*outp_codeWorkSize = 512;
+		skipDecodeID = 1;
+	}
+	else { /* Codework is NOT 512B */
+		//int offset = 512;
+		uint32_t extParamSig = 0;
+
+		/* First find out how many param pages there are */
+		(void) brcmnand_flashcache_read(&nbrParamPages, ONFI_NBR_PARAM_PAGE_OFS, sizeof(nbrParamPages));
+
+
+		extParamOffset = (256*nbrParamPages);
+		extParamFCacheOffset = extParamOffset & ~(512-1); // ALign on 512B
+PRINTK("nbrParamPages = %d, offset=%d, extCacheOffset=%d\n", nbrParamPages, extParamOffset, extParamFCacheOffset);		
+		
+//gdebug=4;
+
+			/* Turn off 1KB sector size */
+		acc = chip->ctrl_read(bchp_nand_acc_control(chipSelect));
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		acc &= ~(BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_MASK);
+#endif
+		//acc &= ~(BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_MASK);
+		//acc |= (*oobSize) << BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_SHIFT;
+		chip->ctrl_write(bchp_nand_acc_control(chipSelect), acc);
+
+		
+		/* Bring in next 512B */
+		chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, extParamFCacheOffset);
+		chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS,  
+			chipSelect << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
+
+		chip->ctrl_write(BCHP_NAND_CMD_START, OP_PARAMETER_CHANGE_COL);	
+
+		/* Wait for controller busy then ready */
+		ret = brcmnand_monitor_intfc(mtd, BRCMNAND_CACHE_VALID, &status);
+
+		/*
+		 * Verify EXT PARAM signature
+		 * Need to adjust offset based on the number of Read-Param pages 
+		 */
+		
+		u32 = brcmnand_flashcache_read(NULL, 
+			ONFI_EXTPARAM_SIG1_OFS-ONFI_EXTPARAM_OFS+(extParamOffset-extParamFCacheOffset), 2);
+		extParamSig = (u32 & 0xFFFF) << 16;
+PRINTK("EPPS1: u32=%08x, eppsig=%08x\n", u32, extParamSig);
+		u32 = brcmnand_flashcache_read(NULL, 
+			ONFI_EXTPARAM_SIG2_OFS-ONFI_EXTPARAM_OFS+(extParamOffset-extParamFCacheOffset), 2);
+		extParamSig |= (u32 >> 16);
+PRINTK("EPPS2: u32=%08x, eppsig=%08x\n", u32, extParamSig);
+
+		if (ONFI_EXTPARAM_SIG != extParamSig) {
+			printk("%s: EXT PARAM not found, looking for %08x, found %08x\n",
+				__FUNCTION__, ONFI_EXTPARAM_SIG, extParamSig);
+			debug_print_flashcache(mtd);
+			skipDecodeID = 0;
+		}
+		else {
+			uint8_t powerOf2;
+			uint8_t eccLevel;
+
+			
+			u32 = brcmnand_flashcache_read(&powerOf2, 
+					ONFI_EXTPARAM_CODEWORK_OFS-ONFI_EXTPARAM_OFS+(extParamOffset-extParamFCacheOffset), 
+					sizeof(powerOf2));
+		
+			//powerOf2 = (u32 & (0x00FF0000)) >> 16;
+			*outp_codeWorkSize = 1 << powerOf2;
+PRINTK("codeWorkSize power = %d, codeWorkSize=%d, u32=%08x\n", powerOf2, *outp_codeWorkSize, u32);		
+			u32 = brcmnand_flashcache_read(&eccLevel, 
+					ONFI_EXTPARAM_EXT_ECC_OFS-ONFI_EXTPARAM_OFS+(extParamOffset-extParamFCacheOffset), 
+					sizeof(eccLevel));
+			*outp_reqEcc = eccLevel;
+PRINTK("eccLevel=%d, u32=%08x\n", *outp_reqEcc, u32);	
+			skipDecodeID = 1;
+			
+		}
+		
+	}
+	
+	if (skipDecodeID) {
+		printk("reqEcc=%d, codeWork=%d\n", *outp_reqEcc, *outp_codeWorkSize);
+		brcmnand_set_acccontrol(chip, chipSelect, 
+			*outp_pageSize, *outp_oobSize, *outp_reqEcc, *outp_codeWorkSize, nbrBitsPerCell);
+	}
+//gdebug = 0;
+
+onfi_exit:
+
+	//local_irq_restore(irqflags);
+
+	return skipDecodeID;
+}
+
+#else
+/* Non-ONFI chips */
+
+/* Stub used when ONFI support is compiled out: the probe path then always
+ * evaluates to 0 ("not ONFI") and falls back to read-ID identification. */
+#define brcmnand_ONFI_decode(...) (0)
+
+#endif
+
+
+/**
+ * brcmnand_probe - [BrcmNAND Interface] Probe the BrcmNAND device
+ * @param mtd		MTD device structure
+ *
+ * BrcmNAND detection method:
+ *   Compare the values read via the device-ID command with the ones the
+ *   controller latched into its CONFIG register.
+ *
+ * Identification order: (1) ONFI parameter page (v4.0+ controllers),
+ * (2) lookup of the read-ID bytes in the brcmnand_chips[] table,
+ * (3) fall back to whatever the controller/CFE wrote into NAND CONFIG.
+ * On success the mtd/chip geometry fields (writesize, oobsize,
+ * erasesize, size) are filled in from the decoded configuration.
+ *
+ * @return 0 on success, -EINVAL when no flash is detected or the chip
+ *         needs a newer controller than this STB provides.
+ *
+ * 8/13/08:
+ * V3.0+: Add celltype probe for MLC
+ */
+static int brcmnand_probe(struct mtd_info *mtd, unsigned int chipSelect)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	unsigned char brcmnand_maf_id, brcmnand_dev_id;
+	uint32_t nand_config = 0;
+	int version_id;
+	//int density;
+	int i = BRCMNAND_MAX_CHIPS+1;
+	int isONFI = 0; 	/* Set when chips (flash & ctrl) are ONFI capable */
+	int foundInIdTable = 0;	/* Set when flash ID found in ID table */
+	int skipIdLookup = 0;
+	uint32_t __maybe_unused pageSize=0;
+	uint16_t __maybe_unused oobSize=0;
+	int __maybe_unused reqEcc=0;
+	uint32_t __maybe_unused codeWork=0;
+
+
+	/*
+	 * Special treatment for Spansion OrNand chips which do not conform to standard ID
+	 */
+
+	chip->disableECC = 0;
+	chip->cellinfo = 0; // default to SLC, will read 3rd byte ID later for v3.0+ controller
+	chip->eccOobSize = 16; // Will fix it if we have a Type2 ID flash (from which we know the actual OOB size */
+	
+
+	/* Try ONFI parameter-page discovery first; on failure this returns 0
+	 * and we fall back to the read-ID path below. */
+	isONFI = brcmnand_ONFI_decode(mtd, chipSelect,
+		&pageSize, &oobSize, &reqEcc, &codeWork);
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_4_0
+	if (isONFI) { /* ONFI capable */
+		/* NAND CONFIG register already encoded by NAND controller */
+		chip->eccSectorSize = codeWork;
+		chip->eccOobSize = oobSize/(pageSize/BRCMNAND_FCACHE_SIZE);
+		if (codeWork == BRCMNAND_FCACHE_SIZE) {
+			chip->reqEccLevel = reqEcc;
+		}
+		else {
+			/* Scale the required ECC level to the controller's 512B cache unit */
+			chip->reqEccLevel = (reqEcc*BRCMNAND_FCACHE_SIZE)/codeWork;
+		}
+		/* TBD Check for required ECC level here */
+		nand_config = chip->ctrl_read(bchp_nand_config(chipSelect));
+		i = BRCMNAND_ONFI_IDX;
+	}
+
+	/* Else fallback to Read ID */
+	else 
+#endif
+	{
+		/* Read manufacturer and device IDs from Controller */
+		brcmnand_read_id(mtd, chipSelect, &chip->device_id);
+
+        if (chip->device_id == 0) {
+            printk(KERN_ERR "NAND Flash not detected\n");
+            return (-EINVAL);
+        }
+
+		brcmnand_maf_id = (chip->device_id >> 24) & 0xff;
+		brcmnand_dev_id = (chip->device_id >> 16) & 0xff;
+
+		/* Look up in our table for infos on device */
+		/* Entries sharing the same maf/dev ID pair are disambiguated by
+		 * ID bytes 3, 4 and 5 (adjacent table entries). */
+		for (i=0; i < BRCMNAND_MAX_CHIPS; i++) {
+			if (brcmnand_dev_id == brcmnand_chips[i].chipId 
+				&& brcmnand_maf_id == brcmnand_chips[i].mafId) {
+				
+				/* No ambiguity in ID#3,4,5 */
+				if (brcmnand_chips[i].chipId345[0] == 0x0 
+					&& brcmnand_chips[i].chipId345[1] == 0x0 
+					&& brcmnand_chips[i].chipId345[2] == 0x0) {
+					foundInIdTable = 1;
+					break; 
+				}
+
+				/* Must resolve ambiguity */
+				else if (brcmnand_dev_id == brcmnand_chips[i+1].chipId 
+					&& brcmnand_maf_id == brcmnand_chips[i+1].mafId) {
+				
+					uint32_t extID;
+					uint8_t id3, id4, id5;
+
+					id3 = (chip->device_id >> 8) & 0xff;
+					id4 = (chip->device_id & 0xff);
+
+					extID = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID_EXT);
+					id5 = (extID & 0xff000000) >> 24;
+
+					if (brcmnand_chips[i].chipId345[0] == id3
+						&& brcmnand_chips[i].chipId345[1] == id4
+						&& brcmnand_chips[i].chipId345[2] == id5) {
+
+						foundInIdTable = 1;
+						break;
+					}
+					else if (brcmnand_chips[i+1].chipId345[0] == id3
+						&& brcmnand_chips[i+1].chipId345[1] == id4
+						&& brcmnand_chips[i+1].chipId345[2] == id5) {
+						
+						i = i+1;
+						foundInIdTable = 1;
+						break;
+					}
+					/* Else not match */
+				}
+			}
+		}
+
+		/* i >= BRCMNAND_MAX_CHIPS means the ID was not found in the table above */
+		if (i >= BRCMNAND_MAX_CHIPS) {
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_0_0
+			printk(KERN_ERR "DevId %08x may not be supported\n", (unsigned int) chip->device_id);
+			/* Because of the bug in the controller in the first version,
+			 * if we can't identify the chip, we punt
+			 */
+			return (-EINVAL);
+#else
+			printk(KERN_WARNING"DevId %08x may not be supported.  Will use config info\n", (unsigned int) chip->device_id);
+#endif
+		}
+		else {
+			// Record NOP if known
+			chip->nop = brcmnand_chips[i].nop;
+		}
+
+		/*
+		 * Check to see if the NAND chip requires any special controller version
+		 */
+		if (brcmnand_chips[i].ctrlVersion > CONFIG_MTD_BRCMNAND_VERSION) {
+			printk(KERN_ERR "#########################################################\n");
+			printk(KERN_ERR "DevId %s requires controller version %d or later, but STB is version %d\n",
+				brcmnand_chips[i].chipIdStr, brcmnand_chips[i].ctrlVersion, CONFIG_MTD_BRCMNAND_VERSION);
+			printk(KERN_ERR "#########################################################\n");
+#if defined(CONFIG_BCM_KF_NAND)
+            return (-EINVAL);
+#endif
+		}
+
+
+		// If not on CS0 && config is passed as command line, use it and skip decoding ID.
+		if (chip->csi != 0 && gNandConfig[chip->csi] != 0) {
+			skipIdLookup = 1;
+			nand_config = gNandConfig[chip->csi];
+			brcmnand_ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);
+		}
+		else {
+			nand_config = brcmnand_ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi]));
+		}
+
+	/*------------- 3rd ID byte --------------------*/	
+#if !defined(CONFIG_BCM_KF_NAND)
+		if (!skipIdLookup && FLASHTYPE_SPANSION == brcmnand_maf_id) {
+			unsigned char devId3rdByte =  (chip->device_id >> 8) & 0xff;
+
+			switch (devId3rdByte) {
+				case 0x04:
+				case 0x00:
+					/* ECC Needed, device with up to 2% bad blocks */
+					break;
+
+				case 0x01:
+				case 0x03:
+					/* ECC NOT Needed, device is 100% valid blocks */
+					chip->disableECC = 1;
+					break;
+			}
+			/* Correct erase Block Size to read 512K for all Spansion OrNand chips */
+			nand_config &= ~(0x3 << 28);
+			nand_config |= (0x3 << 28); // bit 29:28 = 3 ===> 512K erase block
+			brcmnand_ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);
+		}
+		/* Else if NAND is found in suppported table */
+		else if (foundInIdTable) {
+#else
+		if (foundInIdTable) {
+#endif
+		
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_0_0
+			// Workaround for bug in 7400A0 returning invalid config
+			switch(i) { 
+				case 0: /* SamSung NAND 1Gbit */
+				case 1: /* ST NAND 1Gbit */
+				case 4:
+				case 5:
+					/* Large page, 128K erase block
+					   PAGE_SIZE = 0x1 = 1b = PG_SIZE_2KB
+					   BLOCK_SIZE = 0x1 = 01b = BK_SIZE_128KB
+					   DEVICE_SIZE = 0x5 = 101b = DVC_SIZE_128MB
+					   DEVICE_WIDTH = 0x0 = 0b = DVC_WIDTH_8
+					   FUL_ADR_BYTES = 5 = 101b
+					   COL_ADR_BYTES = 2 = 010b
+					   BLK_ADR_BYTES = 3 = 011b
+					 */
+					nand_config &= ~0x30000000;
+					nand_config |= 0x10000000; // bit 29:28 = 1 ===> 128K erase block
+					//nand_config = 0x55042200; //128MB, 0x55052300  for 256MB
+					brcmnand_ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);
+
+					break;
+
+				case 2:
+				case 3:
+					/* Small page, 16K erase block
+					   PAGE_SIZE = 0x0 = 0b = PG_SIZE_512B
+					   BLOCK_SIZE = 0x0 = 0b = BK_SIZE_16KB
+					   DEVICE_SIZE = 0x5 = 101b = DVC_SIZE_128MB
+					   DEVICE_WIDTH = 0x0 = 0b = DVC_WIDTH_8
+					   FUL_ADR_BYTES = 5 = 101b
+					   COL_ADR_BYTES = 2 = 010b
+					   BLK_ADR_BYTES = 3 = 011b
+					 */
+					nand_config &= ~0x70000000;
+					brcmnand_ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);
+
+					break;
+
+				default:
+					printk(KERN_ERR "%s: DevId %08x not supported\n", __FUNCTION__, (unsigned int) chip->device_id);
+					BUG();
+					break;
+			}
+/* NAND VERSION 7.1 use two config register, need to update all of decode_id_xxx function. But these special chips 
+should already be supported in 7.1 and no manual id decoding is needed */
+#elif (CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0 && CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_7_0)
+			
+			if ((brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_BYTES) == 
+					BRCMNAND_ID_EXT_BYTES ||
+				(brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_BYTES_TYPE2) == 
+					BRCMNAND_ID_EXT_BYTES_TYPE2 ||
+				(brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_MICRON_M60A) == 
+					BRCMNAND_ID_EXT_MICRON_M60A ||
+				(brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_MICRON_M61A) == 
+					BRCMNAND_ID_EXT_MICRON_M61A
+			) {
+				unsigned char devId3rdByte =  (chip->device_id >> 8) & 0xff;
+
+				chip->cellinfo = devId3rdByte & NAND_CI_CELLTYPE_MSK;
+
+				/* Read 5th ID byte if MLC type */
+				//if (chip->cellinfo) 
+
+				/* THT SWLINUX 1459: Some new SLCs have 5th ID byte defined, not just MLC */
+				/* Type-1 ID string */
+				if ((brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE4) && 
+					(brcmnand_chips[i].idOptions & BRCMNAND_ID_HAS_BYTE5)) 
+				{
+					nand_config = decode_ID_type1(chip, brcmnand_maf_id, brcmnand_dev_id, 
+						brcmnand_chips[i].idOptions, brcmnand_chips[i].nbrBlocks);
+				}
+
+				/* Type-2 ID string */
+				else if ((brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_BYTES_TYPE2) == 
+					BRCMNAND_ID_EXT_BYTES_TYPE2) 
+				{
+					brcmnand_chips[i].eccLevel = 0;
+					nand_config = decode_ID_type2(chip, brcmnand_maf_id, brcmnand_dev_id, 
+									brcmnand_chips[i].nbrBlocks, 
+									&brcmnand_chips[i].eccLevel, 
+									&brcmnand_chips[i].sectorSize);
+				}
+
+				else if  ((brcmnand_chips[i].idOptions & BRCMNAND_ID_EXT_MICRON_M61A) == 
+					BRCMNAND_ID_EXT_MICRON_M61A) 
+				{
+					nand_config = decode_ID_M61A(chip, brcmnand_maf_id, brcmnand_dev_id);
+				}
+
+				if (!skipIdLookup) {
+
+					/* Make sure that ColAddrBytes bits are correct */
+					nand_config = brcmnand_compute_adr_bytes(chip, nand_config);
+					
+					chip->ctrl_write(bchp_nand_config(chip->ctrl->CS[chip->csi]), nand_config);				
+
+					PRINTK("%s: NAND_CONFIG=%08x\n", __FUNCTION__, nand_config);
+				}
+
+
+			}
+
+			/* Else no 3rd ID byte, rely on NAND controller to identify the chip
+			else {
+			}
+			*/
+#endif // V3.0 Controller
+			/* Map the table's required ECC level/sector size onto the
+			 * controller's ECC mode enumeration. */
+			if (foundInIdTable && brcmnand_chips[i].eccLevel) {
+				if (brcmnand_chips[i].sectorSize == 1024) {
+					chip->reqEccLevel = brcmnand_chips[i].eccLevel;
+					chip->eccSectorSize = 1024;
+				}
+				else {
+					chip->reqEccLevel = brcmnand_chips[i].eccLevel;
+					chip->eccSectorSize = 512;
+				}
+				switch (chip->reqEccLevel) {
+				case 15:
+					chip->ecclevel = BRCMNAND_ECC_HAMMING;
+					break;
+				case 4:
+					chip->ecclevel = BRCMNAND_ECC_BCH_4;
+					break;
+				case 8:
+					chip->ecclevel = BRCMNAND_ECC_BCH_8;
+					break;
+				case 12:
+				case 24:
+					chip->ecclevel = BRCMNAND_ECC_BCH_12;
+					break;
+				}
+printk("%s: Ecc level set to %d, sectorSize=%d from ID table\n", __FUNCTION__, chip->reqEccLevel, chip->eccSectorSize);
+			}
+		}
+
+		/* ID not in table, and no CONFIG REG was passed at command line */
+		else if (!skipIdLookup && !foundInIdTable) {
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_2_2
+			uint32_t acc;
+			
+			/* 
+			* else chip ID not found in table, just use what the NAND controller says.
+			* We operate under the premise that if it goes this far, the controller/CFE may
+			* have done something right.  It is not guaranteed to work, however
+			*/
+			/*
+			* Do nothing, we will decode the controller CONFIG register for
+			* for flash geometry
+			*/
+
+			/*
+			 * Also, we need to find out the size of the OOB from ACC_CONTROL reg
+			 */
+			acc = brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi]));  
+			chip->eccOobSize = 
+				(acc & BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK) >> 
+					BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT;
+
+			printk("Spare Area Size = %dB/512B\n", chip->eccOobSize);
+#endif
+			nand_config = chip->ctrl_read(bchp_nand_config(chip->ctrl->CS[chip->csi]));  
+		}
+	}
+
+	/*
+	 * else ID not in database, but CONFIG reg was passed at command line, already handled
+	 */
+	
+	/* 
+	 * For some ID case, the ID decode does not yield all informations,
+	 * so we read it back, making sure that NAND CONFIG register and chip-> struct
+	 * have matching infos.
+	 */
+	brcmnand_decode_config(chip, nand_config);
+
+	// Also works for dummy entries, but no adjustments possible
+	brcmnand_adjust_timings(chip, &brcmnand_chips[i]);
+
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_2_2
+	// Adjust perchip NAND ACC CONTROL 
+	// updateInternalData = not ONFI .or. not in ID table
+	brcmnand_adjust_acccontrol(chip, isONFI, foundInIdTable, i);
+#endif
+
+	/* Flash device information */
+	brcmnand_print_device_info(&brcmnand_chips[i], mtd);
+	chip->options = brcmnand_chips[i].options;
+		
+	/* BrcmNAND page size & block size */	
+	mtd->writesize = chip->pageSize; 	
+	mtd->writebufsize = mtd->writesize;
+	// OOB size for MLC NAND varies depend on the chip
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_0
+	mtd->oobsize = mtd->writesize >> 5; // tht - 16 byte OOB for 512B page, 64B for 2K page
+#else
+	chip->eccsteps = chip->pageSize/chip->eccsize;
+	mtd->oobsize = chip->eccOobSize * chip->eccsteps;
+#endif
+	mtd->erasesize = chip->blockSize;
+
+	/* Fix me: When we have both a NOR and NAND flash on board */
+	/* For now, we will adjust the mtd->size for version 0.0 and 0.1 later in scan routine */
+
+	if (chip->ctrl->numchips == 0) 
+		chip->ctrl->numchips = 1;
+
+#if 0
+/* This is old codes, now after we switch to support multiple configs, size is per chip size  */
+	chip->mtdSize = chip->chipSize * chip->ctrl->numchips;
+
+	/*
+	 * THT: This is tricky.  We use mtd->size == 0 as an indicator whether the size
+	 * fit inside a uint32_t.  In the case it overflow, size is returned by
+	 * the inline function device_size(mtd), which is num_eraseblocks*block_size
+	 */
+	if (mtd64_ll_high(chip->mtdSize)) { // Beyond 4GB limit
+		mtd->size = 0; 
+	}
+	else {
+		mtd->size = mtd64_ll_low(chip->mtdSize);
+	}
+/*  */
+#endif
+
+	mtd->size = chip->mtdSize = chip->chipSize;
+	
+
+	//mtd->num_eraseblocks = chip->mtdSize >> chip->erase_shift;
+
+	/* Version ID */
+	version_id = chip->ctrl_read(BCHP_NAND_REVISION);
+
+	printk(KERN_INFO "BrcmNAND version = 0x%04x %dMB @%08lx\n", 
+		version_id, mtd64_ll_low(chip->chipSize>>20), chip->pbase);
+
+gdebug=0;
+
+	return 0;
+}
+
+/**
+ * brcmnand_suspend - [MTD Interface] Suspend the BrcmNAND flash
+ * @param mtd		MTD device structure
+ *
+ * Acquires the device in the PM-suspended state; the matching
+ * brcmnand_resume() releases it again.
+ */
+static int brcmnand_suspend(struct mtd_info *mtd)
+{
+	int ret;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s  \n", __FUNCTION__);
+	ret = brcmnand_get_device(mtd, BRCMNAND_FL_PM_SUSPENDED);
+	return ret;
+}
+
+/**
+ * brcmnand_resume - [MTD Interface] Resume the BrcmNAND flash
+ * @param mtd		MTD device structure
+ *
+ * Releases the device lock taken by brcmnand_suspend(); logs an error if
+ * the controller was not actually in the suspended state.
+ */
+static void brcmnand_resume(struct mtd_info *mtd)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "-->%s  \n", __FUNCTION__);
+	if (chip->ctrl->state == BRCMNAND_FL_PM_SUSPENDED)
+		brcmnand_release_device(mtd);
+	else
+		/* BUGFIX: the two concatenated string halves previously ran
+		 * together and printed "...which is notin suspended state";
+		 * add the missing space. */
+		printk(KERN_ERR "resume() called for the chip which is not "
+				"in suspended state\n");
+}
+
+#if 0
+
+/*
+ * fill_ecccmp_mask - build the ECC-compare mask from the OOB free map.
+ *
+ * Dead code: compiled out via "#if 0"; kept for reference only.
+ * Zeroes the global eccmask, then writes 0xFF over every OOB byte the
+ * ecclayout marks as free, so those bytes are ignored in ECC comparison.
+ */
+static void fill_ecccmp_mask(struct mtd_info *mtd)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	int i, len;
+	
+	struct nand_oobfree *free = chip->ecclayout->oobfree;
+	unsigned char* myEccMask = (unsigned char*) eccmask; // Defeat const
+
+	/* 
+	 * Should we rely on eccmask being zeroed out
+	 */
+	for (i=0; i < ARRAY_SIZE(eccmask); i++) {
+		myEccMask[i] = 0;
+	}
+	/* Write 0xFF where there is a free byte */
+	for (i = 0, len = 0; 
+		len < chip->oobavail && len < mtd->oobsize && i < MTD_MAX_OOBFREE_ENTRIES; 
+		i++) 
+	{
+		int to = free[i].offset;
+		int num = free[i].length;
+
+		if (num == 0) break; // End marker reached
+		memcpy (&myEccMask[to], ffchars, num);
+		len += num;
+	}
+}
+#endif
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+/* Not needed when version >=3.3, as newer chip allow different NAND */
+
+/*
+ * Make sure that all NAND chips have same ID
+ *
+ * Returns 0 when every chip-select reports the same device ID as the
+ * probed chip; 1 on the first mismatch.
+ *
+ * NOTE(review): this whole function is only compiled for controller
+ * versions < 3.3 (outer guard above), so the inner "#else" branch for
+ * "Version 3.3 and later" below is unreachable dead code.
+ */
+static int
+brcmnand_validate_cs(struct mtd_info *mtd )
+{
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) mtd->priv;
+	int i;
+	unsigned long dev_id;
+	
+	// Now verify that a NAND chip is at the CS
+	for (i=0; i<chip->ctrl->numchips; i++) {
+		brcmnand_read_id(mtd, chip->ctrl->CS[i], &dev_id);
+
+		if (dev_id != chip->device_id) {
+			printk(KERN_ERR "Device ID for CS[%1d] = %08lx, Device ID for CS[%1d] = %08lx\n",
+				chip->ctrl->CS[0], chip->device_id, chip->ctrl->CS[i], dev_id);
+			return 1;
+	} /* NOTE(review): mis-indented brace -- it closes the "if" above */
+
+		printk("Found NAND flash on Chip Select %d, chipSize=%dMB, usable size=%dMB, base=%lx\n", 
+				chip->ctrl->CS[i], mtd64_ll_low(chip->chipSize >> 20),
+				mtd64_ll_low(device_size(mtd) >> 20), chip->pbase);
+
+} /* NOTE(review): mis-indented brace -- it closes the "for" loop */
+	return 0;
+
+#else
+	/* Version 3.3 and later allows multiple IDs */
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) mtd->priv;
+	int i;
+	unsigned long dev_id;
+	
+	// Now verify that a NAND chip is at the CS
+	for (i=0; i<chip->ctrl->numchips; i++) {
+		brcmnand_read_id(mtd, chip->ctrl->CS[i], &dev_id);
+
+/*
+		if (dev_id != chip->device_id) {
+			printk(KERN_ERR "Device ID for CS[%1d] = %08lx, Device ID for CS[%1d] = %08lx\n",
+				chip->ctrl->CS[0], chip->device_id, chip->ctrl->CS[i], dev_id);
+			return 1;
+		}
+*/
+		printk("Found NAND flash on Chip Select %d, chipSize=%dMB, usable size=%dMB, base=%lx\n", 
+				chip->ctrl->CS[i], mtd64_ll_low(chip->chipSize >> 20),
+				mtd64_ll_low(device_size(mtd) >> 20), chip->pbase);
+
+	}
+	return 0;
+#endif
+}
+
+#endif /* Version < 3.3 */
+
+#if	0	/* jipeng - avoid undefined variable error in 7408A0 */
+/*
+ * CS0 reset values are gone by now, since the bootloader disabled CS0 before booting Linux
+ * in order to give the EBI address space to NAND.
+ * We will need to read strap_ebi_rom_size in order to reconstruct the CS0 values
+ * This will not be a problem, since in order to boot with NAND on CSn (n != 0), the board
+ * must be strapped for NOR.
+ *
+ * Dead code: compiled out via "#if 0" above.
+ * Returns the strapped NOR ROM size in bytes and writes the matching EBI
+ * CS0 base/size register value through outp_cs0Base.
+ */
+static unsigned int __maybe_unused
+get_rom_size(unsigned long* outp_cs0Base)
+{
+	volatile unsigned long strap_ebi_rom_size, sun_top_ctrl_strap_value;
+	uint32_t romSize = 0;
+
+	/* Pick whichever strap register layout this chip family provides */
+#if defined(BCHP_SUN_TOP_CTRL_STRAP_VALUE_0_strap_ebi_rom_size_MASK)
+	sun_top_ctrl_strap_value = (volatile unsigned long) BDEV_RD(BCHP_SUN_TOP_CTRL_STRAP_VALUE_0);
+	strap_ebi_rom_size = sun_top_ctrl_strap_value & BCHP_SUN_TOP_CTRL_STRAP_VALUE_0_strap_ebi_rom_size_MASK;
+	strap_ebi_rom_size >>= BCHP_SUN_TOP_CTRL_STRAP_VALUE_0_strap_ebi_rom_size_SHIFT;
+#elif defined(BCHP_SUN_TOP_CTRL_STRAP_VALUE_strap_ebi_rom_size_MASK)
+	sun_top_ctrl_strap_value = (volatile unsigned long) BDEV_RD(BCHP_SUN_TOP_CTRL_STRAP_VALUE);
+	strap_ebi_rom_size = sun_top_ctrl_strap_value & BCHP_SUN_TOP_CTRL_STRAP_VALUE_strap_ebi_rom_size_MASK;
+	strap_ebi_rom_size >>= BCHP_SUN_TOP_CTRL_STRAP_VALUE_strap_ebi_rom_size_SHIFT;
+#elif defined(BCHP_SUN_TOP_CTRL_STRAP_VALUE_0_strap_bus_mode_MASK)
+	romSize = 512<<10; /* 512K */
+	*outp_cs0Base = 0x1FC00000;
+	return romSize;
+#elif !defined(CONFIG_BRCM_HAS_NOR)
+	printk("FIXME: no strap option for rom size on 3548/7408\n");
+	BUG();
+#else
+	/* all new 40nm chips */
+	return 64 << 20;
+#endif
+
+	// Here we expect these values to remain the same across platforms.
+	// Some customers want to have a 2MB NOR flash, but I don't see how that is possible.
+	switch(strap_ebi_rom_size) {
+	case 0:
+		romSize = 64<<20;
+		*outp_cs0Base = (0x20000000 - romSize) | BCHP_EBI_CS_BASE_0_size_SIZE_64MB;
+		break;
+	case 1:
+		romSize = 16<<20;
+		*outp_cs0Base = (0x20000000 - romSize) | BCHP_EBI_CS_BASE_0_size_SIZE_16MB;
+		break;
+	case 2:
+		romSize = 8<<20;
+		*outp_cs0Base = (0x20000000 - romSize) | BCHP_EBI_CS_BASE_0_size_SIZE_8MB;
+		break;
+	case 3:
+		romSize = 4<<20;
+		*outp_cs0Base = (0x20000000 - romSize) | BCHP_EBI_CS_BASE_0_size_SIZE_4MB;
+		break;
+	default:
+		printk("%s: Impossible Strap Value %08lx for BCHP_SUN_TOP_CTRL_STRAP_VALUE\n", 
+			__FUNCTION__, sun_top_ctrl_strap_value);
+		BUG();
+	}
+	return romSize;
+}
+#endif
+
+
+/*
+ * brcmnand_prepare_reboot_priv - quiesce the NAND controller before reboot.
+ *
+ * Flushes any pending correctable-error-table data (when CET handling is
+ * enabled), takes the device exclusively (BRCMNAND_FL_XIP), and -- when
+ * NAND sits on CS0 -- re-enables direct access so the boot ROM/CFE can
+ * read the flash after reset.  Requires a valid mtd handle; returns
+ * without doing anything when mtd is NULL.
+ */
+static void brcmnand_prepare_reboot_priv(struct mtd_info *mtd)
+{
+	/* 
+	 * Must set NAND back to Direct Access mode for reboot, but only if NAND is on CS0
+	 */
+
+	struct brcmnand_chip* this;
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+	/* Flush pending in-mem CET to flash before exclusive lock */
+	if (mtd) {
+		brcmnand_cet_prepare_reboot(mtd);
+	}
+#endif
+	if (mtd) {
+		this = (struct brcmnand_chip*) mtd->priv;
+		brcmnand_get_device(mtd, BRCMNAND_FL_XIP);
+	}
+	else
+		/* Nothing we can do without an mtd handle */
+		return;
+
+#if 0
+/* No longer used.  We now required the mtd handle */
+	else {
+		/*
+		 * Prevent further access to the NAND flash, we are rebooting 
+		 */
+		this = brcmnand_get_device_exclusive();
+	}
+#endif
+
+	/* Disabled block: legacy boot-from-NOR handling that restored CS0 to
+	 * NOR and detached the NAND chip-selects.  Kept for reference. */
+#if	0	/* jipeng - avoid undefined variable error in 7408A0 */
+	// PR41560: Handle boot from NOR but open NAND flash for access in Linux
+	//if (!is_bootrom_nand()) {
+	if (0) {
+		// Restore CS0 in order to allow boot from NOR.
+
+		//int ret = -EFAULT;
+		int i; 
+		int csNand; // Which CS is NAND
+		volatile unsigned long cs0Base, cs0Cnfg, cs0BaseAddr, csNandSelect, extAddr;
+		volatile unsigned long csNandBase[MAX_NAND_CS], csNandCnfg[MAX_NAND_CS];
+		unsigned int romSize;
+		
+		romSize = get_rom_size((unsigned long*) &cs0Base);
+//printk("ROM size is %dMB\n", romSize >>20);
+		
+		cs0BaseAddr = cs0Base & BCHP_EBI_CS_BASE_0_base_addr_MASK;
+
+		cs0Cnfg = *(volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_CONFIG_0);
+
+		// Turn off NAND CS
+		for (i=0; i < this->numchips; i++) {
+			csNand = this->CS[i];
+
+			if (csNand == 0) {
+				printk("%s: Call this routine only if NAND is not on CS0\n", __FUNCTION__);
+			}
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+			BUG_ON(csNand > 5);
+#else
+			BUG_ON(csNand > 8);
+#endif
+			csNandBase[i] = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_BASE_0 + 8*csNand);
+			csNandCnfg[i] = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_CONFIG_0 + 8*csNand);
+
+			// Turn off NAND, must turn off both NAND_CS_NAND_SELECT and CONFIG.
+			// We turn off the CS_CONFIG here, and will turn off NAND_CS_NAND_SELECT for all CS at once,
+			// outside the loop.
+			*(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_CONFIG_0 + 8*csNand) = 
+				csNandCnfg[i] & (~BCHP_EBI_CS_CONFIG_0_enable_MASK);
+
+		}
+		
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
+		csNandSelect = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+
+
+		csNandSelect &= 
+			~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+				BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_MASK
+				| BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_MASK
+				| BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK
+				| BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_MASK
+				| BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_MASK
+				| BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK
+#else
+				0x0000003E	/* Not documented on V1.0+ */
+#endif // Version < 1.0
+			);
+#endif // version >= 0.1
+		brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, csNandSelect);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+		// THT from TM/RP: 020609: Clear NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG
+		csNandSelect &= ~(BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK);
+		brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, csNandSelect);
+		
+		// THT from TM/RP: 020609: Clear NAND_CMD_EXT_ADDRESS_CS_SEL
+		extAddr = brcmnand_ctrl_read(BCHP_NAND_CMD_EXT_ADDRESS);
+		extAddr &= ~(BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_MASK);
+		brcmnand_ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, extAddr);
+#endif
+		
+//printk("Turn on NOR\n");
+		// Turn on NOR on CS0
+		*(volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_CONFIG_0) = 
+			cs0Cnfg | BCHP_EBI_CS_CONFIG_0_enable_MASK;
+
+//printk("returning from reboot\n");
+		// We have turned on NOR, just return, leaving NAND locked
+		// The CFE will straighten out everything.
+		return;
+	}
+#endif	/* 0 */
+		
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+	// Otherwise if NAND is on CS0, turn off direct access before rebooting
+	if (this->ctrl->CS[0] == 0) { // Only if on CS0
+		volatile unsigned long nand_select, ext_addr;
+
+		// THT: Set Direct Access bit 
+		nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+		//printk("%s: B4 nand_select = %08x\n", __FUNCTION__, (uint32_t) nand_select);
+		nand_select |= BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK;
+
+		// THT from TM/RP: 020609: Clear NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG
+		nand_select &= ~(BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK);
+		brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, nand_select);
+		//nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+		//printk("%s: After nand_select = %08x\n", __FUNCTION__, (uint32_t)  nand_select);
+		
+		// THT from TM/RP: 020609: Clear NAND_CMD_EXT_ADDRESS_CS_SEL
+		ext_addr = brcmnand_ctrl_read(BCHP_NAND_CMD_EXT_ADDRESS);
+		ext_addr &= ~(BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_MASK);
+		brcmnand_ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, ext_addr);
+	}
+	
+#endif  //#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+
+
+	return;
+}
+
+#if 0
+// In case someone reboot w/o going thru the MTD notifier mechanism.
+// Dead code: compiled out via "#if 0".  Note that calling the priv
+// routine with NULL would simply return, as it requires an mtd handle.
+void brcmnand_prepare_reboot(void)
+{
+	brcmnand_prepare_reboot_priv(NULL);
+}
+EXPORT_SYMBOL(brcmnand_prepare_reboot);
+#endif
+
+
+/*
+ * brcmnand_reboot_cb - reboot-notifier hook.
+ * Recovers the mtd_info that embeds this notifier_block and parks the
+ * controller in direct-access mode before the system restarts.
+ */
+static int brcmnand_reboot_cb(struct notifier_block *nb, unsigned long val, void *v)
+{
+	struct mtd_info *mtd = container_of(nb, struct mtd_info, reboot_notifier);
+
+	brcmnand_prepare_reboot_priv(mtd);
+	return NOTIFY_DONE;
+}
+
+/*
+ * initialize_chip - install default chip-level operations.
+ *
+ * Fills in every chip method slot that board/platform setup has not
+ * already overridden (each assignment is guarded by a NULL check), and
+ * fixes eccsize to the controller's flash-cache unit.  The ECC read/write
+ * ops installed here are the polled controller versions; they may be
+ * swapped for the ISR versions once EDU is enabled.
+ */
+static void initialize_chip (struct brcmnand_chip* chip)
+{
+
+	/* Initialize chip level routines */
+
+	if (!chip->ctrl_read)
+		chip->ctrl_read = brcmnand_ctrl_read;
+	if (!chip->ctrl_write)
+		chip->ctrl_write = brcmnand_ctrl_write;
+	if (!chip->ctrl_writeAddr)
+		chip->ctrl_writeAddr = brcmnand_ctrl_writeAddr;
+
+#if 0
+	if (!chip->read_raw)
+		chip->read_raw = brcmnand_read_raw;
+	if (!chip->read_pageoob)
+		chip->read_pageoob = brcmnand_read_pageoob;
+#endif
+
+	if (!chip->write_is_complete)
+		chip->write_is_complete = brcmnand_write_is_complete;
+	
+	if (!chip->wait)
+		chip->wait = brcmnand_wait;
+
+	if (!chip->block_markbad)
+		chip->block_markbad = brcmnand_default_block_markbad;
+	if (!chip->scan_bbt)
+		chip->scan_bbt = brcmnand_default_bbt;
+	if (!chip->erase_bbt)
+		chip->erase_bbt = brcmnand_erase_bbt;
+
+	chip->eccsize = BRCMNAND_FCACHE_SIZE;  // Fixed for Broadcom controller
+		
+
+	/*
+	 * For now initialize ECC read ops using the controller version, will switch to ISR version after
+	 * EDU has been enabled
+	 */
+
+	if (!chip->read_page)
+		chip->read_page = brcmnand_read_page;
+	if (!chip->write_page)
+		chip->write_page = brcmnand_write_page;
+	if (!chip->read_page_oob)
+		chip->read_page_oob = brcmnand_read_page_oob;
+	if (!chip->write_page_oob)
+		chip->write_page_oob = brcmnand_write_page_oob;
+		
+	if (!chip->read_oob)
+		chip->read_oob = brcmnand_do_read_ops;
+	if (!chip->write_oob)
+		chip->write_oob = brcmnand_do_write_ops;
+}
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
+/*
+ * handle_xor - derive the per-chip xor_disable flag from NAND_CS_NAND_XOR.
+ *
+ * 2618-7.3: For v2.0 or later, set xor_disable according to the
+ * NAND_CS_NAND_XOR bit for this chip-select.  When the build option
+ * CONFIG_MTD_BRCMNAND_DISABLE_XOR is set, also force address-XOR off on
+ * CS0 in the hardware if the CFE did not already do so.
+ */
+static void handle_xor(struct brcmnand_chip* chip) 
+{
+	uint32_t nand_xor;
+	uint32_t __maybe_unused nand_select;
+
+	nand_xor = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_XOR);
+	printk("NAND_CS_NAND_XOR=%08x\n", nand_xor);
+
+#ifdef CONFIG_MTD_BRCMNAND_DISABLE_XOR
+/* Testing 1,2,3: Force XOR disable on CS0, if not done by CFE */
+	if (chip->ctrl->CS[0] == 0) {	
+		/* BUGFIX: nand_select was previously printed and bit-cleared
+		 * while uninitialized; read the current register value first. */
+		nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+		printk("Disabling XOR: Before: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
+		
+		nand_select &= ~BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK;
+		nand_xor &= ~BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK;
+
+		brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, nand_select);
+		brcmnand_ctrl_write(BCHP_NAND_CS_NAND_XOR, nand_xor);
+
+		printk("Disabling XOR: After: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
+	}
+#endif
+
+	/* Set xor_disable, 1 for each NAND chip */
+	if (!(nand_xor & (BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK<<chip->ctrl->CS[chip->csi]))) {
+PRINTK("Disabling XOR on CS#%1d\n", chip->ctrl->CS[chip->csi]);
+		chip->xor_disable = 1;
+	}
+	
+}
+#endif /* v2.0 or later */
+
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+
+/*
+ * Version 0.1 can only have Hamming, so
+ * the problem is handle the flash EBI base address
+ */
+static void handle_ecclevel_v0_1 (struct mtd_info *mtd, struct brcmnand_chip* chip, int cs)
+{
+	/*
+	 * v0.1 controllers support Hamming ECC only, so the real work here is
+	 * computing the flash EBI base address (chip->pbase / chip->vbase) and
+	 * mtd->size from chip->chipSize.  When the part is on CS != 0 we must
+	 * also do, in SW, the setup that HW straps perform automatically on
+	 * CS0: clear block write-protect and enable HW ECC on block 0 (both
+	 * registers are sticky, so a previous setting cannot be undone here).
+	 */
+	if (cs) {
+		volatile unsigned long wr_protect;
+		volatile unsigned long acc_control;
+
+		chip->ctrl->numchips = 1;
+
+		/* Set up base, based on flash size */
+		if (chip->chipSize >= (256 << 20)) {
+			chip->pbase = 0x12000000;
+			mtd->size = 0x20000000 - chip->pbase; // THT: This is different than chip->chipSize
+		} else {
+			/* We know that flash endAddr is 0x2000_0000 */
+			chip->pbase = 0x20000000 - chip->chipSize;
+			mtd->size = chip->chipSize;
+		}
+
+		printk("Found NAND chip on Chip Select %d, chipSize=%dMB, usable size=%dMB, base=%08x\n", 
+			(int) cs, mtd64_ll_low(chip->chipSize >> 20), mtd64_ll_low(device_size(mtd) >> 20), (unsigned int) chip->pbase);
+
+
+
+		/*
+		 * When NAND is on CS0, it reads the strap values and set up accordingly.
+		 * WHen on CS1, some configurations must be done by SW
+		 */
+
+		// Set Write-Unprotect.  This register is sticky, so if someone already set it, we are out of luck
+		wr_protect = brcmnand_ctrl_read(BCHP_NAND_BLK_WR_PROTECT);
+		if (wr_protect) {
+			printk("Unprotect Register B4: %08x.  Please do a hard power recycle to reset\n", (unsigned int) wr_protect);
+			// THT: Actually we should punt here, as we cannot zero the register.
+		} 
+		brcmnand_ctrl_write(BCHP_NAND_BLK_WR_PROTECT, 0); // This will not work.
+		if (wr_protect) {
+			printk("Unprotect Register after: %08x\n", brcmnand_ctrl_read(BCHP_NAND_BLK_WR_PROTECT));
+		}
+
+		// Enable HW ECC.  This is another sticky register.
+		acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+		printk("ACC_CONTROL B4: %08x\n", (unsigned int) acc_control);
+		 
+		// Set RD_ECC_BLK0_EN; log the readback only if the bit was previously clear
+		brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control | BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK);
+		if (!(acc_control & BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK)) {
+			printk("ACC_CONTROL after: %08x\n", brcmnand_ctrl_read(bchp_nand_acc_control(cs)));
+		}
+	}
+	else {
+		/* NAND chip on Chip Select 0 */
+		chip->ctrl->CS[0] = 0;
+	
+		chip->ctrl->numchips = 1;
+		
+		/* Set up base, based on flash size (same rule as the CS != 0 case above) */
+		if (chip->chipSize >= (256 << 20)) {
+			chip->pbase = 0x12000000;
+			mtd->size = 0x20000000 - chip->pbase; // THT: This is different than chip->chipSize
+		} else {
+			/* We know that flash endAddr is 0x2000_0000 */
+			chip->pbase = 0x20000000 - chip->chipSize;
+			mtd->size = chip->chipSize;
+		}
+		//mtd->size_hi = 0;
+		chip->mtdSize = mtd->size;
+
+		printk("Found NAND chip on Chip Select 0, size=%dMB, base=%08x\n", mtd->size>>20, (unsigned int) chip->pbase);
+
+	}
+	/* vbase: uncached (KSEG1) virtual mapping of the flash physical base */
+	chip->vbase = (void*) KSEG1ADDR(chip->pbase);
+}
+
+#elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+/* Version 3.0 or later */
+static uint32_t 
+handle_acc_control(struct mtd_info *mtd, struct brcmnand_chip* chip, int cs)
+{
+	/*
+	 * Reconcile the controller's ACC_CONTROL register with the ECC level
+	 * and OOB size discovered during ID probe (v3.0+ controllers).
+	 *
+	 * - If ACC_CONTROL was already overridden from the command line
+	 *   (gAccControl[csi] != 0), return chip->ecclevel unchanged.
+	 * - Force block-0 and block-n to a common ECC level (pre-v7.0 paths).
+	 * - Upgrade Hamming to BCH-4 on MLC parts (MLC requires BCH-4+).
+	 * - Verify the spare-area size is large enough for BCH-12 (and, on
+	 *   v7.0+, BCH-8); BUG() on a fatally mis-strapped board.
+	 * - Program the correctable-error reporting threshold
+	 *   (CORR_STAT_THRESHOLD).
+	 *
+	 * Returns the resulting ECC level; chip->ecclevel is set to the same
+	 * value.
+	 */
+	volatile unsigned long acc_control, org_acc_control;
+	int csi = chip->csi; // Index into chip->ctrl->CS array
+	unsigned long eccLevel=0, eccLevel_0, eccLevel_n;
+	uint32_t eccOobSize;
+
+	if (gAccControl[csi] != 0) {
+		// Already done in brcmnand_adjust_acccontrol()
+		printk("ECC level from command line=%d\n", chip->ecclevel);
+	  	return chip->ecclevel; // Do nothing, take the overwrite value
+	}
+
+  #if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+
+      
+PRINTK("100 CS=%d, chip->ctrl->CS[%d]=%d\n", cs, chip->csi, chip->ctrl->CS[chip->csi]);
+	
+	org_acc_control = acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+
+	/*
+	 * For now, we only support same ECC level for both block0 and other blocks
+	 */
+	// Verify BCH-4 ECC: Handle CS0 block0
+	
+	// ECC level for block-0
+	eccLevel = eccLevel_0 = (acc_control & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK) >> 
+		BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT;
+	// ECC level for all other blocks.
+	eccLevel_n = (acc_control & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK) >>
+		BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT;
+
+	// make sure that block-0 and block-n use the same ECC level.
+	if (eccLevel_0 != eccLevel_n) {
+		// Use eccLevel_0 for eccLevel_n, unless eccLevel_0 is 0.
+		if (eccLevel_0 == 0) {
+			eccLevel = eccLevel_n;
+		}
+		acc_control &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK|
+			BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK);
+		acc_control |= (eccLevel <<  BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT) | 
+			(eccLevel << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT);
+		brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+
+		if (eccLevel == eccLevel_0) {
+			printk("Corrected ECC on block-n to ECC on block-0: ACC = %08lx from %08lx\n", 
+				acc_control, org_acc_control);
+		} 
+		else {
+			printk("Corrected ECC on block-0 to ECC on block-n: ACC = %08lx from %08lx\n", 
+				acc_control, org_acc_control);
+		}
+							
+	}
+	chip->ecclevel = eccLevel;
+
+		
+	/* Validate the unified ECC level against flash type and OOB size */
+	switch (eccLevel) {
+	case BRCMNAND_ECC_HAMMING:
+		if (NAND_IS_MLC(chip)) {
+			printk(KERN_INFO "Only BCH-4 or better is supported on MLC flash\n");
+			chip->ecclevel  = BRCMNAND_ECC_BCH_4;
+			acc_control &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK|
+				BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK);
+			acc_control |= (BRCMNAND_ECC_BCH_4 <<  BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT) | 
+				(BRCMNAND_ECC_BCH_4 << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+			printk("Corrected ECC to BCH-4 for MLC flashes: ACC_CONTROL = %08lx from %08lx\n", acc_control, org_acc_control);
+		}
+		break;
+
+	case BRCMNAND_ECC_BCH_4:
+	case BRCMNAND_ECC_BCH_8:
+	case BRCMNAND_ECC_BCH_12:	
+		// eccOobSize is initialized to the board strap of ECC-level
+		eccOobSize = (acc_control & BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK) >>
+			BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT;
+		printk("ACC: %d OOB bytes per 512B ECC step; from ID probe: %d\n", eccOobSize, chip->eccOobSize);
+		//Make sure that the OOB size is >= 27
+		if (eccLevel == BRCMNAND_ECC_BCH_12 && chip->eccOobSize < 27) {
+    			printk(KERN_INFO "BCH-12 requires >=27 OOB bytes per ECC step.\n");
+			printk(KERN_INFO "Please fix your board straps. Aborting to avoid file system damage\n");
+			BUG();
+		}
+		// We have recorded chip->eccOobSize during probe, let's compare it against value from straps:
+		if (chip->eccOobSize < eccOobSize) {
+			printk("Flash says it has %d OOB bytes, eccLevel=%lu, but board strap says %d bytes, fixing it...\n",
+				chip->eccOobSize, eccLevel, eccOobSize);
+			acc_control &= ~(BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_MASK\
+				| BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK);
+			acc_control |= (chip->eccOobSize << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_SHIFT)
+				| (chip->eccOobSize << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT);
+			printk("ACC_CONTROL adjusted to %08x\n", (unsigned int) acc_control);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+		}
+
+		break;
+
+	default:
+		printk(KERN_ERR "Unsupported ECC level %lu\n", eccLevel);
+		BUG();
+		
+	}
+
+	
+	chip->ecclevel = eccLevel;
+	//csi++; // Look at next CS
+
+
+	/*
+	 * PR57272: Workaround for BCH-n error, 
+	 * reporting correctable errors with 4 or more bits as uncorrectable:
+	 */
+	if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+		int corr_threshold;
+
+		if (chip->ecclevel >  BRCMNAND_ECC_BCH_4) {
+			printk(KERN_WARNING "%s: Architecture cannot support ECC level %d\n", __FUNCTION__, chip->ecclevel);
+			corr_threshold = 3;
+		}
+		else if ( chip->ecclevel ==  BRCMNAND_ECC_BCH_4) {
+			corr_threshold = 3; // Changed from 2, since refresh is costly and vulnerable to AC-ON/OFF tests.
+		} 
+		else {
+			corr_threshold = 1;  // 1 , default for Hamming
+		}
+
+		printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, corr_threshold);
+		corr_threshold <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
+		brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, corr_threshold);
+	}
+
+  #else /* NAND version 3.3 or later */
+
+PRINTK("100 CS=%d, chip->ctrl->CS[%d]=%d\n", cs, chip->csi, chip->ctrl->CS[chip->csi]);
+	
+	org_acc_control = acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+	
+	/*
+	 * For now, we only support same ECC level for both block0 and other blocks
+	 */
+	// Verify BCH-4 ECC: Handle CS0 block0
+	if (chip->ctrl->CS[chip->csi] == 0) 
+	{
+		// ECC level for all other blocks.
+		eccLevel_n = (acc_control & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK) >>
+			BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT;
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+		// ECC level for block-0
+		eccLevel = eccLevel_0 = (acc_control & BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK) >> 
+			BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT;
+
+		// make sure that block-0 and block-n use the same ECC level.
+		if (eccLevel_0 != eccLevel_n) {
+			// Use eccLevel_0 for eccLevel_n, unless eccLevel_0 is 0.
+			if (eccLevel_0 == 0) {
+				eccLevel = eccLevel_n;
+			}
+			acc_control &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK|
+				BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK);
+			acc_control |= (eccLevel <<  BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT) | 
+				(eccLevel << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+
+			if (eccLevel == eccLevel_0) {
+				printk("Corrected ECC on block-n to ECC on block-0: ACC = %08lx from %08lx\n", 
+					acc_control, org_acc_control);
+			} 
+			else {
+				printk("Corrected ECC on block-0 to ECC on block-n: ACC = %08lx from %08lx\n", 
+					acc_control, org_acc_control);
+			}
+								
+		}
+		chip->ecclevel = eccLevel;
+#else
+		/* v7.0+ has no separate block-0 ECC level field; use block-n's */
+		chip->ecclevel = eccLevel_n;
+		eccLevel = eccLevel_n;
+#endif
+		/*
+		 * Make sure that threshold is set at 75% of #bits the ECC can correct.
+		 * This should be done for each CS!!!!!
+		 */
+		if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
+			uint32_t corr_threshold = brcmnand_ctrl_read(BCHP_NAND_CORR_STAT_THRESHOLD)&BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK;
+			uint32_t seventyfivepc;
+
+			seventyfivepc = (chip->ecclevel*3)/4;
+			printk(KERN_INFO "%s: default CORR ERR threshold  %d bits\n", __FUNCTION__, corr_threshold);
+PRINTK("ECC level threshold set to %d bits\n", corr_threshold);
+			// Only lower the threshold; keep the other bits of the register intact
+			if (seventyfivepc < corr_threshold) {
+				printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, seventyfivepc);
+				seventyfivepc <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
+                                seventyfivepc |= (brcmnand_ctrl_read(BCHP_NAND_CORR_STAT_THRESHOLD)&~BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK);
+				brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, seventyfivepc);
+			}
+		}
+PRINTK("ECC level %d, threshold at %d bits\n", 
+chip->ecclevel, brcmnand_ctrl_read(BCHP_NAND_CORR_STAT_THRESHOLD));
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		/* Program 512B vs 1KB ECC sector size to match the probed part */
+		acc_control &= ~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                        BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_0_MASK |
+#endif
+			BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_MASK);
+		if (chip->eccSectorSize == 1024) {
+			acc_control |= (
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                                BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_0_MASK |
+#endif
+				BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_MASK);
+		}
+		brcmnand_ctrl_write(bchp_nand_acc_control(0), acc_control );
+#endif
+	}
+	
+	else {  // CS != 0
+		
+		/* Non-zero chip selects use the dedicated CS1_* register fields */
+		eccLevel = eccLevel_0 = (acc_control & BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_MASK) >> 
+			BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_SHIFT;
+		chip->ecclevel = eccLevel;
+	
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_5_0
+		acc_control &= ~(BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_MASK);
+		if (chip->eccSectorSize == 1024) {
+			acc_control |= (BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_MASK);
+		}
+		brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+#endif
+	}
+
+
+	/* Validate the chosen ECC level against flash type and OOB size */
+	switch (eccLevel) {
+	case BRCMNAND_ECC_HAMMING:
+		if (NAND_IS_MLC(chip)) {
+			printk(KERN_INFO "Only BCH-4 or better is supported on MLC flash\n");
+			eccLevel = chip->ecclevel  = BRCMNAND_ECC_BCH_4;
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+			acc_control &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK|
+				BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK);
+			acc_control |= (BRCMNAND_ECC_BCH_4 <<  BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT) | 
+				(BRCMNAND_ECC_BCH_4 << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT);
+#else
+			acc_control &= ~(BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK);
+			acc_control |= (BRCMNAND_ECC_BCH_4 << BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT);
+#endif
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+			printk("Corrected ECC to BCH-4 for MLC flashes: ACC_CONTROL = %08lx from %08lx\n", acc_control, org_acc_control);
+		}
+		break;
+
+	case BRCMNAND_ECC_BCH_4:
+	case BRCMNAND_ECC_BCH_8:
+	case BRCMNAND_ECC_BCH_12:	
+		// eccOobSize is initialized to the board strap of ECC-level
+		eccOobSize = (acc_control & BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK) >>
+			BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT;
+		printk("ACC: %d OOB bytes per 512B ECC step; from ID probe: %d\n", eccOobSize, chip->eccOobSize);
+
+		/* Temporary workarond. Id probe function does not set the ecc size. Need to implmenent this.*/ 
+		if( eccOobSize >= 27 && eccOobSize  > chip->eccOobSize )
+		{
+			chip->eccOobSize = eccOobSize;
+			mtd->oobsize = chip->eccOobSize * chip->eccsteps;
+			printk(KERN_INFO "Use strap setting for ecc size %d bytes, mtd->oobsize %d.\n", eccOobSize, mtd->oobsize);
+		}
+
+		//Make sure that the OOB size is >= 27
+		if (eccLevel == BRCMNAND_ECC_BCH_12 && chip->eccOobSize < 27) {
+			printk(KERN_INFO "BCH-12 requires >=27 OOB bytes per ECC step.\n");
+			printk(KERN_INFO "Please use the NAND part with enough spare eara and fix your board straps. Aborting to avoid file system damage\n");
+			BUG();
+		}
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_0
+		//NAND 7.0 requires more ECC byte for BCH-8
+		if (eccLevel == BRCMNAND_ECC_BCH_8 && chip->eccOobSize < 27) {
+			printk(KERN_INFO "BCH-8 requires >=27 OOB bytes per ECC step on NAND controller 7.0 or later.\n");
+			printk(KERN_INFO "Please use the NAND part with enough spare eara and fix your board straps. Aborting to avoid file system damage\n");
+			BUG();
+		}
+#endif
+
+		// We have recorded chip->eccOobSize during probe, let's compare it against value from straps:
+		if (chip->eccOobSize < eccOobSize) {
+			printk("Flash says it has %d OOB bytes, eccLevel=%lu, but board strap says %d bytes, fixing it...\n",
+				chip->eccOobSize, eccLevel, eccOobSize);
+			acc_control &= ~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                                BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_MASK | 
+#endif
+				BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK);
+			acc_control |= 
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                                (chip->eccOobSize << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_SHIFT) |
+#endif
+				(chip->eccOobSize << BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT);
+			printk("ACC_CONTROL adjusted to %08x\n", (unsigned int) acc_control);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control);
+		}
+
+		break;
+
+	default:
+		printk(KERN_ERR "Unsupported ECC level %lu\n", eccLevel);
+		BUG();
+		
+	}
+
+	
+	chip->ecclevel = eccLevel;
+
+#endif /* else NAND version 3.3 or later */
+
+	/*
+	 * This is just a warning: compare the flash's required ECC strength
+	 * (from ID probe) with what the board is actually strapped for.
+	 */
+PRINTK("reqEccLevel=%d, eccLevel=%d\n", chip->reqEccLevel, chip->ecclevel);
+	if (chip->reqEccLevel != 0 && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
+		if (chip->reqEccLevel == BRCMNAND_ECC_HAMMING) {
+			; /* Nothing, lowest requirement */
+		}
+		/* BCH */
+		else if (chip->reqEccLevel > 0 && chip->reqEccLevel <= BRCMNAND_ECC_BCH_12) {
+			if (chip->reqEccLevel  > chip->ecclevel) {
+				printk(KERN_WARNING "******* Insufficient ECC level, required=%d, strapped for %d ********\n", 
+					chip->reqEccLevel ,  chip->ecclevel);
+			}
+		}
+	}
+	
+	return eccLevel;
+	
+	/* No need to worry about correctable error for V3.3 or later, just take the default */
+}
+
+
+  
+  
+
+ // else nothing to do for v2.x
+#endif /* if controller v0.1 else 2.0 or later */
+
+#ifdef CONFIG_BCM3548
+/*
+ * Check to see if this is a 3548L or 3556,
+ * in which case, disable WR_PREEMPT to avoid data corruption
+ * 
+ * @acc_control: current value of the ACC_CONTROL register for the boot CS
+ *
+ * returns the passed-in acc-control register value with WR_PREEMPT disabled.
+ */
+static uint32_t check_n_disable_wr_preempt(uint32_t acc_control)
+{
+	uint32_t otp_option = BDEV_RD(BCHP_SUN_TOP_CTRL_OTP_OPTION_STATUS);
+
+	printk("mcard_disable=%08x\n", otp_option);
+	// Is there any device on the EBI bus: mcard_disable==0 means there is (a device hanging off the EBI bus)
+	if (!(otp_option & BCHP_SUN_TOP_CTRL_OTP_OPTION_STATUS_otp_option_mcard_in_disable_MASK)) {
+		/* THT PR50928: Disable WR_PREEMPT for 3548L and 3556 */
+		acc_control &= ~(BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK);
+		/*
+		 * Fix: the original wrote to bchp_nand_acc_control(cs), but `cs`
+		 * is not a parameter of this function and is not in scope here,
+		 * which fails to compile when CONFIG_BCM3548 is enabled.  The
+		 * 3548L/3556 workaround targets the NAND on chip-select 0.
+		 * NOTE(review): confirm the single-CS0 assumption for 3548/3556.
+		 */
+		brcmnand_ctrl_write(bchp_nand_acc_control(0), acc_control);
+		printk("Disable WR_PREEMPT: ACC_CONTROL = %08x\n", acc_control);
+	}
+	return acc_control;
+}
+#endif
+
+/**
+ * brcmnand_scan - [BrcmNAND Interface] Scan for the BrcmNAND device
+ * @param mtd		MTD device structure
+ * @cs			  	Chip Select number
+ * @param numchips	Number of chips  (from CFE or from nandcs= kernel arg)
+ * @lastChip			Start actual scan for bad blocks only on last chip
+ *
+ * This fills out all the not initialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ *
+ */
+int brcmnand_scan(struct mtd_info *mtd , int cs, int numchips )
+{
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) mtd->priv;
+	//unsigned char brcmnand_maf_id;
+	int err, i;
+	static int __maybe_unused notFirstChip;
+	volatile unsigned long nand_select;
+	unsigned int version_id;
+	unsigned int version_major;
+	unsigned int version_minor;
+
+#if defined(CONFIG_BCM_KF_NAND)
+	memset(ffchars, 0xff, sizeof(ffchars));
+#endif
+
+PRINTK("-->%s: CS=%d, numchips=%d, csi=%d\n", __FUNCTION__, cs, numchips, chip->csi);
+
+	chip->ctrl->CS[chip->csi] = cs;
+	
+
+	initialize_chip(chip);
+	chip->ecclevel = BRCMNAND_ECC_HAMMING;
+	
+	printk(KERN_INFO "mtd->oobsize=%d, mtd->eccOobSize=%d\n", mtd->oobsize, chip->eccOobSize);
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
+	handle_xor(chip);
+
+#endif // if version >= 2.0 XOR
+
+//	for (i=0; i<chip->ctrl->numchips; i++) {
+//		cs = chip->ctrl->CS[i];
+
+//gdebug=4;
+	PRINTK("brcmnand_scan: Calling brcmnand_probe for CS=%d\n", cs);
+	if (brcmnand_probe(mtd, cs)) {
+		return -ENXIO;
+	}
+//gdebug=0;
+
+/*
+ * With version 3.3, we allow per-CS mtd handle, so it is handled in bcm7xxx-nand.c
+ */
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0 && \
+	CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_3
+	if (chip->ctrl->numchips > 0) {
+		if (brcmnand_validate_cs(mtd))
+			return (-EINVAL);
+	}
+#endif
+
+PRINTK("brcmnand_scan: Done brcmnand_probe\n");
+
+
+#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_0_1
+	handle_ecclevel_v0_1(mtd, chip, cs);
+	
+#else
+	/*
+	 * v1.0 controller and after
+	 */
+	// This table is in the Architecture Doc
+	// pbase is the physical address of the "logical" start of flash.  Logical means how Linux sees it,
+	// and is given by the partition table defined in bcm7xxx-nand.c
+	// The "physical" start of the flash is always at 1FC0_0000
+
+
+	if (chip->chipSize <= (256<<20)) 
+		chip->pbase = 0x20000000 - chip->chipSize;
+	else // 512MB and up
+		chip->pbase = 0; 
+
+	// vbase is the address of the flash cache array
+	chip->vbase = (void*) BVIRTADDR(BCHP_NAND_FLASH_CACHEi_ARRAY_BASE);  // Start of Buffer Cache
+	// Already set in probe mtd->size = chip->chipSize * chip->ctrl->numchips;
+	// Make sure we use Buffer Array access, not direct access, Clear CS0
+	nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+	printk("%s: B4 nand_select = %08x\n", __FUNCTION__, (uint32_t) nand_select);
+	
+	nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+	printk("%s: After nand_select = %08x\n", __FUNCTION__, (uint32_t)  nand_select);
+	chip->directAccess = !(nand_select & BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK);
+
+
+
+	/*
+	  * Handle RD_ERASED_ECC bit, make sure it is not set
+	  */
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_1
+	{
+		uint32_t acc0 = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+
+		if (acc0 & BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_MASK) {
+			acc0 &= ~(BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_MASK);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc0);
+		}
+	}
+#endif
+
+
+
+	/* Handle Partial Write Enable configuration for MLC
+	 * {FAST_PGM_RDIN, PARTIAL_PAGE_EN}
+	 * {0, 0} = 1 write per page, no partial page writes (required for MLC flash, suitable for SLC flash)
+	 * {1, 1} = 4 partial page writes per 2k page (SLC flash only)
+	 * {0, 1} = 8 partial page writes per 2k page (not recommended)
+	 * {1, 0} = RESERVED, DO NOT USE
+ 	 */
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+	//if (4) 
+	{
+		/* For MLC, we only support BCH-4 or better */
+		/* THT for 2.6.31-2.3: Nowadays, some SLC chips require higher ECC levels */
+
+		//int eccOobSize;
+		uint32_t eccLevel, acc_control, org_acc_control; 
+		int nrSectorPP = chip->pageSize/512; // Number of sectors per page == controller's NOP
+		
+		org_acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+		eccLevel = handle_acc_control(mtd, chip, cs);
+		acc_control = brcmnand_ctrl_read(bchp_nand_acc_control(cs));
+
+
+PRINTK("190 eccLevel=%d, chip->ecclevel=%d, acc=%08x\n", eccLevel, chip->ecclevel, acc_control);
+
+#if defined(CONFIG_BCM_KF_NAND)
+		if (NAND_IS_MLC(chip))
+		{
+			printk("Setting NAND_COMPLEX_OOB_WRITE\n");
+			chip->options |= NAND_COMPLEX_OOB_WRITE;
+		}
+#endif
+
+/*
+ * For 3556 and 3548L, disable WR_PREEMPT
+ */
+#ifdef CONFIG_BCM3548
+		acc_control = check_n_disable_wr_preempt(acc_control);	
+#endif	
+
+		/* 
+		 * Some SLC flashes have page size of 4KB, or more, and may need to disable Partial Page Programming
+		 */				
+		if (NAND_IS_MLC(chip) || ((chip->nop > 0) && (nrSectorPP > chip->nop))) {
+			/* Set FAST_PGM_RDIN, PARTIAL_PAGE_EN  to {0, 0} for NOP=1 */
+			acc_control &= ~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+                                BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_MASK |
+#endif
+				BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK);
+			brcmnand_ctrl_write(bchp_nand_acc_control(cs), acc_control );
+			printk("Corrected for NOP=1: ACC_CONTROL = %08x\n", acc_control);
+		}
+
+	}
+	
+	
+#endif // NAND version 3.0 or later
+			
+#endif // Version 1.0+
+
+PRINTK("%s 10\n", __FUNCTION__);
+
+PRINTK("200 CS=%d, chip->ctrl->CS[%d]=%d\n", cs, chip->csi, chip->ctrl->CS[chip->csi]);
+PRINTK("200 chip->ecclevel=%d, acc=%08x\n", chip->ecclevel, 
+	brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi])));
+
+	chip->bbt_erase_shift =  ffs(mtd->erasesize) - 1;
+
+	/* Calculate the address shift from the page size */	
+	chip->page_shift = ffs(mtd->writesize) - 1;
+	chip->bbt_erase_shift = chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
+	chip->chip_shift = mtd64_ll_ffs(chip->chipSize) - 1;
+
+	printk(KERN_INFO "page_shift=%d, bbt_erase_shift=%d, chip_shift=%d, phys_erase_shift=%d\n",
+		chip->page_shift, chip->bbt_erase_shift , chip->chip_shift, chip->phys_erase_shift);
+
+	/* Set the bad block position */
+	/* NAND_LARGE_BADBLOCK_POS also holds for MLC NAND */
+	chip->badblockpos = mtd->writesize > 512 ? 
+		NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+
+
+
+PRINTK("%s 220\n", __FUNCTION__);
+	
+
+
+	/* The number of bytes available for the filesystem to place fs dependend
+	 * oob data */
+//PRINTK( "Determining chip->oobavail, chip->autooob=%p \n", chip->autooob);
+
+	/* Version ID */
+	version_id = chip->ctrl_read(BCHP_NAND_REVISION);
+	version_major = (version_id & 0xff00) >> 8;
+	version_minor = (version_id & 0xff);
+
+	printk(KERN_INFO "Brcm NAND controller version = %x.%x NAND flash size %dMB @%08x\n", 
+		version_major, version_minor, mtd64_ll_low(chip->chipSize>>20), (uint32_t) chip->pbase);
+
+#ifdef EDU_DEBUG_1
+printk("++++++++++++ EDU_DEBUG_1 enabled\n");
+#endif
+#ifdef EDU_DEBUG_2
+printk("++++++++++++ EDU_DEBUG_2 enabled\n");
+#endif
+#ifdef EDU_DEBUG_3
+printk("++++++++++++ EDU_DEBUG_3 enabled\n");
+#endif
+#if defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
+init_edu_buf();
+
+  #ifdef EDU_DEBUG_4
+  printk("++++++++++++ EDU_DEBUG_4 (read verify) enabled\n");
+  #endif
+
+  #ifdef EDU_DEBUG_5
+  printk("++++++++++++ EDU_DEBUG_5 (write verify) enabled\n");
+  #endif
+#endif
+
+PRINTK("%s 230\n", __FUNCTION__);
+	/*
+	 * Initialize the eccmask array for ease of verifying OOB area.
+	 */
+	//fill_ecccmp_mask(mtd);
+	
+
+	/* Store the number of chips and calc total size for mtd */
+	//chip->ctrl->numchips = i;
+	//mtd->size = i * chip->chipSize;
+
+	/* Preset the internal oob write buffer */
+	memset(BRCMNAND_OOBBUF(chip->ctrl->buffers), 0xff, mtd->oobsize);
+
+	/*
+	 * If no default placement scheme is given, select an appropriate one
+	 * We should make a table for this convoluted mess. (TBD)
+	 */
+PRINTK("%s 40, mtd->oobsize=%d, chip->ecclayout=%08x\n", __FUNCTION__, mtd->oobsize, 
+	(unsigned int) chip->ecclayout);
+	if (!chip->ecclayout) {
+PRINTK("%s 42, mtd->oobsize=%d, chip->ecclevel=%d, isMLC=%d, chip->cellinfo=%d\n", __FUNCTION__, 
+	mtd->oobsize, chip->ecclevel, NAND_IS_MLC(chip), chip->cellinfo);
+		switch (mtd->oobsize) {
+		case 16: /* Small size NAND */
+			if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
+				chip->ecclayout = &brcmnand_oob_16;
+			}
+			else if (chip->ecclevel == BRCMNAND_ECC_BCH_4) {
+printk("ECC layout=brcmnand_oob_bch4_512\n");
+				chip->ecclayout = &brcmnand_oob_bch4_512;
+			}
+			else if (chip->ecclevel != BRCMNAND_ECC_DISABLE) {
+				printk(KERN_ERR "Unsupported ECC level for page size of %d\n", mtd->writesize);
+				BUG();
+			}
+			break;
+			
+		case 64: /* Large page NAND 2K page */
+			if (NAND_IS_MLC(chip) || chip->ecclevel == BRCMNAND_ECC_BCH_4 
+				|| chip->ecclevel == BRCMNAND_ECC_BCH_8  
+			) {
+				switch (mtd->writesize) {
+				case 4096: /* Impossible for 64B OOB per page */
+					printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
+					BUG();
+/*
+printk("ECC layout=brcmnand_oob_bch4_4k\n");
+					chip->ecclayout = &brcmnand_oob_bch4_4k;
+*/
+					break;
+				case 2048:
+				if (chip->ecclevel == BRCMNAND_ECC_BCH_4 ) {
+printk("ECC layout=brcmnand_oob_bch4_2k\n");
+					chip->ecclayout = &brcmnand_oob_bch4_2k;
+				}
+				else if (chip->ecclevel == BRCMNAND_ECC_BCH_8 ) {
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+					if (chip->eccOobSize == 16) {
+						printk("ECC layout=brcmnand_oob_bch8_16_2k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_16_2k;
+					}
+					else if (chip->eccOobSize >=27) {
+						printk("ECC layout=brcmnand_oob_bch8_27_2k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_27_2k;
+					}
+#else
+					printk("ECC layout=brcmnand_oob_bch8_27_2k\n");
+					chip->ecclayout = &brcmnand_oob_bch8_27_2k;
+#endif
+				}
+
+				break;
+				default:
+					printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
+					BUG();
+					break;
+				}
+			}
+			else if (chip->ecclevel == BRCMNAND_ECC_BCH_12)  			
+			{
+				printk("ECC layout=brcmnand_oob_bch12_27_2k\n");
+				chip->ecclayout = &brcmnand_oob_bch12_27_2k;
+			}
+			else if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
+printk("ECC layout=brcmnand_oob_bch4_4k\n");
+				chip->ecclayout = &brcmnand_oob_64;
+			}
+			else {
+				printk(KERN_ERR "Unsupported ECC code %d with only 64B OOB per page\n", chip->ecclevel);
+				BUG();
+			}
+			break;
+			
+		case 128: /* Large page NAND 4K page or MLC */
+			if (NAND_IS_MLC(chip)) {
+				switch (mtd->writesize) {
+				case 4096:
+					switch(chip->ecclevel) {
+					case BRCMNAND_ECC_BCH_4:
+printk("ECC layout=brcmnand_oob_bch4_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch4_4k;
+						break;
+					case BRCMNAND_ECC_BCH_8:
+						if (chip->eccOobSize == 16) {
+printk("ECC layout=brcmnand_oob_bch8_16_4k\n");
+							chip->ecclayout = &brcmnand_oob_bch8_16_4k;
+						}
+#if 1
+						else if (chip->eccOobSize >=27) {
+printk("ECC layout=brcmnand_oob_bch8_27_4k\n");
+							chip->ecclayout = &brcmnand_oob_bch8_27_4k;
+						}
+						break;
+					case BRCMNAND_ECC_BCH_12:
+printk("ECC layout=brcmnand_oob_bch12_27_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch12_27_4k;
+						break;
+#endif
+
+					default:
+						printk(KERN_ERR "Unsupported ECC code %d for MLC with pageSize=%d\n", chip->ecclevel, mtd->writesize);
+						BUG();
+					}
+					break;
+				default:
+					printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
+					BUG();
+					break;
+				}
+			}
+			else { /* SLC chips, there are now some SLCs that require BCH-4 or better */
+				switch (mtd->writesize) {
+				case 4096:
+					if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
+printk("ECC layout=brcmnand_oob_128\n");
+						chip->ecclayout = &brcmnand_oob_128;
+					}
+					else if (chip->ecclevel == BRCMNAND_ECC_BCH_4) {
+printk("ECC layout=brcmnand_oob_bch4_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch4_4k;
+					}
+					else if (chip->ecclevel == BRCMNAND_ECC_BCH_8) {
+						if (chip->eccOobSize == 16) {
+printk("ECC layout=brcmnand_oob_bch8_16_4k\n");
+							chip->ecclayout = &brcmnand_oob_bch8_16_4k;
+						}
+						else if (chip->eccOobSize >=27) {
+printk("ECC layout=brcmnand_oob_bch8_27_4k\n");
+							chip->ecclayout = &brcmnand_oob_bch8_27_4k;
+
+						}
+					}
+                                 	else if (chip->ecclevel == BRCMNAND_ECC_BCH_12) {
+printk("ECC layout=brcmnand_oob_bch12_27_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch12_27_4k;
+					}
+					break;
+
+				default:
+					printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
+					BUG();
+					break;
+				}
+			}/* else SLC chips */
+			break; /* 128B OOB case */
+			
+		default: /* 27.25/28 or greater OOB size */
+PRINTK("27B OOB\n");
+PRINTK("300 chip->ecclevel=%d, acc=%08x\n", chip->ecclevel, brcmnand_ctrl_read(bchp_nand_acc_control(chip->ctrl->CS[chip->csi])));
+			if (mtd->writesize == 2048) 
+			{
+				switch(chip->ecclevel) {
+				case BRCMNAND_ECC_BCH_4:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch4_2k\n");
+					chip->ecclayout = &brcmnand_oob_bch4_2k;
+					break;
+				case BRCMNAND_ECC_BCH_8:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch8_27_2k\n");
+					chip->ecclayout = &brcmnand_oob_bch8_27_2k;
+					break;
+				case BRCMNAND_ECC_BCH_12:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch12_27_2k\n");
+					chip->ecclayout = &brcmnand_oob_bch12_27_2k;
+					break;
+				default:
+					printk(KERN_ERR "Unsupported ECC code %d with pageSize=%d\n", chip->ecclevel, mtd->writesize);
+					BUG();
+				}
+
+			}
+			else if (mtd->writesize == 4096) {
+				switch(chip->ecclevel) {
+				case BRCMNAND_ECC_BCH_4:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch4_4k\n");
+					chip->ecclayout = &brcmnand_oob_bch4_4k;
+					break;
+				case BRCMNAND_ECC_BCH_8:
+					if (chip->eccOobSize == 16) {
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch8_16_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_16_4k;
+					}
+					else if (chip->eccOobSize >=27) {
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch8_27_4k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_27_4k;
+					}
+					break;
+				case BRCMNAND_ECC_BCH_12:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch12_27_4k\n");
+					chip->ecclayout = &brcmnand_oob_bch12_27_4k;
+					break;
+				default:
+					printk(KERN_ERR "Unsupported ECC code %d  with pageSize=%d\n", chip->ecclevel, mtd->writesize);
+					BUG();
+				}
+
+			}
+			else if (mtd->writesize == 8192) { // 8KB page 
+				switch(chip->ecclevel) {
+				case BRCMNAND_ECC_BCH_4:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch4_8k\n");
+					chip->ecclayout = &brcmnand_oob_bch4_8k;
+					break;
+				case BRCMNAND_ECC_BCH_8:
+					if (chip->eccOobSize == 16) {
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch8_16_8k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_16_8k;
+					}
+					else if (chip->eccOobSize >=27) {
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch8_27_8k\n");
+						chip->ecclayout = &brcmnand_oob_bch8_27_8k;
+					}
+					break;
+				case BRCMNAND_ECC_BCH_12:
+printk(KERN_INFO "ECC layout=brcmnand_oob_bch12_27_8k\n");
+					chip->ecclayout = &brcmnand_oob_bch12_27_8k;
+					break;
+				default:
+					printk(KERN_ERR "Unsupported ECC code %d for MLC with pageSize=%d\n", chip->ecclevel, mtd->writesize);
+					BUG();
+				}
+			}
+			else{
+				printk(KERN_ERR "Unsupported page size of %d and oobsize %d\n", mtd->writesize, mtd->oobsize);
+				BUG();
+				break;
+			}
+			break; /* 27B OOB */
+		}
+	}
+
+
+
+	/*
+	 * The number of bytes available for a client to place data into
+	 * the out of band area
+	 */
+printk(KERN_INFO "%s:  mtd->oobsize=%d\n", __FUNCTION__, mtd->oobsize);
+	chip->ecclayout->oobavail = 0;
+	for (i = 0; chip->ecclayout->oobfree[i].length; i++)
+		chip->ecclayout->oobavail +=
+			chip->ecclayout->oobfree[i].length;
+
+	mtd->oobavail = chip->ecclayout->oobavail;
+
+printk(KERN_INFO "%s: oobavail=%d, eccsize=%d, writesize=%d\n", __FUNCTION__, 
+	chip->ecclayout->oobavail, chip->eccsize, mtd->writesize);
+
+	/*
+	 * Set the number of read / write steps for one page depending on ECC
+	 * mode
+	 */
+
+	chip->eccsteps = mtd->writesize / chip->eccsize;
+	chip->eccbytes = brcmnand_eccbytes[chip->ecclevel];
+printk(KERN_INFO "%s, eccsize=%d, writesize=%d, eccsteps=%d, ecclevel=%d, eccbytes=%d\n", __FUNCTION__, 
+	chip->eccsize, mtd->writesize, chip->eccsteps, chip->ecclevel, chip->eccbytes);
+//udelay(2000000);
+	if(chip->eccsteps * chip->eccsize != mtd->writesize) {
+		printk(KERN_WARNING "Invalid ecc parameters\n");
+
+//udelay(2000000);
+		BUG();
+	}
+	chip->ecctotal = chip->eccsteps * chip->eccbytes;
+	//ECCSIZE(mtd) = chip->eccsize;
+
+	/* Initialize state */
+	chip->ctrl->state = BRCMNAND_FL_READY;
+
+#if 0
+	/* De-select the device */
+	chip->select_chip(mtd, -1);
+#endif
+
+	/* Invalidate the pagebuffer reference */
+	chip->pagebuf = -1LL;
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = MTD_NANDFLASH;
+	
+	/*
+	 * Now that we know what kind of NAND it is (SLC vs MLC),
+	 * tell the MTD layer how to test it.
+	 * ** 01/23/08: Special case: SLC with BCH ECC will be treated as MLC -- at the MTD level --
+	 * ** 			 by the high level test MTD_IS_MLC() 
+	 * The low level test NAND_IS_MLC() still tells whether the flash is actually SLC or MLC
+	 * (so that BBT codes know where to find the BI marker)
+	 */
+	if (NAND_IS_MLC(chip)) {
+		mtd->flags = MTD_CAP_MLC_NANDFLASH;
+	}
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+	/*
+	 * If controller is version 3 or later, allow SLC to have BCH-n ECC, 
+	 * -- ONLY IF THE CFE SAYS SO --
+	 * in which case, it is treated as if it is an MLC flash by file system codes
+	 */
+	else if (chip->ecclevel > BRCMNAND_ECC_DISABLE && chip->ecclevel < BRCMNAND_ECC_HAMMING) { 
+		// CFE wants BCH codes on SLC Nand
+		mtd->flags = MTD_CAP_MLC_NANDFLASH;
+	}
+#endif
+	else {
+		mtd->flags = MTD_CAP_NANDFLASH;
+	}
+	//mtd->ecctype = MTD_ECC_SW;
+#if defined(CONFIG_BCM_KF_NAND)
+	g_nand_nop = chip->nop;
+#endif
+PRINTK("300 CS=%d, chip->ctrl->CS[%d]=%d\n", cs, chip->csi, chip->ctrl->CS[chip->csi]);
+
+	
+	mtd->_erase = brcmnand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = brcmnand_read;
+	mtd->_write = brcmnand_write;
+	mtd->_read_oob = brcmnand_read_oob;
+	mtd->_write_oob = brcmnand_write_oob;
+
+	// Not needed?
+	mtd->_writev = brcmnand_writev;
+	
+	mtd->_sync = brcmnand_sync;
+	mtd->_lock = NULL;
+	mtd->_unlock = brcmnand_unlock;
+	mtd->_suspend = brcmnand_suspend;
+	mtd->_resume = brcmnand_resume;
+	
+	mtd->_block_isbad = brcmnand_block_isbad;
+	mtd->_block_markbad = brcmnand_block_markbad;
+
+	/* propagate ecc.layout to mtd_info */
+	mtd->ecclayout = chip->ecclayout;
+
+	mtd->reboot_notifier.notifier_call = brcmnand_reboot_cb;
+	register_reboot_notifier(&mtd->reboot_notifier);
+	
+	mtd->owner = THIS_MODULE;
+
+
+
+
+
+	
+
+    /*
+     * Clear ECC registers 
+     */
+    chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+    chip->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+  
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+    chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
+    chip->ctrl_write(BCHP_NAND_ECC_UNC_EXT_ADDR, 0);
+#endif
+    
+
+#if 0
+	/* Unlock whole block */
+	if (mtd->unlock) {
+		PRINTK("Calling mtd->unlock(ofs=0, MTD Size=%016llx\n", device_size(mtd));
+		mtd->unlock(mtd, 0x0, device_size(mtd));
+	}
+#endif
+
+
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+	if (notFirstChip == 0) {
+		notFirstChip = 1;
+		EDU_init();
+	}
+
+#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+	if (!chip->read_page)
+		chip->read_page = brcmnand_isr_read_page;
+	if (!chip->write_page)
+		chip->write_page = brcmnand_isr_write_page;
+	if (!chip->read_page_oob)
+		chip->read_page_oob = brcmnand_isr_read_page_oob;
+	/* There is no brcmnand_isr_write_page_oob */
+	if (!chip->write_page_oob)
+		chip->write_page_oob = brcmnand_write_page_oob;
+#endif
+#endif
+
+
+//	if (!lastChip)
+//		return 0;
+
+
+
+//gdebug = 4;
+PRINTK("500 chip=%p, CS=%d, chip->ctrl->CS[%d]=%d\n", chip, cs, chip->csi, chip->ctrl->CS[chip->csi]);
+	err =  chip->scan_bbt(mtd);
+//gdebug = 0;
+
+//
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+  #ifdef CONFIG_MTD_BRCMNAND_EDU
+	// For EDU Allocate the buffer early.
+	gblk_buf = BRCMNAND_malloc((mtd->erasesize/mtd->writesize)*(mtd->writesize + mtd->oobsize));
+  #endif
+  
+	if(brcmnand_create_cet(mtd) < 0) {
+		printk(KERN_INFO "%s: CET not created\n", __FUNCTION__);
+	}
+#endif
+
+PRINTK("%s 99\n", __FUNCTION__);
+
+	return err;
+
+}
+
+
+
+#if defined( CONFIG_BCM7401C0 ) || defined( CONFIG_BCM7118A0 )  || defined( CONFIG_BCM7403A0 )
+static DEFINE_SPINLOCK(bcm9XXXX_lock);
+static unsigned long misb_war_flags;	/* IRQ flags saved by HANDLE_MISB_WAR_BEGIN(), restored by _END() */
+
+static inline void
+HANDLE_MISB_WAR_BEGIN(void)
+{
+	/* if it is 7401C0, then we need this workaround */
+	if(brcm_ebi_war)
+	{	
+		spin_lock_irqsave(&bcm9XXXX_lock, misb_war_flags);	/* NOTE(review): flags saved into a shared global; racy if two CPUs contend here -- confirm */
+		BDEV_WR(0x00400b1c, 0xFFFF);	/* NOTE(review): 8 identical writes presumably flush the MISB bridge -- confirm vs. errata */
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+		BDEV_WR(0x00400b1c, 0xFFFF);
+	}
+}
+
+static inline void
+HANDLE_MISB_WAR_END(void)
+{
+	if(brcm_ebi_war)
+	{	
+		spin_unlock_irqrestore(&bcm9XXXX_lock, misb_war_flags);	/* releases the lock taken in HANDLE_MISB_WAR_BEGIN() */
+	}
+}
+
+#else
+#define HANDLE_MISB_WAR_BEGIN()	/* no-op on chips without the MISB bridge workaround */
+#define HANDLE_MISB_WAR_END()
+#endif
+
+
+#if 0	/* whole function compiled out; even if re-enabled, the inner #if 1 BUG()s (see THT 03/12/09 note) */
+/*
+ * @ buff		Kernel buffer to hold the data read from the NOR flash, must be able to hold len bytes, 
+ *			and aligned on word boundary.
+ * @ offset	Offset of the data from CS0 (on NOR flash), must be on word boundary.
+ * @ len		Number of bytes to be read, must be even number.
+ *
+ * returns 0 on success, negative error codes on failure.
+ *
+ * The caller thread may block until access to the NOR flash can be granted.
+ * Further accesses to the NAND flash (from other threads) will be blocked until this routine returns.
+ * The routine performs the required swapping of CS0/CS1 under the hood.
+ */
+int brcmnand_readNorFlash(struct mtd_info *mtd, void* buff, unsigned int offset, int len)
+{
+	struct brcmnand_chip* chip = (struct brcmnand_chip*) mtd->priv;
+	int ret = -EFAULT;
+	int i; 
+	int csNand; // Which CS is NAND
+	volatile unsigned long cs0Base, cs0Cnfg, cs0BaseAddr, csNandSelect;
+	volatile unsigned long csNandBase[MAX_NAND_CS], csNandCnfg[MAX_NAND_CS];	/* saved per-CS EBI base/config, restored after the NOR read */
+	unsigned int romSize;
+	volatile uint16_t* pui16 = (volatile uint16_t*) buff;
+	volatile uint16_t* fp;
+
+#if 1
+/*
+ *THT 03/12/09: This should never be called since the CFE no longer disable CS0
+ * when CS1 is on NAND
+ */
+ 	printk("%s should never be called\n", __FUNCTION__);
+	BUG();
+#else
+
+	if (!chip) { // When booting from CRAMFS/SQUASHFS using /dev/romblock
+		chip = brcmnand_get_device_exclusive();
+		mtd = (struct mtd_info*) chip->priv;
+	}
+	else if (brcmnand_get_device(mtd, BRCMNAND_FL_EXCLUSIVE))
+		return ret;
+
+	romSize = get_rom_size((unsigned long*) &cs0Base);
+	
+	cs0BaseAddr = cs0Base & BCHP_EBI_CS_BASE_0_base_addr_MASK;
+
+	if ((len + offset) > romSize) {	/* bounds check before touching the bus */
+		printk("%s; Attempt to read past end of CS0, (len+offset)=%08x, romSize=%dMB\n",
+			__FUNCTION__, len + offset, romSize>>20);
+		ret = (-EINVAL);
+		goto release_device_and_out;
+	}
+
+	cs0Cnfg = *(volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_CONFIG_0);
+
+	// Turn off NAND CS
+	for (i=0; i < chip->ctrl->numchips; i++) {
+		csNand = chip->ctrl->CS[i];
+
+		if (csNand == 0) {
+			printk("%s: Call this routine only if NAND is not on CS0\n", __FUNCTION__);
+			ret = (-EINVAL);
+			goto release_device_and_out;
+		}
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+		BUG_ON(csNand > 5);
+#else
+		BUG_ON(csNand > 7);
+#endif
+		csNandBase[i] = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_BASE_0 + 8*csNand);
+		csNandCnfg[i] = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_CONFIG_0 + 8*csNand);
+
+		// Turn off NAND, must turn off both NAND_CS_NAND_SELECT and CONFIG.
+		// We turn off the CS_CONFIG here, and will turn off NAND_CS_NAND_SELECT for all CS at once,
+		// outside the loop.
+		*(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_CONFIG_0 + 8*csNand) = 
+			csNandCnfg[i] & (~BCHP_EBI_CS_CONFIG_0_enable_MASK);
+
+	}
+	
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
+	csNandSelect = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
+
+	brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, csNandSelect & 
+		~(
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+			BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_MASK
+			| BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_MASK
+			| BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK
+			| BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_MASK
+			| BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_MASK
+			| BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK
+#else
+			0x0000003E	/* Not documented on V1.0+ */
+#endif
+		));
+#endif
+
+	// Turn on NOR on CS0
+	*(volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_CONFIG_0) = 
+		cs0Cnfg | BCHP_EBI_CS_CONFIG_0_enable_MASK;
+
+	// Take care of MISB Bridge bug on 7401c0/7403a0/7118a0
+	HANDLE_MISB_WAR_BEGIN();
+
+	// Read NOR, 16 bits at a time, we have already checked the out-of-bound condition above.
+	fp = (volatile uint16_t*) (KSEG1ADDR(cs0BaseAddr + offset));
+	for (i=0; i < (len>>1); i++) {	/* len is even per contract; copy len/2 halfwords */
+		pui16[i] = fp[i];
+	}
+
+	HANDLE_MISB_WAR_END();
+
+	// Turn Off NOR
+	*(volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_CONFIG_0) = 
+		cs0Cnfg & (~BCHP_EBI_CS_CONFIG_0_enable_MASK);
+
+	// Turn NAND back on
+	for (i=0; i < chip->ctrl->numchips; i++) {
+		csNand = chip->ctrl->CS[i];
+		if (csNand == 0) {
+			printk("%s: Call this routine only if NAND is not on CS0\n", __FUNCTION__);
+			ret = (-EINVAL);
+			goto release_device_and_out;
+		}
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+		BUG_ON(csNand > 5);
+#else
+		BUG_ON(csNand > 7);
+#endif
+		*(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_BASE_0 + 8*csNand) = csNandBase[i] ;
+		*(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_CONFIG_0 + 8*csNand) = csNandCnfg[i];
+	}
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
+	// Restore NAND_CS_SELECT
+	brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, csNandSelect);
+#endif
+	udelay(10000); // Wait for ID Configuration to stabilize
+	
+release_device_and_out:
+	brcmnand_release_device(mtd);
+//printk("<-- %s\n", __FUNCTION__);
+
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(brcmnand_readNorFlash);
+#endif
+
+/**
+ * brcmnand_release - [BrcmNAND Interface] Free resources held by the BrcmNAND device
+ * @param mtd		MTD device structure
+ */
+void brcmnand_release(struct mtd_info *mtd)
+{
+	//struct brcmnand_chip * chip = mtd->priv;
+
+	/* Unregister reboot notifier */
+	brcmnand_prepare_reboot_priv(mtd);	/* NOTE(review): presumably flushes pending state before teardown -- confirm */
+	unregister_reboot_notifier(&mtd->reboot_notifier);
+	mtd->reboot_notifier.notifier_call = NULL;
+	
+	/* Deregister the device (unregisters partitions as well) */
+	mtd_device_unregister(mtd);
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+	if (gblk_buf) {	/* EDU block buffer allocated during scan; release it here */
+		BRCMNAND_free(gblk_buf);
+		gblk_buf = NULL;
+	}
+#endif
+
+
+#if 0
+	/* Buffer allocated by brcmnand_scan */
+	if (chip->options & NAND_DATABUF_ALLOC)
+		kfree(chip->data_buf);
+
+	/* Buffer allocated by brcmnand_scan */
+	if (chip->options & NAND_OOBBUF_ALLOC)
+		kfree(chip->oob_buf);
+#endif
+
+}
+
diff --git a/drivers/mtd/brcmnand/brcmnand_bbt.c b/drivers/mtd/brcmnand/brcmnand_bbt.c
new file mode 100644
index 0000000000000000000000000000000000000000..9ef0080e6e88a227da1d571e33eb980a2a8055f2
--- /dev/null
+++ b/drivers/mtd/brcmnand/brcmnand_bbt.c
@@ -0,0 +1,2284 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+    <:copyright-BRCM:2012:DUAL/GPL:standard
+    
+       Copyright (c) 2012 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+    File: brcmnand_bbt.c
+
+    Description: 
+    NAND driver for Samsung K9F1G08U0A chip with Broadcom NAND controller.
+    The main difference between the Broadcom controller and OneNAND is that the Broadcom
+    NAND controller has only a 512B cache (bufferram) regardless of the flash chip, 
+    whereas OneNAND has multiple bufferram's to match the page size.
+    This complicates this driver quite a bit, because, for large page flash (2K page)
+    we have to read in all 4 slices before we know for sure whether a page is bad.
+
+ * When brcmnand_scan_bbt is called, then it tries to find the bad block table
+ * depending on the options in the bbt descriptor(s). If a bbt is found
+ * then the contents are read and the memory based bbt is created. If a
+ * mirrored bbt is selected then the mirror is searched too and the
+ * versions are compared. If the mirror has a greater version number
+ * than the mirror bbt is used to build the memory based bbt.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the bbt's is out of date or does not exist it is (re)created.
+ * If no bbt exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created bbts like the one found on M-SYS DOC devices
+ * the bbt is searched and read but never created
+ *
+ * The autogenerated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the oob area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date.
+ *
+ * The table uses 2 bits per block
+ * 11b: 	block is good
+ * 00b: 	block is factory marked bad
+ * 01b, 10b: 	block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b:		block is good
+ * 01b:		block is marked bad due to wear
+ * 10b:		block is reserved (to protect the bbt area)
+ * 11b:		block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+
+when	who what
+-----	---	----
+070807	tht	codings derived from nand_base & brcmnand_bbt implementations.
+ */
+
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+
+#include "brcmnand_priv.h"
+#include <bcm_map_part.h>
+#include <linux/mtd/mtd64.h>
+#include <linux/module.h>
+
+
+#define PRINTK(...) do { } while(0)
+//#define PRINTK printk
+//#define DEBUG_BBT
+#define DEBUG(...) do { } while(0)
+
+
+extern int gClearBBT;
+extern int gdebug;
+
+//char brcmNandBBTMsg[1024];
+
+	/* brcmnand=
+	 *	rescan: 	1. Rescan for bad blocks, and update existing BBT
+	 *	showbbt:	2. Print out the contents of the BBT on boot up.
+	 *
+	 * The following commands are implemented but should be removed for production builds.  
+	 * Use userspace flash_eraseall instead.
+	 * These were intended for development debugging only.
+	 * 	erase:	7. Erase entire flash, except CFE, and rescan for bad blocks 
+	 *	eraseall:	8. Erase entire flash, and rescan for bad blocks
+	 *	clearbbt:	9. Erase BBT and rescan for bad blocks.  (DANGEROUS, may lose Mfg's BIs).
+	 */
+#define NANDCMD_RESCAN	1
+#define NANDCMD_SHOWBBT	2
+
+#define NANDCMD_ERASE		7
+#define NANDCMD_ERASEALL	8
+#define NANDCMD_CLEARBBT	9
+
+int brcmnand_update_bbt (struct mtd_info *mtd, loff_t offs);
+
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf:	the buffer to search
+ * @len:	the length of buffer to search
+ * @paglen:	the pagelength
+ * @td:		search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers.
+ * If the SCAN_EMPTY option is set then check, if all bytes except the
+ * pattern area contain 0xff
+ * Returns 0 on match, -1 otherwise.
+*/
+static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+	int i, end = 0;
+	uint8_t *p = buf;
+
+PRINTK("Check_pattern len=%d, pagelen=%d, td->offs-%d, pattern=%c%c%c%c\n", len, paglen, td->offs, 
+	td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3]);
+#ifdef DEBUG_BBT
+if (gdebug) { printk("oobbuf=\n"); print_oobbuf(&buf[paglen], len-paglen); }
+#endif
+	end = paglen + td->offs;	/* byte offset of the pattern within buf */
+	if (td->options & NAND_BBT_SCANEMPTY) {
+		for (i = 0; i < end; i++) {	/* every byte before the pattern must be erased (0xff) */
+			if (p[i] != 0xff) {
+PRINTK("check_pattern 1: p[%d] == %02x - expect FF\n", i, p[i]);
+				return -1;
+			}
+		}
+	}
+	p += end;	/* p now points at the expected pattern location */
+
+	/* Compare the pattern */
+	for (i = 0; i < td->len; i++) {
+		if (p[i] != td->pattern[i]) {
+PRINTK("%s: expect @i=%d td->pat[%d]=%02x, found p[%d]=%02x\n", 
+__FUNCTION__,i, i, td->pattern[i], td->offs+i, p[td->offs + i]);
+			return -1;
+		}
+	}
+
+	if (td->options & NAND_BBT_SCANEMPTY) {
+		p += td->len;
+		end += td->len;
+		for (i = end; i < len; i++) {	/* and every byte after the pattern must be erased too */
+			if (*p++ != 0xff) {
+PRINTK("check_pattern 2: p[%d] == %02x - expect FF\n", i, p[i]);
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf:	the buffer to search
+ * @td:		search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check
+ * Returns 0 on match, -1 otherwise.
+*/
+static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
+{
+	int i;
+	uint8_t *p = buf;
+
+	/* Compare the pattern */
+	for (i = 0; i < td->len; i++) {
+		if (p[td->offs + i] != td->pattern[i]) {	/* pattern lives at td->offs within the (OOB) buffer */
+PRINTK("%s: expect @i=%d td->pat[%d]=%02x, found p[%d]=%02x\n", 
+__FUNCTION__,i, i, td->pattern[i], td->offs+i, p[td->offs + i]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+
+
+/**
+ * brcmnand_read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @page:	the starting page
+ * @num:	the number of bbt descriptors to read
+ * @bits:	number of bits per block
+ * @offs:	offset in the memory table
+ * @reserved_block_code:	Pattern to identify reserved blocks
+ *
+ * Read the bad block table starting from page.
+ * Returns 0 on success or a negative error code from mtd_read().
+ */
+static int brcmnand_read_bbt (struct mtd_info *mtd, uint8_t *buf, int64_t page, int num,
+	int bits, int offs, int reserved_block_code)
+{
+	int res, i, j, act = 0;
+	struct brcmnand_chip *this = mtd->priv;
+	size_t retlen, len, totlen;
+	loff_t from;
+	uint8_t msk = (uint8_t) ((1 << bits) - 1);	/* mask covering one on-flash table entry */
+
+	totlen = (num * bits) >> 3;	/* on-flash table size in bytes */
+	from = ((loff_t)page) << this->page_shift;
+
+	/*
+	 * Clear ECC registers 
+	 */
+	this->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+	this->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+
+
+	while (totlen) {
+		len = min (totlen, (size_t) (1 << this->bbt_erase_shift));	/* read at most one erase block per pass */
+PRINTK("%s: calling read_ecc len=%d, bits=%d, num=%d, totallen=%d\n", __FUNCTION__, len, bits, num, totlen);
+		res = mtd_read(mtd, from, len, &retlen, buf);
+		if (res < 0) {
+			if (retlen != len) {
+				printk (KERN_INFO "brcmnand_bbt: Error reading bad block table\n");
+				return res;
+			}
+			printk (KERN_ERR "%s: ECC error %d while reading bad block table\n", __FUNCTION__, res);
+			/* THT 11/10/09: If read fails, we should ignore the data, so return w/o analyzing it */
+			return res;
+		}
+
+		/* Analyse data */
+		for (i = 0; i < len; i++) {
+			uint8_t dat = buf[i];
+			for (j = 0; j < 8; j += bits, act += 2) {	/* act advances 2 bits per block: in-memory table entry width */
+				uint8_t tmp = (dat >> j) & msk;
+				if (tmp == msk)	/* all bits set: block is good, nothing to record */
+					continue;
+				if (reserved_block_code && (tmp == reserved_block_code)) {
+					printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
+						((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+					this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);	/* 10b: reserved */
+					mtd->ecc_stats.bbtblocks++;
+					continue;
+				}
+				/* Leave it for now, if its matured we can move this
+				 * message to MTD_DEBUG_LEVEL0 */
+				printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
+					((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+				/* Factory marked bad or worn out ? */
+				if (tmp == 0)
+					this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);	/* 11b: factory bad */
+				else
+					this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);	/* 01b: worn out */
+				mtd->ecc_stats.badblocks++;
+			}
+		}
+		totlen -= len;
+		from += (loff_t)len;
+	}
+	return 0;
+}
+
+/**
+ * brcmnand_read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @td:		descriptor for the bad block table
+ * @chip:	read the table for a specific chip, -1 read all chips.
+ *		Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+*/
+static int brcmnand_read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int res = 0, i;
+	int bits;
+
+PRINTK("-->%s, numchips=%d, chip=%d\n", __FUNCTION__, this->ctrl->numchips, chip);
+	bits = td->options & NAND_BBT_NRBITS_MSK;	/* bits per block as stored on flash */
+	if (td->options & NAND_BBT_PERCHIP) {
+		int offs = 0;	/* byte offset into the in-memory table, advanced per chip */
+		for (i = 0; i < this->ctrl->numchips; i++) {
+			if (chip == -1 || chip == i)
+				res = brcmnand_read_bbt (mtd, buf, td->pages[i], this->chipSize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
+			if (res) {
+PRINTK("<-- brcmnand_read_abs_bbt ret = %d\n", res);
+				return res;
+			}
+			offs += this->chipSize >> (this->bbt_erase_shift + 2);	/* 2 bits/block => blocks/4 bytes per chip */
+		}
+	} else {
+PRINTK("%s: read BBT at %llx\n", __FUNCTION__, td->pages[0]);
+		res = brcmnand_read_bbt (mtd, buf, td->pages[0], 
+				(uint32_t) (this->mtdSize >> this->bbt_erase_shift), bits, 0, td->reserved_block_code);
+		if (res) {
+PRINTK("<-- brcmnand_read_abs_bbt 2 ret = %d\n", res);
+			return res;
+		}
+	}
+PRINTK("<-- brcmnand_read_abs_bbt ret 0\n");
+	return 0;
+}
+
+/*
+ * Scan read raw data from flash
+ * Reads page data plus OOB in raw mode (MTD_OPS_RAW); the OOB bytes
+ * land at buf[mtd->writesize].  Returns the mtd_read_oob() result.
+ */
+static int brcmnand_scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+			 size_t len)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+
+	ops.mode = MTD_OPS_RAW;	/* bypass ECC layout: raw page + raw OOB */
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.oobbuf = &buf[mtd->writesize];	/* OOB stored right after the page data */
+	ops.datbuf = buf;
+	ops.len = len;
+
+	ret = mtd_read_oob(mtd, offs, &ops);
+	
+PRINTK("%s: Reading BBT Sig @%0llx, OOB=\n", __FUNCTION__, offs); 
+#ifdef DEBUG_BBT
+if (gdebug) 
+	print_oobbuf(ops.oobbuf, mtd->oobsize);
+#endif
+	return ret;
+}
+
<br/>
+/*
+ * Scan write data with oob to flash
+ * Programs one page (data + OOB) via the chip's write_oob hook;
+ * used to write the BBT signature/contents out to flash.
+ */
+static int brcmnand_scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+			  uint8_t *buf, uint8_t *oob)
+{
+	struct mtd_oob_ops ops;
+	struct brcmnand_chip *this = mtd->priv;
+	
+int ret;
+
+	ops.mode = MTD_OPS_PLACE_OOB;	/* place OOB bytes at their raw offsets */
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = buf;
+	ops.oobbuf = oob;
+	ops.len = len;
+
+
+PRINTK("%s: Writing BBT Sig @%0llx, OOB=\n", __FUNCTION__, offs); 
+if (gdebug) print_oobbuf(oob, mtd->oobsize);
+
+	ret = this->write_oob(mtd, offs, &ops);
+//gdebug = 0;
+	return ret;
+}
+
+/**
+ * brcmnand_read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @td:		descriptor for the bad block table
+ * @md:		descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+ * Only reads the version bytes of each table; always returns 1.
+*/
+static int brcmnand_read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+			 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+	struct brcmnand_chip *this = mtd->priv;
+
+PRINTK("--> %s\n", __FUNCTION__);
+	/* Read the primary version, if available */
+	if (td->options & NAND_BBT_VERSION) {
+PRINTK("read primary version\n");
+		brcmnand_scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
+			      mtd->writesize);
+		td->version[0] = buf[mtd->writesize + td->veroffs];	/* version byte lives in the OOB at veroffs */
+		printk(KERN_DEBUG "Bad block table at page %x, version 0x%02X\n",
+		       td->pages[0], td->version[0]);	/* NOTE(review): %x vs %llx used below for the same field -- confirm pages[] width */
+PRINTK("Main bad block table at page %llx, version 0x%02X\n",
+		       td->pages[0], td->version[0]);
+	}
+
+	/* Read the mirror version, if available */
+	if (md && (md->options & NAND_BBT_VERSION)) {
+PRINTK("read mirror version\n");
+		brcmnand_scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
+			      mtd->writesize);
+		md->version[0] = buf[mtd->writesize + md->veroffs];
+		printk(KERN_DEBUG "Bad block table at page %x, version 0x%02X\n",
+		       md->pages[0], md->version[0]);
+PRINTK( "Mirror bad block table at page %x, version 0x%02X\n",
+		       md->pages[0], md->version[0]);
+	}
+PRINTK("<-- %s\n", __FUNCTION__);
+	return 1;
+}
+
+/*
+ * Scan a given block full
+ * Reads the block's pages raw and checks each for the bad-block
+ * pattern; returns 1 if any page matches (block bad), 0 if clean,
+ * or the negative error from the raw read.
+ */
+static int brcmnand_scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+			   loff_t offs, uint8_t *buf, size_t readlen,
+			   int scanlen, int len)
+{
+	int ret, j;
+
+	ret = brcmnand_scan_read_raw(mtd, buf, offs, readlen);
+	if (ret)
+		return ret;
+
+	for (j = 0; j < len; j++, buf += scanlen) {	/* step one page (data+OOB) at a time */
+		if (check_pattern(buf, scanlen, mtd->writesize, bd))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Scan a given block partially
+ * @offs: 	Offset of start of block
+ * @len: 		Number of pages to scan
+ * For MLC we need to read backwards from the end of the block
+ */
+static int brcmnand_scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+			   loff_t offs, uint8_t *buf, int len)
+{
+	struct mtd_oob_ops ops;
+	int j, ret;
+	int dir;	/* page step direction: +1 (SLC, scan forward) or -1 (MLC, scan backward) */
+	struct brcmnand_chip *this = mtd->priv; 
+
+	/*
+	 * THT 8/23/2010 Changed to use low level test.  
+	 * Apparently new Micron chips are SLC, but behaves like an MLC flash (requires BCH-4).
+	 * The old high level test would look for the BI indicator at the wrong page.
+	 *
+	 * if (!MTD_IS_MLC(mtd)) { // SLC: First and 2nd page
+	 *	dir = 1;
+	 * }
+	*/
+	if (!NAND_IS_MLC(this)) { // SLC: First and 2nd page
+		dir = 1;
+	}
+	else { // MLC: Read last page (and next to last page).
+		int pagesPerBlock = mtd->erasesize/mtd->writesize;
+		
+		dir = -1;
+		offs += (loff_t)((pagesPerBlock -1 ) * mtd->writesize);	/* start at the block's last page */
+	}
+	ops.len = mtd->oobsize;
+	ops.ooblen = mtd->oobsize;
+	ops.oobbuf = buf;
+	ops.ooboffs = 0;
+	ops.datbuf = NULL;	/* OOB-only read */
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	for (j=0; j < len; j++) {
+		ret = mtd_read_oob(mtd, offs, &ops);
+if (gdebug && ret!=0) printk("########## %s: read_oob returns %d\n", __FUNCTION__, ret);
+
+
+		if (ret == -EBADMSG ||ret == -EIO || ret == -ETIMEDOUT) {// Uncorrectable errors
+			uint32_t acc0;
+
+			// Disable ECC
+			acc0 = brcmnand_disable_read_ecc(this->ctrl->CS[this->csi]);
+
+			// Re-read the OOB
+			ret = mtd_read_oob(mtd, offs, &ops);	/* retry without ECC so the BI marker itself is readable */
+
+			// Enable ECC back
+			brcmnand_restore_ecc(this->ctrl->CS[this->csi], acc0);
+		}
+
+		if (ret) {
+PRINTK("%s: read_oob returns error %d\n", __FUNCTION__, ret);
+			return ret;
+		}
+
+		if (check_short_pattern(buf, bd))
+			return 1;	/* bad-block marker found */
+
+
+		offs += ((int64_t)dir * mtd->writesize);	/* next page, forward or backward */
+	}
+	return 0;
+}
+
+/**
+ * brcmnand_create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @bd:		descriptor for the good/bad block search pattern
+ * @chip:	create the table for a specific chip, -1 read all chips.
+ *		Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int brcmnand_create_bbt(struct mtd_info *mtd, uint8_t *buf,
+	struct nand_bbt_descr *bd, int chip)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int i, len, scanlen;
+	uint64_t numblocks, startblock;
+	
+	loff_t from;
+	size_t readlen;
+
+
+PRINTK("-->brcmnand_create_bbt, bbt_erase_shift=%d, this->page_shift=%d\n", this->bbt_erase_shift, this->page_shift);
+	printk (KERN_INFO "Scanning device for bad blocks, options=%08x\n", bd->options);
+
+	if (bd->options & NAND_BBT_SCANALLPAGES)
+		len = 1 << (this->bbt_erase_shift - this->page_shift);	/* scan every page in the block */
+	else { // Also for MLC
+		if (bd->options & NAND_BBT_SCAN2NDPAGE) {
+			if (this->options & NAND_SCAN_BI_3RD_PAGE) {
+				len = 3; // For Hynix MLC chips
+			}
+			else {
+				len = 2;
+			}
+		}
+		else
+			len = 1;
+	}
+
+	if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+		/* We need only read few bytes from the OOB area */
+		scanlen = 0;
+		readlen = bd->len;
+	} else {
+		/* Full page content should be read */
+		scanlen = mtd->writesize + mtd->oobsize;
+		readlen = len * mtd->writesize;
+	}
+
+	if (chip == -1) {
+		/* Note that numblocks is 2 * (real numblocks) here, see i+=2
+		 * below as it makes shifting and masking less painful */
+		numblocks = device_size(mtd) >> (this->bbt_erase_shift - 1);
+		startblock = 0ULL;
+		from = 0LL;
+	} else {
+		if (chip >= this->ctrl->numchips) {
+			printk (KERN_WARNING "brcmnand_create_bbt(): chipnr (%d) > available chips (%d)\n",
+				chip + 1, this->ctrl->numchips);
+			return -EINVAL;
+		}
+		numblocks = this->chipSize >> (this->bbt_erase_shift - 1);	/* again 2x real block count */
+		startblock = chip * numblocks;
+		numblocks += startblock;
+		from = startblock << (this->bbt_erase_shift - 1);
+	}
+
+//gdebug=4;
+if (gdebug > 3) { 
+PRINTK("Starting for loop: from=%0llx bd->options=%08x, startblock=%d numblocks=%d\n", 
+from, bd->options, mtd64_ll_low(startblock), mtd64_ll_low(numblocks));
+}
+	for (i = startblock; i < numblocks;) {	/* NOTE(review): int i vs. uint64 numblocks -- confirm range for very large devices */
+		int ret;
+
+		if (bd->options & NAND_BBT_SCANALLPAGES)
+			ret = brcmnand_scan_block_full(mtd, bd, from, buf, readlen,
+					      scanlen, len);
+		else
+			ret = brcmnand_scan_block_fast(mtd, bd, from, buf, len);
+
+		/* 
+		 * THT 8/24/10: Fall through and mark it as bad if -EBADMSG.
+		 * We want to mark it as bad if we can't read it, but we also
+		 * don't want to mark the block as bad due to a timeout for example
+		 */
+		if (ret < 0 && ret != (-EBADMSG) && ret != (-EIO)) {
+PRINTK("$$$$$$$$$$$$$$$$$$$$ brcmnand_scan_block_{fast/full} returns %d\n", ret);
+			// THT 8/24/10: Go to next block instead of returning
+			// return ret;
+		}
+
+		// -EBADMSG,  -EIO and +1 (marked as bad) go here:
+		else if (ret) {
+			this->bbt[i >> 3] |= 0x03 << (i & 0x6);	/* mark block bad (11b) in the in-memory table */
+			printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
+			       i >> 1, (unsigned int)from);
+PRINTK("$$$$$$$$$$$$$$$ Bad eraseblock %d at 0x%08x, ret=%d\n",
+			       i >> 1, (unsigned int)from, ret);
+			mtd->ecc_stats.badblocks++;
+		}
+
+		i += 2;	/* 2 table bits per block */
+		from += (loff_t)(1 << this->bbt_erase_shift);
+	}
+
+//gdebug=0;
+	return 0;
+}
+
+/**
+ * brcmnand_search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @td:		descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern.
+ * Search is performed either from the beginning up or from the end of
+ * the device downwards. The search starts always at the start of a
+ * block.
+ * If the option NAND_BBT_PERCHIP is given, each chip is searched
+ * for a bbt, which contains the bad block information of this chip.
+ * This is necessary to provide support for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page
+ * in a block.
+ *
+ * Always returns 0; a table that was not found is recorded as
+ * td->pages[i] == BBT_NULL_PAGE for the caller to inspect.
+ */
+static int brcmnand_search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int i, chips;
+	uint32_t bits, startblock, block;
+	int dir;
+	int scanlen = mtd->writesize + mtd->oobsize;
+	int bbtblocks;
+	int blocktopage = this->bbt_erase_shift - this->page_shift;
+	int ret = 0;
+
+PRINTK("-->%s, CS=%d numchips=%d, mtdSize=%llx, mtd->size=%llx\n", __FUNCTION__, this->ctrl->CS[this->csi], this->ctrl->numchips, this->mtdSize, mtd->size);
+
+	/* Search direction top -> down ? */
+	if (td->options & NAND_BBT_LASTBLOCK) {
+		startblock = (uint32_t) (this->mtdSize >> this->bbt_erase_shift) -1;
+		dir = -1;
+	} else {
+		startblock = 0;
+		dir = 1;
+	}
+
+	/* Do we have a bbt per chip ? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chips = this->ctrl->numchips;
+		bbtblocks = this->chipSize >> this->bbt_erase_shift;
+		startblock &= bbtblocks - 1;
+	} else {
+		chips = 1;
+		bbtblocks = (uint32_t)(this->mtdSize >> this->bbt_erase_shift);
+
+	}
+
+	/* Number of bits for each erase block in the bbt */
+	bits = td->options & NAND_BBT_NRBITS_MSK;
+
+PRINTK("%s: startblock=%d, dir=%d, chips=%d\n", __FUNCTION__, (int) startblock, dir, chips);
+
+	for (i = 0; i < chips; i++) {
+		/* Reset version information */
+		td->version[i] = 0;
+		td->pages[i] = BBT_NULL_PAGE;
+		/* Scan the maximum number of blocks */
+		for (block = 0; block < td->maxblocks; block++) {
+
+			int64_t actblock = startblock + dir * block;
+			loff_t offs = (uint64_t) actblock << this->bbt_erase_shift;
+
+			/* Read first page */
+			ret = brcmnand_scan_read_raw(mtd, buf, offs, mtd->writesize);
+
+			/* Here if the read routine returns -77 then the BBT data is invalid, ignore it */
+			
+			// Ignore BBT if not there.
+			if (ret)
+				continue;
+
+PRINTK("Checking Sig %c%c%c%c against OOB\n", td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3]);
+
+			/* If scan-auto mode, fish out the useful data from the ECC stuffs */
+			if (td->options & BRCMNAND_BBT_AUTO_PLACE) {
+				u_char abuf[16];
+				struct mtd_oob_ops ops;
+
+				memset(abuf, 0, 16);
+				ops.mode = MTD_OPS_AUTO_OOB;
+				ops.ooboffs = 0;
+				ops.ooblen = mtd->oobsize;
+				ops.oobbuf = abuf;
+				ops.datbuf = buf;
+				ops.len = mtd->writesize;
+				this->oob_poi = &buf[mtd->writesize];
+
+				/* Repack the free OOB bytes into abuf before pattern matching */
+				(void) brcmnand_transfer_oob(this, abuf, &ops, td->len+1);
+//printk("BCH-8-16 scan: \n");
+//print_oobbuf(abuf, td->len+1);
+
+				/* Look for pattern at the beginning of OOB auto-buffer */
+				if (!check_pattern(abuf, mtd->oobsize, 0, td)) {
+					PRINTK("%s: Found BBT at offset %0llx\n", __FUNCTION__, offs);
+					td->pages[i] = actblock << blocktopage;
+					if (td->options & NAND_BBT_VERSION) {
+						td->version[i] = abuf[td->veroffs];
+					}
+					break;
+				}
+				
+			}
+			
+			else if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+PRINTK("%s: Found BBT at offset %0llx\n", __FUNCTION__, offs);
+				td->pages[i] = actblock << blocktopage;
+				if (td->options & NAND_BBT_VERSION) {
+					td->version[i] = buf[mtd->writesize + td->veroffs];
+				}
+				break;
+			}
+		}
+		/* Next chip's search window starts one chip further on */
+		startblock += this->chipSize >> this->bbt_erase_shift;
+	}
+	/* Check, if we found a bbt for each requested chip */
+	for (i = 0; i < chips; i++) {
+		if (td->pages[i] == BBT_NULL_PAGE) {
+			printk (KERN_WARNING "Bad block table %c%c%c%c not found for chip on CS%d\n", 
+				td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3], this->ctrl->CS[this->csi]);
+PRINTK ( "**************** Bad block table %c%c%c%c not found for chip on CS%d\n", 
+				td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3], this->ctrl->CS[this->csi]);
+		}
+		else {
+			printk(KERN_DEBUG "Bad block table %c%c%c%c found at page %08lx, version 0x%02X for chip on CS%d\n", 
+				td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3], 
+				(unsigned long) td->pages[i], td->version[i], this->ctrl->CS[this->csi]);
+PRINTK( "############# Bad block table %c%c%c%c found at page %08lx, version 0x%02X for chip on CS%d\n", 
+			      td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3], 
+			      (unsigned long) td->pages[i], td->version[i], this->ctrl->CS[this->csi]);
+		}
+	}
+	return 0;
+}
+
+/**
+ * brcmnand_search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @td:		descriptor for the bad block table
+ * @md:		descriptor for the bad block table mirror (may be NULL)
+ *
+ * Search and read the bad block table(s).
+ * Always returns 1 so the caller unconditionally runs the
+ * check/create pass over the per-descriptor results.
+*/
+static int brcmnand_search_read_bbts (struct mtd_info *mtd, uint8_t *buf,
+	struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+PRINTK("-->%s\n", __FUNCTION__);
+
+	/* Search the primary table */
+	brcmnand_search_bbt (mtd, buf, td);
+
+	/* Search the mirror table */
+	if (md)
+		brcmnand_search_bbt (mtd, buf, md);
+
+	/* Force result check */
+	return 1;
+}
+
+
+/**
+ * brcmnand_write_bbt - [GENERIC] (Re)write the bad block table
+ *
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @td:		descriptor for the bad block table
+ * @md:		descriptor for the bad block table mirror (may be NULL)
+ * @chipsel:	selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.  Returns 0 on success, -ENOSPC when
+ * no usable block remains in the reserved area, -EINVAL for an
+ * unsupported bit width, or a negative error from the read path.
+ *
+ * THT: 1/16/07: TO DO: Currently, if writing to the block failed, we punt.
+ * TBD: Use skip block mechanism, and skip over real bad blocks, so we would either start at the 1MB offset from bottom
+ * and go down, or start from the bottom and go up, skipping over bad blocks until we reach the 1MB partition reserved
+ * for BBT.
+ *
+*/
+static int brcmnand_write_bbt(struct mtd_info *mtd, uint8_t *buf,
+		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+		     int chipsel)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	struct erase_info einfo;
+	int i, j, res, chip = 0, skip = 0, dir = 0;
+	uint32_t bits, offs, sft, sftmsk, bbtoffs;
+	int64_t startblock = 0ULL, numblocks, page=0ULL, i64;
+	int nrchips,  pageoffs, ooboffs;
+	uint8_t msk[4];
+	uint8_t rcode = td->reserved_block_code;
+	size_t retlen, len = 0;
+	loff_t to;
+	struct mtd_oob_ops ops;
+
+int save_gdebug = gdebug;
+//gdebug=4;
+
+DEBUG(MTD_DEBUG_LEVEL3, "-->%s\n", __FUNCTION__);
+PRINTK("-->%s, chipsel=%d\n", __FUNCTION__, chipsel);
+	ops.ooblen = mtd->oobsize;
+	ops.ooboffs = 0;
+	ops.datbuf = NULL;
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	/* 0x00 would collide with the "good block" code; use 0xff instead */
+	if (!rcode)
+		rcode = 0xff;
+	/* Write bad block table per chip rather than per device ? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		numblocks =  (this->chipSize >> this->bbt_erase_shift);
+		/* Full device write or specific chip ? */
+		if (chipsel == -1) {
+			nrchips = this->ctrl->numchips;
+		} else {
+			nrchips = chipsel + 1;
+			chip = chipsel;
+		}
+	} else {
+		numblocks =  (this->mtdSize >> this->bbt_erase_shift);
+		nrchips = 1;
+	}
+	
+PRINTK("%s Creating %c%c%c%c numblocks=%d, nrchips=%d, td->pages[0]=%llx\n", 
+__FUNCTION__, td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3] , (int) numblocks, nrchips, td->pages[0]);
+
+	/* Loop through the chips */
+	for (; chip < nrchips; chip++) {
+
+		/* There was already a version of the table, reuse the page
+		 * This applies for absolute placement too, as we have the
+		 * page nr. in td->pages.
+		 */
+		if (td->pages[chip] != BBT_NULL_PAGE) {
+			page = td->pages[chip];
+PRINTK("There is already a version of the table, go ahead and write it\n");
+			goto write;
+		}
+
+		/* Automatic placement of the bad block table */
+		/* Search direction top -> down ? */
+		if (td->options & NAND_BBT_LASTBLOCK) {
+			startblock = numblocks * (chip + 1) - 1;
+			dir = -1;
+		} else {
+			startblock = chip * numblocks;
+			dir = 1;
+		}
+		skip = 0;
+
+write_retry:
+PRINTK("%s: write_retry: startblock=%0llx, dir=%d, td->maxblocks=%d, skip=%d\n", 
+	__FUNCTION__, startblock, dir, td->maxblocks, skip);
+
+		for (i = skip; i < td->maxblocks; i++) {
+			uint64_t block = startblock + (int64_t) (dir * i);
+			// THT One byte contains 4 set of 2-bits, so divide block by 4 to index the BBT byte
+			uint32_t blockindex = (uint32_t) (block >> 2);
+
+			/* Check, if the block is bad */
+
+PRINTK("%s: Checking BBT: i=%d, block=%0llx, BBT=%08x\n", 
+__FUNCTION__, i, block, this->bbt[blockindex]);
+			
+			// THT: bbt[blockindex] is the byte we are looking for, now get the 2 bits that
+			// is the BBT for the block (Shift (0,1,2,3) *2 positions depending on the block modulo 4)
+			switch ((this->bbt[blockindex] >> (2 * (block & 0x03))) 
+				      & 0x03) {
+			case 0x01:
+			case 0x03:
+				continue;
+			}
+			page = block << (this->bbt_erase_shift - this->page_shift);
+
+			/* FIX(review): md may be NULL (no mirror); guard the
+			 * debug trace just as the real test below does. */
+PRINTK("%s: Checking BBT2: page=%llx, md->pages[chip]=%llx\n", 
+	__FUNCTION__, page, md ? md->pages[chip] : -1);
+	
+			/* Check, if the block is used by the mirror table */
+			if (!md || md->pages[chip] != page)
+				goto write;
+		}
+		printk (KERN_ERR "No space left to write bad block table %c%c%c%c\n", 
+			td->pattern[0], td->pattern[1], td->pattern[2], td->pattern[3]);
+		brcmnand_post_mortem_dump(mtd, page<<this->page_shift);
+		return -ENOSPC;
+write:
+
+		/* Set up shift count and masks for the flash table */
+		bits = td->options & NAND_BBT_NRBITS_MSK;
+PRINTK("%s: bits=%d\n", __FUNCTION__, bits);
+		/* FIX(review): msk[2] (reserved-block code) was left
+		 * uninitialized although msk[dat & 0x03] below can index it;
+		 * mirror mainline nand_bbt.c write_bbt() and use ~rcode. */
+		switch (bits) {
+		case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+			msk[2] = ~rcode; msk[3] = 0x01;
+			break;
+		case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+			msk[2] = ~rcode; msk[3] = 0x03;
+			break;
+		case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+			msk[2] = ~rcode; msk[3] = 0x0f;
+			break;
+		case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+			msk[2] = ~rcode; msk[3] = 0xff;
+			break;
+		default: return -EINVAL;
+		}
+
+		bbtoffs = chip * ((uint32_t) (numblocks >> 2));
+
+		to = (uint64_t) page << this->page_shift;
+
+		/* Must we save the block contents ? */
+		if (td->options & NAND_BBT_SAVECONTENT) {
+			/* Make it block aligned */
+PRINTK("%s: NAND_BBT_SAVECONTENT\n", __FUNCTION__);
+			//to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+			to = to & ( ~((1 << this->bbt_erase_shift) - 1));
+			len = 1 << this->bbt_erase_shift;
+			res = mtd_read(mtd, to, len, &retlen, buf);
+			if (res < 0) {
+				if (retlen != len) {
+					printk(KERN_INFO "nand_bbt: Error "
+					       "reading block for writing "
+					       "the bad block table\n");
+					return res;
+				}
+				printk(KERN_WARNING "nand_bbt: ECC error "
+				       "while reading block for writing "
+				       "bad block table\n");
+			}
+			/* Read oob data */
+			ops.len = (len >> this->page_shift) * mtd->oobsize;
+			ops.oobbuf = &buf[len];
+			res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+			if (res < 0 || ops.retlen != ops.len)
+				goto outerr;
+
+			/* Calc the byte offset in the buffer */
+			pageoffs = page - (to >> this->page_shift);
+
+			// offs is offset from start of buffer, so it is OK to be 32bit.
+			offs = pageoffs << this->page_shift;
+			/* Preset the bbt area with 0xff */
+			memset (&buf[offs], 0xff, (size_t)(numblocks >> sft));
+			ooboffs = len + (pageoffs * mtd->oobsize);
+
+		} else {
+PRINTK("%s: Not NAND_BBT_SAVECONTENT\n", __FUNCTION__);
+			/* Calc length */
+			len = (size_t) (numblocks >> sft);
+			/* Make it page aligned ! */
+			len = (len + (mtd->writesize - 1)) &
+				~(mtd->writesize - 1);
+			/* Preset the buffer with 0xff */
+			memset(buf, 0xff, len +
+			       (len >> this->page_shift)* mtd->oobsize);
+			offs = 0;
+			ooboffs = len;
+
+			/* Auto-place for BCH-8 on 16B OOB? */
+			if (td->options & BRCMNAND_BBT_AUTO_PLACE) {
+				u_char abuf[8];
+				struct mtd_oob_ops ops;
+
+				memcpy(abuf, td->pattern, td->len);
+				// Write the version number (1 byte)
+				if (td->options & NAND_BBT_VERSION) {
+					abuf[td->veroffs] = td->version[0];
+				}
+				
+				ops.datbuf = NULL;
+				ops.len = 0;
+				ops.mode = MTD_OPS_AUTO_OOB;
+				ops.ooboffs = 0;
+				ops.ooblen = td->len + 1; /* 5 bytes */
+				ops.oobbuf = abuf;  /* Source oobbuf */
+				this->oob_poi = &buf[ooboffs]; /* Destination oobbuf */
+
+				/* Copy abuf into OOB free bytes */
+				(void) brcmnand_fill_oob(this, abuf, &ops);
+				
+			}
+			else { /* IN-PLACE OOB format */
+				/* Pattern is located in oob area of first page */
+				memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+				
+				// Write the version number (1 byte)
+				if (td->options & NAND_BBT_VERSION) {
+					buf[ooboffs + td->veroffs]=td->version[0];
+				}
+			}
+		}
+
+		/* walk through the memory table */
+		/*
+		 * THT: Right now we are safe, but when numblocks exceed 32bit, 
+		 * then we need to look at these codes again,
+		 * as we may need to break the BBT into 2 or more tables that a uint32_t can index.
+		 */
+		for (i64 = 0ULL; i64 < numblocks; ) {
+			uint8_t dat;
+			uint32_t irs2 = (uint32_t) (i64 >> 2); // Index into BBT
+
+			/*
+			 * Make sure that the cast above for irs2 is not lossy
+			 */
+			if (mtd64_ll_high(i64 >> 2)) {
+				printk(KERN_ERR "FIXME: %s: integer index to BBT overflow %0llx\n", __FUNCTION__, i64 >> 2);
+			}
+			dat = this->bbt[bbtoffs + irs2];
+			/* Unpack the 4 x 2-bit entries of this BBT byte */
+			for (j = 0; j < 4; j++ , i64++) {
+				uint32_t sftcnt = (uint32_t) ((i64 << (3 - sft)) & sftmsk);
+				/* Do not store the reserved bbt blocks ! */
+				buf[offs + (uint32_t) (i64 >> sft)] &=
+					~(msk[dat & 0x03] << sftcnt);
+				dat >>= 2;
+			}
+		}
+		
+		memset (&einfo, 0, sizeof (einfo));
+		einfo.mtd = mtd;
+		einfo.addr = to;
+		
+		einfo.len = 1ULL << this->bbt_erase_shift;
+		res = this->erase_bbt (mtd, &einfo, 1, 1); // Do not look up BBT
+		if (res < 0) {
+			printk (KERN_ERR "brcmnand_bbt: Error during block erase at %0llx: %d\n", to, res);
+			skip++;
+			goto write_retry;
+		}
+
+//gdebug = 4;
+		res = brcmnand_scan_write_bbt(mtd, to, len, buf, &buf[len]);
+//gdebug = 0;
+		if (res < 0) {
+			// THT: If writing reports a bad block, we will skip it, and retry.  Eventually may
+			// run out of td->maxblocks
+			printk(KERN_INFO "write_bbt returns flash status error at %0llx, skipping and retrying...\n",
+				to);
+			skip++;
+			goto write_retry;
+		}
+
+		printk(KERN_DEBUG "Bad block table written to 0x%08x, version "
+		       "0x%02X\n", (unsigned int)to, td->version[chip]);
+
+		/* Mark it as used */
+		td->pages[chip] = page;
+	}
+gdebug=save_gdebug;
+	return 0;
+
+ outerr:
+ gdebug=save_gdebug;
+	printk(KERN_WARNING
+	       "brcmnand_bbt: Error while writing bad block table %d\n", res);
+	return res;
+}
+
+/**
+ * brcmnand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd:	MTD device structure
+ * @bd:		descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+*/
+static inline int brcmnand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+	struct brcmnand_chip *this = mtd->priv;
+
+	/* Clear SCANEMPTY so brcmnand_create_bbt reads only the marker bytes */
+	bd->options &= ~NAND_BBT_SCANEMPTY;
+	return brcmnand_create_bbt (mtd, this->ctrl->buffers->databuf, bd, -1);
+}
+
+/**
+ * brcmnand_check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd:	MTD device structure
+ * @buf:	temporary buffer
+ * @bd:		descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to brcmnand_read_bbt
+ * and creates / updates the bbt(s) if necessary.
+ * Creation is necessary if no bbt was found for the chip/device.
+ * Update is necessary if one of the tables is missing or the
+ * version nr. of one table is less than the other.
+ * Returns 0 on success or a negative error from brcmnand_write_bbt.
+*/
+static int brcmnand_check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+	int i, chips, writeops, chipsel, res;
+	struct brcmnand_chip *this = mtd->priv;
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+	struct nand_bbt_descr *rd, *rd2;
+
+PRINTK("-->%s, td=%p, md=%p\n", __FUNCTION__, td, md);
+	/* Do we have a bbt per chip ? */
+	if (td->options & NAND_BBT_PERCHIP)
+		chips = this->ctrl->numchips;
+	else
+		chips = 1;
+
+	for (i = 0; i < chips; i++) {
+		writeops = 0;
+		rd = NULL;
+		rd2 = NULL;
+		/* Per chip or per device ? */
+		chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+
+		/*
+		 * THT: Reset version to 0 if 0xff (erased flash reads 0xff)
+		 */
+		if ((td->options & NAND_BBT_VERSION) && (td->version[i]==0xff) && td->pages[i] != BBT_NULL_PAGE)
+			td->version[i] = 0;
+		/* FIX(review): md may be NULL (no mirror descriptor) -- guard
+		 * it before dereferencing, as the `if (md)` branch below does */
+		if (md && (md->options & NAND_BBT_VERSION) && (md->version[i]==0xff) && md->pages[i] != BBT_NULL_PAGE)
+			md->version[i] = 0;
+		
+		/* Mirrored table available ? */
+		if (md) {
+			if (td->pages[i] == BBT_NULL_PAGE && md->pages[i] == BBT_NULL_PAGE) {
+				/* Neither table found: scan and write both */
+				writeops = 0x03;
+				goto create;
+			}
+
+			if (td->pages[i] == BBT_NULL_PAGE) {
+				rd = md;
+				td->version[i] = md->version[i];
+				writeops = 1;
+				goto writecheck;
+			}
+
+			if (md->pages[i] == BBT_NULL_PAGE) {
+				rd = td;
+				md->version[i] = td->version[i];
+				writeops = 2;
+				goto writecheck;
+			}
+
+			if (td->version[i] == md->version[i]) {
+				rd = td;
+				if (!(td->options & NAND_BBT_VERSION))
+					rd2 = md;
+				goto writecheck;
+			}
+
+			/* Signed 8-bit delta tolerates version wraparound */
+			if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+				rd = td;
+				md->version[i] = td->version[i];
+				writeops = 2;
+			} else {
+				rd = md;
+				td->version[i] = md->version[i];
+				writeops = 1;
+			}
+
+			goto writecheck;
+
+		} else {
+			if (td->pages[i] == BBT_NULL_PAGE) {
+				writeops = 0x01;
+				goto create;
+			}
+			rd = td;
+			goto writecheck;
+		}
+create:
+		/* Create the bad block table by scanning the device ? */
+		if (!(td->options & NAND_BBT_CREATE))
+			continue;
+
+		/* Create the table in memory by scanning the chip(s) */
+		brcmnand_create_bbt (mtd, buf, bd, chipsel);
+
+		td->version[i] = 1;
+		if (md)
+			md->version[i] = 1;
+writecheck:
+		res = 0;
+
+PRINTK("%s: writeops=%d, rd=%p, rd2=%p\n", __FUNCTION__, writeops, rd, rd2);
+		
+		/* read back first ? */
+		if (rd) {
+PRINTK("%s: Read rd\n", __FUNCTION__);
+			res = brcmnand_read_abs_bbt (mtd, buf, rd, chipsel);
+		}
+		/* If they weren't versioned, read both. */
+		if (rd2) {
+			if (res != 0) {
+				int bbtlen = (uint32_t) (this->mtdSize >> (this->bbt_erase_shift + 2));
+				/* Clear the in-memory BBT first */
+PRINTK("%s: Discarding previously read BBT %c%c%c%c, res=%d\n", 
+__FUNCTION__, rd->pattern[0], rd->pattern[1], rd->pattern[2], rd->pattern[3], res);
+				memset(this->bbt, 0, bbtlen);
+			}
+PRINTK("%s: Read rd2\n", __FUNCTION__);
+			res = brcmnand_read_abs_bbt (mtd, buf, rd2, chipsel);
+			if (res != 0) {
+PRINTK("%s: Read BBT %c%c%c%c returns res=%d, discarding\n", 
+__FUNCTION__, rd2->pattern[0], rd2->pattern[1], rd2->pattern[2], rd2->pattern[3], res);
+			}
+		}
+
+		/* Write the bad block table to the device ? */
+		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+			res = brcmnand_write_bbt (mtd, buf, td, md, chipsel);
+			if (res < 0)
+				return res;
+		}
+
+		/* Write the mirror bad block table to the device ? */
+		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+			res = brcmnand_write_bbt (mtd, buf, md, td, chipsel);
+			if (res < 0)
+				return res;
+		}
+	}
+	return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd:	MTD device structure
+ * @td:		bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent
+ * accidental erasures / writes. The regions are identified by
+ * the mark 0x02.
+*/
+static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int i, j, update;
+	uint32_t chips, block, nrblocks;
+	uint8_t oldval, newval;
+
+	/* Do we have a bbt per chip ? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chips = this->ctrl->numchips;
+		nrblocks = (int)(this->chipSize >> this->bbt_erase_shift);
+	} else {
+		chips = 1;
+		nrblocks = (uint32_t) (this->mtdSize >> this->bbt_erase_shift);
+	}
+
+	for (i = 0; i < chips; i++) {
+		/* Absolutely-placed or read-only table: reserve only its own block */
+		if ((td->options & NAND_BBT_ABSPAGE) ||
+		    !(td->options & NAND_BBT_WRITE)) {
+			if (td->pages[i] == BBT_NULL_PAGE)
+				continue;
+			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+			block <<= 1;	/* 2 BBT bits per block */
+			oldval = this->bbt[(block >> 3)];
+			newval = oldval | (0x2 << (block & 0x06));
+			this->bbt[(block >> 3)] = newval;
+			if ((oldval != newval) && td->reserved_block_code)
+				brcmnand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+			continue;
+		}
+		update = 0;
+		if (td->options & NAND_BBT_LASTBLOCK)
+			block = ((i + 1) * nrblocks) - td->maxblocks;
+		else
+			block = i * nrblocks;
+		block <<= 1;
+		/* Reserve all td->maxblocks candidate blocks of the BBT area */
+		for (j = 0; j < td->maxblocks; j++) {
+			oldval = this->bbt[(block >> 3)];
+			newval = oldval | (0x2 << (block & 0x06));
+			this->bbt[(block >> 3)] = newval;
+			if (oldval != newval)
+				update = 1;
+			block += 2;
+		}
+		/* If we want reserved blocks to be recorded to flash, and some
+		   new ones have been marked, then we need to update the stored
+		   bbts.  This should only happen once. */
+		if (update && td->reserved_block_code)
+			brcmnand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+	}
+}
+
+/**
+ * brcmnand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd:	MTD device structure
+ * @bd:		descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already
+ * available. If not it scans the device for manufacturer
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed
+ * by calling the nand_free_bbt function.
+ *
+*/
+int brcmnand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int  res = 0;
+	uint32_t len;
+	uint8_t *buf;
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+
+/* NOTE(review): this trace dereferences td and md before the !td test
+ * below; it would oops if either descriptor is NULL -- confirm callers */
+PRINTK("-->%s: chip=%p, td->options=%08x, md->options=%08x\n", __FUNCTION__, this, td->options, md->options);
+
+	len = (uint32_t) (this->mtdSize >> (this->bbt_erase_shift + 2));
+	/* Allocate memory (2bit per block) */
+
+PRINTK("brcmnand_scan_bbt: Allocating %d byte for BBT. mtd->size=%lld, eraseshift=%d\n", 
+len, this->mtdSize, this->bbt_erase_shift);
+
+
+	this->bbt = (uint8_t*) kmalloc (len, GFP_KERNEL);
+
+	if (!this->bbt) 
+	{
+		printk (KERN_ERR "brcmnand_scan_bbt: Out of memory, bbt_erase_shift=%d, len=%d\n", 
+			this->bbt_erase_shift, len);
+		return -ENOMEM;
+	
+	}
+	/* Clear the memory bad block table */
+	memset (this->bbt, 0x00, len);
+
+	/* If no primary table descriptor is given, scan the device
+	 * to build a memory based bad block table
+	 */
+	if (!td) {
+		if ((res = brcmnand_memory_bbt(mtd, bd))) {
+			printk (KERN_ERR "brcmnand_bbt: Can't scan flash and build the RAM-based BBT\n");
+			kfree(this->bbt);
+			this->bbt = NULL;
+		}
+		return res;
+	}
+
+	/* Allocate a temporary buffer for one eraseblock incl. oob */
+	len = (1 << this->bbt_erase_shift);
+PRINTK("%s: len before OOB = %08x\n", __FUNCTION__, len);
+	len += (len >> this->page_shift) * (mtd->oobsize);
+PRINTK("%s: Inc OOB - Allocating %08x byte buffer, oobsize=%d\n", __FUNCTION__, len, mtd->oobsize);
+	buf = kmalloc (len, GFP_KERNEL);
+	if (!buf) {
+		printk (KERN_ERR "%s: Out of memory 2, bbt_erase_shift=%d, len=%dx\n", 
+			__FUNCTION__, this->bbt_erase_shift, len  );
+		
+		kfree (this->bbt);
+		
+		this->bbt = NULL;
+		return -ENOMEM;
+	}
+
+	/* Is the bbt at a given page ? */
+	if (td->options & NAND_BBT_ABSPAGE) {
+		res = brcmnand_read_abs_bbts (mtd, buf, td, md);
+	} else {
+		/* Search the bad block table using a pattern in oob */
+		res = brcmnand_search_read_bbts (mtd, buf, td, md);
+	}
+
+	/* Non-zero means: inspect per-chip results and (re)create as needed */
+	if (res) {
+		res = brcmnand_check_create (mtd, buf, bd);
+	}
+
+	/* Prevent the bbt regions from erasing / writing */
+	mark_bbt_region (mtd, td);
+	if (md)
+		mark_bbt_region (mtd, md);
+
+	kfree (buf);
+	return res;
+}
+EXPORT_SYMBOL (brcmnand_scan_bbt);
+
+
+/**
+ * brcmnand_update_bbt - [NAND Interface] update bad block table(s)
+ * @mtd:	MTD device structure
+ * @offs:	the offset of the newly marked block
+ *
+ * Bump the table version(s) and rewrite the primary (and mirror, if
+ * present) bad block table on flash.
+ * Returns 0 on success, -EINVAL when no in-memory BBT/descriptor
+ * exists, -ENOMEM on allocation failure, or the brcmnand_write_bbt
+ * error.
+*/
+int brcmnand_update_bbt (struct mtd_info *mtd, loff_t offs)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int len, res = 0, writeops = 0;
+	int chip, chipsel;
+	uint8_t *buf;
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+
+DEBUG(MTD_DEBUG_LEVEL3, "-->%s offs=%0llx\n", __FUNCTION__, offs);
+PRINTK("-->%s offs=%0llx\n", __FUNCTION__, offs);
+
+	if (!this->bbt || !td)
+		return -EINVAL;
+
+	/* Allocate a temporary buffer for one eraseblock incl. oob.
+	 * (A dead store of the BBT byte length was removed here.) */
+	len = (1 << this->bbt_erase_shift);
+	len += (len >> this->page_shift) * mtd->oobsize;
+	/* GFP_ATOMIC: may be invoked from a context that cannot sleep */
+	buf = kmalloc (len, GFP_ATOMIC);
+	if (!buf) {
+		printk (KERN_ERR "brcmnand_update_bbt: Out of memory\n");
+		return -ENOMEM;
+	}
+
+	writeops = md != NULL ? 0x03 : 0x01;
+
+	/* Do we have a bbt per chip ? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chip = (int) (offs >> this->chip_shift);
+		chipsel = chip;
+	} else {
+		chip = 0;
+		chipsel = -1;
+	}
+
+	/* THT: 0xff means "erased"; roll the version over to 1 */
+	(td->version[chip])++;
+	if (td->version[chip] == 0xff)
+		td->version[chip] = 1;
+	/* FIX(review): only touch the mirror version when a mirror exists;
+	 * the rollover test used to dereference md unconditionally */
+	if (md) {
+		(md->version[chip])++;
+		if (md->version[chip] == 0xff)
+			md->version[chip] = 1;
+	}
+
+	/* Write the bad block table to the device ? */
+	if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+		res = brcmnand_write_bbt (mtd, buf, td, md, chipsel);
+		if (res < 0)
+			goto out;
+	}
+	/* Write the mirror bad block table to the device ? */
+	if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+		res = brcmnand_write_bbt (mtd, buf, md, td, chipsel);
+	}
+
+out:
+	kfree (buf);
+	return res;
+}
+
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks. */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Factory-marker descriptors: a good block reads 0xff at .offs in OOB */
+static struct nand_bbt_descr smallpage_memorybased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 5,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_memorybased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 0,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+/*
+static struct nand_bbt_descr mlc_4kpage_memorybased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 0,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+*/
+
+static struct nand_bbt_descr smallpage_flashbased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 5,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_flashbased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 0,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+/* 2K & 4K page MLC NAND use same pattern */
+static struct nand_bbt_descr bch4_flashbased = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 0,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+
+
+static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
+
+static struct nand_bbt_descr agand_flashbased = {
+	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+	.offs = 0x20,
+	.len = 6,
+	.pattern = scan_agand_pattern
+};
+
+/* Generic flash bbt descriptors
+*/
+/* Mirror signature is the primary signature reversed */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * THT: We only have 1 chip per device
+ */
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+#if 0 //CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+		| NAND_BBT_PERCHIP
+#endif
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+
+	.offs =	9, /* THT: Changed from 8 */
+	.len = 4,
+	.veroffs = 13, /* THT: Changed from 12 */
+	.maxblocks = 4, /* THT: Will update later, based on 1MB partition reserved for BBT */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+#if  0 //CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+		| NAND_BBT_PERCHIP
+#endif
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	9, /* THT: Changed from 8 */
+	.len = 4,
+	.veroffs = 13,  /* THT: Changed from 12 */
+	.maxblocks = 4,
+	.pattern = mirror_pattern
+};
+
+/* SLC flash using BCH-4 ECC, SM & Large page use same descriptor template */
+static struct nand_bbt_descr bbt_slc_bch4_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+#if  0 //CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+		| NAND_BBT_PERCHIP
+#endif
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	1, /* THT: Changed from 8 */
+	.len = 4,
+	.veroffs = 6,  /* THT: Changed from 12 */
+	.maxblocks = 8,
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_slc_bch4_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+#if  0 //CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+		| NAND_BBT_PERCHIP
+#endif
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	1, 
+	.len = 4,
+	.veroffs = 6,  
+	.maxblocks = 8,
+	.pattern = mirror_pattern
+};
+
+/* Also used for bch-8 & bch-12 with 27B OOB */
+static struct nand_bbt_descr bbt_bch4_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	1, 
+	.len = 4,
+	.veroffs = 5, /* THT: Changed from 12 */
+	.maxblocks = 8, /* THT: Will update later, based on 4MB partition reserved for BBT */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_bch4_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	1, /* THT: Changed from 8 */
+	.len = 4,
+	.veroffs = 5,  /* THT: Changed from 12 */
+	.maxblocks = 8,
+	.pattern = mirror_pattern
+};
+
+
+
+/* BCH-8 with only 16B OOB, uses auto-place for the (small) OOB */
+static struct nand_bbt_descr bbt_bch8_16_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION
+		| BRCMNAND_BBT_AUTO_PLACE,
+	.offs = 0, /* Signature is at offset 0 in auto-place format */
+	.len = 4,
+	.veroffs = 4, /* Version just follows the signature in auto-place format */
+	.maxblocks = 8, /* THT: Will update later, based on 4MB partition reserved for BBT */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_bch8_16_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION
+		| BRCMNAND_BBT_AUTO_PLACE,
+	.offs = 0, 
+	.len = 4,
+	.veroffs = 4,  
+	.maxblocks = 8,
+	.pattern = mirror_pattern
+};
+
+
+/*
+ * Walk every erase block below the BBT partition and print the offset of
+ * each block the BBT currently flags as bad.  Diagnostic helper; always
+ * returns 0.
+ */
+static int brcmnand_displayBBT(struct mtd_info* mtd)
+{
+	struct brcmnand_chip *chip = mtd->priv;
+	loff_t off;
+	/* The BBT partition occupies the top of the device; don't report it. */
+	loff_t limit = (loff_t)(chip->mtdSize - brcmnand_get_bbt_size(mtd));
+
+	printk(KERN_INFO "----- Contents of BBT -----\n");
+	for (off = 0; off < limit; off += mtd->erasesize) {
+		/* allowbbt=1: treat BBT-reserved blocks as good while scanning */
+		if (chip->isbad_bbt(mtd, off, 1))
+			printk(KERN_INFO "Bad block at %0llx\n", off);
+	}
+	printk(KERN_INFO "----- END Contents of BBT -----\n");
+	return 0;
+}
+
+#if 1
+// Remove this block in production builds.
+
+/*
+ * Process brcmnand= kernel command arg, BEFORE building/reading BBT table.
+ * Currently, the only accepted command is CLEARBBT, which in itself is a dangerous activity.
+ * The user is assumed to know what he is doing.
+ */
+static void brcmnand_preprocessKernelArg(struct mtd_info *mtd)
+{
+	struct brcmnand_chip *this = mtd->priv;
+
+	int ret, needBBT; 
+	uint64_t bOffset, bOffsetStart=0, bOffsetEnd=0;
+	int bbtSize = brcmnand_get_bbt_size(mtd); 
+
+	//int page;
+
+
+PRINTK("%s: gClearBBT=%d, size=%016llx, erasesize=%08x\n", __FUNCTION__, gClearBBT, device_size(mtd), mtd->erasesize);
+
+
+	switch (gClearBBT) {
+
+	case NANDCMD_CLEARBBT: // Force rescan of BBT (DANGEROUS, may lose Mfg's BIs).
+
+		/* Erase range = the BBT partition at the very top of the device */
+		bOffsetStart = this->mtdSize - bbtSize; 
+		bOffsetEnd = this->mtdSize - mtd->erasesize;
+printk("%s: gClearBBT=clearbbt, start=%0llx, end=%0llx\n", __FUNCTION__, 
+	bOffsetStart, bOffsetEnd);
+		break;
+
+	/* These commands need the BBT to exist first; they are handled after
+	 * the scan, in brcmnand_postprocessKernelArg(). */
+	case NANDCMD_SHOWBBT:
+		return;
+	case NANDCMD_RESCAN:
+		return;
+	case NANDCMD_ERASEALL:
+		return;
+	case NANDCMD_ERASE:
+		return;
+
+	default:
+		/* NOTE(review): BUG_ON() takes a condition; a string literal is
+		 * always non-zero, so this unconditionally BUGs when reached.
+		 * Probably intended, but printk + BUG() would be clearer. */
+		BUG_ON("Invalid brcmnand flag");
+		break;
+	} // switch
+
+	printk("Erasing flash from %016llx to %016llx\n", bOffsetStart, bOffsetEnd);
+
+	/*
+	 * Clear ECC registers 
+	 */
+	this->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
+	this->ctrl_write(BCHP_NAND_ECC_UNC_ADDR, 0);
+
+			
+	/* Note: inclusive upper bound (<=), unlike the display loop above */
+	for (bOffset=bOffsetStart; bOffset <= bOffsetEnd; bOffset += mtd->erasesize) {
+		//unsigned long pAddr = this->pbase + bOffset;
+		//int i;
+		//int skipBadBlock = 0;
+
+		/*
+		 * Skip erasing bad blocks.  If you accidentally/intentionally mark a block as bad, 
+		 * and want to clear it, use BBS to clear it
+		 * The exception are the blocks in the BBT area, which are reserved
+		 * Here during pre-processing, there is no BBT, so we cannot assume its existence.
+		 */
+		
+		PRINTK("brcmnand flag=%d: Erasing block at %0llx\n", 
+			gClearBBT, bOffset);
+		/* Program the block address, then issue a raw controller erase op */
+		this->ctrl_writeAddr(this, bOffset, 0);
+
+		this->ctrl_write(BCHP_NAND_CMD_START, OP_BLOCK_ERASE);
+		// Wait until flash is ready
+		ret = this->write_is_complete(mtd, &needBBT);
+		if (needBBT) {
+			printk(KERN_WARNING "%s: Erase failure, marking bad block @%016llx\n", __FUNCTION__, bOffset);
+			ret = this->block_markbad(mtd, bOffset);
+		}
+	}
+	
+	return;
+	
+}
+
+
+#else
+#define brcmnand_preprocessKernelArg(mtd)
+
+#endif
+
+/*
+ * Process brcmnand= kernel command arg, AFTER building/reading BBT table
+ */
+static void brcmnand_postprocessKernelArg(struct mtd_info *mtd)
+{
+	struct brcmnand_chip *this = mtd->priv;
+
+	int ret=0, needBBT; 
+	//uint64_t bOffset, bOffsetStart=0, bOffsetEnd=0;
+	uint64_t bOffset, bOffsetStart = 0, bOffsetEnd = 0;
+	int bbtSize = brcmnand_get_bbt_size(mtd); 
+	
+PRINTK("%s: gClearBBT=%d, size=%016llx, erasesize=%08x\n", 
+	__FUNCTION__, gClearBBT, device_size(mtd), mtd->erasesize);
+
+	switch (gClearBBT) {
+	case NANDCMD_SHOWBBT:
+		brcmnand_displayBBT(mtd);
+		return;
+		
+	case NANDCMD_CLEARBBT: // already done during pre-processing
+		brcmnand_displayBBT(mtd);
+		return;
+		
+	case NANDCMD_RESCAN:
+		printk("rescanning .... \n");
+		/* FALLTHROUGH */
+	case NANDCMD_ERASEALL:
+		/* FALLTHROUGH */
+	case NANDCMD_ERASE:
+		// Force erase of entire flash (except BBT), and rescan of BBT:
+		bOffsetStart = 0LL;
+		bOffsetEnd = this->mtdSize - bbtSize; // BBT partition is 1MB
+//printk("%s: gClearBBT=erase|eraseall, start=%08x, end=%08x\n", __FUNCTION__, bOffsetStart, bOffsetEnd);
+		break;
+
+	default:
+		BUG_ON("Invalid clear brcmnand flag");
+		break;
+	} // switch
+
+	// printk("Erasing flash from %08x to %08x\n", bOffsetStart, bOffsetEnd);
+			
+	/* Walk every erase block below the BBT partition */
+	for (bOffset=bOffsetStart; bOffset <  bOffsetEnd; 
+			bOffset = bOffset + mtd->erasesize) 
+	{
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
+		unsigned long pAddr = this->pbase + bOffset;
+#else
+		uint64_t pAddr = bOffset + this->pbase;
+#endif
+
+		int i;
+		int isBadBlock = 0;
+		int inCFE = 0;
+		int cs = this->ctrl->CS[this->csi];
+
+		/* On chip-select 0, compute whether this block belongs to the
+		 * bootloader (CFE) region; the address window depends on
+		 * whether address XOR is disabled. */
+		if (0 == cs) {
+			if (this->xor_disable) {
+				inCFE = pAddr < 0x00300000;
+			}
+			else { // XOR enabled
+				inCFE = (pAddr  >= 0x1fc00000 && pAddr < 0x1ff00000);
+			}
+		}
+
+		/* Skip reserved area, 7MB starting at 0x1f80_0000.
+		 * Reserved area is owned by the bootloader.  Linux owns the rootfs and the BBT area
+		 */
+
+		if (gClearBBT == NANDCMD_ERASE && inCFE)
+			continue;
+
+		/* Already included in BBT? */
+		if (mtd_block_isbad(mtd, bOffset)) {
+			isBadBlock = 1;
+			continue;
+		}
+
+		/*
+		 * Finding bad blocks besides the ones already in the BBT.  
+		 * If you accidentally/intentionally mark a block as bad, 
+		 * and want to clear it, use BBS to clear it, as Linux does not offer a way to do it.
+		 * The exception are the blocks in the BBT area, which are reserved
+		  */
+		else {
+			unsigned char oobbuf[NAND_MAX_OOBSIZE];
+			//int autoplace = 0;
+			//int raw = 1;
+			//struct nand_oobinfo oobsel;
+			int numpages;
+			/* THT: This __can__ be a 36bit integer (NAND controller address space is 48bit wide, minus
+			 * page size of 2*12, therefore 36bit max
+			  */
+			uint64_t blockPage = bOffset >> this->page_shift;
+			int dir;
+			uint64_t page;
+			
+			/* How many pages should we scan */
+			if (this->badblock_pattern->options & NAND_BBT_SCAN2NDPAGE) {
+				if (this->options &  NAND_SCAN_BI_3RD_PAGE) {
+					numpages = 3;
+				}
+				else {
+					numpages = 2;
+				}
+			} else {
+				numpages = 1;
+			}
+
+			if (!NAND_IS_MLC(this)) { // SLC: First and 2nd page
+				dir = 1;
+				page = blockPage; // first page of block
+			}
+			else { // MLC: Read last page
+				int pagesPerBlock = mtd->erasesize/mtd->writesize;
+				
+				dir = -1;
+				page = blockPage + pagesPerBlock - 1; // last page of block
+			}
+			
+			/* NOTE(review): "page += i*dir" steps by i (already
+			 * incremented), visiting pages P, P+1, P+3 — the 3rd BI
+			 * page is skipped when numpages == 3.  Looks like it
+			 * should be "page += dir"; confirm before changing. */
+			for (i=0; i<numpages; i++, page += i*dir) {
+				int res;
+				struct mtd_oob_ops ops;
+				uint64_t offs = page << this->page_shift;
+				
+
+				ops.len = mtd->oobsize;
+				ops.ooblen = mtd->oobsize;
+				ops.oobbuf = oobbuf;
+				ops.ooboffs = 0;
+				ops.datbuf = NULL;
+				ops.mode = MTD_OPS_PLACE_OOB;
+
+				
+				res = mtd_read_oob(mtd, offs, &ops);
+if (gdebug && res!=0) printk("########## %s: read_oob returns %d\n", __FUNCTION__, res);
+
+
+				if (res == -EBADMSG ||res == -EIO || res == -ETIMEDOUT) {// Uncorrectable errors
+					uint32_t acc0;
+
+					// Disable ECC
+					acc0 = brcmnand_disable_read_ecc(this->ctrl->CS[this->csi]);
+
+					// Re-read the OOB
+					res = mtd_read_oob(mtd, offs, &ops);
+
+					// Enable ECC back
+					brcmnand_restore_ecc(this->ctrl->CS[this->csi], acc0);
+					// res should be zero here
+				}
+
+				if (!res) {
+					if (check_short_pattern (oobbuf, this->badblock_pattern)) {
+						isBadBlock = 1;
+
+						if (NANDCMD_RESCAN == gClearBBT) 
+							printk(KERN_INFO "Found bad block at offset %0llx\n", offs);
+PRINTK("Found bad block at offset %0llx\n", offs);
+
+						break;
+					}
+				}
+				else { // We don't want to mark a block as bad otherwise
+					printk(KERN_DEBUG "brcmnand_read_pageoob returns %d for page %0llx\n",
+						res, page);
+				}
+			}
+				
+		}
+
+		/* Act on what we found for this block */
+		switch (gClearBBT) {
+		case NANDCMD_ERASE:
+			/* FALLTHROUGH */
+//gdebug=4;
+		case NANDCMD_ERASEALL:
+			if (isBadBlock) {
+				printk(KERN_INFO "Skipping Bad Block at %0llx\n", bOffset);
+				continue;
+			}
+			
+			//printk("brcmnand flag=%d: Erasing block at %08x\n", gClearBBT, bOffset);
+			this->ctrl_writeAddr(this, bOffset, 0);
+
+			this->ctrl_write(BCHP_NAND_CMD_START, OP_BLOCK_ERASE);
+			// Wait until flash is ready
+			ret = this->write_is_complete(mtd, &needBBT);
+			if (needBBT) {
+				printk(KERN_INFO "%s: Marking bad block @%0llx\n", __FUNCTION__, bOffset);
+				ret = this->block_markbad(mtd, bOffset);
+			}
+			break;
+			
+		case NANDCMD_RESCAN:
+			if (isBadBlock) {
+				printk(KERN_INFO "%s: Marking bad block @%0llx\n", __FUNCTION__, bOffset);
+				ret = this->block_markbad(mtd, bOffset);
+			}
+			break;
+			
+		default:
+			printk(KERN_INFO "Invalid brcmnand argument in %s\n", __FUNCTION__);
+			BUG();
+		}
+	}
+	brcmnand_displayBBT(mtd);
+	return;
+}
+
+/**
+ * brcmnand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd:	MTD device structure
+ * @offs:	offset in the device
+ * @allowbbt:	allow access to bad block table region
+ *
+ * Each byte in the BBT contains 4 entries, 2 bits each per block.
+ * So the entry for the block b is:
+ * bbt[b >> 2] & (0x3 << ((b & 0x3) << 1)))
+ *
+*/
+static int brcmnand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	uint32_t block; // Used as an index, so 32bit.
+	uint8_t	res;
+
+//printk( "--> %s: bbt info for offs 0x%08x: \n", __FUNCTION__, __ll_low(offs));
+	/*
+	 * THT 03/20/07:
+	 * Get block number * 2. It's more convenient to do it in the following way
+	 *  but is actually the same thing as in the comment above
+	 */
+	block = (uint32_t) (offs >>  (this->bbt_erase_shift - 1));
+	res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+	DEBUG (MTD_DEBUG_LEVEL3, "brcmnand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+		(unsigned int)offs, block >> 1, res);
+
+//if (res) PRINTK("%s: res=%x, allowbbt=%d at block %08x\n", __FUNCTION__, res, allowbbt, (unsigned int) offs);
+
+	switch ((int)res) {
+	case 0x00:	// Good block
+//printk("<-- %s\n", __FUNCTION__);
+		return 0;
+	case 0x01:	// Marked bad due to wear
+		return 1;
+	case 0x02:	// Reserved blocks
+		return allowbbt ? 0 : 1;
+	case 0x03:	
+		return 1; // Factory marked bad
+	}
+	return 1;
+}
+
+/**
+ * brcmnand_isbad_raw - [NAND Interface] Check if a block is bad in the absence of BBT
+ * @mtd:	MTD device structure
+ * @offs:	offset in the device
+ *
+ * Each byte in the BBT contains 4 entries, 2 bits each per block.
+ * So the entry for the block b is:
+ * bbt[b >> 2] & (0x3 << ((b & 0x3) << 1)))
+ *
+*/
+int brcmnand_isbad_raw (struct mtd_info *mtd, loff_t offs)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	//uint32_t block; // Used as an index, so 32bit.
+	uint8_t	isBadBlock = 0;
+	int i;
+
+	unsigned char oobbuf[NAND_MAX_OOBSIZE];
+	int numpages;
+	/* THT: This __can__ be a 36bit integer (NAND controller address space is 48bit wide, minus
+	 * page size of 2*12, therefore 36bit max
+	  */
+	uint64_t blockPage = offs >> this->page_shift;
+	int dir;
+	uint64_t page;
+
+printk("-->%s(offs=%llx\n", __FUNCTION__, offs);
+
+	/* How many pages should we scan */
+	if (this->badblock_pattern->options & NAND_BBT_SCAN2NDPAGE) {
+		if (this->options &  NAND_SCAN_BI_3RD_PAGE) {
+			numpages = 3;
+		}
+		else {
+			numpages = 2;
+		}
+	} else {
+		numpages = 1;
+	}
+
+PRINTK("%s: 20\n", __FUNCTION__);
+
+	if (!NAND_IS_MLC(this)) { // SLC: First and 2nd page
+		dir = 1;
+		page = blockPage; // first page of block
+	}
+	else { // MLC: Read last page
+		int pagesPerBlock = mtd->erasesize/mtd->writesize;
+		
+		dir = -1;
+		page = blockPage + pagesPerBlock - 1; // last page of block
+	}
+
+PRINTK("%s: 30\n", __FUNCTION__);
+	
+	for (i=0; i<numpages; i++, page += i*dir) {
+		int res;
+		//int retlen = 0;
+
+PRINTK("%s: 50 calling read_page_oob=%p, offset=%llx\n", __FUNCTION__, this->read_page_oob, 
+	page << this->page_shift);
+
+//gdebug=4;
+		res = this->read_page_oob(mtd, oobbuf, page);
+//gdebug = 0;
+		if (!res) {
+			if (check_short_pattern (oobbuf, this->badblock_pattern)) {
+				isBadBlock = 1;
+				break;
+			}
+		}
+		else {
+			printk(KERN_DEBUG "brcmnand_read_pageoob returns %d for page %0llx\n",
+				res, page);
+		}
+	}
+		
+	return isBadBlock;
+}
+
+/*
+ * Allocate a writable copy of a static BBT descriptor template.
+ * Returns the copy, or NULL (after logging) if allocation fails.
+ * kmemdup() replaces the hand-rolled kmalloc + struct assignment.
+ */
+static struct nand_bbt_descr*
+brcmnand_bbt_desc_init(struct nand_bbt_descr* orig)
+{
+	struct nand_bbt_descr* td;
+
+	/* Potential memory leak here when used as a module, this is never freed */
+	td = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+	if (!td)
+		printk(KERN_ERR "%s: Cannot allocate memory for BBT descriptor\n", __FUNCTION__);
+	return td;
+}
+
+
+
+/**
+ * brcmnand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd:	MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the brcmnand_scan_bbt function
+ *
+*/
+int brcmnand_default_bbt (struct mtd_info *mtd)
+{
+	struct brcmnand_chip *this = mtd->priv;
+	int res;
+
+printk("-->%s\n", __FUNCTION__);
+
+	/* Default for AG-AND. We must use a flash based
+	 * bad block table as the devices have factory marked
+	 * _good_ blocks. Erasing those blocks leads to loss
+	 * of the good / bad information, so we _must_ store
+	 * this information in a good / bad table during
+	 * startup
+	*/
+	if (this->options & NAND_IS_AND) {
+		/* Use the default pattern descriptors */
+		if (!this->bbt_td) {
+			this->bbt_td = brcmnand_bbt_desc_init(&bbt_main_descr);
+			this->bbt_md = brcmnand_bbt_desc_init(&bbt_mirror_descr);
+		}
+		this->options |= NAND_BBT_USE_FLASH;
+		return brcmnand_scan_bbt (mtd, &agand_flashbased);
+	}
+
+
+	/* Is a flash based bad block table requested ? */
+	if (this->options & NAND_BBT_USE_FLASH) {
+		if (this->ecclevel == BRCMNAND_ECC_HAMMING) {
+			/* Use the default pattern descriptors */
+			if (!this->bbt_td) {
+				this->bbt_td = brcmnand_bbt_desc_init(&bbt_main_descr);
+				this->bbt_md = brcmnand_bbt_desc_init(&bbt_mirror_descr);
+			}
+			if (!this->badblock_pattern) {
+				this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
+			}
+printk("%s: bbt_td = bbt_main_descr\n", __FUNCTION__);
+		}
+#if 1
+/* Nowadays, both SLC and MLC can have 4KB page, and more than 16 OOB size */
+		else  if (NAND_IS_MLC(this))  { // MLC
+			if (this->ecclevel == BRCMNAND_ECC_BCH_8 && this->eccOobSize == 16) {
+				/* Use the default pattern descriptors */
+				if (!this->bbt_td) {
+					this->bbt_td = brcmnand_bbt_desc_init(&bbt_bch8_16_main_descr);
+					this->bbt_md = brcmnand_bbt_desc_init(&bbt_bch8_16_mirror_descr);
+				}
+				if (!this->badblock_pattern) {
+					// 2K & 4K MLC NAND use same pattern
+					this->badblock_pattern = &bch4_flashbased;
+				}
+printk("%s: bbt_td = bbt_bch8_16_main_descr\n", __FUNCTION__);
+			}
+			else {
+				/* Use the default pattern descriptors */
+				if (!this->bbt_td) {
+					this->bbt_td = brcmnand_bbt_desc_init(&bbt_bch4_main_descr);
+					this->bbt_md = brcmnand_bbt_desc_init(&bbt_bch4_mirror_descr);
+				}
+				if (!this->badblock_pattern) {
+					// 2K & 4K MLC NAND use same pattern
+					this->badblock_pattern = &bch4_flashbased;
+				}
+			}
+printk("%s: bbt_td = bbt_bch4_main_descr\n", __FUNCTION__);
+		}
+#endif
+		else {/* SLC flashes using BCH-4 or higher ECC */
+			/* Small & Large SLC NAND use the same template */
+			if (this->ecclevel == BRCMNAND_ECC_BCH_4) {
+				if (!this->bbt_td) {
+					this->bbt_td = brcmnand_bbt_desc_init(&bbt_slc_bch4_main_descr);
+					this->bbt_md = brcmnand_bbt_desc_init(&bbt_slc_bch4_mirror_descr);
+				}
+				if (!this->badblock_pattern) {
+					this->badblock_pattern = (mtd->writesize > 512) ? &bch4_flashbased : &smallpage_flashbased;
+				}
+printk("%s: bbt_td = bbt_slc_bch4_main_descr\n", __FUNCTION__);	
+			}
+
+			/* Special case for BCH-8 with only 16B OOB */
+			else if (this->ecclevel == BRCMNAND_ECC_BCH_8 && this->eccOobSize == 16) {
+				if (!this->bbt_td) {
+					this->bbt_td = brcmnand_bbt_desc_init(&bbt_bch8_16_main_descr);
+					this->bbt_md = brcmnand_bbt_desc_init(&bbt_bch8_16_mirror_descr);
+				}
+				if (!this->badblock_pattern) {
+					// 2K & 4K MLC NAND use same pattern
+					this->badblock_pattern = &bch4_flashbased;
+				}
+printk("%s: bbt_td = bbt_bch8_16_main_descr\n", __FUNCTION__);	
+			}
+			else if (this->ecclevel >= BRCMNAND_ECC_BCH_8 && this->ecclevel < BRCMNAND_ECC_HAMMING 
+					&& this->eccOobSize > 16) {
+				/* Use the default pattern descriptors */
+				if (!this->bbt_td) {
+					this->bbt_td = brcmnand_bbt_desc_init(&bbt_slc_bch4_main_descr);
+					this->bbt_md = brcmnand_bbt_desc_init(&bbt_slc_bch4_mirror_descr);
+				}
+				if (!this->badblock_pattern) {
+					this->badblock_pattern =  &bch4_flashbased ;
+				}
+printk("%s: bbt_td = bbt_slc_bch4_main_descr\n", __FUNCTION__);	
+			}
+
+			/* TBD: Use Internal ECC */
+			else {
+				printk(KERN_ERR "***** %s: Unsupported ECC level %d\n", 
+					__FUNCTION__, this->ecclevel);
+				BUG();
+			}
+		}
+	} else {
+		/* MLC memory based not supported */
+		this->bbt_td = NULL;
+		this->bbt_md = NULL;
+		if (!this->badblock_pattern) {
+			this->badblock_pattern = (mtd->writesize > 512) ?
+				&largepage_memorybased : &smallpage_memorybased;
+		}
+	}
+
+
+	/*
+	 * BBT partition occupies 1 MB at the end of the useable flash, so adjust maxblocks accordingly.
+	 * Only applies to flash with 512MB or less, since we don't have the extra reserved space at the
+	 * end of the flash (1FF0_0000 - 1FFF_FFFF).
+	 */
+	if (device_size(mtd) <= ( 512ULL << 20)) {
+		this->bbt_td->maxblocks = this->bbt_md->maxblocks = (1<<20) / this->blockSize;
+		
+	}
+
+	/*
+	 * THT: 8/18/08: For MLC flashes with block size of 512KB, allocate 8 blocks or 4MB,
+	 * (this is possible because this region is outside of the CFE allocated space of 1MB at 1FF0_0000).
+	 */
+	else  if (this->blockSize == (512*1024)) {
+		this->bbt_td->maxblocks = this->bbt_md->maxblocks = 
+			max(this->bbt_td->maxblocks, (int)((4<<20) / this->blockSize));
+	}
+
+	/* Reserve at least 8 blocks */
+	else if (this->blockSize >= (1<<20)) {
+		this->bbt_td->maxblocks = this->bbt_md->maxblocks = 
+			max(this->bbt_td->maxblocks, 8);
+	}
+
+	this->bbtSize = this->bbt_td->maxblocks * this->blockSize;
+PRINTK("%s: gClearBBT = %d\n", __FUNCTION__, gClearBBT);
+	if (gClearBBT) {
+		(void) brcmnand_preprocessKernelArg(mtd);
+	}
+	
+	res =  brcmnand_scan_bbt (mtd, this->badblock_pattern);
+
+	/*
+	 * Now that we have scanned the BBT, allow BBT-lookup
+	 */
+	this->isbad_bbt = brcmnand_isbad_bbt;
+
+	if (gClearBBT) {
+		(void) brcmnand_postprocessKernelArg(mtd);
+	}
+
+	return res;
+}
+EXPORT_SYMBOL (brcmnand_default_bbt);
+
+#endif //defined(CONFIG_BCM_KF_MTD_BCMNAND)
diff --git a/drivers/mtd/brcmnand/brcmnand_cet.c b/drivers/mtd/brcmnand/brcmnand_cet.c
new file mode 100644
index 0000000000000000000000000000000000000000..c6507cdf7ca294244f17296d075606fac2b095f6
--- /dev/null
+++ b/drivers/mtd/brcmnand/brcmnand_cet.c
@@ -0,0 +1,1121 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+    <:copyright-BRCM:2012:DUAL/GPL:standard
+    
+       Copyright (c) 2012 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+    File: brcmnand_cet.c
+
+Broadcom NAND Correctable Error Table Support
+---------------------------------------------
+In case of a single bit correctable error, the block in which correctable error
+occurred is refreshed (i.e., read->erase->write the entire block). Following a
+refresh a success value is returned by brcmnand_read() i.e., the error is 
+hidden from the file system. The Correctable Error Table (CET) keeps a history
+(bit-vector) of per page correctable errors. If a correctable error happens 
+on the same page twice, an error is returned to the file system.
+
+The CET starts from the opposite end of BBT with 1-bit per page. The CET is 
+initialized to all 1's. On the first correctable error the bit corresponding
+to a page is reset. On an erase, all the bits of the corresponding block are
+set. The CET can span across multiple blocks therefore a signature 'CET#' 
+where # is the block number is kept in the OOB area of the first page of a 
+CET block. Also, the total correctable error count is kept in the second
+page OOB of the first CET block. 
+
+There is an in-memory correctable error table during runtime which is flushed
+to the flash every 10 mins (CET_SYNC_FREQ). 
+
+    Description: 
+when	who 	what
+-----	---	----
+080519	sidc	initial code
+080910  sidc	MLC support
+ */
+
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include "brcmnand_priv.h"
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+
+#define PRINTK(...)
+
+#define BBT_SLC_PARTITION	(1<<20)
+#define BBT_MAX_BLKS_SLC	4
+#define CET_START_BLK_SLC(x, y) (uint32_t) (((x) >> ((y)->bbt_erase_shift)) - (BBT_SLC_PARTITION/(y)->blockSize))
+
+#define BBT_MLC_PARTITION	(4<<20)
+#define BBT_MAX_BLKS_MLC(x)	(BBT_MLC_PARTITION >> ((x)->bbt_erase_shift))
+#define CET_START_BLK_MLC(x, y, z)	(uint32_t) (((x) >> ((y)->bbt_erase_shift)) - ((z)/(y)->blockSize))
+
+#define CET_GOOD_BLK	0x00
+#define CET_BAD_WEAR 	0x01
+#define CET_BBT_USE	0x02
+#define CET_BAD_FACTORY	0x03
+
+#define CET_SYNC_FREQ	(10*60*HZ)	
+
+
+/* OOB signature: "CET" plus one byte that carries the CET block number */
+static char cet_pattern[] = {'C', 'E', 'T', 0};
+/* Signature descriptor for Hamming-ECC (SLC) parts: marker at OOB offset 9 */
+static struct brcmnand_cet_descr cet_descr = {
+	.offs = 9,
+	.len = 4,
+	.pattern = cet_pattern
+};
+
+/* 
+ * This also applies to Large Page SLC flashes with BCH-4 ECC.
+ * We don't support BCH-4 on Small Page SLCs because there are not 
+ * enough free bytes for the OOB, but we don't enforce it,
+ * in order to allow page aggregation like in YAFFS2 on small page SLCs.
+ */
+static struct brcmnand_cet_descr cet_descr_mlc = {
+	.offs = 1,
+	.len = 4,
+	.pattern = cet_pattern
+};
+static void sync_cet(struct work_struct *work);
+static int search_cet_blks(struct mtd_info *, struct brcmnand_cet_descr *, char);
+extern char gClearCET;
+
+/*
+ * Private: Read OOB area in RAW mode
+ */
+static inline int brcmnand_cet_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs)
+{
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OPS_RAW;
+	ops.len = mtd->oobsize;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = NULL;
+	ops.oobbuf = buf;
+	ops.ooboffs = 0;
+
+	return mtd_read_oob(mtd, offs, &ops);
+}
+
+/* 
+ * Private: Write to the OOB area only
+ */
+static inline int brcmnand_cet_write_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs)
+{
+#if defined(CONFIG_BCM_KF_NAND)
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.len = mtd->writesize;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = NULL;
+	ops.oobbuf = buf;
+	ops.ooboffs = 0;
+
+	return mtd_write_oob(mtd, offs, &ops);
+#else
+	struct mtd_oob_ops ops;
+	uint8_t databuf[mtd->writesize];
+
+	memset(databuf, 0xff, mtd->writesize);
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.len = mtd->writesize;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = databuf;
+	ops.oobbuf = buf;
+	ops.ooboffs = 0;
+
+	return mtd_write_oob(mtd, offs, &ops);
+#endif
+}
+
+/*
+ * Private: write one page of data and OOB to flash 
+ */
+static int brcmnand_cet_write(struct mtd_info *mtd, loff_t offs, size_t len, 
+		uint8_t *buf, uint8_t *oob) 
+{
+	struct mtd_oob_ops ops;
+	int ret;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = buf;
+	ops.oobbuf = oob;
+	ops.len = len;
+	ret = mtd_write_oob(mtd, offs, &ops);
+
+	return ret;
+}
+
+/*
+ * bitcount - MIT Hackmem count implementation which is O(1)
+ * http://infolab.stanford.edu/~manku/bitcount/bitcount.html
+ * Counts the number of 1s in a given unsigned int n 
+ */
+static inline int bitcount(uint32_t n) 
+{
+	uint32_t tmp;
+	tmp = n - ((n >> 1) & 033333333333)
+		- ((n >> 2) & 011111111111);
+	return ((tmp + (tmp >> 3)) & 030707070707) % 63;
+}
+
+/* 
+ * Private debug function: Print OOBs 
+ */
+static void cet_printpg_oob(struct mtd_info *mtd, struct brcmnand_cet_descr *cet, int count) 
+{
+	uint8_t oobbuf[mtd->oobsize];
+	loff_t offs;
+	int i, gdebug = 0;
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+
+	offs = ((loff_t) cet->startblk) << this->bbt_erase_shift;
+	if (gdebug) {
+		printk(KERN_INFO "%s: %x\n", __FUNCTION__, (unsigned int) offs);
+	}
+	for (i = 0; i < count; i++) {
+		memset(oobbuf, 0, mtd->oobsize);
+		if (brcmnand_cet_read_oob(mtd, oobbuf, offs)) {
+			return;
+		}
+		print_oobbuf((const char *) oobbuf, mtd->oobsize);
+		offs = offs + cet->sign*this->pageSize;
+	}
+	return;
+}
+
+/*
+ * Private debug function: Prints first OOB area of all blocks <block#, page0>
+ */ 
+static void cet_printblk_oob(struct mtd_info *mtd, struct brcmnand_cet_descr *cet) 
+{
+	uint8_t *oobbuf;
+	loff_t offs;
+	int i;
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+
+	if((oobbuf = (uint8_t *) vmalloc(sizeof(uint8_t)*mtd->oobsize)) == NULL) {
+		printk(KERN_ERR "brcmnandCET: %s vmalloc failed\n", __FUNCTION__);
+		return;
+	}
+	for (i = 0; i < this->bbt_td->maxblocks; i++) {
+		memset(oobbuf, 0, mtd->oobsize);
+		offs = ((loff_t) cet->startblk+((cet->sign)*i)) << this->bbt_erase_shift;
+		if (brcmnand_cet_read_oob(mtd, oobbuf, offs)) {
+			vfree(oobbuf);
+			return;
+		}
+		print_oobbuf((const char *) oobbuf, mtd->oobsize);
+	}
+	vfree(oobbuf);
+	return;
+}
+
+/*
+ * Private debug function: erase all blocks belonging to CET 
+ * Use for testing purposes only
+ */
+static void cet_eraseall(struct mtd_info *mtd, struct brcmnand_cet_descr *cet) 
+{
+	int i, ret;
+	loff_t from;
+	struct erase_info einfo;
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	int gdebug = 0;
+
+	for (i = 0; i < cet->numblks; i++) {
+		if (cet->memtbl[i].blk != -1) {
+			from = (uint64_t) cet->memtbl[i].blk << this->bbt_erase_shift;
+			if (unlikely(gdebug)) {
+				printk(KERN_INFO "DEBUG -> Erasing blk %x\n", cet->memtbl[i].blk);
+			}
+			memset(&einfo, 0, sizeof(einfo));
+			einfo.mtd = mtd;
+			einfo.addr = from;
+			einfo.len = mtd->erasesize;
+			ret = this->erase_bbt(mtd, &einfo, 1, 1);
+			if (unlikely(ret < 0)) {
+				printk(KERN_ERR "brcmnandCET: %s Error erasing block %llx\n", __FUNCTION__, einfo.addr);
+			}
+		}
+	}
+
+	return;
+}
+
+/*
+ * Private: Check if a block is factory marked bad block
+ * Derived from brcmnand_isbad_bbt()
+ * Return values: 
+ * 0x00 Good block
+ * 0x01 Marked bad due to wear
+ * 0x02 Reserved for BBT
+ * 0x03 Factory marked bad
+ */
+static inline int check_badblk(struct mtd_info *mtd, loff_t offs) 
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	uint32_t blk;
+	int res;
+
+	blk = (uint32_t) (offs >> (this->bbt_erase_shift-1));
+	res = (this->bbt[blk >> 3] >> blk & 0x06) & 0x03;
+
+	return res;
+}
+
+/*
+ * Check for CET pattern in the OOB buffer
+ * return the blk number present in the CET
+ */
+static inline int found_cet_pattern(struct brcmnand_chip *this, uint8_t *buf)
+{
+	struct brcmnand_cet_descr *cet = this->cet;
+	int i;
+
+	for (i = 0; i < cet->len-1; i++) {
+		if (buf[cet->offs + i] != cet_pattern[i]) {
+			return -1;
+		}
+	}
+	return (int) buf[cet->offs + cet->len-1];
+}
+
+/*
+ * Check for BBT/Mirror BBT pattern
+ * Similar to the implementation in brcmnand_bbt.c
+ */
+static inline int found_bbt_pattern(uint8_t *buf, struct nand_bbt_descr *bd) 
+{
+	int i;
+
+	for (i = 0; i < bd->len; i++) {
+		if (buf[bd->offs+i] != bd->pattern[i]) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Check OOB area to test if the block is erased 
+ */
+static inline int cet_iserased(struct mtd_info *mtd, uint8_t *oobbuf) 
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct nand_ecclayout *oobinfo = this->ecclayout;
+	int i;
+
+	for (i = 0; i < oobinfo->eccbytes; i++) {
+		if (oobbuf[oobinfo->eccpos[i]] != 0xff) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Process kernel command line showcet
+ * If the CET is loaded, display which blocks of flash the CET is in
+ */
+static inline void cmdline_showcet(struct mtd_info *mtd, struct brcmnand_cet_descr *cet)
+{
+	int i;
+	loff_t offs;
+	uint8_t oobbuf[mtd->oobsize];
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	
+	if (cet->flags == BRCMNAND_CET_DISABLED) {
+		printk(KERN_INFO "brcmnandCET: Disabled\n");
+		return;
+	}
+	printk(KERN_INFO "brcmnandCET: Correctable error count is 0x%x\n", cet->cerr_count);
+	if (cet->flags == BRCMNAND_CET_LAZY) {
+		printk(KERN_INFO "brcmnandCET: Deferred until next correctable error\n");
+		return;
+	}
+	printk(KERN_INFO "brcmnandCET: Displaying first OOB area of all CET blocks ...\n");
+	for (i = 0; i < cet->numblks; i++) {
+		if (cet->memtbl[i].blk == -1) 
+			continue;
+		offs = ((loff_t) cet->memtbl[i].blk) << this->bbt_erase_shift;
+		printk(KERN_INFO "brcmnandCET: Block[%d] @ %x\n", i, (unsigned int) offs);
+		if (brcmnand_cet_read_oob(mtd, oobbuf, offs)) {
+			return;
+		}
+		print_oobbuf((const char *) oobbuf, mtd->oobsize);
+	}
+	return;
+}
+
+/*
+ * Reset CET to all 0xffs 
+ */
+static inline int cmdline_resetcet(struct mtd_info *mtd, struct brcmnand_cet_descr *cet)
+{
+	int i;
+
+	cet_eraseall(mtd, cet);
+	for (i = 0; i < cet->numblks; i++) {
+		cet->memtbl[i].isdirty = 0;
+		cet->memtbl[i].blk = -1;
+		cet->memtbl[i].bitvec = NULL;
+	}
+	printk(KERN_INFO "brcmnandCET: Recreating ... \n");
+
+	return search_cet_blks(mtd, cet, 0);
+}
+
+/*
+ * Write the "CET#"+index signature into the OOB area of cet->numblks usable blocks; 0 on success, -1 on failure.
+ */
+static int create_cet_blks(struct mtd_info *mtd, struct brcmnand_cet_descr *cet)
+{
+	int i, j, ret, gdebug = 0;
+	loff_t from;
+	struct nand_bbt_descr *td, *md;
+	struct erase_info einfo;
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	uint8_t oobbuf[mtd->oobsize];
+	char *oobptr, count = 0; /* count = index of the next CET block to create */
+
+	td = this->bbt_td;
+	md = this->bbt_md;
+	if (unlikely(gdebug)) {
+		printk(KERN_INFO "brcmnandCET: Inside %s\n", __FUNCTION__);
+	}
+	for (i = 0; i < td->maxblocks; i++) {
+		from = ((loff_t) cet->startblk+i*cet->sign) << this->bbt_erase_shift; /* walk candidates in cet->sign direction */
+		/* Skip if bad block */
+		ret = check_badblk(mtd, from);
+		if (ret == CET_BAD_FACTORY || ret == CET_BAD_WEAR) {
+			continue;
+		}
+		memset(oobbuf, 0, mtd->oobsize);
+		if (brcmnand_cet_read_oob(mtd, oobbuf, from)) {
+			printk(KERN_INFO "brcmnandCET: %s %d Error reading OOB\n", __FUNCTION__, __LINE__);
+			return -1;
+		}
+		/* If BBT/MBT block found  we have no space left */
+		if (found_bbt_pattern(oobbuf, td) || found_bbt_pattern(oobbuf, md)) {
+			printk(KERN_INFO "brcmnandCET: %s blk %x is BBT\n", __FUNCTION__, cet->startblk + i*cet->sign);
+			return -1;
+		}
+		//if (!cet_iserased(mtd, oobbuf)) {
+		if (unlikely(gdebug)) {
+			printk(KERN_INFO "brcmnandCET: block %x is erased\n", cet->startblk+i*cet->sign);
+		}
+		/* Erase */
+		memset(&einfo, 0, sizeof(einfo));
+		einfo.mtd = mtd;
+		einfo.addr = from;
+		einfo.len = mtd->erasesize;
+		ret = this->erase_bbt(mtd, &einfo, 1, 1);
+		if (unlikely(ret < 0)) {
+			printk(KERN_ERR "brcmnandCET: %s Error erasing block %x\n", __FUNCTION__, cet->startblk+i*cet->sign);
+			return -1;
+		}
+		//} 
+		/* Write 'CET#' pattern to the OOB area */
+		memset(oobbuf, 0xff, mtd->oobsize);
+		if (unlikely(gdebug)) {
+			printk(KERN_INFO "brcmnandCET: writing CET %d to OOB area\n", (int)count);
+		}
+		oobptr = (char *) oobbuf;
+		for (j = 0; j < cet->len-1; j++) {
+			oobptr[cet->offs + j] = cet->pattern[j];
+		}
+		oobptr[cet->offs + j] = count; /* final signature byte = CET block index */
+		if (brcmnand_cet_write_oob(mtd, oobbuf, from)) {
+			printk(KERN_ERR "brcmnandCET: %s Error writing to OOB# %x\n", __FUNCTION__, (unsigned int)from);
+			return -1;
+		}
+		/* If this is the first CET block, init the correctable erase count to 0 */
+		if (count == 0) {
+			memset(oobbuf, 0xff, mtd->oobsize);
+			oobptr = (char *) oobbuf;
+			*((uint32_t *) (oobptr + cet->offs)) = 0x00000000;
+			from += this->pageSize; /* cerr_count lives in the 2nd page's OOB */
+			if (unlikely(gdebug)) {
+				printk(KERN_INFO "DEBUG -> 0: from = %x\n", (unsigned int) from);
+				printk(KERN_INFO "brcmnandCET: Writing cer_count to page %x\n", (unsigned int) from);
+			}
+			if (brcmnand_cet_write_oob(mtd, oobbuf, from)) {
+				printk(KERN_INFO "brcmnandCET: %s Error writing to OOB# %x\n", __FUNCTION__, (unsigned int)from);
+				return -1;
+			}
+		}
+		count++;
+		if (((int)count) == cet->numblks) {
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Locate the CET blocks on flash (creating them if absent) and populate cet->memtbl.
+ * force => 1 Force loading of all CET blocks, do not defer for later
+ */
+static int search_cet_blks(struct mtd_info *mtd, struct brcmnand_cet_descr *cet, char force)
+{
+	int i, count = 0, ret;
+	loff_t from;
+	struct nand_bbt_descr *td, *md;
+	uint8_t oobbuf[mtd->oobsize];
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	int gdebug = 0;
+
+	td = this->bbt_td;
+	md = this->bbt_md;
+	if (unlikely(gdebug)) {
+		printk(KERN_INFO "DEBUG -> Inside search_cet_blks\n");
+	}
+	for (i = 0; i < td->maxblocks; i++) {
+		from = ((loff_t) cet->startblk+i*cet->sign) << this->bbt_erase_shift; /* walk candidates in cet->sign direction */
+		/* Skip if bad block */
+		ret = check_badblk(mtd, from);
+		if (ret == CET_BAD_FACTORY || ret == CET_BAD_WEAR) {
+			continue;
+		}
+		/* Read the OOB area of the first page of the block */
+		memset(oobbuf, 0, mtd->oobsize);
+		if (brcmnand_cet_read_oob(mtd, oobbuf, from)) {
+			printk(KERN_INFO "brcmnandCET: %s %d Error reading OOB\n", __FUNCTION__, __LINE__);
+			cet->flags = BRCMNAND_CET_DISABLED;
+			return -1;
+		}
+		if (unlikely(gdebug)) {
+			print_oobbuf(oobbuf, mtd->oobsize);
+		}
+		/* Return -1 if BBT/MBT block => no space left for CET */
+		if (found_bbt_pattern(oobbuf, td) || found_bbt_pattern(oobbuf, md)) {
+			printk(KERN_INFO "brcmnandCET: %s blk %x is BBT\n", __FUNCTION__, cet->startblk + i*cet->sign);
+			cet->flags = BRCMNAND_CET_DISABLED;
+			return -1;
+		}
+		/* Check for CET pattern */
+		ret = found_cet_pattern(this, oobbuf);
+		if (unlikely(gdebug)) {
+			print_oobbuf((const char *) oobbuf, mtd->oobsize);
+		}
+		if (ret < 0 || ret >= cet->numblks) {
+			/* No CET pattern found due to
+			   1. first time being booted => normal so create
+			   2. Did not find CET pattern when we're supposed to
+			      error => recreate, in either case we call create_cet_blks();
+			   3. Found an incorrect > cet->numblks count => error => recreate
+			 */
+			printk(KERN_INFO "brcmnandCET: Did not find CET, recreating\n");
+			if (create_cet_blks(mtd, cet) < 0) {
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return ret;
+			}
+			cet->flags = BRCMNAND_CET_LAZY;
+			return 0;
+		}
+		/* Found CET pattern */
+		if (unlikely(gdebug)) {
+			printk(KERN_INFO "brcmnandCET: Found CET block#%d\n", count);
+		}
+		/* If this is the first block do some extra stuff ... */
+		if (count == 0) {
+			/* The global cerr_count is in the 2nd page's OOB area */
+			from += this->pageSize;
+			if (brcmnand_cet_read_oob(mtd, oobbuf, from)) {
+				printk(KERN_ERR "brcmnandCET: %s %d Error reading OOB\n", __FUNCTION__, __LINE__);
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+			cet->cerr_count = *((uint32_t *) (oobbuf + cet->offs));
+			/* TODO - Fix this -> recreate */
+			if (cet->cerr_count == 0xffffffff) { /* erased word: counter was never written */
+				/* Reset it to 0 */
+				cet->cerr_count = 0;
+				cet->memtbl[0].isdirty = 1;
+			}
+			if (unlikely(gdebug)) {
+				printk(KERN_INFO "brcmnandCET: correctable error count = %x\n", cet->cerr_count);
+			}
+			/* If force then go thru all CET blks even if cerr_count is 0 */
+			if (!force) {
+				if (cet->cerr_count == 0) {
+					cet->flags = BRCMNAND_CET_LAZY;
+					return 0;
+				} 
+			}
+		}
+		cet->memtbl[ret].blk = cet->startblk + i*cet->sign; /* ret = CET index byte read from OOB */
+		count++;
+#if 0
+		printk(KERN_INFO "DEBUG -> count = %d, nblks = %d blk = %d\n", count, cet->numblks, cet->memtbl[ret].blk);
+#endif
+		if (count == cet->numblks) {
+			cet->flags = BRCMNAND_CET_LOADED;
+			return 0;
+		}
+	}
+	/* This should never happen */
+	cet->flags = BRCMNAND_CET_DISABLED;
+	return -1;
+}
+
+/*
+ * flush pending in-memory CET data to the flash. Called as part of a 
+ * callback function from workqueue that is invoked every SYNC_FREQ seconds
+ */
+static int flush_memcet(struct mtd_info *mtd) 
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct brcmnand_cet_descr *cet = this->cet;
+	struct erase_info einfo;
+	int i, j, k = 0, ret, pg_idx = 0, gdebug = 0;
+	uint8_t oobbuf[mtd->oobsize];
+	loff_t from, to;
+	char *oobptr, count = 0; /* count mirrors the loop index i: CET block signature byte */
+
+	/* If chip is locked reset timer for a later time */
+	if (spin_is_locked(&this->ctrl->chip_lock)) {
+		printk(KERN_INFO "brcmnandCET: flash locked reseting timer\n");
+		return -1;
+	}
+	if (unlikely(gdebug)) {
+		printk(KERN_INFO "brcmnandCET: Inside %s\n", __FUNCTION__);
+	}
+	/* For each in-mem dirty block, sync with flash 
+	   sync => erase -> write */
+	for (i = 0; i < cet->numblks; i++) {
+		if (cet->memtbl[i].isdirty && cet->memtbl[i].blk != -1) {
+			/* Erase */
+			from = ((loff_t) cet->memtbl[i].blk) << this->bbt_erase_shift;
+			to = from;
+			memset(&einfo, 0, sizeof(einfo));
+			einfo.mtd = mtd;
+			einfo.addr = from;
+			einfo.len = mtd->erasesize;
+			ret = this->erase_bbt(mtd, &einfo, 1, 1);
+			if (unlikely(ret < 0)) {
+				printk(KERN_ERR "brcmnandCET: %s Error erasing block %x\n", __FUNCTION__, cet->memtbl[i].blk);
+				return -1;
+			}
+			if (unlikely(gdebug)) {
+				printk(KERN_INFO "DEBUG -> brcmnandCET: After erasing ...\n");
+				cet_printpg_oob(mtd, cet, 3);
+			}
+			pg_idx = 0;
+			/* Write pages i.e., flush */
+			for (j = 0; j < mtd->erasesize/this->pageSize; j++) {
+				memset(oobbuf, 0xff, mtd->oobsize);
+				oobptr = (char *) oobbuf;
+				if (j == 0) { /* Write CET# */
+					for (k = 0; k < cet->len-1; k++) {
+						oobptr[cet->offs + k] = cet->pattern[k];
+					}
+					oobptr[cet->offs + k] = count; /* final signature byte = CET block index */
+					if (unlikely(gdebug)) {
+						print_oobbuf((const char *) oobbuf, mtd->oobsize);
+					}
+				}
+				if (j == 1 && count == 0) { /* Write cerr_count */
+					*((uint32_t *) (oobptr + cet->offs)) = cet->cerr_count;
+				}
+				ret = brcmnand_cet_write(mtd, to, (size_t) this->pageSize, cet->memtbl[i].bitvec+pg_idx, oobbuf);
+				if (ret < 0) {
+					printk(KERN_ERR "brcmnandCET: %s Error writing to page %x\n", __FUNCTION__, (unsigned int) to);
+					return ret;
+				}
+				to += mtd->writesize;
+				pg_idx += mtd->writesize;
+			}
+			cet->memtbl[i].isdirty = 0;
+			if (unlikely(gdebug)) {
+				printk(KERN_INFO "brcmnandCET: flushing CET block %d\n", i);
+			}
+		}
+		count++; /* advance CET block index even when the block was clean */
+	}
+
+	return 0;
+}
+
+/*
+ * The callback function for kernel workq task
+ * Checks if there is any work to be done, if so calls flush_memcet
+ * Resets timer before returning in any case
+ */
+static void sync_cet(struct work_struct *work)
+{
+	int i;
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct brcmnand_cet_descr *cet = container_of(d, struct brcmnand_cet_descr, cet_flush);
+	struct mtd_info *mtd = cet->mtd;
+
+	/* Check if all blocks are clean */
+	for (i = 0; i < cet->numblks; i++) {
+		if (cet->memtbl[i].isdirty) break;
+	}
+	/* Avoid function call cost if there are no dirty blocks */
+	if (i != cet->numblks)
+		flush_memcet(mtd);
+	schedule_delayed_work(&cet->cet_flush, CET_SYNC_FREQ); /* re-arm for the next sync window */
+
+	return;
+}
+
+
+/*
+ * brcmnand_create_cet - Create a CET (Correctable Error Table)
+ * @param mtd		MTD device structure
+ * 
+ * Called during mtd init. Checks if a CET already exists or needs
+ * to be created, initializes the in-memory CET and schedules the periodic flush.
+ */
+int brcmnand_create_cet(struct mtd_info *mtd)
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct brcmnand_cet_descr *cet;
+	int gdebug = 0, i, ret, rem;
+	uint64_t tmpdiv;
+
+	if (unlikely(gdebug)) {
+		printk(KERN_INFO "brcmnandCET: Creating correctable error table ...\n");
+	}
+	
+	if (NAND_IS_MLC(this) || /* MLC flashes */
+	   /* SLC w/ BCH-n; We don't check for pageSize, and let it be */
+	   (this->ecclevel >= BRCMNAND_ECC_BCH_1 && this->ecclevel <= BRCMNAND_ECC_BCH_12)) 
+	{
+		this->cet = cet = &cet_descr_mlc;
+if (gdebug) printk("%s: CET = cet_desc_mlc\n", __FUNCTION__);
+	} 
+
+	else {
+		this->cet = cet = &cet_descr;
+if (gdebug) printk("%s: CET = cet_descr\n", __FUNCTION__);
+	}
+	cet->flags = 0x00; /* set to DISABLED/LAZY/LOADED below */
+	/* Check that BBT table and mirror exist */
+	if (unlikely(!this->bbt_td && !this->bbt_md)) {
+		printk(KERN_INFO "brcmnandCET: BBT tables not found, disabling\n");
+		cet->flags = BRCMNAND_CET_DISABLED;
+		return -1;
+	}
+	/* Per chip not supported. We do not use per chip BBT, but this 
+	   is just a safety net */
+	if (unlikely(this->bbt_td->options & NAND_BBT_PERCHIP)) {
+		printk(KERN_INFO "brcmnandCET: per chip CET not supported, disabling\n");
+		cet->flags = BRCMNAND_CET_DISABLED;
+		return -1;
+	}
+	/* Calculate max blocks based on 1-bit per page */
+	tmpdiv = this->mtdSize;
+	do_div(tmpdiv, this->pageSize);
+	do_div(tmpdiv, (8*this->blockSize));
+	cet->numblks = (uint32_t) tmpdiv; /* numblks = ceil(pages / (8 * blockSize)) */
+	//cet->numblks = (this->mtdSize/this->pageSize)/(8*this->blockSize);
+	tmpdiv = this->mtdSize;
+	do_div(tmpdiv, this->pageSize);
+	do_div(tmpdiv, 8);
+	rem = do_div(tmpdiv, this->blockSize);
+	//if (((this->mtdSize/this->pageSize)/8)%this->blockSize) {
+	if (rem) {
+		cet->numblks++;
+	}
+	/* Allocate twice the size in case we have bad blocks */
+	cet->maxblks = cet->numblks*2;
+	/* Determine the direction of CET based on reverse direction of BBT */
+	cet->sign = (this->bbt_td->options & NAND_BBT_LASTBLOCK) ? 1 : -1;
+	/* For flash size <= 512MB BBT and CET share the last 1MB
+	   for flash size > 512MB CET is at the 512th MB of flash */
+#if 0
+	if (NAND_IS_MLC(this)) {
+	} else {
+		if (this->mtdSize < (1<<29)) {
+			if (cet->maxblks + BBT_MAX_BLKS > get_bbt_partition(this)/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+			if (cet->sign) {
+				cet->startblk = CET_START_BLK(this->mtdSize, this);
+			} else {
+				cet->startblk = (uint32_t) (this->mtdSize >> this->bbt_erase_shift)-1;
+			}
+
+		} else {
+			if (cet->maxblks > (get_bbt_partition(this))/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+			cet->startblk = CET_START_BLK((1<<29), this);
+		}
+	}
+#endif
+	if (NAND_IS_MLC(this)) {
+		if (this->mtdSize < (1<<29)) {
+			if (cet->maxblks + BBT_MAX_BLKS_MLC(this) > BBT_MLC_PARTITION/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+			/* Reverse direction of BBT */
+			if (cet->sign) {
+				cet->startblk = CET_START_BLK_MLC(this->mtdSize, this, BBT_MLC_PARTITION);
+			} else {
+				cet->startblk = (uint32_t) (this->mtdSize >> this->bbt_erase_shift)-1;
+			}
+		} else {
+			/* 512th MB used by CET */
+			if (cet->maxblks > (1<<29)/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+#if defined(CONFIG_BCM_KF_NAND)
+			cet->startblk = CET_START_BLK_SLC(this->mtdSize, this);
+#else
+			cet->startblk = CET_START_BLK_MLC((1<<29), this, (1<<20));
+#endif
+		}
+	} else {
+		if (this->mtdSize < (1<<29)) {
+			if (cet->maxblks + BBT_MAX_BLKS_SLC > BBT_SLC_PARTITION/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+			/* Reverse direction of BBT */
+			if (cet->sign) {
+				cet->startblk = CET_START_BLK_SLC(this->mtdSize, this);
+			} else {
+				cet->startblk = (uint32_t) (this->mtdSize >> this->bbt_erase_shift)-1;
+			}
+		} else {
+			/* 512th MB used by CET */
+			if (cet->maxblks > BBT_SLC_PARTITION/this->blockSize) {
+				printk(KERN_INFO "brcmnandCET: Not enough space to store CET, disabling CET\n");
+				cet->flags = BRCMNAND_CET_DISABLED;
+				return -1;
+			}
+#if defined(CONFIG_BCM_KF_NAND)
+			cet->startblk = CET_START_BLK_SLC(this->mtdSize, this);
+#else
+			cet->startblk = CET_START_BLK_SLC((1<<29), this);
+#endif
+		}
+	}
+	if (gdebug) {
+		printk(KERN_INFO "brcmnandCET: start blk = %x, numblks = %x\n", cet->startblk, cet->numblks);
+	}
+
+	/* Init memory based CET */
+	cet->memtbl = (struct brcmnand_cet_memtable *) vmalloc(cet->numblks*sizeof(struct brcmnand_cet_memtable));
+	if (cet->memtbl == NULL) {
+		printk(KERN_ERR "brcmnandCET: vmalloc failed %s\n", __FUNCTION__);
+		cet->flags = BRCMNAND_CET_DISABLED;
+		return -1;
+	}
+	for (i = 0; i < cet->numblks; i++) { /* all slots start clean/unassigned */
+		cet->memtbl[i].isdirty = 0;
+		cet->memtbl[i].blk = -1;
+		cet->memtbl[i].bitvec = NULL;
+	}
+	ret = search_cet_blks(mtd, cet, 0); /* locate or lazily create the CET */
+	if (unlikely(gClearCET == 1)) {		/* kernel cmdline showcet */
+		cmdline_showcet(mtd, cet);
+	}
+	if (unlikely(gClearCET == 2)) { 	/* kernel cmdline resetcet */
+		if (cmdline_resetcet(mtd, cet) < 0) {
+			cet->flags = BRCMNAND_CET_DISABLED;
+			return -1;
+		}
+	}
+	if (unlikely(gClearCET == 3)) {		/* kernel cmdline disable */
+		cet->flags = BRCMNAND_CET_DISABLED;
+		ret = -1;
+	}
+	//cet_printpg_oob(mtd, cet, 3);
+	switch(cet->flags) {
+		case BRCMNAND_CET_DISABLED:
+			printk(KERN_INFO "brcmnandCET: Status -> Disabled\n");
+			break;
+		case BRCMNAND_CET_LAZY:
+			printk(KERN_INFO "brcmnandCET: Status -> Deferred\n");
+			break;
+		case BRCMNAND_CET_LOADED:
+			printk(KERN_INFO "brcmnandCET: Status -> Loaded\n");
+			break;
+		default:
+			printk(KERN_INFO "brcmnandCET: Status -> Fatal error CET disabled\n");
+			cet->flags = BRCMNAND_CET_DISABLED;
+			break;
+	}
+	if (unlikely(gdebug)) {
+		cet_printpg_oob(mtd, cet, 3);
+		cet_printblk_oob(mtd, cet);
+	}
+
+	INIT_DELAYED_WORK(&cet->cet_flush, sync_cet);
+	cet->mtd = mtd;
+	schedule_delayed_work(&cet->cet_flush, CET_SYNC_FREQ); /* start the periodic flush */
+
+	return ret;
+}
+
+/*
+ * brcmnand_cet_erasecallback: Called every time there is an erase due to
+ *                             userspace activity
+ *
+ * @param mtd		MTD device structure
+ * @param addr		Address of the block that was erased by fs/userspace
+ *
+ * Assumption: cet->flag != BRCMNAND_CET_DISABLED || BRCMNAND_CET_LAZY
+ * is checked by the caller
+ * flag == BRCMNAND_CET_DISABLED => CET not being used
+ * flag == BRCMNAND_CET_LAZY => correctable error count is 0 so no need of callback
+ * 
+ * Sets the CET bits covering the erased block back to 1 and decrements cerr_count.
+ */
+int brcmnand_cet_erasecallback(struct mtd_info *mtd, u_int32_t addr) 
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct brcmnand_cet_descr *cet = this->cet;
+	uint32_t page = 0;
+	int blkbegin, blk, i, ret, retlen, pg_idx = 0, numzeros = 0, byte, gdebug = 0;
+	uint32_t *ptr;
+	unsigned int pos;
+	loff_t origaddr = addr;
+	
+	/* Find out which entry in the memtbl does the addr map to */
+	page = (uint32_t) (addr >> this->page_shift);
+	blk = page/(this->blockSize<<3); /* one CET bit per page => blockSize*8 pages per CET block */
+	if (unlikely(cet->memtbl[blk].blk == -1)) {
+		printk(KERN_INFO "brcmnandCET: %s invalid block# in CET\n", __FUNCTION__);
+		return -1;
+	}
+	blkbegin = cet->memtbl[blk].blk;
+	/* Start page of the block */
+	addr = ((loff_t) blkbegin) << this->bbt_erase_shift;
+	if (cet->memtbl[blk].bitvec == NULL) {
+		if (gdebug) {
+			printk(KERN_INFO "DEBUG -> brcmnandCET: bitvec is null, reloading\n");
+		}
+                /* using kmalloc as the erases are typically called from image update which has interrupts disabled. We can only
+                support 32bit addressing, 4GB maximum size NAND. Such NAND uses 128K blocks and 2048 byte pages, so the bitvec takes 4GB/2048/8
+                = 256KB or 2 blocks. The kernel should have enough GFP_ATOMIC memory for 2x128KB allocations. */
+		cet->memtbl[blk].bitvec = (char *) kmalloc(this->blockSize, GFP_ATOMIC);
+		if (cet->memtbl[blk].bitvec == NULL) {
+			printk(KERN_INFO "brcmnandCET: %s kmalloc failed\n", __FUNCTION__);
+			return -1;
+		}
+                
+		memset(cet->memtbl[blk].bitvec, 0xff, this->blockSize); /* fix: was sizeof(this->blockSize), which cleared only 4 bytes */
+		/* Read an entire block */
+		for (i = 0; i < mtd->erasesize/mtd->writesize; i++) {
+			if (gdebug) {
+				printk(KERN_INFO "DEBUG -> brcmnandCET: Reading page %d\n", i);
+			}
+			ret = mtd_read(mtd, addr, this->pageSize, &retlen, (uint8_t *) (cet->memtbl[blk].bitvec+pg_idx));
+			if (ret < 0 || (retlen != this->pageSize)) {
+				kfree(cet->memtbl[blk].bitvec); cet->memtbl[blk].bitvec = NULL; /* fix: clear pointer so a later call cannot use freed memory */
+				return -1;
+			}
+			pg_idx += mtd->writesize;
+			addr += this->pageSize;
+		}
+	} 
+	page = (uint32_t) ((origaddr & (~(mtd->erasesize-1))) >> this->page_shift); /* first page of the erased flash block */
+	pos = page % (this->blockSize<<3); /* bit position within this CET block's bitvec */
+	byte = pos / (1<<3);
+	ptr = (uint32_t *) ((char *)cet->memtbl[blk].bitvec+byte);
+	/* numpages/8bits per byte/4byte per uint32 */
+	for (i = 0; i < ((mtd->erasesize/mtd->writesize)>>3)>>2; i++) {
+		/* Count the number of 0s for in the bitvec */
+		numzeros += bitcount(~ptr[i]);
+	}
+	if (likely(numzeros == 0)) {
+		if (gdebug) {
+			printk(KERN_INFO "DEBUG -> brcmnandCET: returning 0 numzeros = 0\n");
+		}
+		return 0;
+	} 
+	if (cet->cerr_count < numzeros) {
+		if (gdebug) {
+			printk(KERN_ERR "brcmnandCET: Erroneous correctable error count");
+		}
+		return -1;
+	}
+	cet->cerr_count -= numzeros; /* erased pages no longer count as correctable errors */
+	/* Make bits corresponding to this block all 1s */
+	memset(cet->memtbl[blk].bitvec+byte, 0xff, (mtd->erasesize/mtd->writesize)>>3);
+	cet->memtbl[blk].isdirty = 1;
+
+	return 0;
+}
+
+/*
+ * brcmnand_cet_update: Called every time a single correctable error is
+ *                      encountered.
+ * @param mtd		MTD device structure
+ * @param from		Page address at which correctable error occurred
+ * @param status	Return status 
+ *			1 => This page had a correctable error in past,
+ *			therefore, return correctable error to filesystem
+ *			0 => First occurrence of a correctable error for
+ *			this page. return a success to the filesystem
+ *  
+ * Check the in memory CET bitvector to see if this page (loff_t from)
+ * had a correctable error in past, if not set this page's bit to '0'
+ * in the bitvector.
+ *
+ */
+int brcmnand_cet_update(struct mtd_info *mtd, loff_t from, int *status) 
+{
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct brcmnand_cet_descr *cet = this->cet;
+	int gdebug = 0, ret, blk, byte, bit, retlen = 0, blkbegin, i;
+	uint32_t page = 0;
+	unsigned int pg_idx = 0, pos = 0;
+	unsigned char c, mask;
+
+	if (gdebug) {
+		printk(KERN_INFO "DEBUG -> brcmnandCET: Inside %s\n", __FUNCTION__);
+	}
+	if (cet->flags == BRCMNAND_CET_LAZY) {
+		/* Force creation of the CET and the mem table */
+		ret = search_cet_blks(mtd, cet, 1);
+		if (ret < 0) {
+			cet->flags = BRCMNAND_CET_DISABLED;
+			return ret;
+		}
+		cet->flags = BRCMNAND_CET_LOADED;
+	}
+	/* Find out which entry in memtbl does the from address map to */
+	page = (uint32_t) (from >> this->page_shift);
+	/* each bit is one page << 3 for 8 bits per byte */
+	blk = page/(this->blockSize<<3);
+	if (unlikely(cet->memtbl[blk].blk == -1)) {
+		printk(KERN_INFO "brcmnandCET: %s invalid block# in CET\n", __FUNCTION__);
+		return -1;
+	}
+	blkbegin = cet->memtbl[blk].blk;
+	/* Start page of the block */
+	from = ((loff_t) blkbegin) << this->bbt_erase_shift;
+	/* If bitvec == NULL, load the block from flash */
+	if (cet->memtbl[blk].bitvec == NULL) {
+		if (gdebug) {
+			printk(KERN_INFO "DEBUG -> brcmnandCET: bitvec null .... loading ...\n");
+		}
+                /* using kmalloc to be consistent with the erasecallback function. see erasecallback for details */
+		cet->memtbl[blk].bitvec = (char *) kmalloc(this->blockSize, GFP_ATOMIC);
+		if (cet->memtbl[blk].bitvec == NULL) {
+			printk(KERN_ERR "brcmnandCET: %s kmalloc failed\n", __FUNCTION__);
+			return -1;
+		}
+		memset(cet->memtbl[blk].bitvec, 0xff, this->blockSize);
+		/* Read an entire block */
+		if (gdebug) {
+			printk(KERN_INFO "DEBUG -> brcmnandCET: Reading pages starting @ %x\n", (unsigned int) from);
+		}
+		for (i = 0; i < mtd->erasesize/mtd->writesize; i++) {
+			ret = mtd_read(mtd, from, this->pageSize, &retlen, (uint8_t *) (cet->memtbl[blk].bitvec+pg_idx));
+			if (ret < 0 || (retlen != this->pageSize)) {
+				kfree(cet->memtbl[blk].bitvec); cet->memtbl[blk].bitvec = NULL; /* fix: clear pointer so a later call cannot use freed memory */
+				return -1;
+			}
+			pg_idx += mtd->writesize;
+			from += this->pageSize;
+		}
+	}
+	pos = page % (this->blockSize<<3); /* bit position of this page within the CET block */
+	byte = pos / (1<<3);
+	bit = pos % (1<<3);
+	c = cet->memtbl[blk].bitvec[byte];
+	mask = 1<<bit;
+	if ((c & mask) == mask) { /* First time error mark it but return a good status */
+		*status = 0;
+		c = (c & ~mask);
+		cet->memtbl[blk].bitvec[byte] = c;
+		cet->memtbl[blk].isdirty = 1; /* flushed later by sync_cet/flush_memcet */
+	} else {		
+		*status = 1; /* This page had a previous error so return a bad status */
+	}
+	cet->cerr_count++; /* global correctable-error counter, persisted on flush */
+#if 0
+	printk(KERN_INFO "DEBUG -> count = %d, byte = %d, bit = %d, blk = %x status = %d c = %d addr = %x\n", cet->cerr_count\
+			, byte, bit, blk, *status, cet->memtbl[blk].bitvec[byte], cet->memtbl[blk].bitvec+byte);
+	printk(KERN_INFO "DEBUG -> CET: Exiting %s\n", __FUNCTION__);
+#endif
+
+	return 0;
+}
+
+EXPORT_SYMBOL(brcmnand_cet_update);
+
+/*
+ * brcmnand_cet_prepare_reboot Call flush_memcet to flush any in-mem dirty data
+ * 
+ * @param mtd		MTD device structure
+ *
+ * Flush any pending in-mem CET blocks to flash before reboot 
+ */
+int brcmnand_cet_prepare_reboot(struct mtd_info *mtd) 
+{
+	int gdebug = 0;
+	struct brcmnand_chip *this = (struct brcmnand_chip *) mtd->priv;
+	struct brcmnand_cet_descr *cet = this->cet;
+
+#if 0
+	// Disable for MLC
+	if (NAND_IS_MLC(this)) {
+		return 0;
+	}
+#endif
+	if (unlikely(gdebug)) {
+		printk(KERN_INFO "DEBUG -> brcmnandCET: flushing pending CET\n");
+	}
+	if (unlikely(cet->flags == BRCMNAND_CET_DISABLED)) { /* nothing to flush when CET is off */
+		return 0;
+	}
+	flush_memcet(mtd); /* best-effort: return value deliberately ignored, always report success */
+
+	return 0;
+}
+
+
+#endif
+
+#endif
diff --git a/drivers/mtd/brcmnand/brcmnand_priv.h b/drivers/mtd/brcmnand/brcmnand_priv.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c7a7d4d9a02665b3e1b2be67d1213c87384d2dd
--- /dev/null
+++ b/drivers/mtd/brcmnand/brcmnand_priv.h
@@ -0,0 +1,514 @@
+/*
+ * drivers/mtd/brcmnand/brcmnand_priv.h
+ *
+ *  Copyright (c) 2005-2009 Broadcom Corp.
+ *  
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Data structures for Broadcom NAND controller
+ * 
+ * when		who		what
+ * 20060729	tht		Original coding
+ */
+
+
+#ifndef _BRCMNAND_PRIV_H_
+#define _BRCMNAND_PRIV_H_
+
+#include <linux/vmalloc.h>
+#include <linux/mtd/brcmnand.h>
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+#include <linux/irq.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+//#include "edu.h"
+#endif
+
+#define BRCMNAND_CORRECTABLE_ECC_ERROR		(1)
+#define BRCMNAND_SUCCESS						(0)
+#define BRCMNAND_UNCORRECTABLE_ECC_ERROR	(-1)
+#define BRCMNAND_FLASH_STATUS_ERROR			(-2)
+#define BRCMNAND_TIMED_OUT					(-3)
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+#define BRCMEDU_CORRECTABLE_ECC_ERROR        	(4)
+#define BRCMEDU_UNCORRECTABLE_ECC_ERROR      (-4)
+
+#define  BRCMEDU_MEM_BUS_ERROR				(-5)
+
+
+#define BRCMNAND_malloc(size) kmalloc(size, GFP_DMA)
+#define BRCMNAND_free(addr) kfree(addr)
+
+#else
+#define BRCMNAND_malloc(size) vmalloc(size)
+#define BRCMNAND_free(addr) vfree(addr)
+#endif
+
+#if 0 /* TO */
+typedef u8 uint8;
+typedef u16 uint16;
+typedef u32 uint32;
+#endif
+
+#define BRCMNAND_FCACHE_SIZE		512
+#define ECCSIZE(chip)					BRCMNAND_FCACHE_SIZE	/* Always 512B for Brcm NAND controller */
+
+#define MTD_OOB_NOT_WRITEABLE	0x8000
+#define MTD_CAP_MLC_NANDFLASH	(MTD_WRITEABLE | MTD_OOB_NOT_WRITEABLE)
+#define MTD_IS_MLC(mtd) ((((mtd)->flags & MTD_CAP_MLC_NANDFLASH) == MTD_CAP_MLC_NANDFLASH) &&\
+			(((mtd)->flags & MTD_OOB_NOT_WRITEABLE) == MTD_OOB_NOT_WRITEABLE))
+
+
+/* 
+ * NUM_NAND_CS here is strictly based on the number of CS in the NAND registers
+ * It does not have the same value as NUM_CS in brcmstb/setup.c
+ * It is not the same as NAND_MAX_CS, the later being the bit fields found in NAND_CS_NAND_SELECT.
+ */
+
+/*
+ * # number of CS supported by EBI
+ */
+#ifdef BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_MASK
+/* Version < 3 */
+#define NAND_MAX_CS    8
+
+#elif defined(BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK)
+/* 7420Cx */
+#define NAND_MAX_CS    4
+#else
+/* 3548 */
+#define NAND_MAX_CS 2
+#endif
+
+/* 
+ * Number of CS seen by NAND
+ */
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+#define NUM_NAND_CS			4
+
+#else
+#define NUM_NAND_CS			2
+#endif
+
+#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
+
+//#define BCM_BASE_ADDRESS				0xb0000000
+
+/* CP0 hazard avoidance. */
+#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
+				     "nop; nop; nop; nop; nop; nop;\n\t" \
+				     ".set reorder\n\t")
+
+/* 
+ * Right now we submit a full page Read for queueing, so with a 8KB page,
+ * and an ECC step of 512B, the queue depth is 16. Add 2 for dummy elements
+ * during EDU WAR
+ */
+#if CONFIG_MTD_BRCMNAND_VERSION <=  CONFIG_MTD_BRCMNAND_VERS_3_3
+#define MAX_NAND_PAGE_SIZE	(4<<10)
+
+#else
+#define MAX_NAND_PAGE_SIZE	(8<<10)
+#endif
+
+/* Max queue size is (PageSize/512B_ECCSize)+2 spare for WAR */
+#define MAX_JOB_QUEUE_SIZE	((MAX_NAND_PAGE_SIZE>>9))
+
+typedef enum {
+	ISR_OP_QUEUED = 0, 
+	ISR_OP_SUBMITTED = 1, 
+	ISR_OP_NEED_WAR = 2,
+	ISR_OP_COMPLETED = 3, 
+	ISR_OP_TIMEDOUT = 4,
+	ISR_OP_COMP_WITH_ERROR = 5,
+} isrOpStatus_t;
+
+typedef struct eduIsrNode {
+	struct list_head list;
+	spinlock_t lock; // per Node update lock
+	// int cmd;	// 1 == Read, 0 == Write
+
+	// ISR stuffs
+	uint32_t mask;	/* Clear status mask */
+	uint32_t expect;	/* Status on success */
+	uint32_t error;	/* Status on error */
+	uint32_t intr;		/* Interrupt bits */
+	uint32_t status; 	/* Status read during ISR.  There may be several interrupts before completion */
+	isrOpStatus_t opComplete;	/* Completion status */
+
+	/* Controller Level params (for queueing)  */
+	struct mtd_info* mtd;
+	void* 	buffer;
+	u_char* 	oobarea;
+	loff_t 	offset;
+	int		ret;
+	int		needBBT;
+
+	/* EDU level params (for ISR) */
+	uint32_t edu_ldw;
+	uint32_t physAddr;
+	uint32_t hif_intr2;
+	uint32_t edu_status;
+
+	int refCount;		/* Marked for re-use when refCount=0 */
+	unsigned long expired; /* Time stamp for expiration, 3 secs from submission */
+} eduIsrNode_t;
+
+/*
+ * Read/Write Job Q.
+ * Process one page at a time, and queue 512B sector Read or Write EDU jobs.
+ * ISR will wake up the process context thread iff
+ * 1-EDU reports an error, in which case the process context thread need to be awaken
+ *  		in order to do WAR
+ * 2-Q is empty, in which case the page read/write op is complete.
+ */
+typedef struct jobQ_t {
+	struct list_head 	jobQ;		/* Nodes queued for EDU jobs */
+	struct list_head 	availList;	/* Free Nodes */
+	spinlock_t		lock; 		/* Queues guarding spin lock */
+	int 				needWakeUp;	/* Wake up Process context thread to do EDU WAR */
+	int 				cmd; 		/* 1 == Read, 0 == Write */
+	int				corrected;	/* Number of correctable errors */
+} isrJobQ_t;
+
+extern isrJobQ_t gJobQ; 
+
+void ISR_init(void);
+
+/*
+ * Submit the first entry that is in queued state,
+ * assuming queue lock has been held by caller.
+ * 
+ * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
+ * Return the number of job submitted for read.
+ *
+ * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
+ * we can't really do double-buffering without losing the returned status of the previous read-op.
+ */
+#undef EDU_DOUBLE_BUFFER_READ
+
+int brcmnand_isr_submit_job(void);
+
+eduIsrNode_t*  ISR_queue_read_request(struct mtd_info *mtd,
+        void* buffer, u_char* oobarea, loff_t offset);
+eduIsrNode_t* ISR_queue_write_request(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset);
+eduIsrNode_t*  ISR_push_request(struct mtd_info *mtd,
+        void* buffer, u_char* oobarea, loff_t offset);
+
+
+int brcmnand_edu_read_completion(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
+
+int brcmnand_edu_read_comp_intr(struct mtd_info* mtd, 
+        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
+
+#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
+int brcmnand_edu_write_completion(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, 
+        int needBBT);
+int
+brcmnand_edu_write_war(struct mtd_info *mtd,
+        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, 
+        int needBBT);
+#endif
+eduIsrNode_t* ISR_find_request( isrOpStatus_t opStatus);
+
+uint32_t ISR_wait_for_completion(void);
+
+/*
+ *  wait for completion with read/write Queue
+ */
+int ISR_wait_for_queue_completion(void);
+
+int ISR_cache_is_valid(void);
+
+static __inline__ uint32_t ISR_volatileRead(uint32_t addr) /* register read through the BDEV accessor */
+{
+        
+        
+        return (uint32_t) BDEV_RD(addr);
+}
+
+static __inline__ void ISR_volatileWrite(uint32_t addr, uint32_t data) /* register write through the BDEV accessor */
+{
+        BDEV_WR(addr, data);
+}
+
+static __inline__ void ISR_enable_irq(eduIsrNode_t* req) /* ack pending status, then unmask this request's HIF_INTR2 bits */
+{
+	//uint32_t intrMask; 
+	//unsigned long flags;
+
+	//spin_lock_irqsave(&gEduIsrData.lock, flags);
+	
+	// Clear status bits
+	ISR_volatileWrite(BCHP_HIF_INTR2_CPU_CLEAR, req->mask);
+
+	// Enable interrupt
+	ISR_volatileWrite(BCHP_HIF_INTR2_CPU_MASK_CLEAR, req->intr);
+
+	//spin_unlock_irqrestore(&gEduIsrData.lock, flags);
+}
+
+static __inline__ void ISR_disable_irq(uint32_t mask) /* mask the given HIF_INTR2 bits */
+{
+
+	/* Disable L2 interrupts */
+	ISR_volatileWrite(BCHP_HIF_INTR2_CPU_MASK_SET, mask);
+
+}
+
+/*
+ * For debugging
+ */
+
+#ifdef DEBUG_ISR
+
+static void __inline__
+ISR_print_queue(void)
+{
+	eduIsrNode_t* req;
+	//struct list_head* node;
+	int i = 0;
+
+	list_for_each_entry(req, &gJobQ.jobQ, list) {
+		
+		printk("i=%d, cmd=%d, offset=%08llx, flashAddr=%08x, opComp=%d, status=%08x\n",
+			i, gJobQ.cmd, req->offset, req->edu_ldw,req->opComplete, req->status);
+		i++;
+	}	
+}
+
+static void __inline__
+ISR_print_avail_list(void)
+{
+	eduIsrNode_t* req;
+	//struct list_head* node;
+	int i = 0;
+
+	printk("AvailList=%p, next=%p\n", &gJobQ.availList, gJobQ.availList.next);
+	list_for_each_entry(req, &gJobQ.availList, list) {
+		printk("i=%d, req=%p, list=%p\n", i, req, &req->list);
+		i++;
+	}	
+}
+#else
+#define ISR_print_queue()	/* fixed typo: was IS_print_queue, which left ISR_print_queue() undefined (compile break) when DEBUG_ISR is off */
+#define ISR_print_avail_list()
+#endif // DEBUG_ISR
+
+
+#endif // CONFIG_MTD_BRCMNAND_USE_ISR
+
+static inline u_int64_t device_size(struct mtd_info *mtd) 
+{
+	//return mtd->size == 0 ? (u_int64_t) mtd->num_eraseblocks * mtd->erasesize : (u_int64_t) mtd->size;
+	return mtd->size;
+}
+
+/**
+ * brcmnand_scan - [BrcmNAND Interface] Scan for the BrcmNAND device
+ * @param mtd		MTD device structure
+ * @param cs		Chip Select number
+ * @param maxchips	Maximum number of chips (from CFE or from nandcs= kernel arg)
+ *
+ *
+ * This fills out all the not-initialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ *
+ */
+extern int brcmnand_scan(struct mtd_info *mtd , int cs, int maxchips);
+
+/**
+ * brcmnand_release - [BrcmNAND Interface] Free resources held by the BrcmNAND device
+ * @param mtd		MTD device structure
+ */
+extern void brcmnand_release(struct mtd_info *mtd);
+
+/* BrcmNAND BBT interface */
+
+/* Auto-format scan layout for BCH-8 with 16B OOB */
+#define BRCMNAND_BBT_AUTO_PLACE	0x80000000
+
+extern uint8_t* brcmnand_transfer_oob(struct brcmnand_chip *chip, uint8_t *oob,
+				  struct mtd_oob_ops *ops, int len);
+extern uint8_t* brcmnand_fill_oob(struct brcmnand_chip *chip, uint8_t *oob, struct mtd_oob_ops *ops);
+
+/* Read the OOB bytes and tell whether a block is bad without consulting the BBT */
+extern int brcmnand_isbad_raw (struct mtd_info *mtd, loff_t offs);
+
+extern int brcmnand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
+extern int brcmnand_default_bbt(struct mtd_info *mtd);
+
+extern int brcmnand_update_bbt (struct mtd_info *mtd, loff_t offs);
+
+//extern void* get_brcmnand_handle(void);
+
+extern void print_oobbuf(const unsigned char* buf, int len);
+extern void print_databuf(const unsigned char* buf, int len);
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+extern int brcmnand_cet_update(struct mtd_info *mtd, loff_t from, int *status);
+extern int brcmnand_cet_prepare_reboot(struct mtd_info *mtd);
+extern int brcmnand_cet_erasecallback(struct mtd_info *mtd, u_int32_t addr);
+extern int brcmnand_create_cet(struct mtd_info *mtd);
+#endif
+
+/*
+ * Disable ECC, and return the original ACC register (for restore)
+ */
+uint32_t brcmnand_disable_read_ecc(int cs);
+
+void brcmnand_restore_ecc(int cs, uint32_t orig_acc0);
+
+void brcmnand_post_mortem_dump(struct mtd_info* mtd, loff_t offset);
+
+static unsigned int __maybe_unused brcmnand_get_bbt_size(struct mtd_info* mtd)
+{
+	struct brcmnand_chip * chip = mtd->priv;
+	
+	// return ((device_size(mtd) > (512 << 20)) ? 4<<20 : 1<<20);
+	return chip->bbtSize;
+}
+
+	
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_3
+static  inline uint32_t  bchp_nand_acc_control(int cs)
+{
+	switch (cs) {
+	case 0: return BCHP_NAND_ACC_CONTROL;
+	case 1: return BCHP_NAND_ACC_CONTROL_CS1;
+#ifdef BCHP_NAND_ACC_CONTROL_CS2
+	case 2: return BCHP_NAND_ACC_CONTROL_CS2;
+#endif
+#ifdef BCHP_NAND_ACC_CONTROL_CS3
+	case 3: return BCHP_NAND_ACC_CONTROL_CS3;
+#endif
+	}
+	return 0;
+}
+
+static  inline uint32_t bchp_nand_config(int cs)
+{
+	switch (cs) {
+	case 0: return BCHP_NAND_CONFIG;
+	case 1: return BCHP_NAND_CONFIG_CS1;
+#ifdef BCHP_NAND_CONFIG_CS2
+	case 2: return BCHP_NAND_CONFIG_CS2;
+#endif
+#ifdef BCHP_NAND_CONFIG_CS3
+	case 3: return BCHP_NAND_CONFIG_CS3;
+#endif
+	}
+	return 0;
+}
+
+static  inline uint32_t bchp_nand_timing1(int cs)
+{
+	switch (cs) {
+	case 0: return BCHP_NAND_TIMING_1;
+	case 1: return BCHP_NAND_TIMING_1_CS1;
+#ifdef BCHP_NAND_TIMING_1_CS2
+	case 2: return BCHP_NAND_TIMING_1_CS2;
+#endif
+#ifdef BCHP_NAND_TIMING_1_CS3
+	case 3: return BCHP_NAND_TIMING_1_CS3;
+#endif
+	}
+	return 0;
+}
+static  inline uint32_t bchp_nand_timing2(int cs)
+{
+	switch (cs) {
+	case 0: return BCHP_NAND_TIMING_2;
+	case 1: return BCHP_NAND_TIMING_2_CS1;
+#ifdef BCHP_NAND_TIMING_2_CS2
+	case 2: return BCHP_NAND_TIMING_2_CS2;
+#endif
+#ifdef BCHP_NAND_TIMING_2_CS3
+	case 3: return BCHP_NAND_TIMING_2_CS3;
+#endif
+	}
+	return 0;
+}
+
+#else
+#define bchp_nand_acc_control(cs) BCHP_NAND_ACC_CONTROL
+#define bchp_nand_config(cs) BCHP_NAND_CONFIG
+#define bchp_nand_timing1(cs) BCHP_NAND_TIMING_1
+#define bchp_nand_timing2(cs) BCHP_NAND_TIMING_2
+#endif
+	
+/***********************************************************************
+ * Register access macros - sample usage:
+ *
+ * DEV_RD(0xb0404000)                       -> reads 0xb0404000
+ * BDEV_RD(0x404000)                        -> reads 0xb0404000
+ * BDEV_RD(BCHP_SUN_TOP_CTRL_PROD_REVISION) -> reads 0xb0404000
+ *
+ * _RB means read back after writing.
+ ***********************************************************************/
+#ifdef CONFIG_ARM
+#define BPHYSADDR(x)	(x)
+#define BVIRTADDR(x)	(x)
+#else
+#define BPHYSADDR(x)	((x) | 0x10000000)
+#define BVIRTADDR(x)	KSEG1ADDR(BPHYSADDR(x))
+#endif
+
+#define DEV_RD(x) (*((volatile unsigned long *)(x)))
+#define DEV_WR(x, y) do { *((volatile unsigned long *)(x)) = (y); } while (0)
+#define DEV_UNSET(x, y) do { DEV_WR((x), DEV_RD(x) & ~(y)); } while (0)
+#define DEV_SET(x, y) do { DEV_WR((x), DEV_RD(x) | (y)); } while (0)
+
+#define DEV_WR_RB(x, y) do { DEV_WR((x), (y)); DEV_RD(x); } while (0)
+#define DEV_SET_RB(x, y) do { DEV_SET((x), (y)); DEV_RD(x); } while (0)
+#define DEV_UNSET_RB(x, y) do { DEV_UNSET((x), (y)); DEV_RD(x); } while (0)
+
+#define BDEV_RD(x) (DEV_RD(BVIRTADDR(x)))
+#define BDEV_WR(x, y) do { DEV_WR(BVIRTADDR(x), (y)); } while (0)
+#define BDEV_UNSET(x, y) do { BDEV_WR((x), BDEV_RD(x) & ~(y)); } while (0)
+#define BDEV_SET(x, y) do { BDEV_WR((x), BDEV_RD(x) | (y)); } while (0)
+
+#define BDEV_SET_RB(x, y) do { BDEV_SET((x), (y)); BDEV_RD(x); } while (0)
+#define BDEV_UNSET_RB(x, y) do { BDEV_UNSET((x), (y)); BDEV_RD(x); } while (0)
+#define BDEV_WR_RB(x, y) do { BDEV_WR((x), (y)); BDEV_RD(x); } while (0)
+
+#define BDEV_RD_F(reg, field) \
+	((BDEV_RD(BCHP_##reg) & BCHP_##reg##_##field##_MASK) >> \
+	 BCHP_##reg##_##field##_SHIFT)
+#define BDEV_WR_F(reg, field, val) do { \
+	BDEV_WR(BCHP_##reg, \
+	(BDEV_RD(BCHP_##reg) & ~BCHP_##reg##_##field##_MASK) | \
+	(((val) << BCHP_##reg##_##field##_SHIFT) & \
+	 BCHP_##reg##_##field##_MASK)); \
+	} while (0)
+#define BDEV_WR_F_RB(reg, field, val) do { \
+	BDEV_WR(BCHP_##reg, \
+	(BDEV_RD(BCHP_##reg) & ~BCHP_##reg##_##field##_MASK) | \
+	(((val) << BCHP_##reg##_##field##_SHIFT) & \
+	 BCHP_##reg##_##field##_MASK)); \
+	BDEV_RD(BCHP_##reg); \
+	} while (0)
+
+
+#endif
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cfd671acac48ad7885a6e73ebe2f0ecdbff..ed3e837f938b9b64ca1d30cf4cf2897ed14da9d5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -74,6 +74,12 @@ config MTD_PHYSMAP_OF
 	  physically into the CPU's memory. The mapping description here is
 	  taken from OF device tree.
 
+config MTD_BCM963XX
+	tristate "Broadcom 963xx ADSL board flash memory support"
+	depends on BCM_KF_MTD_BCM963XX
+	help
+	  Broadcom 963xx ADSL board flash memory
+
 config MTD_PMC_MSP_EVM
 	tristate "CFI Flash device mapped on PMC-Sierra MSP"
 	depends on PMC_MSP && MTD_CFI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 68a9a91d344fbc65974e5978fb6ee4d6a7ee113e..61e82c3360a8acfaa8428a9590b1c20002c7ab32 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -57,3 +57,9 @@ obj-$(CONFIG_MTD_VMU)		+= vmu-flash.o
 obj-$(CONFIG_MTD_GPIO_ADDR)	+= gpio-addr-flash.o
 obj-$(CONFIG_MTD_LATCH_ADDR)	+= latch-addr-flash.o
 obj-$(CONFIG_MTD_LANTIQ)	+= lantiq-flash.o
+
+ifdef BCM_KF #  defined(CONFIG_BCM_KF_MTD_BCM963XX)
+obj-$(CONFIG_MTD_BCM963XX)	+= bcm963xx.o
+obj-$(CONFIG_MTD_BCM_SPI_NAND)	+= bcm963xx_mtd.o
+EXTRA_CFLAGS	+= -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD) -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD)
+endif # BCM_KF
diff --git a/drivers/mtd/maps/bcm963xx.c b/drivers/mtd/maps/bcm963xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..c035175bf27866a12d938b8d06ddf322f58de099
--- /dev/null
+++ b/drivers/mtd/maps/bcm963xx.c
@@ -0,0 +1,240 @@
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard 
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ * Flash mapping code for BCM963xx board SPI NOR flash memory
+ *
+ * Song Wang (songw@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#include <board.h>
+#include <bcmTag.h>
+#include <bcm_map_part.h>
+#include <flash_api.h>
+
+extern PFILE_TAG kerSysImageTagGet(void);
+extern bool kerSysIsRootfsSet(void);
+
+static void bcm63xx_noop(struct mtd_info *mtd);
+static int bcm63xx_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
+static int bcm63xx_erase(struct mtd_info *mtd, struct erase_info *instr);
+static int bcm63xx_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
+
+static struct mtd_info *mtdRootFS;
+
+#ifdef CONFIG_AUXFS_JFFS2
+static struct mtd_info *mtdAuxFS;
+#endif
+
+static void bcm63xx_noop(struct mtd_info *mtd)
+{
+}
+
+static int bcm63xx_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+	unsigned long flash_base;
+
+	flash_base = (unsigned long)mtd->priv;
+	*retlen = kerSysReadFromFlash(buf, flash_base + from, len); 
+	
+	return 0;
+}
+
+static int bcm63xx_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	unsigned long flash_base;
+
+	if (instr->addr + instr->len > mtd->size) {
+		printk("ERROR: bcm63xx_erase( mtd[%s]) invalid region\n", mtd->name);
+		return (-EINVAL);
+	}
+
+	flash_base = (unsigned long)mtd->priv;
+
+	if (kerSysEraseFlash( flash_base + instr->addr, instr->len))
+		return (-EINVAL);
+	
+	instr->state = MTD_ERASE_DONE;
+	mtd_erase_callback(instr);
+
+	return 0;
+}
+
+static int bcm63xx_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+	unsigned long flash_base;
+	int bytesRemaining;
+
+	flash_base = (unsigned long)mtd->priv;
+
+	bytesRemaining = kerSysWriteToFlash(flash_base + to, (char*)buf, len);
+	*retlen = (len - bytesRemaining);
+
+	return 0;
+}
+
+#ifdef CONFIG_AUXFS_JFFS2
+static int __init create_aux_partition(void)
+{
+	FLASH_PARTITION_INFO fPartAuxFS;
+
+	/* Read the flash memory partition map. */
+	kerSysFlashPartInfoGet(&fPartAuxFS);
+
+	if (fPartAuxFS.mem_length != 0) {
+
+		if ((mtdAuxFS = kzalloc(sizeof(*mtdAuxFS), GFP_KERNEL)) == NULL)
+			return -ENOMEM;
+
+		/* Read/Write data (Aux) partition */
+		mtdAuxFS->name = "data";
+		mtdAuxFS->index = -1;
+		mtdAuxFS->type = MTD_NORFLASH;
+		mtdAuxFS->flags = MTD_CAP_NORFLASH;
+
+		mtdAuxFS->erasesize = fPartAuxFS.sect_size;
+		mtdAuxFS->writesize = 1;
+		mtdAuxFS->numeraseregions = 0;
+		mtdAuxFS->eraseregions	= NULL;
+		mtdAuxFS->size = fPartAuxFS.mem_length;
+
+		mtdAuxFS->_read = bcm63xx_read;
+		mtdAuxFS->_erase = bcm63xx_erase;
+		mtdAuxFS->_write = bcm63xx_write; 
+		mtdAuxFS->_sync = bcm63xx_noop;
+		mtdAuxFS->owner = THIS_MODULE;
+		mtdAuxFS->priv = (void*)fPartAuxFS.mem_base;
+
+		if (mtd_device_register(mtdAuxFS, NULL, 0 )) {
+			printk("Failed to register device mtd:%s\n", mtdAuxFS->name);
+			return -EIO;
+		}	
+	
+		printk("Registered device mtd:%s dev%d Address=0x%08x Size=%llu\n",
+			mtdAuxFS->name, mtdAuxFS->index, (int)mtdAuxFS->priv, mtdAuxFS->size);
+	}
+
+	return 0;
+}
+#endif
+
+static int __init init_brcm_physmap(void)
+{
+	unsigned int rootfs_addr, kernel_addr;
+	PFILE_TAG pTag = (PFILE_TAG)NULL;
+	int flash_type = flash_get_flash_type();
+
+	if ((flash_type == FLASH_IFC_NAND) || (flash_type == FLASH_IFC_SPINAND))
+		return -EIO;
+
+	printk("bcm963xx_mtd driver\n");
+
+	if (!(pTag = kerSysImageTagGet())) {
+		printk("Failed to read image tag from flash\n");
+		return -EIO;
+	}
+
+	rootfs_addr = (unsigned int)simple_strtoul(pTag->rootfsAddress, NULL, 10) + BOOT_OFFSET + IMAGE_OFFSET;
+	kernel_addr = (unsigned int)simple_strtoul(pTag->kernelAddress, NULL, 10) + BOOT_OFFSET + IMAGE_OFFSET;
+
+	if ((mtdRootFS = kzalloc(sizeof(*mtdRootFS), GFP_KERNEL)) == NULL)
+		return -ENOMEM;
+
+	/* RootFS Read only partition */
+	mtdRootFS->name = "rootfs";
+	mtdRootFS->index = -1;
+	mtdRootFS->type = MTD_NORFLASH;
+	mtdRootFS->flags = MTD_CAP_ROM;
+
+	mtdRootFS->erasesize = 0x10000;
+	mtdRootFS->writesize = 0x10000;
+	mtdRootFS->numeraseregions = 0;
+	mtdRootFS->eraseregions	= NULL;
+
+	mtdRootFS->_read = bcm63xx_read;
+	mtdRootFS->_erase = bcm63xx_erase;
+	mtdRootFS->_write = bcm63xx_write;
+	mtdRootFS->_sync = bcm63xx_noop;
+	mtdRootFS->owner = THIS_MODULE;
+
+	/* mtd->size is u64, so the old "(size = diff) <= 0" test could never catch kernel_addr < rootfs_addr (wraps to a huge value) */
+	if (kernel_addr <= rootfs_addr) {
+		printk("Invalid RootFs size\n");
+		kfree(mtdRootFS);
+		return -EIO;
+	}
+	mtdRootFS->size = kernel_addr - rootfs_addr;
+	mtdRootFS->priv = (void*)rootfs_addr;
+
+	if (mtd_device_register(mtdRootFS, NULL, 0)) {
+		printk("Failed to register device mtd:%s\n", mtdRootFS->name);
+		kfree(mtdRootFS);	/* was leaked: module_exit never runs when init fails */
+		return -EIO;
+	}
+	printk("Registered device mtd:%s dev%d Address=0x%08x Size=%llu\n",
+		mtdRootFS->name, mtdRootFS->index, (int)mtdRootFS->priv, mtdRootFS->size);
+
+	if (kerSysIsRootfsSet() == false) {
+		kerSysSetBootParm("root=", "/dev/mtdblock0");
+		kerSysSetBootParm("rootfstype=", "squashfs");
+	}
+
+#ifdef CONFIG_AUXFS_JFFS2
+	create_aux_partition();
+#endif
+
+	return 0;
+}
+
+static void __exit cleanup_brcm_physmap(void)
+{
+	if (mtdRootFS->index >= 0)
+		mtd_device_unregister(mtdRootFS);
+
+	kfree(mtdRootFS);
+
+#ifdef CONFIG_AUXFS_JFFS2
+	/* mtdAuxFS stays NULL when no aux partition exists or its alloc failed; guard the deref */
+	if (mtdAuxFS && mtdAuxFS->index >= 0)
+		mtd_device_unregister(mtdAuxFS);
+	kfree(mtdAuxFS);	/* kfree(NULL) is a safe no-op */
+#endif
+}
+
+module_init(init_brcm_physmap);
+module_exit(cleanup_brcm_physmap);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Song Wang songw@broadcom.com");
+MODULE_DESCRIPTION("MTD Driver for Broadcom NOR Flash");
diff --git a/drivers/mtd/maps/bcm963xx_mtd.c b/drivers/mtd/maps/bcm963xx_mtd.c
new file mode 100644
index 0000000000000000000000000000000000000000..7113ca860cbd61f4b751ad2eaeee25db2530a1b4
--- /dev/null
+++ b/drivers/mtd/maps/bcm963xx_mtd.c
@@ -0,0 +1,285 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <../drivers/mtd/mtdcore.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/platform_device.h>
+#include <bcm_hwdefs.h>
+#include <board.h>
+#include <bcm_map_part.h>
+
+#include "flash_api.h"
+
+#define PRINTK(...)
+//#define PRINTK printk
+
+bool SpiNandRegistered = FALSE;
+
+extern void bcmspinand_probe(struct mtd_info * mtd);
+extern bool kerSysIsRootfsSet(void);
+
+
+static struct mtd_partition bcm63XX_nand_parts[] =
+{
+    {name: "rootfs",        offset: 0, size: 0},
+    {name: "rootfs_update", offset: 0, size: 0},
+    {name: "data",          offset: 0, size: 0},
+    {name: "nvram",         offset: 0, size: 0},
+    {name: "image",         offset: 0, size: 0},
+    {name: "image_update",  offset: 0, size: 0},
+    {name: "dummy1",        offset: 0, size: 0},
+    {name: "dummy2",        offset: 0, size: 0},
+    {name: "dummy3",        offset: 0, size: 0},
+    {name: "dummy4",        offset: 0, size: 0},
+    {name: "dummy5",        offset: 0, size: 0},
+    {name: "dummy6",        offset: 0, size: 0},
+    {name: NULL,            offset: 0, size: 0}
+};
+
+static char* misc_mtd_partition_names[BCM_MAX_EXTRA_PARTITIONS] =
+{
+	"misc1",
+	"misc2",
+	"misc3",
+	NULL,
+};
+
+static int __init 
+is_split_partition (struct mtd_info* mtd, unsigned long offset, unsigned long size, unsigned long *split_offset)
+{
+    uint8_t buf[0x100];
+    size_t retlen;
+    int split_found = 0;
+
+    /* Search the RootFS partition for a split marker.
+     * The marker is located in the last 0x100 bytes of the last BootFS erase block.
+     * If the marker is found, we have separate Boot and Root partitions.
+     */
+    for (*split_offset = offset + mtd->erasesize; *split_offset <= offset + size; *split_offset += mtd->erasesize)
+    {
+        if (mtd->_block_isbad(mtd, *split_offset - mtd->erasesize)) {
+            continue;
+        }
+        mtd->_read(mtd, *split_offset - 0x100, 0x100, &retlen, buf);
+
+        if (!strncmp (BCM_BCMFS_TAG, buf, strlen (BCM_BCMFS_TAG))) {
+            if (!strncmp (BCM_BCMFS_TYPE_UBIFS, &buf[strlen (BCM_BCMFS_TAG)], strlen (BCM_BCMFS_TYPE_UBIFS)))
+            {
+                printk("***** Found UBIFS Marker at 0x%08lx\n", *split_offset - 0x100);
+                split_found = 1;
+                break;
+            }
+        }
+    }
+
+    return split_found;
+}
+
+static int __init mtd_init(void)
+{
+    struct mtd_info * mtd;
+    struct nand_chip * nand;
+
+    /* If SPI NAND FLASH is present then register the device. Otherwise do nothing */
+    if (FLASH_IFC_SPINAND != flash_get_flash_type())
+        return -ENODEV;
+
+    if (((mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL)) == NULL) ||
+        ((nand = kmalloc(sizeof(struct nand_chip), GFP_KERNEL)) == NULL))
+    {
+        printk("Unable to allocate SPI NAND dev structure.\n");
+        return -ENOMEM;
+    }
+
+    memset(mtd, 0, sizeof(struct mtd_info));
+    memset(nand, 0, sizeof(struct nand_chip));
+
+    mtd->priv = nand;
+
+    bcmspinand_probe(mtd);
+
+    /* Scan to check existence of the nand device. NOTE(review): nand_scan() conventionally returns 0 on success, yet this branch runs on a nonzero return — confirm the inverted test is intentional for this SPI NAND probe path. */
+    if(nand_scan(mtd, 1))
+    {
+        static NVRAM_DATA nvram;
+        unsigned long rootfs_ofs;
+        int nr_parts;
+        int rootfs, rootfs_update;
+        unsigned long split_offset;
+        int i=0;
+        uint64_t extra=0, extra_single_part_size=0;
+
+        nand->init_size(mtd, nand, NULL); // override possibly incorrect values detected by Linux NAND driver
+
+        /* Root FS.  The CFE RAM boot loader saved the rootfs offset that the
+         * Linux image was loaded from.
+         */
+        kerSysBlParmsGetInt(NAND_RFS_OFS_NAME, (int *) &rootfs_ofs);
+
+        kerSysNvRamLoad(mtd);
+        kerSysNvRamGet((char *)&nvram, sizeof(nvram), 0);
+        nr_parts = 6;
+
+        if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_1] )
+        {
+            rootfs = NP_ROOTFS_1;
+            rootfs_update = NP_ROOTFS_2;
+        }
+        else if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_2] )
+        {
+            rootfs = NP_ROOTFS_2;
+            rootfs_update = NP_ROOTFS_1;
+        }
+        else
+        {
+            /* Backward compatibility with old cferam. */
+            extern unsigned char _text;
+            unsigned long rootfs_ofs = *(unsigned long *) (&_text - 4);
+
+            if( rootfs_ofs == nvram.ulNandPartOfsKb[NP_ROOTFS_1] )
+            {
+                rootfs = NP_ROOTFS_1;
+                rootfs_update = NP_ROOTFS_2;
+            }
+            else
+            {
+                rootfs = NP_ROOTFS_2;
+                rootfs_update = NP_ROOTFS_1;
+            }
+        }
+
+        /* RootFS partition */
+        bcm63XX_nand_parts[0].offset = nvram.ulNandPartOfsKb[rootfs]*1024;
+        bcm63XX_nand_parts[0].size = nvram.ulNandPartSizeKb[rootfs]*1024;
+        bcm63XX_nand_parts[0].ecclayout = nand->ecclayout;
+
+        /* This partition is used for flashing images */
+        bcm63XX_nand_parts[4].offset = bcm63XX_nand_parts[0].offset;
+        bcm63XX_nand_parts[4].size = bcm63XX_nand_parts[0].size;
+        bcm63XX_nand_parts[4].ecclayout = nand->ecclayout;
+
+        if (is_split_partition (mtd, bcm63XX_nand_parts[0].offset, bcm63XX_nand_parts[0].size, &split_offset))
+        {
+            /* RootFS partition */
+            bcm63XX_nand_parts[0].offset = split_offset;
+            bcm63XX_nand_parts[0].size -= (split_offset - nvram.ulNandPartOfsKb[rootfs]*1024);
+
+            /* BootFS partition */
+            bcm63XX_nand_parts[nr_parts].name = "bootfs";
+            bcm63XX_nand_parts[nr_parts].offset = nvram.ulNandPartOfsKb[rootfs]*1024;
+            bcm63XX_nand_parts[nr_parts].size = split_offset - nvram.ulNandPartOfsKb[rootfs]*1024;
+            bcm63XX_nand_parts[nr_parts].ecclayout = nand->ecclayout;
+
+            if (kerSysIsRootfsSet() == false) {
+                kerSysSetBootParm("ubi.mtd", "0");
+                kerSysSetBootParm("root=", "ubi:rootfs_ubifs");
+                kerSysSetBootParm("rootfstype=", "ubifs");
+            }
+        }
+        else {
+            if (kerSysIsRootfsSet() == false) {
+                kerSysSetBootParm("root=", "mtd:rootfs");
+                kerSysSetBootParm("rootfstype=", "jffs2");
+            }
+        }
+            nr_parts++;
+
+        /* RootFS_update partition */
+        bcm63XX_nand_parts[1].offset = nvram.ulNandPartOfsKb[rootfs_update]*1024;
+        bcm63XX_nand_parts[1].size = nvram.ulNandPartSizeKb[rootfs_update]*1024;
+        bcm63XX_nand_parts[1].ecclayout = nand->ecclayout;
+
+        /* This partition is used for flashing images */
+        bcm63XX_nand_parts[5].offset = bcm63XX_nand_parts[1].offset;
+        bcm63XX_nand_parts[5].size = bcm63XX_nand_parts[1].size;
+        bcm63XX_nand_parts[5].ecclayout = nand->ecclayout;
+
+        if (is_split_partition (mtd, bcm63XX_nand_parts[1].offset, bcm63XX_nand_parts[1].size, &split_offset))
+        {
+            /* rootfs_update partition */
+            bcm63XX_nand_parts[1].offset = split_offset;
+            bcm63XX_nand_parts[1].size -= (split_offset - nvram.ulNandPartOfsKb[rootfs_update]*1024);
+
+            /* bootfs_update partition */
+            bcm63XX_nand_parts[nr_parts].name = "bootfs_update";
+            bcm63XX_nand_parts[nr_parts].offset = nvram.ulNandPartOfsKb[rootfs_update]*1024;
+            bcm63XX_nand_parts[nr_parts].size = split_offset - nvram.ulNandPartOfsKb[rootfs_update]*1024;
+            bcm63XX_nand_parts[nr_parts].ecclayout = nand->ecclayout;
+
+        }
+        nr_parts++;
+
+        /* Data (psi, scratch pad) */
+        bcm63XX_nand_parts[2].offset = nvram.ulNandPartOfsKb[NP_DATA] * 1024;
+        bcm63XX_nand_parts[2].size = nvram.ulNandPartSizeKb[NP_DATA] * 1024;
+        bcm63XX_nand_parts[2].ecclayout = nand->ecclayout;
+
+        i=BCM_MAX_EXTRA_PARTITIONS-2; // skip DATA partition
+        while(i >= 0)
+        {
+            if(nvram.part_info[i].size != 0xffff)
+            {
+                // sz_bits (size bits 15:14): 0b00 = MB units, 0b01 = GB units, 0b10/0b11 = reserved (unit size 0)
+                switch((nvram.part_info[i].size & 0xc000)>>14)
+                {
+                    case 0:
+                        extra_single_part_size=1<<20;//1024*1024;
+                        break;
+                    case 1:
+                        extra_single_part_size=1<<30;//1024*1024*1024;
+                        break;
+                    default:
+                        extra_single_part_size=0;
+                        break;
+                }
+
+                extra_single_part_size=(nvram.part_info[i].size&0x3fff)*extra_single_part_size;
+                if((extra_single_part_size&(~((uint64_t)mtd->erasesize-1))) != extra_single_part_size)
+                    extra_single_part_size=(extra_single_part_size+mtd->erasesize);
+                extra_single_part_size =  (extra_single_part_size) & (~((uint64_t)mtd->erasesize-1));
+                if(extra_single_part_size >  mtd->erasesize)
+                {
+                    extra+=extra_single_part_size;
+                    bcm63XX_nand_parts[nr_parts].name = misc_mtd_partition_names[i];
+                    bcm63XX_nand_parts[nr_parts].offset = (nvram.ulNandPartOfsKb[NP_DATA] * 1024) - extra;
+                    bcm63XX_nand_parts[nr_parts].size = extra_single_part_size;
+                    bcm63XX_nand_parts[nr_parts].ecclayout = mtd->ecclayout;
+                    nr_parts++;
+                }
+            }
+            i--;
+        }
+
+        /* Boot and NVRAM data */
+        bcm63XX_nand_parts[3].offset = nvram.ulNandPartOfsKb[NP_BOOT] * 1024;
+        bcm63XX_nand_parts[3].size = nvram.ulNandPartSizeKb[NP_BOOT] * 1024;
+        bcm63XX_nand_parts[3].ecclayout = nand->ecclayout;
+
+        PRINTK("Part[0] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[0].name,
+            bcm63XX_nand_parts[0].size, bcm63XX_nand_parts[0].offset);
+        PRINTK("Part[1] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[1].name,
+            bcm63XX_nand_parts[1].size, bcm63XX_nand_parts[1].offset);
+        PRINTK("Part[2] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[2].name,
+            bcm63XX_nand_parts[2].size, bcm63XX_nand_parts[2].offset);
+        PRINTK("Part[3] name=%s, size=%llx, ofs=%llx\n", bcm63XX_nand_parts[3].name,
+            bcm63XX_nand_parts[3].size, bcm63XX_nand_parts[3].offset);
+
+        mtd_device_register(mtd, bcm63XX_nand_parts, nr_parts);
+
+        SpiNandRegistered = TRUE;
+    }
+
+    return 0;
+}
+
+module_init(mtd_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Regan");
+MODULE_DESCRIPTION("MTD map and partitions SPI NAND");
+
+#endif /* CONFIG_BCM_KF_MTD_BCMNAND */
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec5736b21a562da5e4fda11375e8cf457..7fcd8ce46cde556a98e91f7ea460f6dd4e6d2722 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -230,6 +230,9 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
 		case MTD_FILE_MODE_RAW:
 		{
 			struct mtd_oob_ops ops;
+#if defined(CONFIG_BCM_KF_MTD_IOCTL_FIX)
+			memset(&ops, 0x00, sizeof(ops));
+#endif
 
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
@@ -324,6 +327,9 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
 		case MTD_FILE_MODE_RAW:
 		{
 			struct mtd_oob_ops ops;
+#if defined(CONFIG_BCM_KF_MTD_IOCTL_FIX)
+			memset(&ops, 0x00, sizeof(ops));
+#endif
 
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
@@ -424,7 +430,11 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
 	ops.ooboffs = start & (mtd->writesize - 1);
 	ops.datbuf = NULL;
 	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+#if defined(CONFIG_BCM_KF_MTD_OOB_AUTO)
+	MTD_OPS_AUTO_OOB;
+#else
 		MTD_OPS_PLACE_OOB;
+#endif
 
 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 		return -EINVAL;
@@ -464,7 +474,11 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
 	ops.ooboffs = start & (mtd->writesize - 1);
 	ops.datbuf = NULL;
 	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
-		MTD_OPS_PLACE_OOB;
+#if defined(CONFIG_BCM_KF_MTD_OOB_AUTO)
+	MTD_OPS_AUTO_OOB;
+#else
+        MTD_OPS_PLACE_OOB;
+#endif
 
 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 		return -EINVAL;
@@ -756,6 +770,9 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 		struct mtd_oob_buf buf;
 		struct mtd_oob_buf __user *buf_user = argp;
 
+#if defined(CONFIG_BCM_KF_MTD_IOCTL_FIX)
+		memset(&buf, 0x00, sizeof(buf));
+#endif
 		/* NOTE: writes return length to buf_user->length */
 		if (copy_from_user(&buf, argp, sizeof(buf)))
 			ret = -EFAULT;
@@ -770,6 +787,9 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 		struct mtd_oob_buf buf;
 		struct mtd_oob_buf __user *buf_user = argp;
 
+#if defined(CONFIG_BCM_KF_MTD_IOCTL_FIX)
+		memset(&buf, 0x00, sizeof(buf));
+#endif
 		/* NOTE: writes return length to buf_user->start */
 		if (copy_from_user(&buf, argp, sizeof(buf)))
 			ret = -EFAULT;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cecad69d8fccc1467eaa210c6fe2915197dd..fe17f0bfdbcf6b334af26161647cfc9ce95ee6a7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,13 @@
+if BCM_KF_ANDROID
+config MTD_NAND_IDS
+	tristate "Include chip ids for known NAND devices."
+	depends on MTD
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  still like to take advantage of the known chip information.
+endif
+
 config MTD_NAND_ECC
 	tristate
 
@@ -115,8 +125,10 @@ config MTD_NAND_OMAP2
           Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
 	  platforms.
 
+if !BCM_KF_ANDROID
 config MTD_NAND_IDS
 	tristate
+endif
 
 config MTD_NAND_RICOH
 	tristate "Ricoh xD card reader"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d4b4d8739bd8e88584e4b8f425d7cc9234b3e304..4e4a3d0f624bbd61c1e4b016161ca5080dfc9a0e 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -52,4 +52,8 @@ obj-$(CONFIG_MTD_NAND_RICOH)		+= r852.o
 obj-$(CONFIG_MTD_NAND_JZ4740)		+= jz4740_nand.o
 obj-$(CONFIG_MTD_NAND_GPMI_NAND)	+= gpmi-nand/
 
+ifdef BCM_KF # defined (CONFIG_BCM_KF_MTD_BCM963XX)
+obj-$(CONFIG_MTD_BCM_SPI_NAND)		+= bcm63xx_spinand.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_MTD_BCM963XX)
+
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/bcm63xx_spinand.c b/drivers/mtd/nand/bcm63xx_spinand.c
new file mode 100644
index 0000000000000000000000000000000000000000..0cf87c63811181d595a804e1280e3fda7702a0a8
--- /dev/null
+++ b/drivers/mtd/nand/bcm63xx_spinand.c
@@ -0,0 +1,1708 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+ *
+ *  drivers/mtd/bcmspinand/bcm63xx-spinand.c
+ *
+    <:copyright-BRCM:2011:DUAL/GPL:standard
+    
+       Copyright (c) 2011 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+
+
+    File: bcm63xx-spinand.c
+
+    Description: 
+    This is a device driver for the Broadcom SPINAND flash for bcm63xxx boards.
+
+ */
+ 
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <linux/slab.h> 
+#include <linux/version.h>
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#define COUNT_BAD_BITS 1 // check higher granularity of bad bits in a page read
+
+//#undef DEBUG_NAND
+//#define DEBUG_NAND
+#if defined(DEBUG_NAND)
+#define DBG_PRINTF printk
+#else
+#define DBG_PRINTF(...)
+#endif
+
+
+#define STATUS_DEFAULT NAND_STATUS_TRUE_READY|NAND_STATUS_READY|NAND_STATUS_WP
+
+/* Command codes for the flash_command routine */
+#define FLASH_PROG          0x02    /* program load data to cache */
+#define FLASH_READ          0x03    /* read data from cache */
+#define FLASH_WRDI          0x04    /* reset write enable latch */
+#define FLASH_WREN          0x06    /* set write enable latch */
+#define FLASH_READ_FAST     0x0B    /* read data from cache */
+#define FLASH_GFEAT         0x0F    /* get feature option */
+#define FLASH_PEXEC         0x10    /* program cache data to memory array */
+#define FLASH_PREAD         0x13    /* read from memory array to cache */
+#define FLASH_SFEAT         0x1F    /* set feature option */
+#define FLASH_PROG_RAN      0x84    /* program load data to cache at offset */
+#define FLASH_BERASE        0xD8    /* erase one block in memory array */
+#define FLASH_RDID          0x9F    /* read manufacturer and product id */
+#define FLASH_RESET         0xFF    /* reset flash */
+
+#define FEATURE_PROT_ADDR   0xA0
+#define FEATURE_FEAT_ADDR   0xB0
+#define FEATURE_STAT_ADDR   0xC0
+#define FEATURE_STAT_AUX    0xF0
+
+/* Feature protection bit definition */
+//#define PROT_BRWD           0x80
+//#define PROT_BP_MASK        0x38
+//#define PROT_BP_SHIFT       0x3
+//#define PROT_BP_ALL         0x7
+//#define PROT_BP_NONE        0x0
+/* Gigadevice only */
+//#define PROT_INV            0x04
+//#define PROT_CMP            0x02
+
+/* Feature feature bit definition */
+#define FEAT_OPT_EN         0x40
+#define FEAT_ECC_EN         0x10
+#define FEAT_DISABLE        0x0
+/* Gigadevice only */
+//#define FEAT_BBI            0x04
+//#define FEAT_QE             0x01
+
+/* Feature status bit definition */
+#define STAT_ECC_MASK       0x30
+#define STAT_ECC_GOOD       0x00
+#define STAT_ECC_CORR       0x10  /* correctable error */
+//#define STAT_ECC_UNCORR     0x20  /* uncorrectable error */
+#define STAT_PFAIL          0x8   /* program fail */
+#define STAT_EFAIL          0x4   /* erase fail */
+#define STAT_WEL            0x2   /* write enable latch */
+#define STAT_OIP            0x1   /* operation in progress */
+
+/* Return codes from flash_status */
+#define STATUS_READY        0       /* ready for action */
+#define STATUS_BUSY         1       /* operation in progress */
+#define STATUS_TIMEOUT      2       /* operation timed out */
+#define STATUS_ERROR        3       /* unclassified but unhappy status */
+
+/* Micron manufacturer ID */
+#define MICRONPART          0x2C
+#define ID_MT29F1G01        0x12
+#define ID_MT29F2G01        0x22
+#define ID_MT29F4G01        0x32
+ 
+/* Gigadevice manufacturer ID */
+#define GIGADEVPART         0xC8
+#define ID_GD5F1GQ4UA       0xF1
+#define ID_GD5F2GQ4UA       0xF2
+#define ID_GD5F1GQ4UB       0xD1
+#define ID_GD5F2GQ4UB       0xD2
+#define ID_GD5F4GQ4UB       0xD4
+
+/* ESMT manufacturer ID */
+#define ESMTPART            0xC8
+#define ID_F50L1G41A        0x21
+
+/* Winbond manufacturer ID */
+#define WINBONDPART         0xEF
+#define ID_W25N01GV         0xAA
+
+// device information bytes required to identify device for SPI NAND
+#define SPI_NAND_ID_LENGTH  2
+
+// device information bytes required to identify device for Linux NAND
+#define NAND_ID_LENGTH  4
+/** Variables. **/
+
+static struct nand_ecclayout spinand_oob_gigadevice_2k_A =
+{
+    .eccbytes = 69,
+    .eccpos = { // for ease of use, call the bad block marker an ECC byte as well
+        0,                                                       12,  13,  14,  15, // these must be in numerical order
+                                                                 28,  29,  30,  31,
+                                                                 44,  45,  46,  47,
+                                                                 60,  61,  62,  63,
+                    67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+                    83,  84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,  95,
+                    99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+                   115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127
+    },
+//    .oobavail = 59,
+    .oobavail = 14, // per 512 bytes? JFFS2 multiplies by 4 to find available OOB size
+    .oobfree = {
+        {.offset = 1,
+         .length = 11},
+        {.offset = 16,
+         .length = 12},
+        {.offset = 24,
+         .length = 12},
+        {.offset = 48,
+         .length = 12},
+        {.offset = 64,
+         .length = 3},
+        {.offset = 80,
+         .length = 3},
+        {.offset = 96,
+         .length = 3},
+        {.offset = 112,
+         .length = 3}
+    }
+};
+
+static struct nand_ecclayout spinand_oob_gigadevice_2k_B =
+{
+    .eccbytes = 53,
+    .eccpos = { // for ease of use, call the bad block marker an ECC byte as well
+        0, // these must be in numerical order
+
+
+
+                    67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+                    83,  84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,  95,
+                    99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+                   115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127
+    },
+//    .oobavail = 75,
+    .oobavail = 18, // per 512 bytes? JFFS2 multiplies by 4 to find available OOB size
+    .oobfree = {
+        {.offset = 1,
+         .length = 63},
+        {.offset = 64,
+         .length = 3},
+        {.offset = 80,
+         .length = 3},
+        {.offset = 96,
+         .length = 3},
+        {.offset = 112,
+         .length = 3}
+
+    }
+};
+
+static struct nand_ecclayout spinand_oob_gigadevice_4k =
+{
+    .eccbytes = 105,
+    .eccpos = { // for ease of use, call the bad block marker an ECC byte as well
+        0, // these must be in numerical order
+
+
+
+
+
+
+
+                   131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+                   147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+                   163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+                   179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+                   195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+                   211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+                   227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+                   243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255
+    },
+//    .oobavail = 151,
+    .oobavail = 37, // per 512 bytes? JFFS2 multiplies by 4 to find available OOB size
+    .oobfree = {
+        {.offset = 1,
+         .length = 130},
+        {.offset = 144,
+         .length = 3},
+        {.offset = 160,
+         .length = 3},
+        {.offset = 176,
+         .length = 3},
+        {.offset = 192,
+         .length = 3},
+        {.offset = 208,
+         .length = 3},
+        {.offset = 224,
+         .length = 3},
+        {.offset = 240,
+         .length = 3}
+    }
+};
+
+static struct nand_ecclayout spinand_oob_micron =
+{
+    .eccbytes = 33,
+    .eccpos = { // for ease of use, call the bad block marker an ECC byte as well
+        0,                                    8,   9,  10,  11,  12,  13,  14,  15, // these must be in numerical order
+                                             24,  25,  26,  27,  28,  29,  30,  31,
+                                             40,  41,  42,  43,  44,  45,  46,  47,
+                                             56,  57,  58,  59,  60,  61,  62,  63
+    },
+//    .oobavail = 31,
+    .oobavail = 7, // per 512 bytes? JFFS2 multiplies by 4 to find available OOB size
+    .oobfree = {
+        {.offset = 1,
+         .length = 7},
+        {.offset = 16,
+         .length = 8},
+        {.offset = 24,
+         .length = 8},
+        {.offset = 48,
+         .length = 8}
+    }
+};
+
+static struct nand_ecclayout spinand_oob_esmt =
+{
+    .eccbytes = 29,
+    .eccpos = { // for ease of use, call the bad block marker an ECC byte as well
+        0,   1,   2,   3,   4,   5,   6,   7, // these must be in numerical order
+            17,  18,  19,  20,  21,  22,  23,
+            33,  34,  35,  36,  37,  38,  39,
+            49,  50,  51,  52,  53,  54,  55
+    },
+
+//    .oobavail = 35,
+    .oobavail = 8, // per 512 bytes? JFFS2 multiplies by 4 to find available OOB size
+    .oobfree = {
+        {.offset = 8,
+         .length = 9},
+        {.offset = 24,
+         .length = 9},
+        {.offset = 40,
+         .length = 9},
+        {.offset = 56,
+         .length = 8}
+    }
+};
+
+
+#define FLASH_API_OK                1
+#define FLASH_API_ERROR            -1
+#define FLASH_API_CORR             -2
+
+/* the controller will handle operations that are greater than the FIFO size
+   code that relies on READ_BUF_LEN_MAX, READ_BUF_LEN_MIN or spi_max_op_len
+   could be changed */
+#define SPI_BUF_LEN        512    /* largest of the maximum transaction sizes for SPI */
+/* this is the slave ID of the SPI flash for use with the SPI controller */
+#define SPI_FLASH_SLAVE_DEV_ID    0
+/* clock defines for the flash */
+#define SPI_FLASH_DEF_CLOCK       781000
+#define SPARE_MAX_SIZE          (27 * 16)
+#define CTRLR_CACHE_SIZE        512
+#define ECC_MASK_BIT(ECCMSK, OFS)   (ECCMSK[OFS / 8] & (1 << (OFS % 8)))
+
+/* legacy and HS controllers can coexist - use bus num to differentiate */
+#define LEG_SPI_BUS_NUM  0
+#define HS_SPI_BUS_NUM   1
+
+#define SPI_CONTROLLER_STATE_SET             (1 << 31)
+#define SPI_CONTROLLER_STATE_CPHA_EXT        (1 << 30)
+#define SPI_CONTROLLER_STATE_GATE_CLK_SSOFF  (1 << 29)
+#define SPI_CONTROLLER_STATE_ASYNC_CLOCK     (1 << 28)
+
+#define SPI_CONTROLLER_MAX_SYNC_CLOCK 30000000
+
+/* set mode and controller state based on CHIP defaults
+   these values do not apply to the legacy controller
+   legacy controller uses SPI_MODE_3 and clock is not
+   gated */
+
+#define SPI_MODE_DEFAULT              SPI_MODE_0
+#define SPI_CONTROLLER_STATE_DEFAULT  (SPI_CONTROLLER_STATE_GATE_CLK_SSOFF)
+
+
+static unsigned int spi_max_op_len = SPI_BUF_LEN;
+//static int spi_dummy_bytes         = 0;
+
+/* default to legacy controller - updated later */
+static int spi_flash_clock  = SPI_FLASH_DEF_CLOCK;
+static int spi_flash_busnum = LEG_SPI_BUS_NUM;
+spinlock_t chip_lock;
+
+struct SpiNandChip
+{
+    unsigned char *chip_name;
+    unsigned char chip_device_id[2];
+    unsigned long chip_total_size;
+    unsigned int chip_num_blocks;
+    unsigned int chip_block_size;
+    unsigned int chip_page_size;
+    unsigned int chip_spare_size;
+    unsigned int chip_spare_available;
+    unsigned int chip_ecc_offset;
+    struct nand_ecclayout *ecclayout;
+    unsigned short chip_block_shift;
+    unsigned short chip_page_shift;
+    unsigned short chip_num_planes;
+    unsigned char chip_ecc_corr; // threshold to fix correctable bits
+    unsigned char chip_ecc_enh; // enhanced bad bit detection by chip
+    unsigned char chip_subpage_shift; // 2^ shift amount based on number of subpages, typically 4
+};
+
+static struct SpiNandChip * pchip;
+
+
+static struct SpiNandChip SpiDevInfo[] =
+{
+    {
+        .chip_name = "GigaDevice GD5F1GQ4UA",
+        .chip_device_id = {GIGADEVPART, ID_GD5F1GQ4UA},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 128,
+        .chip_spare_available = 14, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x840,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_gigadevice_2k_A,
+        .chip_ecc_corr = 6, // threshold to fix correctable bits (6/8)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "GigaDevice GD5F2GQ4UA",
+        .chip_device_id = {GIGADEVPART, ID_GD5F2GQ4UA},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 128,
+        .chip_spare_available = 14, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x840,   // location of ECC bytes
+        .chip_num_blocks = 2048,    // 2048 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 2048, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_gigadevice_2k_A,
+        .chip_ecc_corr = 6, // threshold to fix correctable bits (6/8)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "GigaDevice GD5F1GQ4UB",
+        .chip_device_id = {GIGADEVPART, ID_GD5F1GQ4UB},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 128,
+        .chip_spare_available = 18, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x840,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_gigadevice_2k_B,
+        .chip_ecc_corr = 6, // threshold to fix correctable bits (6/8)
+        .chip_ecc_enh = 0x10, // enhanced bad bit detection by chip (6/8)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "GigaDevice GD5F2GQ4UB",
+        .chip_device_id = {GIGADEVPART, ID_GD5F2GQ4UB},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 128,
+        .chip_spare_available = 18, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x840,   // location of ECC bytes
+        .chip_num_blocks = 2048,    // 2048 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 2048, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_gigadevice_2k_B,
+        .chip_ecc_corr = 6, // threshold to fix correctable bits (6/8)
+        .chip_ecc_enh = 0x10, // enhanced bad bit detection by chip (6/8)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "GigaDevice GD5F4GQ4UB",
+        .chip_device_id = {GIGADEVPART, ID_GD5F4GQ4UB},
+        .chip_page_size = 4096,
+        .chip_page_shift = 12,
+        .chip_block_size = 64 * 4096,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 18,
+        .chip_spare_size = 256,
+        .chip_spare_available = 37, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x1080,   // location of ECC bytes
+        .chip_num_blocks = 2048,    // 2048 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 4096 * 2048, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_gigadevice_4k,
+        .chip_ecc_corr = 6, // threshold to fix correctable bits (6/8)
+        .chip_ecc_enh = 0x10, // enhanced bad bit detection by chip (6/8)
+        .chip_subpage_shift = 3, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "Micron MT29F1G01",
+        .chip_device_id = {MICRONPART, ID_MT29F1G01},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 7,  // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_micron,
+        .chip_ecc_corr = 3, // threshold to fix correctable bits (3/4)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "Micron MT29F2G01",
+        .chip_device_id = {MICRONPART, ID_MT29F2G01},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 7,  // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 2048,    // 2048 blocks total
+        .chip_num_planes = 2,
+        .chip_total_size = 64 * 2048 * 2048, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_micron,
+        .chip_ecc_corr = 3, // threshold to fix correctable bits (3/4)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "Micron MT29F4G01",
+        .chip_device_id = {MICRONPART, ID_MT29F4G01},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 7,  // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 4096,    // 4096 blocks total
+        .chip_num_planes = 2,
+        .chip_total_size = 64 * 2048 * 4096, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_micron,
+        .chip_ecc_corr = 3, // threshold to fix correctable bits (3/4)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "ESMT F50L1G41A",
+        .chip_device_id = {ESMTPART, ID_F50L1G41A},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 8, // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_esmt,
+        .chip_ecc_corr = 1, // threshold to fix correctable bits (1/1)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "Winbond W25N01GV",
+        .chip_device_id = {WINBONDPART, ID_W25N01GV},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 7,  // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_micron,
+        .chip_ecc_corr = 1, // threshold to fix correctable bits (1/1)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    },
+    {
+        .chip_name = "default",
+        .chip_device_id = {0, 0},
+        .chip_page_size = 2048,
+        .chip_page_shift = 11,
+        .chip_block_size = 64 * 2048,   // 64 pages per block x chip_page_size
+        .chip_block_shift = 17,
+        .chip_spare_size = 64,
+        .chip_spare_available = 7,  // JFFS2 uses this value x 4 to determine amount of OOB available
+        .chip_ecc_offset = 0x800,   // location of ECC bytes
+        .chip_num_blocks = 1024,    // 1024 blocks total
+        .chip_num_planes = 1,
+        .chip_total_size = 64 * 2048 * 1024, // chip_block_size x chip_num_blocks
+        .ecclayout = &spinand_oob_micron,
+        .chip_ecc_corr = 3, // threshold to fix correctable bits (3/4)
+        .chip_ecc_enh = 0, // enhanced bad bit detection by chip (none)
+        .chip_subpage_shift = 2, // 2^ shift amount based on number of subpages (4)
+    }
+};
+
+
+static struct spi_device * pSpiDevice; // handle for SPI NAND device
+
+static unsigned char * pageBuf;
+static unsigned int pageBufI;
+static int pageAddr, pageOffset;
+static int status = STATUS_DEFAULT;
+static bool SpiNandDeviceRegistered = 0;
+
+/** Prototypes. **/
+static int spi_nand_read_page(unsigned long page_addr, unsigned int page_offset, unsigned char *buffer, int len);
+static int spi_nand_write_page(unsigned long page_addr, unsigned int page_offset, unsigned char *buffer, int len);
+static int spi_nand_is_blk_bad(unsigned long addr);
+static int spi_nand_mark_blk_bad(unsigned long addr);
+static int spi_nand_write_enable(void);
+static int spi_nand_write_disable(void);
+static void spi_nand_row_addr(unsigned int page_addr, unsigned char* buf);
+static void spi_nand_col_addr(unsigned int page_addr, unsigned int page_offset, unsigned char* buf);
+static void spi_nand_get_device_id(unsigned char * buf, unsigned int len);
+static int spi_nand_wel(void);
+
+static int spiRead( struct spi_transfer *xfer );
+static int spiWrite( unsigned char *msg_buf, int nbytes );
+static void spi_nand_read_cfg(void);
+
+static int spi_nand_device_reset(void);
+static int spi_nand_status(void);
+static int spi_nand_ready(void);
+static int spi_nand_ecc(void);
+static int spi_nand_sector_erase_int(unsigned long addr);
+
+static int spi_nand_get_feat(unsigned char feat_addr);
+static void spi_nand_set_feat(unsigned char feat_addr, unsigned char feat_val);
+
+static void bcm63xx_cmd(struct mtd_info *mtd, unsigned int command, int column, int page);
+static unsigned char bcm63xx_read_byte(struct mtd_info *mtd);
+static void bcm63xx_read_buf(struct mtd_info *mtd, uint8_t *buf, int len);
+static void bcm63xx_write(struct mtd_info *mtd, const uint8_t *buf, int len);
+static int bcm63xx_status(struct mtd_info *mtd, struct nand_chip *chip);
+static int bcm63xx_block_isbad(struct mtd_info *mtd, loff_t ofs, int getchip);
+static int bcm63xx_block_markbad(struct mtd_info *mtd, loff_t ofs);
+static void bcm63xx_select(struct mtd_info *mtd, int chip);
+static int bcm63xx_scan_bbt(struct mtd_info *mtd);
+
+
+static int spiRead(struct spi_transfer *xfer)
+{
+    if (!SpiNandDeviceRegistered)
+    {
+        printk("ERROR!! SPI NAND read without SPI NAND Linux device registration\n");
+        return(0);
+    }
+
+    {
+        struct spi_message  message;
+
+        spi_message_init(&message);
+        spi_message_add_tail(xfer, &message);
+
+        /* the controller does not support asynchronous transfer,
+           when spi_async returns the transfer will be complete
+           don't use spi_sync (to avoid the call to schedule),
+           scheduling will conflict with atomic operations
+           such as writing image from Linux */
+        return(spi_async(pSpiDevice, &message));
+    }
+}
+
+
+static int spiWrite(unsigned char *msg_buf, int nbytes)
+{
+    if (!SpiNandDeviceRegistered)
+    {
+        printk("ERROR!! SPI NAND write without SPI NAND Linux device registration\n");
+        return(0);
+    }
+
+    {
+        struct spi_message  message;
+        struct spi_transfer xfer;
+
+        spi_message_init(&message);
+        memset(&xfer, 0, (sizeof xfer));
+        xfer.prepend_cnt = 0;
+        xfer.len         = nbytes;
+        xfer.speed_hz    = pSpiDevice->max_speed_hz;
+        xfer.rx_buf      = NULL;
+        xfer.tx_buf      = msg_buf;
+
+        spi_message_add_tail(&xfer, &message);
+
+        /* the controller does not support asynchronous transfer
+           when spi_async returns the transfer will be complete
+           don't use spi_sync to avoid the call to schedule */
+        return(spi_async(pSpiDevice, &message));
+    }
+}
+
+static void spi_nand_read_cfg(void)
+{ // search through SPI NAND devices to find match
+    unsigned char buf[SPI_NAND_ID_LENGTH];
+    int i = 0;
+
+    spi_nand_get_device_id(buf, SPI_NAND_ID_LENGTH);
+
+    do
+    {
+        if (!memcmp(SpiDevInfo[i].chip_device_id, buf, SPI_NAND_ID_LENGTH))
+            break;
+        i++;
+    } while(memcmp(SpiDevInfo[i].chip_name, "default", 7));
+
+    pchip = &SpiDevInfo[i];
+
+    if (!spin_is_locked(&chip_lock)) // show status only if initial reset since Linux NAND code resets chip during every block erase
+        printk("Found SPI NAND device %s\n", pchip->chip_name);
+}
+
+/***********************************************************************/
+/* reset SPI NAND device and get configuration information             */
+/* some devices such as Micron MT29F1G01 require explicit reset before */
+/* access to the device.                                               */
+/***********************************************************************/
+static int spi_nand_device_reset(void)
+{
+    unsigned char buf[4];
+#if defined(CONFIG_BRCM_IKOS)
+    unsigned int i;
+    for( i = 0; i < 250; i++);
+#else
+    udelay(300);
+#endif
+    if (!spin_is_locked(&chip_lock)) // show status only if initial reset since Linux NAND code resets chip during every block erase
+        printk("SPI NAND device reset\n");
+    buf[0]        = FLASH_RESET;
+    spiWrite(buf, 1);
+
+#if defined(CONFIG_BRCM_IKOS)
+    for( i = 0; i < 3000; i++);
+#else
+    /* device is available after 5ms */
+    mdelay(5);
+#endif
+    while(!spi_nand_ready()); // do we need this here??
+
+    spi_nand_set_feat(FEATURE_PROT_ADDR, FEAT_DISABLE); // disable block locking
+
+    spi_nand_read_cfg();
+
+    return(FLASH_API_OK);
+}
+
+/*****************************************************************************************/
+/*  row address is 24 bit length. so buf must be at least 3 bytes.                       */
+/*  For GigaDevice GD5F1GQ4 part(2K page size, 64 page per block and 1024 blocks)        */
+/*  Row Address. RA<5:0> selects a page inside a block, and RA<15:6> selects a block and */
+/*  first byte is dummy byte                                                             */
+/*****************************************************************************************/
+static void spi_nand_row_addr(unsigned int page_addr, unsigned char* buf)
+{
+    buf[0] = (unsigned char)(page_addr>>(pchip->chip_page_shift+16)); //dummy byte
+    buf[1] = (unsigned char)(page_addr>>(pchip->chip_page_shift+8));
+    buf[2] = (unsigned char)(page_addr>>(pchip->chip_page_shift));
+
+    return;
+}
+
+/*********************************************************************************************************************/
+/*  column address select the offset within the page. For GigaDevice GD5F1GQ4 part(2K page size and 2112 with spare) */
+/*  is 12 bit length. so buf must be at least 2 bytes. The 12 bit address is capable of address from 0 to 4095 bytes */
+/*  however only byte 0 to 2111 are valid.                                                                           */
+/*********************************************************************************************************************/
+static void spi_nand_col_addr(unsigned int page_addr, unsigned int page_offset, unsigned char* buf)
+{
+    page_offset = page_offset&((1<<(pchip->chip_page_shift+1))-1);  /* page size + spare area size */
+
+    /* the upper 4 bits of buf[0] is either wrap bits for gigadevice or dummy bit[3:1] + plane select bit[0] for micron
+     */
+    if(*pchip->chip_device_id == MICRONPART)
+    {
+        /* setup plane bit if more than one plane. otherwise that bit is always 0 */
+        if( pchip->chip_num_planes > 1 )
+            buf[0] = (unsigned char)(((page_offset>>8)&0xf)|((page_addr>>pchip->chip_block_shift)&0x1)<<4); //plane bit is the first bit of the block number RowAddr[6]
+        else
+            buf[0] = (unsigned char)((page_offset>>8)&0xFF);
+    }
+    else
+    {
+        /* use default wrap option 0, wrap length 2112 */
+        buf[0] = (unsigned char)((page_offset>>8)&0xFF);
+    }
+    buf[1] = (unsigned char)(page_offset&0xFF);
+
+    return;
+}
+
+/***************************************************************************
+ * Function Name: spi_xfr
+ * Description  : Commonly used SPI transfer function.
+ * Returns      : nothing
+ ***************************************************************************/
+static void spi_xfr(unsigned long page_addr, unsigned int page_offset, unsigned char *buffer, int len)
+{
+    int maxread;
+    unsigned char buf[4];
+    struct spi_transfer xfer;
+
+    /* NB: the page data must already be in the device cache (loaded by a
+     * prior PAGE READ); page_addr is only used here to derive the Micron
+     * plane-select bit inside spi_nand_col_addr() */
+    while (len > 0)
+    { // break up NAND buffer read into SPI buffer sized chunks
+       /* Random data read (0Bh or 03h) command to read the page data from the cache
+          The RANDOM DATA READ command requires 4 dummy bits, followed by a 12-bit column
+          address for the starting byte address and a dummy byte for waiting data.
+          This is only for 2K page size, the format will change for other page size.
+       */
+
+        maxread = (len < spi_max_op_len) ? len : spi_max_op_len;
+
+        buf[0] = FLASH_READ;
+        spi_nand_col_addr(page_addr, page_offset, buf+1);
+        buf[3] = 0; //dummy byte
+
+        if ((page_offset < pchip->chip_page_size) && ((maxread + page_offset) > pchip->chip_page_size))
+            maxread = pchip->chip_page_size - page_offset; // snap address to OOB boundary to let chip know we want OOB
+
+        if ((page_offset < pchip->chip_ecc_offset) && ((maxread + page_offset) > pchip->chip_ecc_offset))
+            maxread = pchip->chip_ecc_offset - page_offset; // snap address to ECC boundary to let chip know we want ECC
+
+        DBG_PRINTF("spi_xfr - spi cmd 0x%x, 0x%x, 0x%x, 0x%x\n", buf[0],buf[1],buf[2],buf[3]);
+        DBG_PRINTF("spi_xfr - spi read len 0x%x, offset 0x%x, remaining 0x%x\n", maxread, page_offset, len);
+
+        /* 4-byte header (opcode + 2 address bytes + dummy) is prepended to
+         * the transfer; the data bytes land directly in the caller's buffer */
+        memset(&xfer, 0, sizeof(struct spi_transfer));
+        xfer.tx_buf      = buf;
+        xfer.rx_buf      = buffer;
+        xfer.len         = maxread;
+        xfer.speed_hz    = spi_flash_clock;
+        xfer.prepend_cnt = 4;
+        xfer.addr_len    = 3; // length of address field (max 4 bytes)
+        xfer.addr_offset = 1; // offset of first addr byte in header
+        xfer.hdr_len     = 4; // length of header
+        xfer.unit_size   = 1; // data for each transfer will be divided into multiples of unit_size
+        spiRead(&xfer);
+        /* busy-wait until the device clears OIP before the next chunk */
+        while (!spi_nand_ready());
+
+        buffer += maxread;
+        len -= maxread;
+        page_offset += maxread;
+    }
+}
+
+#if defined(COUNT_BAD_BITS)
+/***************************************************************************
+ * Function Name: count_bits
+ * Description  : Counts the bit differences between two buffers.
+ * Returns      : Bit difference count
+ ***************************************************************************/
+static int count_bits(unsigned char * buf1, unsigned char * buf2, int len)
+{
+    int i, count = 0;
+    unsigned char hold;
+
+    for(i = 0; i < len; i++)
+    {
+        hold = buf1[i] ^ buf2[i];
+        /* Kernighan's trick: each iteration clears the lowest set bit,
+         * so the inner loop runs once per differing bit */
+        while(hold)
+        {
+            hold &= (hold-1);
+            count++;
+        }
+    }
+
+    return(count);
+}
+#endif // COUNT_BAD_BITS
+/***************************************************************************
+ * Function Name: spi_nand_read_page
+ * Description  : Reads up to a NAND block of pages into the specified buffer.
+ * Returns      : FLASH_API_OK or FLASH_API_ERROR or FLASH_API_CORR
+ ***************************************************************************/
+static int spi_nand_read_page(unsigned long page_addr, unsigned int page_offset, unsigned char *buffer, int len)
+{
+    /* VLA scratch buffer: sized for one SPI transaction or the whole spare
+     * area, whichever is larger (reused below for the blank-page check) */
+    unsigned char buf[(spi_max_op_len > pchip->chip_spare_size) ? spi_max_op_len : pchip->chip_spare_size];
+    int status = FLASH_API_OK;
+
+    if ((page_offset + len) > (pchip->chip_page_size + pchip->chip_spare_size)) // check to see if reading within page/OOB boundary
+    {
+        printk("spi_nand_read_page(): Attempt to read past page boundary, offset 0x%x, length 0x%x, into page address 0x%x\n", page_offset, len, (unsigned int)page_addr);
+
+        return (FLASH_API_ERROR);
+    }
+
+    spi_nand_set_feat(FEATURE_FEAT_ADDR, FEAT_ECC_EN); // reading from page, enable ECC, turn on ECC anyway even if there's a failure should still fill buffer
+
+    /* The PAGE READ (13h) command transfers the data from the NAND Flash array to the
+     * cache register.  The PAGE READ command requires a 24-bit address consisting of
+     * 8 dummy bits followed by a 16-bit block/page address.
+     */
+    buf[0] = FLASH_PREAD;
+    spi_nand_row_addr(page_addr, buf+1);
+    DBG_PRINTF("spi_nand_read_page - spi cmd 0x%x, 0x%x, 0x%x, 0x%x\n", buf[0], buf[1], buf[2], buf[3]);
+    spiWrite(buf, 4);
+
+    /* GET FEATURES (0Fh)  command to read the status */
+    while(!spi_nand_ready());
+
+    /* only consult ECC status when the read touches the ECC-protected page area */
+    if (page_offset < pchip->chip_page_size)
+        status = spi_nand_ecc();
+
+    /* len == 0 is a valid call: the caller only wants the page's ECC status */
+    if (!len)
+        return(status);
+
+    spi_xfr(page_addr, page_offset, buffer, len);
+
+    /* check ecc from status bits if we are reading from page area */
+    if (status == FLASH_API_ERROR)
+    { // check to see if ECC is set to all FF's to verify as blank page (don't check page area as there may be bad bits which would not look like a blank page)
+        spi_nand_set_feat(FEATURE_FEAT_ADDR, FEAT_DISABLE); // disable ECC
+
+        /* re-read just the spare area with ECC off into the scratch buffer */
+        spi_xfr(page_addr, pchip->chip_page_size, buf, pchip->chip_spare_size);
+
+        { // check to see if all ECC bytes are 0xFF to verify page is empty
+            int i;
+
+            for (i = 0; i < pchip->ecclayout->eccbytes; i++)
+                if (buf[pchip->ecclayout->eccpos[i]] != 0xFF)
+                {
+                    printk("Uncorrectable ECC ERROR!! Address 0x%x, block 0x%x, page 0x%x is not empty!!\n", (unsigned int)page_addr, (unsigned int)page_addr>>pchip->chip_block_shift, (unsigned int)(page_addr%pchip->chip_block_size)>>pchip->chip_page_shift);
+                    return(FLASH_API_ERROR);
+                }
+
+            memset(buffer, 0xFF, len); // fill buffer with 0xFF just in case there were any bad bits read
+
+            return(FLASH_API_OK);
+        }
+    }
+
+    if(status == FLASH_API_CORR)
+    { // count bad bits to see if we exceed threshold
+        if(pchip->chip_ecc_enh)
+        { // chip has enhanced bad bit detection
+            if ( (spi_nand_get_feat(FEATURE_STAT_AUX) & STAT_ECC_MASK) < pchip->chip_ecc_enh)
+                status = FLASH_API_OK;
+        }
+#if defined(COUNT_BAD_BITS)
+        else
+        {
+            /* compare an ECC-corrected read against a raw (ECC-off) read and
+             * count differing bits per subpage to judge error severity */
+            unsigned char buf_ecc[pchip->chip_page_size + pchip->chip_spare_size];
+            unsigned char buf_noecc[pchip->chip_page_size + pchip->chip_spare_size];
+            int i, size, count, worst = 0;
+
+            spi_xfr(page_addr, 0, buf_ecc, pchip->chip_page_size + pchip->chip_spare_size);
+
+            spi_nand_set_feat(FEATURE_FEAT_ADDR, FEAT_DISABLE); // now grab data with ecc turned off
+
+            /* The PAGE READ (13h) command transfers the data from the NAND Flash array to the
+             * cache register.  The PAGE READ command requires a 24-bit address consisting of
+             * 8 dummy bits followed by a 16-bit block/page address.
+             */
+            buf[0] = FLASH_PREAD;
+            spi_nand_row_addr(page_addr, buf+1);
+            spiWrite(buf, 4);
+
+            while(!spi_nand_ready());
+
+            spi_xfr(page_addr, 0, buf_noecc, pchip->chip_page_size + pchip->chip_spare_size);
+
+            for(i = 0; i < (1 << pchip->chip_subpage_shift); i++)
+            {
+                count = 0;
+
+                size = pchip->chip_page_size >> pchip->chip_subpage_shift;
+                count += count_bits(buf_ecc + (size * i), buf_noecc + (size * i), size);
+
+                size = (pchip->chip_spare_size - (pchip->chip_ecc_offset - pchip->chip_page_size)) >> pchip->chip_subpage_shift;
+                count += count_bits(buf_ecc + pchip->chip_page_size + (size * i), buf_noecc + pchip->chip_page_size + (size * i), size);
+
+                /* NOTE(review): the ECC-region comparison below reuses 'size'
+                 * (the non-ECC spare stride) as its stride -- confirm this
+                 * matches the chip's spare/ECC layout */
+                if(pchip->chip_page_size != pchip->chip_ecc_offset)
+                    count += count_bits(buf_ecc + pchip->chip_ecc_offset + (size * i), buf_noecc + pchip->chip_ecc_offset + (size * i), size);
+
+                if (count > worst)
+                    worst = count;
+            }
+
+            if (worst < pchip->chip_ecc_corr)
+                status = FLASH_API_OK;
+        }
+#endif // COUNT_BAD_BITS
+    }
+
+    return(status);
+}
+
+/*********************************************************************/
+/* Flash_status return the feature status byte                       */
+/*********************************************************************/
+static int spi_nand_status(void)
+{
+    /* status feature register carries OIP, WEL, erase/program-fail and ECC bits */
+    return spi_nand_get_feat(FEATURE_STAT_ADDR);
+}
+
+/* Returns 1 when the device has finished its current operation (OIP bit
+ * clear in the status feature register), 0 while an operation is in progress. */
+static int spi_nand_ready(void)
+{
+    return ((spi_nand_status() & STAT_OIP) == 0);
+}
+
+/*********************************************************************/
+/*  spi_nand_get_feat return the feature byte at feat_addr            */
+/*********************************************************************/
+static int spi_nand_get_feat(unsigned char feat_addr)
+{
+    unsigned char buf[4];
+    struct spi_transfer xfer;
+
+    /* GET FEATURES (0Fh): a 2-byte header (opcode + feature address) is
+     * prepended, then one feature byte is clocked back; buf doubles as the
+     * tx header and the rx destination, so buf[0] holds the result */
+    memset(&xfer, 0, sizeof(struct spi_transfer));
+    buf[0]           = FLASH_GFEAT;
+    buf[1]           = feat_addr;
+    xfer.tx_buf      = buf;
+    xfer.rx_buf      = buf;
+    xfer.len         = 1;
+    xfer.speed_hz    = spi_flash_clock;
+    xfer.prepend_cnt = 2;
+    spiRead(&xfer);
+
+    DBG_PRINTF("spi_nand_get_feat at 0x%x 0x%x\n", feat_addr, buf[0]);
+
+    return buf[0];
+}
+
+/*********************************************************************/
+/*  spi_nand_set_feat set the feature byte at feat_addr              */
+/*********************************************************************/
+static void spi_nand_set_feat(unsigned char feat_addr, unsigned char feat_val)
+{
+    unsigned char buf[3];
+
+    /* SET FEATURES (1Fh): opcode, feature address, value; blocks until the
+     * device reports ready again before returning */
+    buf[0]           = FLASH_SFEAT;
+    buf[1]           = feat_addr;
+    buf[2]           = feat_val;
+    spiWrite(buf, 3);
+
+    while(!spi_nand_ready());
+
+    return;
+}
+
+/* Translates the ECC bits of the status feature register into a FLASH_API_*
+ * result: clean read, corrected bit errors, or uncorrectable failure. */
+static int spi_nand_ecc(void)
+{
+    switch (spi_nand_get_feat(FEATURE_STAT_ADDR) & STAT_ECC_MASK)
+    {
+        case STAT_ECC_GOOD:
+            return(FLASH_API_OK);    /* no bit errors detected */
+
+        case STAT_ECC_CORR:
+            return(FLASH_API_CORR);  /* errors found and corrected */
+
+        default:
+            return(FLASH_API_ERROR); /* anything else is an error */
+    }
+}
+
+/*********************************************************************/
+/* spi_nand_sector_erase_int() waits until the erase is completed    */
+/* before returning control to the calling function.  This can be    */
+/* used in cases which require the program to hold until a sector    */
+/* is erased, without adding the wait check external to it.          */
+/*********************************************************************/
+static int spi_nand_sector_erase_int(unsigned long addr)
+{
+    unsigned char buf[11];
+    int status;
+
+    /* align down to the start of the containing block */
+    addr &= ~(pchip->chip_block_size - 1);
+
+    DBG_PRINTF("spi_nand_sector_erase_int block at address 0x%lx\n", addr);
+
+    /* refuse to erase a block already marked bad */
+    if (spi_nand_is_blk_bad(addr))
+    {
+        printk("spi_nand_sector_erase_int(): Attempt to erase failed due to bad block 0x%lx (address 0x%lx)\n", addr >> pchip->chip_block_shift, addr);
+        return (FLASH_API_ERROR);
+    }
+
+    { // erase dirty block
+        /* BLOCK ERASE: opcode + 3-byte row address (only buf[0..3] used) */
+        spi_nand_write_enable();
+        buf[0] = FLASH_BERASE;
+        spi_nand_row_addr(addr, buf+1);
+        spiWrite(buf, 4);
+        while(!spi_nand_ready()) ;
+
+        /* the E_FAIL status bit reports an erase failure */
+        status = spi_nand_status();
+        if( status & STAT_EFAIL )
+        {
+            printk("spi_nand_sector_erase_int(): Erase block 0x%lx failed, sts 0x%x\n",  addr >> pchip->chip_block_shift, status);
+            return(FLASH_API_ERROR);
+        }
+
+        spi_nand_write_disable();
+    }
+
+    return (FLASH_API_OK);
+}
+
+/************************************************************************/
+/* spi_nand_write_enable() must be called before any change to the      */
+/* device such as write or erase. It also unlocks the blocks if they    */
+/* were previously locked.                                              */
+/************************************************************************/
+static int spi_nand_write_enable(void)
+{
+    unsigned char buf[4], prot;
+
+    /* make sure it is not locked first */
+    prot = spi_nand_get_feat(FEATURE_PROT_ADDR);
+    if( prot != 0 )
+    {
+        /* clear all block-protection bits so program/erase can proceed */
+        prot = 0;
+        spi_nand_set_feat(FEATURE_PROT_ADDR, prot);
+    }
+
+    /* send write enable cmd and check feature status WEL latch bit */
+    buf[0] = FLASH_WREN;
+    spiWrite(buf, 1);
+    /* busy-wait until the device is idle and the WEL latch has set */
+    while(!spi_nand_ready());
+    while(!spi_nand_wel());
+
+    return(FLASH_API_OK);
+}
+
+static int spi_nand_write_disable(void)
+{
+    unsigned char buf[4];
+
+    /* send write disable and wait for the device to go idle and the
+     * WEL latch to actually drop */
+    buf[0] = FLASH_WRDI;
+    spiWrite(buf, 1);
+    while(!spi_nand_ready());
+    while(spi_nand_wel());
+
+    return(FLASH_API_OK);
+}
+
+/***************************************************************************
+ * Function Name: spi_nand_write_page
+ * Description  : Writes up to a NAND block of pages from the specified buffer.
+ * Returns      : FLASH_API_OK or FLASH_API_ERROR
+ ***************************************************************************/
+static int spi_nand_write_page(unsigned long page_addr, unsigned int page_offset, unsigned char *buffer, int len)
+{
+    unsigned char spi_buf[512];  /* HS_SPI_BUFFER_LEN SPI controller fifo size is currently 512 bytes*/
+    unsigned char xfer_buf[pchip->chip_page_size + pchip->chip_spare_size]; /* VLA staging buffer for a full page + spare */
+    int maxwrite, status;
+    unsigned int page_ofs = page_offset; /* original offset; page_offset becomes the running write cursor below */
+
+    if (!len)
+    {
+        printk("spi_nand_write_page(): Not writing any data to page addr 0x%x, page_offset 0x%x, len 0x%x\n", (unsigned int)page_addr, page_offset, len);
+        return (FLASH_API_OK);
+    }
+
+    if ((page_offset + len) > (pchip->chip_page_size + pchip->chip_spare_size))
+    {
+        printk("spi_nand_write_page(): Attempt to write past page boundary, offset 0x%x, length 0x%x, into page address 0x%x\n", page_offset, len, (unsigned int)page_addr);
+        return (FLASH_API_ERROR);
+    }
+
+    if (page_ofs < pchip->chip_page_size)
+    { /* writing into page area, if writing into spare area is allowed then must read page first to fill write buffer
+       * because we don't know if JFFS2 clean marker is there or not and this clean marker would initially have
+       * been written with ECC off, but will now be included in the ECC calculation along with the page data */
+        spi_nand_set_feat(FEATURE_FEAT_ADDR, FEAT_ECC_EN); // enable ECC if writing to page
+    }
+    else
+    { // not writing into page area
+        if (len != 1)
+            return(FLASH_API_OK); // only allowed write is the bad block marker; return if not that
+
+        spi_nand_set_feat(FEATURE_FEAT_ADDR, FEAT_DISABLE); // else don't write ECC
+    }
+
+    /* stage the whole page + spare, padded with 0xFF (NAND erased state) so
+     * untouched bytes are not disturbed by the program operation */
+    memset(xfer_buf, 0xff, sizeof(xfer_buf));
+    memcpy(xfer_buf + page_offset, buffer, len);
+    len = pchip->chip_page_size + pchip->chip_spare_size;
+    page_offset = 0;
+
+    while (len > 0)
+    {
+        /* Send Program Load Random Data Command (0x84) to load data to cache register.
+         * PROGRAM LOAD consists of an 8-bit Op code, followed by 4 bit dummy and a
+         * 12-bit column address, then the data bytes to be programmed. */
+        spi_buf[0] = FLASH_PROG_RAN;
+        spi_nand_col_addr(page_addr, page_offset, spi_buf + 1);
+
+        /* leave room for the 3-byte command header (the -5 is conservative) */
+        maxwrite = (len > (spi_max_op_len - 5)) ? (spi_max_op_len - 5) : len;
+
+        if ((page_offset < pchip->chip_page_size) && ((maxwrite + page_offset) > pchip->chip_page_size))
+            maxwrite = pchip->chip_page_size - page_offset; // snap address to OOB boundary to let chip know we want OOB
+
+        if ((page_offset < pchip->chip_ecc_offset) && ((maxwrite + page_offset) > pchip->chip_ecc_offset))
+            maxwrite = pchip->chip_ecc_offset - page_offset; // snap address to ECC boundary to let chip know we want ECC
+
+        memcpy(&spi_buf[3], xfer_buf + page_offset, maxwrite);
+        DBG_PRINTF("spi_nand_write_page - spi cmd 0x%x, 0x%x, 0x%x\n", spi_buf[0], spi_buf[1], spi_buf[2]);
+        DBG_PRINTF("spi_nand_write_page - spi write len 0x%x, offset 0x%x, remaining 0x%x\n", maxwrite, page_offset, len-maxwrite); // fixed: was 'offset', an undeclared identifier that broke the build whenever DBG_PRINTF expands to printk
+
+        spi_nand_write_enable();
+        spiWrite(spi_buf, maxwrite + 3);
+
+        len -= maxwrite;
+        page_offset += maxwrite;
+
+        while(!spi_nand_ready()); // do we need this here??
+    }
+
+    /* Send Program Execute command (0x10) to write cache data to memory array
+     * Send address (24bit): 8 bit dummy + 16 bit address (page/Block)
+     */
+    /* Send Write enable Command (0x06) */
+    spi_nand_write_enable();
+
+    spi_buf[0] = FLASH_PEXEC;
+    spi_nand_row_addr(page_addr, spi_buf + 1);
+    DBG_PRINTF("spi_nand_write_page - spi cmd 0x%x, 0x%x, 0x%x, 0x%x\n", spi_buf[0], spi_buf[1], spi_buf[2], spi_buf[3]);
+    spiWrite(spi_buf, 4);
+    while(!spi_nand_ready());
+
+    status = spi_nand_status();
+    spi_nand_write_disable();
+
+    if(status & STAT_PFAIL)
+    {
+        printk("Page program failed at address 0x%x, sts 0x%x\n", (unsigned int)page_addr, status);
+        return(FLASH_API_ERROR);
+    }
+
+    /* read back and verify the page area (skipped when only a spare-area
+     * byte such as a bad-block marker was written) */
+    if (page_ofs < pchip->chip_page_size)
+    {
+        unsigned char buf[pchip->chip_page_size];
+
+        status = spi_nand_read_page(page_addr, 0, buf, pchip->chip_page_size);
+
+        if (status == FLASH_API_ERROR)
+        {
+            printk("Write verify failed reading back page at address 0x%lx\n", page_addr);
+            return(FLASH_API_ERROR);
+        }
+
+        if (memcmp(xfer_buf, buf, pchip->chip_page_size))
+        {
+            printk("Write data did not match read data at address 0x%lx\n", page_addr);
+            return(FLASH_API_ERROR);
+        }
+
+        if (status == FLASH_API_CORR)
+        {
+            printk("Write verify correctable errors at address 0x%lx\n", page_addr);
+            return(FLASH_API_CORR);
+        }
+    }
+
+    return (FLASH_API_OK);
+}
+
+/* Returns 1 when the write-enable latch (WEL) is set in the status
+ * feature register, 0 otherwise. */
+static int spi_nand_wel(void)
+{
+    return ((spi_nand_status() & STAT_WEL) != 0);
+}
+
+/*********************************************************************/
+/* spi_nand_get_device_id() returns the device id of the component.  */
+/*********************************************************************/
+static void spi_nand_get_device_id(unsigned char * buf, unsigned int len)
+{
+    unsigned char buffer[2];
+    struct spi_transfer xfer;
+
+    /* READ ID: a 2-byte header (opcode + dummy byte) is prepended, then
+     * 'len' ID bytes are clocked into the caller's buf */
+    memset(&xfer, 0, sizeof(struct spi_transfer));
+    buffer[0]        = FLASH_RDID;
+    buffer[1]        = 0;
+    xfer.tx_buf      = buffer;
+    xfer.rx_buf      = buf;
+    xfer.len         = len;
+    xfer.speed_hz    = spi_flash_clock;
+    xfer.prepend_cnt = 2;
+    spiRead(&xfer);
+    while(!spi_nand_ready());
+
+    DBG_PRINTF("spi_nand_get_device_id 0x%x 0x%x\n", buf[0], buf[1]);
+}
+
+static int spi_nand_is_blk_bad(unsigned long addr)
+{
+    unsigned char buf;
+
+    if (addr < pchip->chip_block_size)
+        return 0; // always return good for block 0, because if it's a bad chip quite possibly the board is useless
+
+    /* align down to the block start and read the bad-block marker: the
+     * first spare byte of the block's first page */
+    addr &= ~(pchip->chip_block_size - 1);
+
+    /* NOTE(review): the read status is ignored here; an ECC failure path
+     * still fills the buffer (0xFF for a blank page) -- confirm a hard
+     * read error cannot leave 'buf' uninitialized */
+    spi_nand_read_page(addr, pchip->chip_page_size, &buf, 1);
+
+    if (0xFF != buf)
+    {
+        printk("Bad Block 0x%lx found (address 0x%lx)\n", addr >> pchip->chip_block_shift, addr);
+        return(1);
+    }
+
+    return(0);
+}
+
+static int spi_nand_mark_blk_bad(unsigned long addr)
+{
+    int ret1, ret2;
+
+    addr &= ~(pchip->chip_block_size - 1);
+
+    printk("Marking block 0x%lx bad (address 0x%lx)\n", addr >> pchip->chip_block_shift, addr);
+
+    /* write a 0x00 marker into the first spare byte of each of the first
+     * two pages of the block */
+    ret1 = spi_nand_write_page(addr, pchip->chip_page_size, "\0", 1); // write bad block marker into first page
+    ret2 = spi_nand_write_page(addr + pchip->chip_page_size, pchip->chip_page_size, "\0", 1); // write bad block marker into second page
+
+    /* the mark counts as successful if at least one of the two writes stuck */
+    if ((ret1 != FLASH_API_OK) && (ret2 != FLASH_API_OK))
+    {
+        printk("Unable to mark block 0x%lx bad\n", addr >> pchip->chip_block_shift);
+        return(FLASH_API_ERROR);
+    }
+
+    return(FLASH_API_OK);
+}
+
+/* MTD cmdfunc hook: executes a NAND-style command against the SPI NAND part.
+ * Results are staged in the file-scope pageBuf/pageBufI and 'status' word,
+ * which bcm63xx_status()/read_byte()/read_buf() report back to the NAND
+ * infrastructure. */
+static void bcm63xx_cmd(struct mtd_info *mtd, unsigned int command, int column, int page)
+{
+    unsigned long addr = page * mtd->writesize;
+
+    spin_lock(&chip_lock);
+
+    switch(command)
+    {
+        case NAND_CMD_READ0:
+        case NAND_CMD_READ1: // step 1/2 for read, execute SPI NAND read command and transfer SPI NAND data to local buffer
+
+            status = STATUS_DEFAULT;
+
+            if (addr >= mtd->size) // fixed off-by-one: addr == mtd->size is already past the end (now matches the NAND_CMD_ERASE1 check)
+            {
+                printk("SPI NAND ERROR!! Trying to read past end of chip\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            else
+            {
+                int temp = spi_nand_read_page(page * mtd->writesize, column, pageBuf, mtd->writesize + mtd->oobsize);
+
+                if (FLASH_API_ERROR == temp)
+                {
+                    printk("SPI NAND ERROR Reading page!!\n");
+                    status |= NAND_STATUS_FAIL;
+                    mtd->ecc_stats.failed++;
+                }
+                else if (FLASH_API_CORR == temp)
+                    mtd->ecc_stats.corrected++;
+
+                pageBufI = 0;
+            }
+            break;
+
+        case NAND_CMD_READOOB: // step 1/2 for read, execute SPI NAND read command and transfer SPI NAND data to local buffer
+
+            status = STATUS_DEFAULT;
+
+            if (addr >= mtd->size) // fixed off-by-one (see NAND_CMD_READ0)
+            {
+                printk("SPI NAND ERROR!! Trying to read past end of chip\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            else
+            {
+                int temp = spi_nand_read_page(page * mtd->writesize, mtd->writesize, pageBuf + mtd->writesize, mtd->oobsize);
+
+                if (FLASH_API_ERROR == temp)
+                {
+                    printk("SPI NAND ERROR Reading page OOB!!\n");
+                    status |= NAND_STATUS_FAIL;
+                    mtd->ecc_stats.failed++;
+                }
+                else if (FLASH_API_CORR == temp)
+                    mtd->ecc_stats.corrected++;
+
+                pageBufI = mtd->writesize; // OOB data sits after the page data in pageBuf
+            }
+            break;
+
+        case NAND_CMD_RESET:
+            status = STATUS_DEFAULT;
+
+            if (FLASH_API_ERROR == spi_nand_device_reset())
+            {
+                printk("ERROR resetting SPI NAND device!!\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            break;
+
+        case NAND_CMD_READID:
+            status = STATUS_DEFAULT;
+
+            spi_nand_get_device_id(pageBuf, NAND_ID_LENGTH);
+
+            if (*pageBuf == GIGADEVPART)
+            { // fixed missing braces: the second assignment previously ran
+              // unconditionally, clobbering ID byte 3 for every manufacturer
+                *(pageBuf+2) = 0x80; // provide the rest of the ID bytes that Gigadevice omits
+                *(pageBuf+3) = 0x1D;
+            }
+
+            pageBufI = 0;
+            break;
+
+        case NAND_CMD_STATUS: // NAND infrastructure only uses this to determine if write protect is set
+            *(pageBuf + mtd->writesize + mtd->oobsize - 1) = status;
+            pageBufI = mtd->writesize + mtd->oobsize - 1; // set pointer to end of buffer so we have a limit to the amount of data read
+            break;
+
+        case NAND_CMD_SEQIN: // step 1/3 for write, capture address
+            status = STATUS_DEFAULT;
+
+            if (addr >= mtd->size) // fixed off-by-one (see NAND_CMD_READ0)
+            {
+                printk("ERROR!! Trying to program past end of chip\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            else
+            {
+                pageAddr = addr;
+                pageOffset = column;
+                pageBufI = 0;
+            }
+            break;
+
+        case NAND_CMD_PAGEPROG: // step 3/3 for write, transfer local buffer to SPI NAND device and execute SPI NAND write command
+        {
+            int error = 0;
+
+            addr = pageAddr & ~(mtd->erasesize - 1); // block address
+
+            if ((status = spi_nand_write_page(pageAddr, pageOffset, pageBuf, pageBufI)) == FLASH_API_ERROR)
+                error = 1;
+
+            if (!error && (status == FLASH_API_CORR) && (pageAddr >= mtd->erasesize))
+            { // read/erase/write block to see if we can get rid of the bit errors, but only if not block zero
+                int offset;
+                unsigned char * buffer;
+
+                printk("Correctible errors, SPI NAND Rewriting block\n");
+
+                buffer = kmalloc(mtd->erasesize, GFP_ATOMIC);
+                if (!buffer)
+                { // unfortunately can't attempt to fix block in this case
+                    printk("Error allocating buffer!!\n");
+                    error = 1;
+                }
+
+                // read block
+                for (offset = 0; !error && (offset < mtd->erasesize); offset += mtd->writesize)
+                {
+                    status = spi_nand_read_page(addr + offset, 0, buffer + offset, mtd->writesize);
+                    if (status == FLASH_API_ERROR)
+                        error = 1;
+                }
+
+                // erase block
+                if (!error)
+                {
+                    status = spi_nand_sector_erase_int(addr);
+                    if (status == FLASH_API_ERROR)
+                        error = 1;
+                }
+
+                // write block
+                if (!error)
+                {
+                    for (offset = 0; offset < mtd->erasesize; offset += mtd->writesize)
+                    {
+                        status = spi_nand_write_page(addr + offset, 0, buffer + offset, mtd->writesize);
+                        if (status != FLASH_API_OK)
+                            error = 1; // essentially failed, but finish writing out all the data anyway to hopefully be recovered later
+                    }
+                }
+
+                if(buffer)
+                    kfree(buffer);
+            }
+
+            status = STATUS_DEFAULT;
+
+            if (error)
+            {
+                printk("SPI NAND ERROR Writing page!!\n");
+                status |= NAND_STATUS_FAIL;
+                spi_nand_mark_blk_bad(addr); // JFFS2 will do this automatically
+            }
+
+            break;
+        }
+
+        case NAND_CMD_ERASE1:
+            status = STATUS_DEFAULT;
+
+            if (addr >= mtd->size)
+            {
+                printk("ERROR!! Trying to erase past end of chip\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            else if (FLASH_API_ERROR == spi_nand_sector_erase_int(addr))
+            {
+                printk("SPI NAND ERROR Erasing block!!\n");
+                status |= NAND_STATUS_FAIL;
+            }
+            // deliberate fall-through: the erase completes in ERASE1, ERASE2 is a no-op
+        case NAND_CMD_ERASE2:
+            break;
+
+        default: // fixed indentation and "Unkonwn" typo in the message below
+            printk("ERROR!! Unknown NAND command 0x%x\n", command);
+            status |= NAND_STATUS_FAIL;
+    }
+
+    spin_unlock(&chip_lock);
+}
+
+/* MTD read_byte hook: returns the next byte from the staging buffer and
+ * advances the read cursor. */
+static unsigned char bcm63xx_read_byte(struct mtd_info *mtd)
+{
+    unsigned char value;
+
+    spin_lock(&chip_lock);
+
+    value = pageBuf[pageBufI];
+    pageBufI++;
+
+    spin_unlock(&chip_lock);
+
+    return(value);
+}
+
+/* MTD read_buf hook (step 2/2 of a read): copies 'len' bytes out of the
+ * staging buffer at the current cursor, bounds-checked against page+OOB. */
+static void bcm63xx_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+    spin_lock(&chip_lock);
+
+    if ((mtd->writesize + mtd->oobsize) >= (pageBufI + len))
+    {
+        memcpy(buf, &pageBuf[pageBufI], len);
+        pageBufI += len;
+    }
+    else
+        printk("ERROR!! Trying to read past end of buffer\n");
+
+    spin_unlock(&chip_lock);
+}
+
+// step 2/3 for write, fill local buffer
+static void bcm63xx_write(struct mtd_info *mtd, const uint8_t *buf, int len)
+{ // write to buffer
+    spin_lock(&chip_lock);
+
+    if ((pageBufI + len) > (mtd->writesize + mtd->oobsize))
+        printk("ERROR!! Trying to write past end of buffer\n");
+    else
+    {
+        memcpy(pageBuf+pageBufI, buf, len);
+        pageBufI += len;
+    }
+
+    spin_unlock(&chip_lock);
+}
+
+static int bcm63xx_status(struct mtd_info *mtd, struct nand_chip *chip)
+{ // NAND infrastructure used this to not only determine when a command has finished (spinlocks will take care of that)
+    // but also to return the status
+
+    /* taking and releasing the lock acts purely as a barrier: it blocks
+     * until any in-flight bcm63xx_cmd() has finished updating 'status' */
+    spin_lock(&chip_lock);
+
+    spin_unlock(&chip_lock);
+
+    return(status);
+}
+
+static int bcm63xx_init_size(struct mtd_info *mtd, struct nand_chip *chip, unsigned char *id_data)
+{ // overwrite possibly incorrectly detected values from Linux NAND driver
+    /* print the device summary only once */
+    static int splash = 0;
+
+    if (!splash)
+    {
+        printk("SPI NAND device %s\n", pchip->chip_name);
+        printk("   device id    = 0x%x%x\n", pchip->chip_device_id[0], pchip->chip_device_id[1]);
+        printk("   page size    = 0x%x\n", pchip->chip_page_size);
+        printk("   block size   = 0x%x\n", pchip->chip_block_size);
+        printk("   total blocks = 0x%x\n", pchip->chip_num_blocks);
+        printk("   total size   = 0x%lx\n", pchip->chip_total_size);
+
+        splash = 1;
+    }
+
+    /* override the geometry the generic NAND probe guessed with the
+     * values from our SPI NAND chip table */
+    mtd->writesize = pchip->chip_page_size;
+    mtd->oobsize = pchip->chip_spare_size;
+    mtd->erasesize = pchip->chip_block_size;
+    chip->chipsize = mtd->size = pchip->chip_total_size;
+    mtd->name = pchip->chip_name;
+
+    chip->numchips = 1;
+
+    return(0);
+}
+
+/* MTD block_bad hook: serialized wrapper around spi_nand_is_blk_bad(). */
+static int bcm63xx_block_isbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+    int bad;
+
+    spin_lock(&chip_lock);
+    bad = spi_nand_is_blk_bad(ofs);
+    spin_unlock(&chip_lock);
+
+    return(bad);
+}
+
+/* MTD block_markbad hook: serialized wrapper around spi_nand_mark_blk_bad(). */
+static int bcm63xx_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+    int result;
+
+    spin_lock(&chip_lock);
+    result = spi_nand_mark_blk_bad(ofs);
+    spin_unlock(&chip_lock);
+
+    return(result);
+}
+
+static void bcm63xx_select(struct mtd_info *mtd, int chip)
+{ // dummy function, chip is always selected as far as the NAND infrastructure is concerned
+    /* intentionally empty: there is a single device (numchips == 1), nothing to switch */
+}
+
+static int bcm63xx_scan_bbt(struct mtd_info *mtd)
+{ // dummy function
+    /* no bad-block table is built; blocks are checked on demand via
+     * bcm63xx_block_isbad() */
+    return(1); // this will ultimately be the return value for nand_scan
+}
+
+/* Template SPI slave description; speed, mode, chip select and bus number
+ * are overwritten in bcmspinand_probe() before the device is created. */
+static struct spi_board_info bcmSpiDevInfo =
+{
+    .modalias      = "bcm_SpiDev",
+    .chip_select   = 0,
+    .max_speed_hz  = 781000,
+    .bus_num       = LEG_SPI_BUS_NUM,
+    .mode          = SPI_MODE_3,
+};
+
+/* Minimal driver shell used to register with the SPI core. */
+static struct spi_driver bcmSpiDevDrv =
+{
+    .driver =
+        {
+        .name     = "bcm_SpiDev",
+        .bus      = &spi_bus_type,
+        .owner    = THIS_MODULE,
+        },
+};
+
+
+void bcmspinand_probe(struct mtd_info * mtd)
+{
+    struct nand_chip * nand = mtd->priv;
+    struct spi_master * pSpiMaster;
+    int spiCtrlState;
+
+    printk("SPI NAND Device Linux Registration\n");
+
+    /* micron MT29F1G01 only support up to 50MHz, update to 50Mhz if it is more than that */
+    spi_flash_busnum = HS_SPI_BUS_NUM;
+    spi_flash_clock = 50000000;
+
+    /* retrieve the maximum read/write transaction length from the SPI controller */
+    spi_max_op_len = SPI_BUF_LEN;
+
+    /* set the controller state, spi_mode_0 */
+    spiCtrlState = SPI_CONTROLLER_STATE_DEFAULT;
+
+    if ( spi_flash_clock > SPI_CONTROLLER_MAX_SYNC_CLOCK )
+       spiCtrlState |= SPI_CONTROLLER_STATE_ASYNC_CLOCK;
+
+    /* fill in the board-info template and instantiate the SPI slave */
+    bcmSpiDevInfo.max_speed_hz    = spi_flash_clock;
+    bcmSpiDevInfo.controller_data = (void *)spiCtrlState;
+    bcmSpiDevInfo.mode            = SPI_MODE_DEFAULT;
+    bcmSpiDevInfo.chip_select     = SPI_FLASH_SLAVE_DEV_ID;
+    bcmSpiDevInfo.bus_num         = spi_flash_busnum;
+
+    pSpiMaster = spi_busnum_to_master( spi_flash_busnum );
+    pSpiDevice = spi_new_device(pSpiMaster, &bcmSpiDevInfo);
+
+    /* register as SPI device */
+    spi_register_driver(&bcmSpiDevDrv);
+
+    SpiNandDeviceRegistered = 1;
+
+    printk("SPI NAND Linux Registration\n");
+
+    spin_lock_init(&chip_lock);
+
+    spi_nand_device_reset(); // reset and set configuration information
+
+    /* ECC is handled on-chip, so the MTD soft-ECC machinery is disabled
+     * (ecc.mode set to NAND_ECC_NONE below) */
+    nand->ecc.size = pchip->chip_page_size;
+    nand->ecc.bytes = 0;
+    nand->ecc.strength = 0;
+    nand->ecc.layout = pchip->ecclayout;
+    nand->page_shift = pchip->chip_page_shift;
+    nand->phys_erase_shift = pchip->chip_block_shift;
+    nand->chipsize = pchip->chip_total_size;
+
+    /* NOTE(review): kmalloc return is unchecked -- a failed allocation
+     * would fault on first use of pageBuf; confirm probe cannot fail here */
+    pageBuf = kmalloc(pchip->chip_page_size + pchip->chip_spare_size, GFP_KERNEL);
+
+    nand->options = NAND_NO_AUTOINCR | NAND_NO_READRDY | NAND_NO_SUBPAGE_WRITE;
+
+    /* wire up the MTD/NAND callbacks implemented above */
+    nand->chip_delay = 0;
+    nand->read_byte = bcm63xx_read_byte;
+    nand->read_buf = bcm63xx_read_buf;
+    nand->ecc.mode = NAND_ECC_NONE;
+
+    nand->select_chip = bcm63xx_select;
+    nand->write_buf  = bcm63xx_write;
+    nand->scan_bbt = bcm63xx_scan_bbt;
+    nand->block_bad = bcm63xx_block_isbad;
+    nand->block_markbad = bcm63xx_block_markbad;
+    nand->cmdfunc = bcm63xx_cmd;
+    nand->waitfunc = bcm63xx_status;
+
+    nand->init_size = bcm63xx_init_size;
+}
+
+#endif //CONFIG_BCM_KF_MTD_BCMNAND
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8ca7b5ef7fbdfb25bb2553d8af4f61f8d29..ea698517feddefc0d2d7d70937c22e841268d2fb 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -88,6 +88,10 @@ struct nand_flash_dev nand_flash_ids[] = {
 	{"NAND 128MiB 1,8V 8-bit",	0xA1, 0, 128, 0, LP_OPTIONS},
 	{"NAND 128MiB 3,3V 8-bit",	0xF1, 0, 128, 0, LP_OPTIONS},
 	{"NAND 128MiB 3,3V 8-bit",	0xD1, 0, 128, 0, LP_OPTIONS},
+#if defined(CONFIG_BCM_KF_NAND)
+	{"NAND 128MiB 3,3V 8-bit",	0x12, 0, 128, 0, LP_OPTIONS}, // Micron SPI NAND
+	{"NAND 128MiB 3,3V 8-bit",	0x21, 0, 128, 0, LP_OPTIONS}, // ESMT SPI NAND
+#endif
 	{"NAND 128MiB 1,8V 16-bit",	0xB1, 0, 128, 0, LP_OPTIONS16},
 	{"NAND 128MiB 3,3V 16-bit",	0xC1, 0, 128, 0, LP_OPTIONS16},
 	{"NAND 128MiB 1,8V 16-bit",     0xAD, 0, 128, 0, LP_OPTIONS16},
@@ -95,12 +99,20 @@ struct nand_flash_dev nand_flash_ids[] = {
 	/* 2 Gigabit */
 	{"NAND 256MiB 1,8V 8-bit",	0xAA, 0, 256, 0, LP_OPTIONS},
 	{"NAND 256MiB 3,3V 8-bit",	0xDA, 0, 256, 0, LP_OPTIONS},
+#if defined(CONFIG_BCM_KF_NAND)
+	{"NAND 256MiB 3,3V 8-bit",	0xD2, 0, 256, 0, LP_OPTIONS}, // Gigadevice SPI NAND
+	{"NAND 256MiB 3,3V 8-bit",	0x22, 0, 256, 0, LP_OPTIONS}, // Micron SPI NAND
+#endif
 	{"NAND 256MiB 1,8V 16-bit",	0xBA, 0, 256, 0, LP_OPTIONS16},
 	{"NAND 256MiB 3,3V 16-bit",	0xCA, 0, 256, 0, LP_OPTIONS16},
 
 	/* 4 Gigabit */
 	{"NAND 512MiB 1,8V 8-bit",	0xAC, 0, 512, 0, LP_OPTIONS},
 	{"NAND 512MiB 3,3V 8-bit",	0xDC, 0, 512, 0, LP_OPTIONS},
+#if defined(CONFIG_BCM_KF_NAND)
+	{"NAND 512MiB 3,3V 8-bit",	0xD4, 0, 512, 0, LP_OPTIONS}, // Gigadevice SPI NAND
+	{"NAND 512MiB 3,3V 8-bit",	0x32, 0, 512, 0, LP_OPTIONS}, // Micron SPI NAND
+#endif
 	{"NAND 512MiB 1,8V 16-bit",	0xBC, 0, 512, 0, LP_OPTIONS16},
 	{"NAND 512MiB 3,3V 16-bit",	0xCC, 0, 512, 0, LP_OPTIONS16},
 
@@ -178,6 +190,9 @@ struct nand_manufacturers nand_manuf_ids[] = {
 	{NAND_MFR_MICRON, "Micron"},
 	{NAND_MFR_AMD, "AMD"},
 	{NAND_MFR_MACRONIX, "Macronix"},
+#if defined(CONFIG_BCM_KF_NAND)
+	{NAND_MFR_GIGADEVICE, "Gigadevice"},
+#endif
 	{0x0, "Unknown"}
 };
 
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0fde9fc7d2e5415d032d57104694c9ef0e61f763..0f3f8371a79d9a5143e268d728b7431bf5edbc2a 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -816,6 +816,13 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
 	struct ubi_volume *vol = ubi->volumes[vol_id];
 	int err, old_reserved_pebs = vol->reserved_pebs;
 
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	if (ubi->ro_mode) {
+		ubi_warn("skip auto-resize because of R/O mode");
+		return 0;
+	}
+
+#endif
 	/*
 	 * Clear the auto-resize flag in the volume in-memory copy of the
 	 * volume table, and 'ubi_resize_volume()' will propagate this change
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 12c43b44f81578a4b78369e5140b66d34d26a188..4f71793f5505811e332ea3dc4e1bc7ad89035bd7 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -997,7 +997,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
 			return err;
 		goto adjust_mean_ec;
 	case UBI_IO_FF:
-		if (ec_err)
+		if (ec_err || bitflips)
 			err = add_to_list(si, pnum, ec, 1, &si->erase);
 		else
 			err = add_to_list(si, pnum, ec, 0, &si->free);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 863835f4aefea5aa6cd4fc481d36092d09a0509e..1444214db7f2a37fa787ee6f32a9de181502178d 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -464,7 +464,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
 	if (!no_vtbl && paranoid_check_volumes(ubi))
 		dbg_err("check failed while removing volume %d", vol_id);
 
-	return err;
+	return 0;
 
 out_err:
 	ubi_err("cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 17cec0c0154448d85271a4422ac8907f9dedc00d..01e82d0667ca925afddbbd06a8bb742adf466122 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -346,7 +346,11 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
 	 */
 	err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
 				vid_hdr, 0);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	kfree(new_seb);
+#else
+	kmem_cache_free(si->scan_leb_slab, new_seb);
+#endif
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
 
@@ -359,7 +363,11 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
 		list_add(&new_seb->u.list, &si->erase);
 		goto retry;
 	}
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	kfree(new_seb);
+#else
+	kmem_cache_free(si->scan_leb_slab, new_seb);
+#endif
 out_free:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a6b8ce11a22fefae4c132df38643df78dad7d7d7..1bd2cdbaedd93fb2ce9b9515fec08038e937d811 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp/
 obj-$(CONFIG_PPPOE) += ppp/
 obj-$(CONFIG_PPPOL2TP) += ppp/
 obj-$(CONFIG_PPTP) += ppp/
+obj-$(CONFIG_ACCEL_PPTP) += accel-pptp/
 obj-$(CONFIG_SLIP) += slip/
 obj-$(CONFIG_SLHC) += slip/
 obj-$(CONFIG_NET_SB1000) += sb1000.o
diff --git a/drivers/net/accel-pptp/AUTHORS b/drivers/net/accel-pptp/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..b93df1da19bd869f26e8a73321a957f5714bc61a
--- /dev/null
+++ b/drivers/net/accel-pptp/AUTHORS
@@ -0,0 +1 @@
+Kozlov D. <xeb@mail.ru>
diff --git a/drivers/net/accel-pptp/COPYING b/drivers/net/accel-pptp/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..5b6e7c66c276e7610d4a73c70ec1a1f7c1003259
--- /dev/null
+++ b/drivers/net/accel-pptp/COPYING
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/drivers/net/accel-pptp/Doxyfile b/drivers/net/accel-pptp/Doxyfile
new file mode 100644
index 0000000000000000000000000000000000000000..1a7d2586b2091cf45edbc2827fd8c5748edfe7a6
--- /dev/null
+++ b/drivers/net/accel-pptp/Doxyfile
@@ -0,0 +1,275 @@
+# Doxyfile 1.4.1-KDevelop
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+PROJECT_NAME           = pptp.kdevelop
+PROJECT_NUMBER         = 0.1
+OUTPUT_DIRECTORY       = 
+CREATE_SUBDIRS         = NO
+OUTPUT_LANGUAGE        = English
+USE_WINDOWS_ENCODING   = NO
+BRIEF_MEMBER_DESC      = YES
+REPEAT_BRIEF           = YES
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+ALWAYS_DETAILED_SEC    = NO
+INLINE_INHERITED_MEMB  = NO
+FULL_PATH_NAMES        = NO
+STRIP_FROM_PATH        = /home/dima/Projects/bg2/sectrr/
+STRIP_FROM_INC_PATH    = 
+SHORT_NAMES            = NO
+JAVADOC_AUTOBRIEF      = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP         = NO
+INHERIT_DOCS           = YES
+DISTRIBUTE_GROUP_DOC   = NO
+TAB_SIZE               = 8
+ALIASES                = 
+OPTIMIZE_OUTPUT_FOR_C  = NO
+OPTIMIZE_OUTPUT_JAVA   = NO
+SUBGROUPING            = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL            = NO
+EXTRACT_PRIVATE        = NO
+EXTRACT_STATIC         = NO
+EXTRACT_LOCAL_CLASSES  = YES
+EXTRACT_LOCAL_METHODS  = NO
+HIDE_UNDOC_MEMBERS     = NO
+HIDE_UNDOC_CLASSES     = NO
+HIDE_FRIEND_COMPOUNDS  = NO
+HIDE_IN_BODY_DOCS      = NO
+INTERNAL_DOCS          = NO
+CASE_SENSE_NAMES       = YES
+HIDE_SCOPE_NAMES       = NO
+SHOW_INCLUDE_FILES     = YES
+INLINE_INFO            = YES
+SORT_MEMBER_DOCS       = YES
+SORT_BRIEF_DOCS        = NO
+SORT_BY_SCOPE_NAME     = NO
+GENERATE_TODOLIST      = YES
+GENERATE_TESTLIST      = YES
+GENERATE_BUGLIST       = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS       = 
+MAX_INITIALIZER_LINES  = 30
+SHOW_USED_FILES        = YES
+SHOW_DIRECTORIES       = YES
+FILE_VERSION_FILTER    = 
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET                  = NO
+WARNINGS               = YES
+WARN_IF_UNDOCUMENTED   = YES
+WARN_IF_DOC_ERROR      = YES
+WARN_NO_PARAMDOC       = NO
+WARN_FORMAT            = "$file:$line: $text"
+WARN_LOGFILE           = 
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT                  = /home/dima/Projects/pptp/pptp
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.idl \
+                         *.odl \
+                         *.cs \
+                         *.php \
+                         *.php3 \
+                         *.inc \
+                         *.m \
+                         *.mm \
+                         *.dox \
+                         *.C \
+                         *.CC \
+                         *.C++ \
+                         *.II \
+                         *.I++ \
+                         *.H \
+                         *.HH \
+                         *.H++ \
+                         *.CS \
+                         *.PHP \
+                         *.PHP3 \
+                         *.M \
+                         *.MM \
+                         *.C \
+                         *.H \
+                         *.tlh \
+                         *.diff \
+                         *.patch \
+                         *.moc \
+                         *.xpm \
+                         *.dox
+RECURSIVE              = yes
+EXCLUDE                = 
+EXCLUDE_SYMLINKS       = NO
+EXCLUDE_PATTERNS       = 
+EXAMPLE_PATH           = 
+EXAMPLE_PATTERNS       = *
+EXAMPLE_RECURSIVE      = NO
+IMAGE_PATH             = 
+INPUT_FILTER           = 
+FILTER_PATTERNS        = 
+FILTER_SOURCE_FILES    = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER         = NO
+INLINE_SOURCES         = NO
+STRIP_CODE_COMMENTS    = YES
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION    = YES
+VERBATIM_HEADERS       = YES
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX     = NO
+COLS_IN_ALPHA_INDEX    = 5
+IGNORE_PREFIX          = 
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML          = YES
+HTML_OUTPUT            = html
+HTML_FILE_EXTENSION    = .html
+HTML_HEADER            = 
+HTML_FOOTER            = 
+HTML_STYLESHEET        = 
+HTML_ALIGN_MEMBERS     = YES
+GENERATE_HTMLHELP      = NO
+CHM_FILE               = 
+HHC_LOCATION           = 
+GENERATE_CHI           = NO
+BINARY_TOC             = NO
+TOC_EXPAND             = NO
+DISABLE_INDEX          = NO
+ENUM_VALUES_PER_LINE   = 4
+GENERATE_TREEVIEW      = NO
+TREEVIEW_WIDTH         = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX         = YES
+LATEX_OUTPUT           = latex
+LATEX_CMD_NAME         = latex
+MAKEINDEX_CMD_NAME     = makeindex
+COMPACT_LATEX          = NO
+PAPER_TYPE             = a4wide
+EXTRA_PACKAGES         = 
+LATEX_HEADER           = 
+PDF_HYPERLINKS         = NO
+USE_PDFLATEX           = NO
+LATEX_BATCHMODE        = NO
+LATEX_HIDE_INDICES     = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF           = NO
+RTF_OUTPUT             = rtf
+COMPACT_RTF            = NO
+RTF_HYPERLINKS         = NO
+RTF_STYLESHEET_FILE    = 
+RTF_EXTENSIONS_FILE    = 
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN           = NO
+MAN_OUTPUT             = man
+MAN_EXTENSION          = .3
+MAN_LINKS              = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML           = yes
+XML_OUTPUT             = xml
+XML_SCHEMA             = 
+XML_DTD                = 
+XML_PROGRAMLISTING     = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF   = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD       = NO
+PERLMOD_LATEX          = NO
+PERLMOD_PRETTY         = YES
+PERLMOD_MAKEVAR_PREFIX = 
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING   = YES
+MACRO_EXPANSION        = NO
+EXPAND_ONLY_PREDEF     = NO
+SEARCH_INCLUDES        = YES
+INCLUDE_PATH           = 
+INCLUDE_FILE_PATTERNS  = 
+PREDEFINED             = 
+EXPAND_AS_DEFINED      = 
+SKIP_FUNCTION_MACROS   = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+TAGFILES               = 
+GENERATE_TAGFILE       = pptp.tag
+ALLEXTERNALS           = NO
+EXTERNAL_GROUPS        = YES
+PERL_PATH              = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS         = YES
+HIDE_UNDOC_RELATIONS   = YES
+HAVE_DOT               = NO
+CLASS_GRAPH            = YES
+COLLABORATION_GRAPH    = YES
+GROUP_GRAPHS           = YES
+UML_LOOK               = NO
+TEMPLATE_RELATIONS     = NO
+INCLUDE_GRAPH          = YES
+INCLUDED_BY_GRAPH      = YES
+CALL_GRAPH             = NO
+GRAPHICAL_HIERARCHY    = YES
+DIRECTORY_GRAPH        = YES
+DOT_IMAGE_FORMAT       = png
+DOT_PATH               = 
+DOTFILE_DIRS           = 
+MAX_DOT_GRAPH_WIDTH    = 1024
+MAX_DOT_GRAPH_HEIGHT   = 1024
+MAX_DOT_GRAPH_DEPTH    = 1000
+DOT_TRANSPARENT        = NO
+DOT_MULTI_TARGETS      = NO
+GENERATE_LEGEND        = YES
+DOT_CLEANUP            = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+SEARCHENGINE           = NO
diff --git a/drivers/net/accel-pptp/INSTALL b/drivers/net/accel-pptp/INSTALL
new file mode 100644
index 0000000000000000000000000000000000000000..02a4a0740aac46631b076e619cd5f08feca9d9e9
--- /dev/null
+++ b/drivers/net/accel-pptp/INSTALL
@@ -0,0 +1,167 @@
+Basic Installation
+==================
+
+   These are generic installation instructions.
+
+   The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation.  It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions.  Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, a file
+`config.cache' that saves the results of its tests to speed up
+reconfiguring, and a file `config.log' containing compiler output
+(useful mainly for debugging `configure').
+
+   If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release.  If at some point `config.cache'
+contains results you don't want to keep, you may remove or edit it.
+
+   The file `configure.in' is used to create `configure' by a program
+called `autoconf'.  You only need `configure.in' if you want to change
+it or regenerate `configure' using a newer version of `autoconf'.
+
+The simplest way to compile this package is:
+
+  1. `cd' to the directory containing the package's source code and type
+     `./configure' to configure the package for your system.  If you're
+     using `csh' on an old version of System V, you might need to type
+     `sh ./configure' instead to prevent `csh' from trying to execute
+     `configure' itself.
+
+     Running `configure' takes a while.  While running, it prints some
+     messages telling which features it is checking for.
+
+  2. Type `make' to compile the package.
+
+  3. Type `make install' to install the programs and any data files and
+     documentation.
+
+  4. You can remove the program binaries and object files from the
+     source code directory by typing `make clean'.  
+
+Compilers and Options
+=====================
+
+   Some systems require unusual options for compilation or linking that
+the `configure' script does not know about.  You can give `configure'
+initial values for variables by setting them in the environment.  Using
+a Bourne-compatible shell, you can do that on the command line like
+this:
+     CC=c89 CFLAGS=-O2 LIBS=-lposix ./configure
+
+Or on systems that have the `env' program, you can do it like this:
+     env CPPFLAGS=-I/usr/local/include LDFLAGS=-s ./configure
+
+Compiling For Multiple Architectures
+====================================
+
+   You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory.  To do this, you must use a version of `make' that
+supports the `VPATH' variable, such as GNU `make'.  `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script.  `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.
+
+   If you have to use a `make' that does not support the `VPATH'
+variable, you have to compile the package for one architecture at a time
+in the source code directory.  After you have installed the package for
+one architecture, use `make distclean' before reconfiguring for another
+architecture.
+
+Installation Names
+==================
+
+   By default, `make install' will install the package's files in
+`/usr/local/bin', `/usr/local/man', etc.  You can specify an
+installation prefix other than `/usr/local' by giving `configure' the
+option `--prefix=PATH'.
+
+   You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files.  If you
+give `configure' the option `--exec-prefix=PATH', the package will use
+PATH as the prefix for installing programs and libraries.
+Documentation and other data files will still use the regular prefix.
+
+   If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
+
+Optional Features
+=================
+
+   Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System).  The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+   For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+Specifying the System Type
+==========================
+
+   There may be some features `configure' can not figure out
+automatically, but needs to determine by the type of host the package
+will run on.  Usually `configure' can figure that out, but if it prints
+a message saying it can not guess the host type, give it the
+`--host=TYPE' option.  TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name with three fields:
+     CPU-COMPANY-SYSTEM
+
+See the file `config.sub' for the possible values of each field.  If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the host type.
+
+   If you are building compiler tools for cross-compiling, you can also
+use the `--target=TYPE' option to select the type of system they will
+produce code for and the `--build=TYPE' option to select the type of
+system on which you are compiling the package.
+
+Sharing Defaults
+================
+
+   If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists.  Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
+
+Operation Controls
+==================
+
+   `configure' recognizes the following options to control how it
+operates.
+
+`--cache-file=FILE'
+     Use and save the results of the tests in FILE instead of
+     `./config.cache'.  Set FILE to `/dev/null' to disable caching, for
+     debugging `configure'.
+
+`--help'
+     Print a summary of the options to `configure', and exit.
+
+`--quiet'
+`--silent'
+`-q'
+     Do not print messages saying which checks are being made.
+
+`--srcdir=DIR'
+     Look for the package's source code in directory DIR.  Usually
+     `configure' can determine that directory automatically.
+
+`--version'
+     Print the version of Autoconf used to generate the `configure'
+     script, and exit.
+
+`configure' also accepts some other, not widely useful, options.
+
diff --git a/drivers/net/accel-pptp/Makefile b/drivers/net/accel-pptp/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..88712165881a9085903474603e928fb69895b91f
--- /dev/null
+++ b/drivers/net/accel-pptp/Makefile
@@ -0,0 +1,42 @@
+MDIR = extra
+
+KDIR ?= $(shell sh find_kernel_headers)
+
+obj-m      += pptp.o
+#obj-m      += gre.o
+
+CURRENT = $(shell uname -r)
+
+
+ifndef MAKING_MODULES
+all: kernel_headers
+	make -C $(KDIR) SUBDIRS=$(PWD) modules
+endif
+
+ifneq (,$(findstring 2.4.,$(CURRENT)))
+install:
+	@if test ! -d /lib/modules/$(CURRENT)/extra; then \
+	    mkdir /lib/modules/$(CURRENT)/extra; \
+	fi; \
+	cp -v $(TARGET).o /lib/modules/$(CURRENT)/extra/$(TARGET).o && /sbin/depmod -a
+else
+install:	
+	make -C $(KDIR) M=$(PWD) modules_install
+endif
+
+kernel_headers:
+	@if test -z "$(KDIR)"; then \
+	    echo "kernel headers not found"; \
+	    exit 1; \
+	else \
+	    echo "using \"$(KDIR)\" kernel headers"; \
+	fi
+
+default: all
+
+clean:
+	-rm -f *.o *.ko .*.cmd .*.flags *.mod.c
+
+ifneq (,$(findstring 2.4.,$(CURRENT)))
+include $(KDIR)/Rules.make
+endif
diff --git a/drivers/net/accel-pptp/find_kernel_headers b/drivers/net/accel-pptp/find_kernel_headers
new file mode 100644
index 0000000000000000000000000000000000000000..3d0e505e6153b242d172a5c61fa69495d163225b
--- /dev/null
+++ b/drivers/net/accel-pptp/find_kernel_headers
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+if test -n "${KDIR}"; then
+    if test -f ${KDIR}/include/linux/version.h; then
+	echo ${KDIR}
+	exit 0
+    else
+	exit 1
+    fi
+else
+    if test -f /usr/src/linux/include/linux/version.h; then
+	echo /usr/src/linux
+	exit 0
+    elif test -f /lib/modules/`uname -r`/build/include/linux/version.h; then
+	echo /lib/modules/`uname -r`/build
+	exit 0
+    else
+	exit 1
+    fi
+fi
+    
\ No newline at end of file
diff --git a/drivers/net/accel-pptp/gre.c b/drivers/net/accel-pptp/gre.c
new file mode 100644
index 0000000000000000000000000000000000000000..77886d5d8214997e4b3b466718228bb6d3fdf031
--- /dev/null
+++ b/drivers/net/accel-pptp/gre.c
@@ -0,0 +1,220 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <net/protocol.h>
+
+#include "gre.h"
+
+struct gre_protocol *gre_proto[GREPROTO_MAX] ____cacheline_aligned_in_smp;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static rwlock_t gre_proto_lock=RW_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(gre_proto_lock);
+#endif
+
+int gre_add_protocol(struct gre_protocol *proto, u8 version)
+{
+	int ret;
+
+	if (version >= GREPROTO_MAX)
+		return -EINVAL;
+	
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	write_lock_bh(&gre_proto_lock);
+#else
+	spin_lock(&gre_proto_lock);
+#endif
+	if (gre_proto[version]) {
+		ret = -EAGAIN;
+	} else {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+		gre_proto[version] = proto;
+#else
+		rcu_assign_pointer(gre_proto[version], proto);
+#endif
+		ret = 0;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	write_unlock_bh(&gre_proto_lock);
+#else
+	spin_unlock(&gre_proto_lock);
+#endif
+
+	return ret;
+}
+
+int gre_del_protocol(struct gre_protocol *proto, u8 version)
+{
+	if (version >= GREPROTO_MAX)
+		goto out_err;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	write_lock_bh(&gre_proto_lock);
+#else
+	spin_lock(&gre_proto_lock);
+#endif
+	if (gre_proto[version] == proto)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+		gre_proto[version] = NULL;
+#else
+		rcu_assign_pointer(gre_proto[version], NULL);
+#endif
+	else
+		goto out_err_unlock;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	write_unlock_bh(&gre_proto_lock);
+#else
+	spin_unlock(&gre_proto_lock);
+	synchronize_rcu();
+#endif
+	return 0;
+
+out_err_unlock:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	write_unlock_bh(&gre_proto_lock);
+#else
+	spin_unlock(&gre_proto_lock);
+#endif
+out_err:
+	return -EINVAL;
+}
+
+static int gre_rcv(struct sk_buff *skb)
+{
+	u8 ver;
+	int ret;
+	struct gre_protocol *proto;
+
+	if (!pskb_may_pull(skb, 12))
+		goto drop_nolock;
+
+	ver = skb->data[1]&0x7f;
+	if (ver >= GREPROTO_MAX)
+		goto drop_nolock;
+	
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_lock(&gre_proto_lock);
+	proto = gre_proto[ver];
+#else
+	rcu_read_lock();
+	proto = rcu_dereference(gre_proto[ver]);
+#endif
+	if (!proto || !proto->handler)
+		goto drop;
+
+	ret = proto->handler(skb);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_unlock(&gre_proto_lock);
+#else
+	rcu_read_unlock();
+#endif
+
+	return ret;
+
+drop:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_unlock(&gre_proto_lock);
+#else
+	rcu_read_unlock();
+#endif
+drop_nolock:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+	u8 ver;
+	struct gre_protocol *proto;
+
+	if (!pskb_may_pull(skb, 12))
+		goto drop_nolock;
+
+	ver=skb->data[1]&0x7f;
+	if (ver>=GREPROTO_MAX)
+		goto drop_nolock;
+		
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_lock(&gre_proto_lock);
+	proto = gre_proto[ver];
+#else
+	rcu_read_lock();
+	proto = rcu_dereference(gre_proto[ver]);
+#endif
+	if (!proto || !proto->err_handler)
+		goto drop;
+
+	proto->err_handler(skb, info);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_unlock(&gre_proto_lock);
+#else
+	rcu_read_unlock();
+#endif
+
+	return;
+
+drop:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	read_unlock(&gre_proto_lock);
+#else
+	rcu_read_unlock();
+#endif
+drop_nolock:
+	kfree_skb(skb);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static struct inet_protocol net_gre_protocol = {
+	.handler	= gre_rcv,
+	.err_handler	= gre_err,
+	.protocol	= IPPROTO_GRE,
+	.name		= "GRE",
+};
+#else
+static struct net_protocol net_gre_protocol = {
+	.handler	= gre_rcv,
+	.err_handler	= gre_err,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24)
+	.netns_ok=1,
+#endif
+};
+#endif
+
+static int __init gre_init(void)
+{
+	printk(KERN_INFO "GRE over IPv4 demultiplexor driver");
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	inet_add_protocol(&net_gre_protocol);
+#else
+	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
+		printk(KERN_INFO "gre: can't add protocol\n");
+		return -EAGAIN;
+	}
+#endif
+	return 0;
+}
+
+static void __exit gre_exit(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	inet_del_protocol(&net_gre_protocol);
+#else
+	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+#endif
+}
+
+module_init(gre_init);
+module_exit(gre_exit);
+
+MODULE_DESCRIPTION("GRE over IPv4 demultiplexor driver");
+MODULE_AUTHOR("Kozlov D. (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_GPL(gre_add_protocol);
+EXPORT_SYMBOL_GPL(gre_del_protocol);
diff --git a/drivers/net/accel-pptp/gre.h b/drivers/net/accel-pptp/gre.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ca7f749990dd27d8c07a6ace9d9bc36f7ae752b
--- /dev/null
+++ b/drivers/net/accel-pptp/gre.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_GRE_H
+#define __LINUX_GRE_H
+
+#include <linux/skbuff.h>
+
+#define GREPROTO_CISCO	0
+#define GREPROTO_PPTP		1
+#define GREPROTO_MAX		2
+
+struct gre_protocol {
+	int		(*handler)(struct sk_buff *skb);
+	void	(*err_handler)(struct sk_buff *skb, u32 info);
+};
+
+int gre_add_protocol(struct gre_protocol *proto, u8 version);
+int gre_del_protocol(struct gre_protocol *proto, u8 version);
+
+#endif
diff --git a/drivers/net/accel-pptp/if_pppox.h b/drivers/net/accel-pptp/if_pppox.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc05b533fa295183bc0366d89b8c4d6776543589
--- /dev/null
+++ b/drivers/net/accel-pptp/if_pppox.h
@@ -0,0 +1,222 @@
+/***************************************************************************
+ * Linux PPP over X - Generic PPP transport layer sockets
+ * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516)
+ *
+ * This file supplies definitions required by the PPP over Ethernet driver
+ * (pppox.c).  All version information wrt this file is located in pppox.c
+ *
+ * License:
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __LINUX_IF_PPPOX_H
+#define __LINUX_IF_PPPOX_H
+
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+
+#ifdef  __KERNEL__
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_channel.h>
+#endif /* __KERNEL__ */
+
+/* For user-space programs to pick up these definitions
+ * which they wouldn't get otherwise without defining __KERNEL__
+ */
+#ifndef AF_PPPOX
+#define AF_PPPOX	24
+#define PF_PPPOX	AF_PPPOX
+#endif /* !(AF_PPPOX) */
+
+/************************************************************************
+ * PPPoE addressing definition
+ */
+typedef __u16 sid_t;
+struct pppoe_addr{
+       sid_t           sid;                    /* Session identifier */
+       unsigned char   remote[ETH_ALEN];       /* Remote address */
+       char            dev[IFNAMSIZ];          /* Local device to use */
+};
+
+struct pptp_addr{
+       __u16           call_id;
+       struct in_addr  sin_addr;
+};
+/************************************************************************
+ * Protocols supported by AF_PPPOX
+ */
+#define PX_PROTO_OE    0 /* Currently just PPPoE */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
+#define PX_PROTO_PPTP  1
+#define PX_MAX_PROTO   2
+#else
+#define PX_PROTO_PPTP  2
+#define PX_MAX_PROTO   3
+#endif
+
+struct sockaddr_pppox {
+       sa_family_t     sa_family;            /* address family, AF_PPPOX */
+       unsigned int    sa_protocol;          /* protocol identifier */
+       union{
+               struct pppoe_addr       pppoe;
+	       			 struct pptp_addr        pptp;
+       }sa_addr;
+}__attribute__ ((packed));
+
+
+/*********************************************************************
+ *
+ * ioctl interface for defining forwarding of connections
+ *
+ ********************************************************************/
+
+#define PPPOEIOCSFWD	_IOW(0xB1 ,0, size_t)
+#define PPPOEIOCDFWD	_IO(0xB1 ,1)
+/*#define PPPOEIOCGFWD	_IOWR(0xB1,2, size_t)*/
+
+/* Codes to identify message types */
+#define PADI_CODE	0x09
+#define PADO_CODE	0x07
+#define PADR_CODE	0x19
+#define PADS_CODE	0x65
+#define PADT_CODE	0xa7
+struct pppoe_tag {
+	__u16 tag_type;
+	__u16 tag_len;
+	char tag_data[0];
+} __attribute ((packed));
+
+/* Tag identifiers */
+#define PTT_EOL		__constant_htons(0x0000)
+#define PTT_SRV_NAME	__constant_htons(0x0101)
+#define PTT_AC_NAME	__constant_htons(0x0102)
+#define PTT_HOST_UNIQ	__constant_htons(0x0103)
+#define PTT_AC_COOKIE	__constant_htons(0x0104)
+#define PTT_VENDOR 	__constant_htons(0x0105)
+#define PTT_RELAY_SID	__constant_htons(0x0110)
+#define PTT_SRV_ERR     __constant_htons(0x0201)
+#define PTT_SYS_ERR  	__constant_htons(0x0202)
+#define PTT_GEN_ERR  	__constant_htons(0x0203)
+
+struct pppoe_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 ver : 4;
+	__u8 type : 4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u8 type : 4;
+	__u8 ver : 4;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+	__u8 code;
+	__u16 sid;
+	__u16 length;
+	struct pppoe_tag tag[0];
+} __attribute__ ((packed));
+
+
+/* Socket options */
+#define PPTP_SO_TIMEOUT 1
+
+
+#ifdef __KERNEL__
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+struct pppoe_opt {
+	struct net_device      *dev;	  /* device associated with socket*/
+	struct pppoe_addr	pa;	  /* what this socket is bound to*/
+	struct sockaddr_pppox	relay;	  /* what socket data will be
+					     relayed to (PPPoE relaying) */
+};
+#endif
+struct pptp_opt {
+	struct pptp_addr	src_addr;
+	struct pptp_addr	dst_addr;
+	__u32 ack_sent, ack_recv;
+	__u32 seq_sent, seq_recv;
+	int ppp_flags;
+};
+#define PPTP_FLAG_PAUSE 0
+#define PPTP_FLAG_PROC 1
+
+#include <net/sock.h>
+
+struct pppox_sock {
+	/* struct sock must be the first member of pppox_sock */
+	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	struct ppp_channel	chan;
+	struct sock		*sk;
+	#else
+	struct sock		sk;
+	struct ppp_channel	chan;
+	#endif
+	struct pppox_sock	*next;	  /* for hash table */
+	union {
+		struct pppoe_opt pppoe;
+		struct pptp_opt pptp;
+	} proto;
+	unsigned short		num;
+};
+#define pppoe_dev	proto.pppoe.dev
+#define pppoe_pa	proto.pppoe.pa
+#define pppoe_relay	proto.pppoe.relay
+
+static inline struct pppox_sock *pppox_sk(struct sock *sk)
+{
+	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	return (struct pppox_sock *)sk->protinfo.pppox;
+	#else
+	return (struct pppox_sock *)sk;
+	#endif
+}
+
+static inline struct sock *sk_pppox(struct pppox_sock *po)
+{
+	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	return po->sk;
+	#else
+	return (struct sock *)po;
+	#endif
+}
+
+struct module;
+
+struct pppox_proto {
+	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	int		(*create)(struct socket *sock);
+	#else
+	int		(*create)(struct net *net, struct socket *sock);
+	#endif
+	int		(*ioctl)(struct socket *sock, unsigned int cmd,
+				 unsigned long arg);
+  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+	struct module	*owner;
+	#endif
+};
+
+extern int register_pppox_proto(int proto_num, struct pppox_proto *pp);
+extern void unregister_pppox_proto(int proto_num);
+extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
+extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+/* PPPoX socket states */
+enum {
+    PPPOX_NONE		= 0,  /* initial state */
+    PPPOX_CONNECTED	= 1,  /* connection established ==TCP_ESTABLISHED */
+    PPPOX_BOUND		= 2,  /* bound to ppp device */
+    PPPOX_RELAY		= 4,  /* forwarding is enabled */
+    PPPOX_ZOMBIE	= 8,  /* dead, but still bound to ppp device */
+    PPPOX_DEAD		= 16  /* dead, useless, please clean me up!*/
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* !(__LINUX_IF_PPPOX_H) */
diff --git a/drivers/net/accel-pptp/pptp.c b/drivers/net/accel-pptp/pptp.c
new file mode 100644
index 0000000000000000000000000000000000000000..4e34099c45f0f54a66d982e82987a3d2d4f2d73d
--- /dev/null
+++ b/drivers/net/accel-pptp/pptp.c
@@ -0,0 +1,1423 @@
+/*
+ *  Point-to-Point Tunneling Protocol for Linux
+ *
+ *  Authors: Kozlov D. (xeb@mail.ru)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include "if_pppox.h"
+#include <linux/if_ppp.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#include <asm/bitops.h>
+#endif
+
+#include <net/sock.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+//#define DEBUG
+//#define CONFIG_GRE
+
+#if defined(CONFIG_GRE) || defined(CONFIG_GRE_MODULE)
+#include "gre.h"
+#endif
+
+#define PPTP_DRIVER_VERSION "0.8.5"
+
+static int log_level=0;
+static int log_packets=10;
+
+#define MAX_CALLID 65535
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+
+static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
+static struct pppox_sock **callid_sock;
+
+#if defined(CONFIG_BLOG)
+void pptp_xmit_update(uint16_t call_id, uint32_t seqNum, uint32_t ackNum, uint32_t daddr);
+int pptp_xmit_get(uint16_t call_id, uint32_t* seqNum, uint32_t* ackNum, uint32_t daddr);
+int pptp_rcv_check(uint16_t call_id, uint32_t *rcv_pktSeq, uint32_t rcv_pktAck, uint32_t saddr);  
+#endif
+
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define INIT_TIMER(_timer,_routine,_data) \
+do { \
+    (_timer)->function=_routine; \
+    (_timer)->data=_data; \
+    init_timer(_timer); \
+} while (0);
+
+static inline void *kzalloc(size_t size,int gfp)
+{
+    void *p=kmalloc(size,gfp);
+    memset(p,0,size);
+    return p;
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,20)
+static inline void nf_reset(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETFILTER
+    nf_conntrack_put(skb->nfct);
+    skb->nfct=NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+    skb->nf_debug=0;
+#endif
+#endif
+}
+#define __user
+#endif
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+    int num = 0;
+
+#if BITS_PER_LONG == 64
+    if ((word & 0xffffffff) == 0) {
+        num += 32;
+        word >>= 32;
+    }
+#endif
+    if ((word & 0xffff) == 0) {
+        num += 16;
+        word >>= 16;
+    }
+    if ((word & 0xff) == 0) {
+        num += 8;
+        word >>= 8;
+    }
+    if ((word & 0xf) == 0) {
+        num += 4;
+        word >>= 4;
+    }
+    if ((word & 0x3) == 0) {
+        num += 2;
+        word >>= 2;
+    }
+    if ((word & 0x1) == 0)
+        num += 1;
+    return num;
+}
+
+#define BITOP_WORD(nr)      ((nr) / BITS_PER_LONG)
+/*
+ * Find the next set bit in a memory region.
+ */
+static unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                unsigned long offset)
+{
+    const unsigned long *p = addr + BITOP_WORD(offset);
+    unsigned long result = offset & ~(BITS_PER_LONG-1);
+    unsigned long tmp;
+
+    if (offset >= size)
+        return size;
+    size -= result;
+    offset %= BITS_PER_LONG;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (~0UL << offset);
+        if (size < BITS_PER_LONG)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= BITS_PER_LONG;
+        result += BITS_PER_LONG;
+    }
+    while (size & ~(BITS_PER_LONG-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += BITS_PER_LONG;
+        size -= BITS_PER_LONG;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (~0UL >> (BITS_PER_LONG - size));
+    if (tmp == 0UL)     /* Are any bits set? */
+        return result + size;   /* Nope. */
+found_middle:
+    return result + __ffs(tmp);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static rwlock_t chan_lock=RW_LOCK_UNLOCKED;
+#define SK_STATE(sk) (sk)->state
+#else
+static DEFINE_SPINLOCK(chan_lock);
+#define SK_STATE(sk) (sk)->sk_state
+#endif
+
+static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+               unsigned long arg);
+static int pptp_rcv_core(struct sock *sk,struct sk_buff *skb);
+
+static struct ppp_channel_ops pptp_chan_ops= {
+    .start_xmit = pptp_xmit,
+    .ioctl=pptp_ppp_ioctl,
+};
+
+
+#define MISSING_WINDOW 20
+#define WRAPPED( curseq, lastseq) \
+    ((((curseq) & 0xffffff00) == 0) && \
+     (((lastseq) & 0xffffff00 ) == 0xffffff00))
+
+/* gre header structure: -------------------------------------------- */
+
+#define PPTP_GRE_PROTO  0x880B
+#define PPTP_GRE_VER    0x1
+
+#define PPTP_GRE_FLAG_C 0x80
+#define PPTP_GRE_FLAG_R 0x40
+#define PPTP_GRE_FLAG_K 0x20
+#define PPTP_GRE_FLAG_S 0x10
+#define PPTP_GRE_FLAG_A 0x80
+
+#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
+#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
+#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
+#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
+#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
+
+struct pptp_gre_header {
+  u8 flags;     /* bitfield */
+  u8 ver;           /* should be PPTP_GRE_VER (enhanced GRE) */
+  u16 protocol;     /* should be PPTP_GRE_PROTO (ppp-encaps) */
+  u16 payload_len;  /* size of ppp payload, not inc. gre header */
+  u16 call_id;      /* peer's call_id for this session */
+  u32 seq;      /* sequence number.  Present if S==1 */
+  u32 ack;      /* seq number of highest packet received by */
+                /*  sender in this session */
+} __packed;
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static struct pppox_sock * lookup_chan(u16 call_id, u32 s_addr)
+#else
+static struct pppox_sock * lookup_chan(u16 call_id, __be32 s_addr)
+#endif
+{
+    struct pppox_sock *sock;
+    struct pptp_opt *opt;
+    
+#ifdef DEBUG
+    if (log_level>=3)   
+    printk(KERN_INFO"lookup_chan: rcv packet, call id =%d, s_addr = %03u.%03u.%03u.%03u\n", call_id,
+    ((uint8_t*)&s_addr)[0], ((uint8_t*)&s_addr)[1], ((uint8_t*)&s_addr)[2], ((uint8_t*)&s_addr)[3]);
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_read_lock();
+    sock = rcu_dereference(callid_sock[call_id]);
+#else
+    read_lock(&chan_lock);
+    sock = callid_sock[call_id];
+#endif
+    if (sock) {
+        opt=&sock->proto.pptp;
+        if (opt->dst_addr.sin_addr.s_addr!=s_addr) sock=NULL;
+        else sock_hold(sk_pppox(sock));
+    }
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_read_unlock();
+#else
+    read_unlock(&chan_lock);
+#endif
+    
+    return sock;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static int lookup_chan_dst(u16 call_id, u32 d_addr)
+#else
+static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+#endif
+{
+    struct pppox_sock *sock;
+    struct pptp_opt *opt;
+    int i;
+    
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_read_lock();
+#else
+    down(&chan_lock);
+#endif
+    for(i = find_next_bit(callid_bitmap,MAX_CALLID,1); i < MAX_CALLID; 
+                    i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)){
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+        sock = rcu_dereference(callid_sock[i]);
+#else
+        sock = callid_sock[i];
+#endif
+        if (!sock)
+        continue;
+        opt = &sock->proto.pptp;
+        if (opt->dst_addr.call_id == call_id && opt->dst_addr.sin_addr.s_addr == d_addr) break;
+    }
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_read_unlock();
+#else
+    up(&chan_lock);
+#endif
+    
+    return i<MAX_CALLID;
+}
+
+static int add_chan(struct pppox_sock *sock)
+{
+    static int call_id=0;
+    int res=-1;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    spin_lock(&chan_lock);
+#else
+    write_lock_bh(&chan_lock);
+#endif
+    
+    if (!sock->proto.pptp.src_addr.call_id)
+    {
+        call_id=find_next_zero_bit(callid_bitmap,MAX_CALLID,call_id+1);
+        if (call_id==MAX_CALLID)
+                call_id=find_next_zero_bit(callid_bitmap,MAX_CALLID,1);
+        sock->proto.pptp.src_addr.call_id=call_id;
+    }
+    else if (test_bit(sock->proto.pptp.src_addr.call_id,callid_bitmap))
+        goto exit;
+    
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id],sock);
+#else
+    callid_sock[sock->proto.pptp.src_addr.call_id] = sock;
+#endif
+    set_bit(sock->proto.pptp.src_addr.call_id,callid_bitmap);
+    res=0;
+
+exit:   
+    #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    spin_unlock(&chan_lock);
+    #else
+    write_unlock_bh(&chan_lock);
+    #endif
+
+    return res;
+}
+
+static void del_chan(struct pppox_sock *sock)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    spin_lock(&chan_lock);
+#else
+    write_lock_bh(&chan_lock);
+#endif
+    clear_bit(sock->proto.pptp.src_addr.call_id,callid_bitmap);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id],NULL);
+    spin_unlock(&chan_lock);
+    synchronize_rcu();
+#else
+    callid_sock[sock->proto.pptp.src_addr.call_id] = NULL;
+    write_unlock_bh(&chan_lock);
+#endif
+}
+
+/*
+ * pptp_xmit - ppp_channel start_xmit hook.
+ *
+ * Encapsulates one PPP frame in a PPTP GRE header plus a hand-built IPv4
+ * header and injects it into the local IP output path.  Always returns 1
+ * ("frame consumed"): on error the skb is freed here, so the PPP core
+ * must never requeue it.
+ */
+static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+    struct sock *sk = (struct sock *) chan->private;
+    struct pppox_sock *po = pppox_sk(sk);
+    struct pptp_opt *opt=&po->proto.pptp;
+    struct pptp_gre_header *hdr;
+    unsigned int header_len=sizeof(*hdr);
+    int err=0;
+    int islcp;
+    int len;
+    unsigned char *data;
+    u32 seq_recv;
+
+    struct rtable *rt;              /* Route to the other host */
+    struct net_device *tdev;            /* Device to other host */
+    struct iphdr  *iph;         /* Our new IP header */
+    int    max_headroom;            /* The extra header space needed */
+
+    if (SK_STATE(sk_pppox(po)) & PPPOX_DEAD)
+        goto tx_error;
+
+    /* Resolve a route to the peer of this call. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    {
+        struct rt_key key = {
+            .dst=opt->dst_addr.sin_addr.s_addr,
+            .src=opt->src_addr.sin_addr.s_addr,
+            .tos=RT_TOS(0),
+        };
+        if ((err=ip_route_output_key(&rt, &key))) {
+            goto tx_error;
+        }
+    }
+#else
+    {
+        struct flowi fl;
+        fl.flowi_oif = 0;
+        fl.u.ip4.daddr = opt->dst_addr.sin_addr.s_addr;
+        fl.u.ip4.saddr = opt->src_addr.sin_addr.s_addr;
+        fl.flowi_tos = RT_TOS(0);
+        fl.flowi_proto = IPPROTO_GRE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+        /* FIX: the error branch had been commented out, leaving this
+         * if-block unterminated and routing failures unhandled. */
+        if ((err=ip_route_output_key(&rt, &fl)))
+            goto tx_error;
+#else
+        rt = ip_route_output_key(&init_net, (struct flowi4 *)&fl);
+        /* FIX: an ERR_PTR() result was previously used unchecked and
+         * dereferenced below (rt->dst.dev) - bail out instead. */
+        if (IS_ERR(rt))
+            goto tx_error;
+#endif
+    }
+#endif
+    tdev = rt->dst.dev;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    max_headroom = ((tdev->hard_header_len+15)&~15) + sizeof(*iph)+sizeof(*hdr)+2;
+#else
+    max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph)+sizeof(*hdr)+2;
+#endif
+
+    /* Re-allocate when there is no room for IP+GRE headers, or the data
+     * is shared/cloned and cannot be written in place. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+    if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+#else
+    if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
+          (skb_cloned(skb) && !skb_clone_writable(skb,0))) {
+#endif
+        struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+        if (!new_skb) {
+            ip_rt_put(rt);
+            goto tx_error;
+        }
+        if (skb->sk)
+        skb_set_owner_w(new_skb, skb->sk);
+        kfree_skb(skb);
+        skb = new_skb;
+    }
+
+    data=skb->data;
+    /* LCP control frames (codes 1..7) must not be header-compressed. */
+    islcp=((data[0] << 8) + data[1])== PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+    /* compress protocol field */
+    if ((opt->ppp_flags & SC_COMP_PROT) && data[0]==0 && !islcp)
+        skb_pull(skb,1);
+
+    /*
+        * Put in the address/control bytes if necessary
+        */
+    if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
+        data=skb_push(skb,2);
+        data[0]=PPP_ALLSTATIONS;
+        data[1]=PPP_UI;
+    }
+
+    len=skb->len;
+
+    seq_recv = opt->seq_recv;
+
+    /* Omit the ack field when the peer is already fully acked. */
+    if (opt->ack_sent == seq_recv) header_len-=sizeof(hdr->ack);
+
+    // Push down and install GRE header
+    skb_push(skb,header_len);
+    hdr=(struct pptp_gre_header *)(skb->data);
+
+    hdr->flags       = PPTP_GRE_FLAG_K;
+    hdr->ver         = PPTP_GRE_VER;
+    hdr->protocol    = htons(PPTP_GRE_PROTO);
+    hdr->call_id     = htons(opt->dst_addr.call_id);
+
+    hdr->flags |= PPTP_GRE_FLAG_S;
+    hdr->seq    = htonl(++opt->seq_sent);
+#ifdef DEBUG
+    if (log_level>=3 && opt->seq_sent<=log_packets)
+        printk(KERN_INFO"PPTP[%i]: send packet: seq=%i",opt->src_addr.call_id,opt->seq_sent);
+#endif
+    if (opt->ack_sent != seq_recv)  {
+    /* send ack with this message */
+        hdr->ver |= PPTP_GRE_FLAG_A;
+        hdr->ack  = htonl(seq_recv);
+        opt->ack_sent = seq_recv;
+#ifdef DEBUG
+        if (log_level>=3 && opt->seq_sent<=log_packets)
+            printk(" ack=%i",seq_recv);
+#endif
+    }
+    hdr->payload_len = htons(len);
+#ifdef DEBUG
+    if (log_level>=3 && opt->seq_sent<=log_packets)
+        printk("\n");
+#endif
+
+    /*
+     *  Push down and install the IP header.
+     */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+    skb_reset_transport_header(skb);
+    skb_push(skb, sizeof(*iph));
+    skb_reset_network_header(skb);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+    skb->transport_header = skb->network_header;
+    skb_push(skb, sizeof(*iph));
+    skb_reset_network_header(skb);
+#else
+    skb->h.raw = skb->nh.raw;
+    skb->nh.raw = skb_push(skb, sizeof(*iph));
+#endif
+    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+                  IPSKB_REROUTED);
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+    iph             =   ip_hdr(skb);
+#else
+    iph             =   skb->nh.iph;
+#endif
+    iph->version        =   4;
+    iph->ihl        =   sizeof(struct iphdr) >> 2;
+    if (ip_dont_fragment(sk, &rt->dst))
+        iph->frag_off   =   htons(IP_DF);
+    else
+        iph->frag_off   =   0;
+    iph->protocol       =   IPPROTO_GRE;
+    iph->tos        =   0;
+    iph->daddr      =   rt->rt_dst;
+    iph->saddr      =   rt->rt_src;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    iph->ttl = sk->protinfo.af_inet.ttl;
+#else
+    //iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);//ori_accel_pptp_code
+    /* NOTE(review): vendor change reads metric index RTAX_HOPLIMIT-1
+     * instead of RTAX_HOPLIMIT (upstream line kept above) - confirm this
+     * matches the platform's metric layout. */
+    iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT-1);
+#endif
+    iph->tot_len = htons(skb->len);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+    skb_dst_drop(skb);
+    skb_dst_set(skb,&rt->dst);
+#else
+    dst_release(skb->dst);
+    skb->dst = &rt->dst;
+#endif
+
+    nf_reset(skb);
+
+    skb->ip_summed = CHECKSUM_NONE;
+    ip_select_ident(iph, &rt->dst, NULL);
+    ip_send_check(iph);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->dst.dev, ip_send);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+    err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->dst.dev, dst_output);
+#else
+    err = ip_local_out(skb);
+#endif
+
+    return 1;
+
+tx_error:
+    /* FIX: returning 1 tells the PPP core the frame was consumed, so the
+     * skb must be freed here or every error leaks one.  The success path
+     * now returns above instead of falling through into this label. */
+    kfree_skb(skb);
+    return 1;
+}
+
+/*
+ * pptp_rcv_core - per-socket receive path for one GRE/PPTP frame.
+ *
+ * Runs with the socket lock held (via sk_receive_skb() or the backlog).
+ * Updates the peer-ack state, validates the sequence number, strips the
+ * GRE header plus optional PPP address/control bytes, and feeds the
+ * payload to the PPP core.  Returns NET_RX_SUCCESS or NET_RX_DROP; the
+ * skb is consumed either way.
+ *
+ * NOTE(review): skb->data is dereferenced as a pptp_gre_header without a
+ * preceding pskb_may_pull(skb, headersize); a non-linear or short skb
+ * would be misread.  TODO confirm all callers guarantee a linear header.
+ */
+static int pptp_rcv_core(struct sock *sk,struct sk_buff *skb)
+{
+    struct pppox_sock *po = pppox_sk(sk);
+    struct pptp_opt *opt=&po->proto.pptp;
+    int headersize,payload_len,seq;
+    u8 *payload;
+    struct pptp_gre_header *header;
+
+    /* Not yet connected: queue the raw frame for the control path. */
+    if (!(SK_STATE(sk) & PPPOX_CONNECTED)) {
+        if (sock_queue_rcv_skb(sk, skb))
+            goto drop;
+        return NET_RX_SUCCESS;
+    }
+    
+    header = (struct pptp_gre_header *)(skb->data);
+
+    /* test if acknowledgement present */
+    if (PPTP_GRE_IS_A(header->ver)){
+            u32 ack = (PPTP_GRE_IS_S(header->flags))?
+                    header->ack:header->seq; /* ack in different place if S = 0 */
+
+            ack = ntohl( ack);
+
+            if (ack > opt->ack_recv) opt->ack_recv = ack;
+            /* also handle sequence number wrap-around  */
+            if (WRAPPED(ack,opt->ack_recv)) opt->ack_recv = ack;
+    }
+
+    /* test if payload present */
+    if (!PPTP_GRE_IS_S(header->flags)){
+        goto drop;
+    }
+
+    headersize  = sizeof(*header);
+    payload_len = ntohs(header->payload_len);
+    seq         = ntohl(header->seq);
+
+    /* no ack present? */
+    if (!PPTP_GRE_IS_A(header->ver)) headersize -= sizeof(header->ack);
+    /* check for incomplete packet (length smaller than expected) */
+    if (skb->len - headersize < payload_len){
+#ifdef DEBUG
+        if (log_level>=1)
+            printk(KERN_INFO"PPTP: discarding truncated packet (expected %d, got %d bytes)\n",
+                        payload_len, skb->len - headersize);
+#endif
+        goto drop;
+    }
+
+    payload=skb->data+headersize;
+
+#if defined(CONFIG_BLOG)     
+    /* Non-accelerated path: this CPU sees every frame, so the expected
+     * sequence number is seq_recv + 1. */
+    if (!blog_gre_tunnel_accelerated())
+    {   
+#endif
+    /* check for expected sequence number */
+    if ( seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq) ){
+        /* Old/duplicate frame; still let stale LCP echoes through so
+         * keepalives survive reordering. */
+        if ( (payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
+             (PPP_PROTOCOL(payload) == PPP_LCP) &&
+             ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)) ){
+#ifdef DEBUG
+            if ( log_level >= 1)
+                printk(KERN_INFO"PPTP[%i]: allowing old LCP Echo packet %d (expecting %d)\n", opt->src_addr.call_id,
+                            seq, opt->seq_recv + 1);
+#endif
+            goto allow_packet;
+        }
+#ifdef DEBUG
+        if ( log_level >= 1)
+            printk(KERN_INFO"PPTP[%i]: discarding duplicate or old packet %d (expecting %d)\n",opt->src_addr.call_id,
+                            seq, opt->seq_recv + 1);
+#endif
+    }else{
+        opt->seq_recv = seq;
+allow_packet:
+#ifdef DEBUG
+        if ( log_level >= 3 && opt->seq_sent<=log_packets)
+            printk(KERN_INFO"PPTP[%i]: accepting packet %d size=%i (%02x %02x %02x %02x %02x %02x)\n",opt->src_addr.call_id, seq,payload_len,
+                *(payload +0),
+                *(payload +1),
+                *(payload +2),
+                *(payload +3),
+                *(payload +4),
+                *(payload +5));
+#endif
+
+        skb_pull(skb,headersize);
+
+        if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI){
+            /* chop off address/control */
+            if (skb->len < 3)
+                goto drop;
+            skb_pull(skb,2);
+        }
+
+        if ((*skb->data) & 1){
+            /* protocol is compressed */
+            skb_push(skb, 1)[0] = 0;
+        }
+
+        skb->ip_summed=CHECKSUM_NONE;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
+        skb_set_network_header(skb,skb->head-skb->data);
+#endif
+        ppp_input(&po->chan,skb);
+
+        return NET_RX_SUCCESS;
+    }
+#if defined(CONFIG_BLOG)
+    }
+    else /* blog_gre_tunnel_accelerated is true, so opt->seq_recv and opt->ack_recv have been ++ by  pptp_rcv_check() */
+    {
+        /* check for expected sequence number */
+        if ( seq < opt->seq_recv || WRAPPED(opt->seq_recv, seq) )
+        {
+            if ( (payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) && (PPP_PROTOCOL(payload) == PPP_LCP) &&
+                ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)) )
+                goto allow_packet2;
+        }
+        else
+        {
+allow_packet2:
+            //printk(" PPTP: blog_gre_tunnel_accelerated!\n");
+#ifdef DEBUG            
+            if ( log_level >= 3 && opt->seq_sent<=log_packets)
+                printk(KERN_INFO"PPTP[%i]: accepting packet %d size=%i (%02x %02x %02x %02x %02x %02x)\n",opt->src_addr.call_id, seq,payload_len,
+                *(payload +0),*(payload +1),*(payload +2),*(payload +3),*(payload +4),*(payload +5));
+#endif          
+            skb_pull(skb,headersize);
+
+            if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI)
+            {
+                /* chop off address/control */
+                if (skb->len < 3)
+                    goto drop;
+                skb_pull(skb,2);
+            }
+
+            if ((*skb->data) & 1){
+                /* protocol is compressed */
+                skb_push(skb, 1)[0] = 0;
+            }
+
+            skb->ip_summed=CHECKSUM_NONE;
+            skb_set_network_header(skb,skb->head-skb->data);
+            ppp_input(&po->chan,skb);
+            return NET_RX_SUCCESS;  
+        }
+    }       
+#endif
+    
+drop:
+    kfree_skb(skb);
+    return NET_RX_DROP;
+}
+
+/*
+ * pptp_rcv - GRE protocol handler entry point.
+ *
+ * Validates the PPTP flavour of the GRE header (version 1, protocol
+ * 0x880B, K set, C/R clear, routing/recursion bits zero), demultiplexes
+ * on <call_id, source address> via lookup_chan(), and hands matching
+ * frames to the owning socket (directly, or via the backlog on 2.4).
+ * Unmatched or malformed frames are dropped.
+ *
+ * NOTE(review): the pskb_may_pull() guard below is commented out, so the
+ * GRE header may be read from a non-linear skb - TODO confirm the
+ * ingress path always linearizes at least the header bytes.
+ */
+static int pptp_rcv(struct sk_buff *skb)
+{
+    struct pppox_sock *po;
+    struct pptp_gre_header *header;
+    struct iphdr *iph;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0)
+    int ret;
+    struct sock *sk;
+#endif
+
+    if (skb->pkt_type != PACKET_HOST)
+        goto drop;
+
+    /*if (!pskb_may_pull(skb, 12))
+        goto drop;*/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+    iph = ip_hdr(skb);
+#else
+    iph = skb->nh.iph;
+#endif
+
+    header = (struct pptp_gre_header *)skb->data;
+
+    if (    /* version should be 1 */
+                    ((header->ver & 0x7F) != PPTP_GRE_VER) ||
+                    /* PPTP-GRE protocol for PPTP */
+                    (ntohs(header->protocol) != PPTP_GRE_PROTO)||
+                    /* flag C should be clear   */
+                    PPTP_GRE_IS_C(header->flags) ||
+                    /* flag R should be clear   */
+                    PPTP_GRE_IS_R(header->flags) ||
+                    /* flag K should be set     */
+                    (!PPTP_GRE_IS_K(header->flags)) ||
+                    /* routing and recursion ctrl = 0  */
+                    ((header->flags&0xF) != 0)){
+            /* if invalid, discard this packet */
+        if (log_level>=1)
+            printk(KERN_INFO"PPTP: Discarding GRE: %X %X %X %X %X %X\n",
+                            header->ver&0x7F, ntohs(header->protocol),
+                            PPTP_GRE_IS_C(header->flags),
+                            PPTP_GRE_IS_R(header->flags),
+                            PPTP_GRE_IS_K(header->flags),
+                            header->flags & 0xF);
+        goto drop;
+    }
+
+
+    /* Demux on the on-wire call id and peer address; drop the cached
+     * route/conntrack state before requeueing to the PPPoX socket. */
+    if ((po=lookup_chan(htons(header->call_id),iph->saddr))) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+        skb_dst_drop(skb);
+#else
+        dst_release(skb->dst);
+        skb->dst = NULL;
+#endif
+        nf_reset(skb);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0)
+        sk=sk_pppox(po);
+            bh_lock_sock(sk);
+        /* Socket state is unknown, must put skb into backlog. */
+        if (sk->lock.users != 0) {
+            sk_add_backlog(sk, skb);
+            ret = NET_RX_SUCCESS;
+        } else {
+            ret = pptp_rcv_core(sk, skb);
+        }
+        bh_unlock_sock(sk);
+        sock_put(sk);
+        return ret;
+        
+#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
+        
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,19)
+        return sk_receive_skb(sk_pppox(po), skb);
+#else
+        return sk_receive_skb(sk_pppox(po), skb, 0);
+#endif
+        
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
+    }else {
+#ifdef DEBUG
+        if (log_level>=1)
+            printk(KERN_INFO"PPTP: Discarding packet from unknown call_id %i\n",htons(header->call_id));
+#endif
+    }
+
+drop:
+    kfree_skb(skb);
+    return NET_RX_DROP;
+}
+
+/*
+ * pptp_bind - bind the PPPoX socket to a local PPTP address / call id.
+ *
+ * Stores the requested source address and claims the call id through
+ * add_chan() (call id 0 asks add_chan() to allocate one).  Returns
+ * -EBUSY if the requested call id is already in use, otherwise 0.
+ */
+static int pptp_bind(struct socket *sock,struct sockaddr *uservaddr,int sockaddr_len)
+{
+    struct sock *sk = sock->sk;
+    struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+    struct pppox_sock *po = pppox_sk(sk);
+    struct pptp_opt *opt=&po->proto.pptp;
+    int error=0;
+
+#ifdef DEBUG    
+    if (log_level>=1)
+        printk(KERN_INFO"PPTP: bind: addr=%X call_id=%i\n",sp->sa_addr.pptp.sin_addr.s_addr,
+                        sp->sa_addr.pptp.call_id);
+#endif
+    lock_sock(sk);
+
+    opt->src_addr=sp->sa_addr.pptp;
+    /* FIX: the failure path used to call release_sock() here and then
+     * fall through to the release_sock() below, unlocking the socket
+     * twice.  Record the error and unlock exactly once. */
+    if (add_chan(po))
+        error=-EBUSY;
+#ifdef DEBUG
+    if (log_level>=1)
+        printk(KERN_INFO"PPTP: using call_id %i\n",opt->src_addr.call_id);
+#endif
+
+    release_sock(sk);
+    return error;
+}
+
+/*
+ * pptp_connect - connect the PPPoX socket to a remote PPTP endpoint and
+ * register it as a PPP channel.
+ *
+ * Rejects non-PPTP addresses, duplicate destinations and already
+ * connected/dead sockets; resolves and caches a route to the peer,
+ * derives the channel MTU from it (minus PPTP_HEADER_OVERHEAD), then
+ * registers the channel and marks the socket PPPOX_CONNECTED.
+ * Returns 0 or a negative errno.
+ */
+static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+          int sockaddr_len, int flags)
+{
+    struct sock *sk = sock->sk;
+    struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+    struct pppox_sock *po = pppox_sk(sk);
+    struct pptp_opt *opt = &po->proto.pptp;
+    struct rtable *rt;              /* Route to the other host */
+    int error=0;
+    struct flowi4 fl4;
+
+    if (sp->sa_protocol != PX_PROTO_PPTP)
+        return -EINVAL;
+    
+#ifdef DEBUG
+    if (log_level>=1)
+        printk(KERN_INFO"PPTP[%i]: connect: addr=%X call_id=%i\n",opt->src_addr.call_id,
+                        sp->sa_addr.pptp.sin_addr.s_addr,sp->sa_addr.pptp.call_id);
+#endif
+    
+    if (lookup_chan_dst(sp->sa_addr.pptp.call_id,sp->sa_addr.pptp.sin_addr.s_addr))
+        return -EALREADY;
+
+    lock_sock(sk);
+    /* Check for already bound sockets */
+    if (SK_STATE(sk) & PPPOX_CONNECTED){
+        error = -EBUSY;
+        goto end;
+    }
+
+    /* Check for already disconnected sockets, on attempts to disconnect */
+    if (SK_STATE(sk) & PPPOX_DEAD){
+        error = -EALREADY;
+        goto end;
+    }
+
+    if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr){
+        error = -EINVAL;
+        goto end;
+    }
+
+    po->chan.private=sk;
+    po->chan.ops=&pptp_chan_ops;
+
+
+    {
+        /* NOTE(review): the route is computed from opt->dst_addr, but
+         * opt->dst_addr is only assigned from sp->sa_addr.pptp at the
+         * end of this function, so a first connect routes on a zeroed
+         * destination - verify against upstream pptp.c, which routes on
+         * the caller-supplied address.  TODO confirm. */
+        rt = ip_route_output_ports(&init_net, &fl4, sk,
+            opt->dst_addr.sin_addr.s_addr,
+            opt->src_addr.sin_addr.s_addr,
+            0, 0,
+            IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+            
+        if (IS_ERR(rt)) 
+        {
+            error = -EHOSTUNREACH;
+            goto end;
+        }
+
+        sk_setup_caps(sk, &rt->dst);
+    }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    po->chan.mtu=PPP_MTU;
+#else
+    po->chan.mtu=dst_mtu(&rt->dst);
+    if (!po->chan.mtu) po->chan.mtu=1500;
+#endif
+    ip_rt_put(rt);
+    po->chan.mtu-=PPTP_HEADER_OVERHEAD;
+
+    po->chan.hdrlen=2+sizeof(struct pptp_gre_header);
+    error = ppp_register_channel(&po->chan);
+    if (error){
+        printk(KERN_ERR "PPTP: failed to register PPP channel (%d)\n",error);
+        goto end;
+    }
+
+    opt->dst_addr=sp->sa_addr.pptp;
+    SK_STATE(sk) = PPPOX_CONNECTED;
+
+ end:
+    release_sock(sk);
+    return error;
+}
+
+/*
+ * pptp_getname - report the socket's local PPTP address.
+ *
+ * @peer is ignored: only the local (src) address is ever returned.
+ */
+static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
+          int *usockaddr_len, int peer)
+{
+    int len = sizeof(struct sockaddr_pppox);
+    struct sockaddr_pppox sp;
+
+    /* FIX: zero the whole struct first - padding and unset union bytes
+     * were previously copied out uninitialized (kernel stack info leak;
+     * same issue as the upstream getname info-leak fixes). */
+    memset(&sp, 0, len);
+
+    sp.sa_family    = AF_PPPOX;
+    sp.sa_protocol  = PX_PROTO_PPTP;
+    sp.sa_addr.pptp=pppox_sk(sock->sk)->proto.pptp.src_addr;
+
+    memcpy(uaddr, &sp, len);
+
+    *usockaddr_len = len;
+
+    return 0;
+}
+
+/*
+ * pptp_release - close(2) on the PPPoX socket.
+ *
+ * Unhashes the call id (del_chan), unbinds the PPP channel, marks the
+ * socket PPPOX_DEAD and orphans it.  Returns -EBADF if the socket was
+ * already dead, otherwise 0.
+ */
+static int pptp_release(struct socket *sock)
+{
+    struct sock *sk = sock->sk;
+    struct pppox_sock *po;
+    struct pptp_opt *opt;
+    int error = 0;
+
+    if (!sk)
+        return 0;
+
+    lock_sock(sk);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    if (sk->dead)
+#else
+    if (sock_flag(sk, SOCK_DEAD))
+#endif
+    {
+        release_sock(sk);
+        return -EBADF;
+    }
+        
+    po = pppox_sk(sk);
+    opt=&po->proto.pptp;
+    /* del_chan() includes synchronize_rcu() on 2.6+, so no receiver can
+     * still hold this socket once we proceed. */
+    del_chan(po);
+
+    pppox_unbind_sock(sk);
+    SK_STATE(sk) = PPPOX_DEAD;
+
+#ifdef DEBUG
+    if (log_level>=1)
+        printk(KERN_INFO"PPTP[%i]: release\n",opt->src_addr.call_id);
+#endif
+
+    sock_orphan(sk);
+    sock->sk = NULL;
+
+    release_sock(sk);
+    sock_put(sk);
+
+    return error;
+}
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+/* 2.6+ socket allocator descriptor: obj_size covers the whole
+ * pppox_sock so the PPTP per-call state is allocated together with the
+ * struct sock (see pptp_create). */
+static struct proto pptp_sk_proto = {
+    .name     = "PPTP",
+    .owner    = THIS_MODULE,
+    .obj_size = sizeof(struct pppox_sock),
+};
+#endif
+
+/* proto_ops for PX_PROTO_PPTP sockets: only release/bind/connect/getname
+ * (plus the generic pppox ioctl on 2.6.16+) are meaningful; all other
+ * operations are sock_no_* stubs. */
+static struct proto_ops pptp_ops = {
+    .family     = AF_PPPOX,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    .owner      = THIS_MODULE,
+#endif
+    .release        = pptp_release,
+    .bind       =  pptp_bind,
+    .connect        = pptp_connect,
+    .socketpair     = sock_no_socketpair,
+    .accept     = sock_no_accept,
+    .getname        = pptp_getname,
+    .poll       = sock_no_poll,
+    .listen     = sock_no_listen,
+    .shutdown       = sock_no_shutdown,
+    .setsockopt     = sock_no_setsockopt,
+    .getsockopt     = sock_no_getsockopt,
+    .sendmsg        = sock_no_sendmsg,
+    .recvmsg        = sock_no_recvmsg,
+    .mmap       = sock_no_mmap,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    .ioctl      = pppox_ioctl,
+#endif
+};
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+/*
+ * pptp_sock_destruct (2.4 variant) - final socket destructor.
+ *
+ * Purges queued skbs, unhashes/unbinds if release never ran, and frees
+ * the separately kzalloc'ed pppox_sock via the destruct_hook set up in
+ * pptp_create().
+ */
+static void pptp_sock_destruct(struct sock *sk)
+{
+    skb_queue_purge(&sk->receive_queue);
+    if (!(SK_STATE(sk) & PPPOX_DEAD)) {
+        del_chan(pppox_sk(sk));
+        pppox_unbind_sock(sk);
+    }
+    if (sk->protinfo.destruct_hook)
+        kfree(sk->protinfo.destruct_hook);
+
+    MOD_DEC_USE_COUNT;
+}
+
+/*
+ * pptp_create (2.4 variant) - allocate and initialise a PX_PROTO_PPTP
+ * socket.  The pppox_sock is kzalloc'ed separately, hung off
+ * sk->protinfo, and freed again through destruct_hook in
+ * pptp_sock_destruct().
+ *
+ * NOTE(review): the kzalloc() result is not checked - on allocation
+ * failure sk->protinfo.pppox is NULL and pppox_sk(sk) users would
+ * dereference it.  TODO fix if the 2.4 branch is still built.
+ */
+static int pptp_create(struct socket *sock)
+{
+    int error = -ENOMEM;
+    struct sock *sk;
+    struct pppox_sock *po;
+    struct pptp_opt *opt;
+
+    MOD_INC_USE_COUNT;
+
+    sk = sk_alloc(PF_PPPOX, GFP_KERNEL, 1);
+    if (!sk)
+        goto out;
+
+    sock_init_data(sock, sk);
+
+    sock->state = SS_UNCONNECTED;
+    sock->ops   = &pptp_ops;
+
+    //sk->sk_backlog_rcv = pppoe_rcv_core;
+    sk->state      = PPPOX_NONE;
+    sk->type       = SOCK_STREAM;
+    sk->family     = PF_PPPOX;
+    sk->protocol       = PX_PROTO_PPTP;
+
+    sk->protinfo.pppox=kzalloc(sizeof(struct pppox_sock),GFP_KERNEL);
+    sk->destruct=pptp_sock_destruct;
+    sk->protinfo.destruct_hook=sk->protinfo.pppox;
+
+    po = pppox_sk(sk);
+    po->sk=sk;
+    opt=&po->proto.pptp;
+
+    /* Fresh call: no data exchanged yet. */
+    opt->seq_sent=0; opt->seq_recv=0;
+    opt->ack_recv=0; opt->ack_sent=0;
+
+    error = 0;
+out:
+    return error;
+}
+#else
+/*
+ * pptp_sock_destruct (2.6+ variant) - final socket destructor: unhash
+ * and unbind if release never ran, then purge any queued skbs.
+ */
+static void pptp_sock_destruct(struct sock *sk)
+{
+    if (!(SK_STATE(sk) & PPPOX_DEAD)){
+        del_chan(pppox_sk(sk));
+        pppox_unbind_sock(sk);
+    }
+    skb_queue_purge(&sk->sk_receive_queue);
+}
+/*
+ * pptp_create (2.6+ variant) - allocate a PX_PROTO_PPTP socket.
+ *
+ * The pppox_sock is embedded in the sock itself (see
+ * pptp_sk_proto.obj_size).  Installs pptp_rcv_core as the backlog
+ * receive handler and zeroes the sequence/ack counters.  Returns 0 or
+ * -ENOMEM.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static int pptp_create(struct socket *sock)
+#else
+static int pptp_create(struct net *net, struct socket *sock)
+#endif
+{
+    int error = -ENOMEM;
+    struct sock *sk;
+    struct pppox_sock *po;
+    struct pptp_opt *opt;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+    sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, 1);
+#else
+    sk = sk_alloc(net,PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+#endif
+    if (!sk)
+        goto out;
+
+    sock_init_data(sock, sk);
+
+    sock->state = SS_UNCONNECTED;
+    sock->ops   = &pptp_ops;
+
+    sk->sk_backlog_rcv = pptp_rcv_core;
+    sk->sk_state       = PPPOX_NONE;
+    sk->sk_type    = SOCK_STREAM;
+    sk->sk_family      = PF_PPPOX;
+    sk->sk_protocol    = PX_PROTO_PPTP;
+    sk->sk_destruct    = pptp_sock_destruct;
+
+    po = pppox_sk(sk);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    po->sk=sk;
+#endif
+    opt=&po->proto.pptp;
+
+    /* Fresh call: no data exchanged yet. */
+    opt->seq_sent=0; opt->seq_recv=0;
+    opt->ack_recv=0; opt->ack_sent=0;
+
+    error = 0;
+out:
+    return error;
+}
+#endif
+
+
+/*
+ * pptp_ppp_ioctl - ppp_channel ioctl hook.
+ *
+ * Supports PPPIOCGFLAGS / PPPIOCSFLAGS on the channel's ppp_flags
+ * (receive-side SC_RCV_BITS are masked off on set).  Returns 0 on
+ * success, -EFAULT on a failed user copy, -ENOTTY for anything else.
+ */
+static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+               unsigned long arg)
+{
+    struct sock *sk = (struct sock *) chan->private;
+    struct pppox_sock *po = pppox_sk(sk);
+    struct pptp_opt *opt=&po->proto.pptp;
+    void __user *argp = (void __user *)arg;
+    int __user *p = argp;
+    int err, val;
+
+    err = -EFAULT;
+    switch (cmd) {
+    case PPPIOCGFLAGS:
+        val = opt->ppp_flags;
+        if (put_user(val, p))
+            break;
+        err = 0;
+        break;
+    case PPPIOCSFLAGS:
+        if (get_user(val, p))
+            break;
+        opt->ppp_flags = val & ~SC_RCV_BITS;
+        err = 0;
+        break;
+    default:
+        err = -ENOTTY;
+    }
+
+    return err;
+}
+
+
+/* Registered with the pppox layer so socket(AF_PPPOX, ..,
+ * PX_PROTO_PPTP) reaches pptp_create(). */
+static struct pppox_proto pppox_pptp_proto = {
+    .create = pptp_create,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+    .owner  = THIS_MODULE,
+#endif
+};
+
+/* GRE demux registration: prefer the shared GRE demux slot
+ * (CONFIG_GRE/GREPROTO_PPTP); otherwise claim IPPROTO_GRE outright with
+ * an inet (2.4) or net (2.6) protocol handler.  All variants route
+ * received frames to pptp_rcv(). */
+#if defined(CONFIG_GRE) || defined(CONFIG_GRE_MODULE)
+static struct gre_protocol gre_pptp_protocol = {
+    .handler    = pptp_rcv,
+};
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+static struct inet_protocol net_pptp_protocol = {
+    .handler    = pptp_rcv,
+    .protocol = IPPROTO_GRE,
+    .name     = "PPTP",
+};
+#else
+static struct net_protocol net_pptp_protocol = {
+    .handler    = pptp_rcv,
+};
+#endif
+
+/*
+ * pptp_init_module - module entry point.
+ *
+ * Allocates the callid_sock[] demux table, hooks the GRE (or raw inet)
+ * protocol handler, registers the "PPTP" sk proto and the PX_PROTO_PPTP
+ * pppox protocol, and (with CONFIG_BLOG) publishes the flow-cache
+ * acceleration callbacks.  Unwinds in reverse order on failure and
+ * returns 0 or a negative errno.
+ */
+static int __init pptp_init_module(void)
+{
+    int err=0;
+    printk(KERN_INFO "PPTP driver version " PPTP_DRIVER_VERSION "\n");
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
+                            GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+#else
+    callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
+                            GFP_KERNEL, PAGE_KERNEL);
+    /* FIX: NULL-check before clearing - previously a failed vmalloc was
+     * memset()ed unconditionally, oopsing on NULL. */
+    if (callid_sock)
+        memset(callid_sock, 0, (MAX_CALLID + 1) * sizeof(void *));
+#endif
+    if (!callid_sock) {
+        printk(KERN_ERR "PPTP: can't allocate memory\n");
+        return -ENOMEM;
+    }
+
+#if defined(CONFIG_GRE) || defined(CONFIG_GRE_MODULE)
+    if (gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP) < 0) {
+        printk(KERN_INFO "PPTP: can't add protocol\n");
+        /* FIX: err was still 0 here, so a failed protocol registration
+         * made module load report success. */
+        err = -EINVAL;
+        goto out_free_mem;
+    }
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    inet_add_protocol(&net_pptp_protocol);
+#else
+    if (inet_add_protocol(&net_pptp_protocol, IPPROTO_GRE) < 0) {
+        printk(KERN_INFO "PPTP: can't add protocol\n");
+        /* FIX: see above - propagate the failure. */
+        err = -EINVAL;
+        goto out_free_mem;
+    }
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    err = proto_register(&pptp_sk_proto, 0);
+    if (err){
+        printk(KERN_INFO "PPTP: can't register sk_proto\n");
+        goto out_inet_del_protocol;
+    }
+#endif
+
+    err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
+    if (err){
+        printk(KERN_INFO "PPTP: can't register pppox_proto\n");
+        goto out_unregister_sk_proto;
+    }
+    
+#if defined(CONFIG_BLOG)    
+    /* Hand the flow-cache layer our seq/ack resync hooks. */
+    blog_pptp_xmit_update_fn = (blog_pptp_xmit_upd_t) pptp_xmit_update; 
+    blog_pptp_xmit_get_fn = (blog_pptp_xmit_get_t) pptp_xmit_get;
+    blog_pptp_rcv_check_fn = (blog_pptp_rcv_check_t) pptp_rcv_check;
+#endif
+    
+    return 0;
+out_unregister_sk_proto:
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    proto_unregister(&pptp_sk_proto);
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+out_inet_del_protocol:
+#endif
+
+#if defined(CONFIG_GRE) || defined(CONFIG_GRE_MODULE)
+    gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    inet_del_protocol(&net_pptp_protocol);
+#else
+    inet_del_protocol(&net_pptp_protocol, IPPROTO_GRE);
+#endif
+out_free_mem:
+    vfree(callid_sock);
+    
+    return err;
+}
+
+/*
+ * pptp_exit_module - module unload: tear everything down in reverse
+ * registration order (pppox proto, sk proto, GRE/inet protocol hook,
+ * then the demux table).
+ */
+static void __exit pptp_exit_module(void)
+{
+    unregister_pppox_proto(PX_PROTO_PPTP);
+#if defined(CONFIG_GRE) || defined(CONFIG_GRE_MODULE)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
+    proto_unregister(&pptp_sk_proto);
+#endif
+    gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    inet_del_protocol(&net_pptp_protocol);
+#else
+    proto_unregister(&pptp_sk_proto);
+    inet_del_protocol(&net_pptp_protocol, IPPROTO_GRE);
+#endif
+    vfree(callid_sock);
+}
+
+#if defined(CONFIG_BLOG)
+/*
+ * pptp_xmit_update - flow-cache (BLOG) callback: resync transmit state
+ * with the sequence/ack numbers the accelerator last used.
+ *
+ * Scans bound call ids under RCU for the channel whose *destination*
+ * call id and peer address match, then adopts the accelerator's seq/ack
+ * values (non-zero values only, and only when they differ).
+ */
+void pptp_xmit_update(uint16_t call_id, uint32_t seqNum, uint32_t ackNum, uint32_t daddr)
+{
+    struct pppox_sock *sock;
+    struct pptp_opt *opt;
+    int i;
+    
+    rcu_read_lock();
+
+    for(i = find_next_bit(callid_bitmap,MAX_CALLID,1); i < MAX_CALLID; i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1))
+    {
+        sock = rcu_dereference(callid_sock[i]);
+        if (!sock)
+            continue;
+            
+        opt = &sock->proto.pptp;
+        if (opt->dst_addr.call_id == call_id && opt->dst_addr.sin_addr.s_addr == daddr) 
+        {   
+            //printk(KERN_INFO "PPTP: find the channel!\n");
+            if( opt->seq_sent != seqNum && seqNum > 0)
+            {   
+                //printk(KERN_INFO "PPTP: update seq_sent!\n");
+                opt->seq_sent = seqNum;
+            }
+            if( opt->ack_sent != ackNum && ackNum > 0)
+            {   
+                //printk(KERN_INFO "PPTP: update ack_sent!\n");
+                opt->ack_sent = ackNum; 
+            }           
+            break;
+        }
+    }
+    
+    rcu_read_unlock();
+
+    return;
+}
+
+/*
+ * pptp_xmit_get - flow-cache (BLOG) callback: hand the accelerator the
+ * next transmit sequence/ack numbers for a call.
+ *
+ * Finds the channel matching <destination call_id, peer address> under
+ * RCU, advances seq_sent and reports it via *seqNum/*ackNum.  Returns
+ * PPTP_WITH_ACK when an ack must be piggybacked (and records it as
+ * sent), otherwise PPTP_NOT_ACK.
+ */
+int pptp_xmit_get(uint16_t call_id, uint32_t* seqNum, uint32_t* ackNum, uint32_t daddr)
+{
+    struct pppox_sock *sock;
+    struct pptp_opt *opt;
+    int i, ack_flag = PPTP_NOT_ACK;
+    
+    rcu_read_lock();
+
+    for(i = find_next_bit(callid_bitmap,MAX_CALLID,1); i < MAX_CALLID; i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1))
+    {
+        sock = rcu_dereference(callid_sock[i]);
+        if (!sock)
+            continue;
+            
+        opt = &sock->proto.pptp;
+        if (opt->dst_addr.call_id == call_id && opt->dst_addr.sin_addr.s_addr == daddr) 
+        {   
+            //printk(KERN_INFO "PPTP: seq_sent = %d, ack_sent = %d \n", opt->seq_sent, opt->ack_sent);
+            opt->seq_sent += 1;
+            *seqNum = opt->seq_sent;
+            *ackNum = opt->ack_sent;
+            
+            if (opt->ack_sent != opt->seq_recv)
+            {   
+                ack_flag = PPTP_WITH_ACK;
+                *ackNum = opt->ack_sent = opt->seq_recv;            
+            }   
+            break;
+        }
+    }
+    
+    rcu_read_unlock();
+
+    return ack_flag;
+}
+
+/*
+ * pptp_rcv_check - flow-cache (BLOG) callback: classify a received
+ * frame's sequence number against the channel state.
+ *
+ * Looks up callid_sock[call_id] under RCU (the *local* call id) and, if
+ * the peer address matches, advances seq_recv/ack_recv and returns
+ * BLOG_PPTP_RCV_IN_SEQ / _OOS_LT / _OOS_GT.  Returns
+ * BLOG_PPTP_RCV_NO_TUNNEL when no matching channel exists.  Takes a
+ * sock_hold() on a match; the release side is not visible here -
+ * TODO confirm the caller drops that reference.
+ */
+int pptp_rcv_check(uint16_t call_id, uint32_t *rcv_pktSeq, uint32_t rcv_pktAck, uint32_t saddr)
+{
+    struct pppox_sock *sock;
+    struct pptp_opt *opt;
+    int ret = BLOG_PPTP_RCV_NO_TUNNEL;
+    
+    rcu_read_lock();
+    sock = rcu_dereference(callid_sock[call_id]);
+    if (sock) 
+    {
+        opt=&sock->proto.pptp;
+        if (opt->dst_addr.sin_addr.s_addr!=saddr) 
+            sock=NULL;
+        else 
+        {   
+            sock_hold(sk_pppox(sock));
+            //printk(KERN_INFO "PPTP: pptp_rcv_check() current seq_recv is %d \n", opt->seq_recv);
+            if (opt->seq_recv && ((*rcv_pktSeq) > opt->seq_recv)) 
+            {
+                opt->seq_recv = (*rcv_pktSeq);
+                ret = BLOG_PPTP_RCV_IN_SEQ;
+                /* NOTE(review): both operands below are u32, so the
+                 * difference is unsigned and "<= 0" is only true when it
+                 * is exactly 0; a signed comparison appears intended for
+                 * the less-than (OOS_LT) case - TODO confirm. */
+            } else if (opt->seq_recv && ((*rcv_pktSeq) - opt->seq_recv) <= 0) {
+                printk(KERN_INFO "pptp_rcv_check():[BLOG_PPTP_RCV_OOS_LT] current seq_recv is %d \n", opt->seq_recv);
+                ret = BLOG_PPTP_RCV_OOS_LT;
+            } else {
+                printk(KERN_INFO "pptp_rcv_check():[BLOG_PPTP_RCV_OOS_GT] current seq_recv is %d \n", opt->seq_recv);
+                opt->seq_recv = (*rcv_pktSeq);
+                ret = BLOG_PPTP_RCV_OOS_GT;
+            }       
+            
+            if (rcv_pktAck > opt->ack_recv) opt->ack_recv = rcv_pktAck;    
+        }
+            
+    }          
+    rcu_read_unlock();
+    
+    return ret;
+}
+
+EXPORT_SYMBOL(pptp_xmit_update);
+EXPORT_SYMBOL(pptp_xmit_get);
+EXPORT_SYMBOL(pptp_rcv_check);
+#endif
+
+module_init(pptp_init_module);
+module_exit(pptp_exit_module);
+
+MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol for Linux");
+MODULE_AUTHOR("Kozlov D. (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
+
+/* Debug knobs: log_level gates printk verbosity, log_packets caps how
+ * many packets are traced.  MODULE_PARM is the 2.4 spelling of
+ * module_param. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+MODULE_PARM(log_level,"i");
+MODULE_PARM(log_packets,"i");
+#else
+module_param(log_level,int,0);
+module_param(log_packets,int,0);
+#endif
+MODULE_PARM_DESC(log_level,"Logging level (default=0)");
+
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 872df3ef07a60899f13060cae59a498dd81c53e7..57715c39c873b3f2f5beeca96e5ee2207f20102f 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -148,6 +148,25 @@ config PPPOL2TP
 	  used by ISPs and enterprises to tunnel PPP traffic over UDP
 	  tunnels. L2TP is replacing PPTP for VPN uses.
 
+config PPPOLAC
+	tristate "PPP on L2TP Access Concentrator"
+	depends on PPP && INET
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles L2TP data packets between a UDP socket
+	  and a PPP channel, but only permits one session per socket. Thus it is
+	  fairly simple and suited for clients.
+
+config PPPOPNS
+	tristate "PPP on PPTP Network Server"
+	depends on PPP && INET
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles PPTP data packets between a RAW socket
+	  and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
 	tristate "PPP support for async serial ports"
 	depends on PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297b00669a3f1e0852f83780550a295e3216..9a0ee9d23508ecf893d78f8cbd3f76296ed2e7b7 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -11,3 +11,9 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 21d7151fb0ab3ea03c93208cf6d0c2749d1b825b..5e84526ae3fc532631f598a6f31cb279a58a798a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -54,6 +54,10 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 #define PPP_VERSION	"2.4.2"
 
 /*
@@ -70,6 +74,12 @@
 #define MPHDRLEN	6	/* multilink protocol header length */
 #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
 
+#if defined(CONFIG_BCM_KF_PPP)
+#define FIELD0    4        /* ppp device number ppp0, ppp1, the third digit (max 16) */
+#define FIELD1    8        /* if 0, default mode, 1 vlan mux, 2 msc */    
+#define FIELD2    19       /* if FILED1 is 0, add no extension, 1 add vlan id, 2 add conId for msc */
+#endif
+
 /*
  * An instance of /dev/ppp can be associated with either a ppp
  * interface unit or a ppp channel.  In both cases, file->private_data
@@ -122,6 +132,11 @@ struct ppp {
 	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
 	struct net_device *dev;		/* network interface device a4 */
 	int		closing;	/* is device closing down? a8 */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	BlogStats_t bstats; /* stats when the blog promiscuous layer has consumed packets */
+	struct net_device_stats cstats; /* Cummulative Device stats (rx-bytes, tx-pkts, etc...) */
+#endif
+
 #ifdef CONFIG_PPP_MULTILINK
 	int		nxchan;		/* next channel to send something on */
 	u32		nxseq;		/* next sequence number to send */
@@ -480,6 +495,10 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
 		goto out;
 	}
 
+#if defined(CONFIG_BCM_KF_PPP)
+	skb->mark = 7;    /* mark with the highest subpriority value */
+#endif	
+
 	skb_queue_tail(&pf->xq, skb);
 
 	switch (pf->kind) {
@@ -562,8 +581,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct npioctl npi;
 	int unit, cflags;
 	struct slcompress *vj;
+#if defined(CONFIG_BCM_KF_PPP) && defined(CONFIG_BCM_KF_NETDEV_PATH)
+        char real_dev_name[IFNAMSIZ];
+        struct net_device *real_dev;
+#endif
 	void __user *argp = (void __user *)arg;
 	int __user *p = argp;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	BlogStats_t bStats;
+#endif
 
 	if (!pf)
 		return ppp_unattached_ioctl(current->nsproxy->net_ns,
@@ -644,6 +670,34 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		err = 0;
 		break;
 
+#if defined(CONFIG_BCM_KF_PPP) && defined(CONFIG_BCM_KF_NETDEV_PATH)
+	case PPPIOCSREALDEV:
+                copy_from_user(real_dev_name, argp, IFNAMSIZ);
+                real_dev_name[IFNAMSIZ-1] = '\0'; /* NULL terminate, just in case */
+
+                real_dev = dev_get_by_name(&init_net, real_dev_name);
+                if(real_dev == NULL)
+                {
+                    printk(KERN_ERR "PPP: Invalid Real Device Name : %s\n", real_dev_name);
+                    err = -EINVAL;
+                    break;
+                }
+
+                err = netdev_path_add(ppp->dev, real_dev);
+                if(err)
+                {
+                    printk(KERN_ERR "PPP: Failed to add %s to Interface path (%d)",
+                           ppp->dev->name, err);
+                }
+                else
+                {
+                    netdev_path_dump(ppp->dev);
+                }
+
+                dev_put(real_dev);
+		break;
+#endif
+
 	case PPPIOCSFLAGS:
 		if (get_user(val, p))
 			break;
@@ -687,6 +741,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		break;
 
 	case PPPIOCGIDLE:
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		memset(&bStats, 0, sizeof(BlogStats_t));
+		blog_lock();
+		blog_notify(FETCH_NETIF_STATS, (void*)ppp->dev, (uint32_t)&bStats, BLOG_PARAM2_NO_CLEAR);
+		blog_unlock();
+		if(bStats.tx_packets)
+			ppp->last_xmit = jiffies;
+		if(bStats.rx_packets)
+			ppp->last_recv = jiffies;
+#endif
 		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
 		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
 		if (copy_to_user(argp, &idle, sizeof(idle)))
@@ -978,6 +1042,187 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+/* note: BLOG changes for read-only statistic data. */
+
+static inline BlogStats_t *ppp_dev_get_bstats(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	return &ppp->bstats;
+}
+static inline struct net_device_stats *ppp_dev_get_cstats(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	return &ppp->cstats;
+}
+
+static struct net_device_stats * ppp_dev_collect_stats(struct net_device *dev_p)
+{
+	BlogStats_t bStats;
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return (struct net_device_stats *)NULL;
+
+	dStats_p = &dev_p->stats;
+	cStats_p = ppp_dev_get_cstats(dev_p);
+	bStats_p = ppp_dev_get_bstats(dev_p);
+
+	memset(&bStats, 0, sizeof(BlogStats_t));
+
+	blog_lock();
+	blog_notify(FETCH_NETIF_STATS, (void*)dev_p,
+				(uint32_t)&bStats, BLOG_PARAM2_NO_CLEAR);
+	blog_unlock();
+
+	memcpy( cStats_p, dStats_p, sizeof(struct net_device_stats) );
+   
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	/* Handle packet count statistics, adding in BlogStats_t entries
+		from the flowcache */
+	cStats_p->rx_packets += ( bStats.rx_packets + bStats_p->rx_packets );
+	cStats_p->tx_packets += ( bStats.tx_packets + bStats_p->tx_packets );
+	cStats_p->multicast  += ( bStats.multicast  + bStats_p->multicast );
+	cStats_p->tx_multicast_packets += ( bStats.tx_multicast_packets + bStats_p->tx_multicast_packets );
+    
+	/* NOTE: There are no broadcast packets in BlogStats_t since the
+		flowcache doesn't accelerate broadcast.  Thus, they aren't added here */
+ 
+	/* set byte counts to 0 if the bstat packet counts are non 0 and the
+		octet counts are 0 */
+	/* Handle RX byte counts */
+	if ( ((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+			((bStats.rx_packets + bStats_p->rx_packets) > 0) )
+	{
+		cStats_p->rx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->rx_bytes   += ( bStats.rx_bytes   + bStats_p->rx_bytes );
+	}
+
+	/* Handle TX byte counts */
+	if ( ((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+			((bStats.tx_packets + bStats_p->tx_packets) > 0) )
+	{
+		cStats_p->tx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->tx_bytes   += ( bStats.tx_bytes   + bStats_p->tx_bytes );
+	}
+
+	/* Handle RX multicast byte counts */
+	if ( ((bStats.rx_multicast_bytes + bStats_p->rx_multicast_bytes) == 0) &&
+			((bStats.multicast + bStats_p->multicast) > 0) )
+	{
+		cStats_p->rx_multicast_bytes = 0;
+	}
+	else
+	{
+		cStats_p->rx_multicast_bytes   += ( bStats.rx_multicast_bytes   + bStats_p->rx_multicast_bytes );
+	}
+
+	/* Handle TX multicast byte counts */
+	if ( ((bStats.tx_multicast_bytes + bStats_p->tx_multicast_bytes) == 0) &&
+			((bStats.tx_multicast_packets + bStats_p->tx_multicast_packets) > 0) )
+	{
+		cStats_p->tx_multicast_bytes = 0;
+	}
+	else
+	{
+		cStats_p->tx_multicast_bytes   += ( bStats.tx_multicast_bytes   + bStats_p->tx_multicast_bytes );
+	}        
+#else
+   
+	cStats_p->rx_packets += ( bStats.rx_packets + bStats_p->rx_packets );
+	cStats_p->tx_packets += ( bStats.tx_packets + bStats_p->tx_packets );
+
+	/* set byte counts to 0 if the bstat packet counts are non 0 and the
+		octet counts are 0 */
+	if ( ((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+		  ((bStats.rx_packets + bStats_p->rx_packets) > 0) )
+	{
+		cStats_p->rx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->rx_bytes   += ( bStats.rx_bytes   + bStats_p->rx_bytes );
+	}
+
+	if ( ((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+		  ((bStats.tx_packets + bStats_p->tx_packets) > 0) )
+	{
+		cStats_p->tx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->tx_bytes   += ( bStats.tx_bytes   + bStats_p->tx_bytes );
+	}
+	cStats_p->multicast  += ( bStats.multicast  + bStats_p->multicast );
+#endif
+
+	return cStats_p;
+}
+
+static void ppp_dev_update_stats(struct net_device * dev_p, 
+                                BlogStats_t * blogStats_p)
+{
+	BlogStats_t * bStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+
+	bStats_p = ppp_dev_get_bstats(dev_p);
+
+	bStats_p->rx_packets += blogStats_p->rx_packets;
+	bStats_p->tx_packets += blogStats_p->tx_packets;
+	bStats_p->rx_bytes   += blogStats_p->rx_bytes;
+	bStats_p->tx_bytes   += blogStats_p->tx_bytes;
+	bStats_p->multicast  += blogStats_p->multicast;
+
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	/* Extended statistics */
+	bStats_p->tx_multicast_packets  += blogStats_p->tx_multicast_packets;
+	bStats_p->rx_multicast_bytes    += blogStats_p->rx_multicast_bytes;
+	bStats_p->tx_multicast_bytes    += blogStats_p->tx_multicast_bytes;
+    
+	/* NOTE: There are no broadcast packets in BlogStats_t since the
+		flowcache doesn't accelerate broadcast.  Thus, they aren't added here */    
+#endif       
+
+	return;
+}
+
+static void ppp_dev_clear_stats(struct net_device * dev_p)
+{
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+
+	dStats_p = &dev_p->stats;
+	cStats_p = ppp_dev_get_cstats(dev_p); 
+	bStats_p = ppp_dev_get_bstats(dev_p);
+
+	blog_lock();
+	blog_notify(FETCH_NETIF_STATS, (void*)dev_p, 0, BLOG_PARAM2_DO_CLEAR);
+	blog_unlock();
+
+	memset(bStats_p, 0, sizeof(BlogStats_t));
+	memset(dStats_p, 0, sizeof(struct net_device_stats));
+	memset(cStats_p, 0, sizeof(struct net_device_stats));
+
+	return;
+}
+#endif	/* defined(CONFIG_BLOG) */
+
 static int
 ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -1024,6 +1269,9 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_start_xmit = ppp_start_xmit,
 	.ndo_do_ioctl   = ppp_net_ioctl,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	.ndo_get_stats  = ppp_dev_collect_stats,
+#endif
 };
 
 static void ppp_setup(struct net_device *dev)
@@ -1035,8 +1283,21 @@ static void ppp_setup(struct net_device *dev)
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+#if defined(CONFIG_BCM_KF_WANDEV)
+	dev->priv_flags = IFF_WANDEV;
+#endif
+#if defined(CONFIG_BCM_KF_PPP)
+	dev->priv_flags |= IFF_PPP;
+#endif
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	dev->put_stats = ppp_dev_update_stats;
+	dev->clr_stats = ppp_dev_clear_stats;
+#if defined(CONFIG_BCM_KF_EXTSTATS)	
+	dev->features |= NETIF_F_EXTSTATS;
+#endif	
+#endif
 }
 
 /*
@@ -1118,6 +1379,54 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
 	return new_skb;
 }
 
+#if defined(CONFIG_BCM_KF_PPP)
+/*
+brcm_on_demand_filter(...) and ppp_send_frame(...) are protected for SMP+Preempt safety
+by ppp_xmit_lock(ppp) => spin_lock_bh(&(ppp)->wlock) and ppp_xmit_unlock(ppp) =>  spin_unlock_bh(&(ppp)->wlock). 
+*/
+
+/*
+ * Excluding timestamp for packet generated from ADSL modem
+ * these include WAN-side RIP,dnsprobe
+ */
+static int
+brcm_on_demand_filter(char *data)
+{
+	unsigned short udp_port=0;
+
+#if 0
+	char cmd;
+
+        printk("%02x%02x%02x%02x\n%02x%02x%02x%02x\n",data[2],data[3],data[4],data[5],data[6],data[7],data[8],data[9]);
+        printk("%02x%02x%02x%02x\n%02x%02x%02x%02x\n",data[10],data[11],data[12],data[13],data[14],data[15],data[16],data[17]);
+        printk("%02x%02x%02x%02x\n",data[18],data[19],data[20],data[21]);
+#endif
+
+	if ( data[11] == 0x2 )  /* IGMP */
+		return 0;
+	if ( data[11] == 0x11 ) { /* UDP */
+	   udp_port= (data[24]<< 8) + data[25];
+	   if ( udp_port == 123 ) { /* ntpclient */
+		return 0;
+	   }
+	   if ( udp_port == 53 ) {
+		if ( data[45] == 'r' && data[46] == 'o' && data[47] == 'o' && data[48] =='t')
+		 
+		return 0;
+	   }
+	   else if (udp_port == 520) { /* RIP */
+#if 0
+			cmd = data[30]; // 1=request, 2=reply
+			if ( cmd == 1)
+#endif
+			  return 0;
+	   }
+	}	
+	   
+        return 1;
+}
+#endif
+
 /*
  * Compress and send a frame.
  * The caller should have locked the xmit path,
@@ -1130,6 +1439,15 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	struct sk_buff *new_skb;
 	int len;
 	unsigned char *cp;
+#if defined(CONFIG_BCM_KF_PPP)
+	unsigned char *data;
+	int timestamp = 1;
+
+	if ( proto == PPP_IP) {
+		data = skb->data;
+		timestamp = brcm_on_demand_filter(data);
+	}
+#endif	
 
 	if (proto < 0x8000) {
 #ifdef CONFIG_PPP_FILTER
@@ -1149,14 +1467,26 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 		/* if this packet passes the active filter, record the time */
 		if (!(ppp->active_filter &&
 		      sk_run_filter(skb, ppp->active_filter) == 0))
+#if defined(CONFIG_BCM_KF_PPP)
+	       if (timestamp)
+#endif					       
 			ppp->last_xmit = jiffies;
 		skb_pull(skb, 2);
 #else
 		/* for data packets, record the time */
-		ppp->last_xmit = jiffies;
+#if defined(CONFIG_BCM_KF_PPP)
+	       if (timestamp)
+#endif			
+			ppp->last_xmit = jiffies;
 #endif /* CONFIG_PPP_FILTER */
 	}
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(IF_DEVICE, blog_ptr(skb), (void*)ppp->dev, DIR_TX, skb->len - 2 );
+	blog_unlock();
+#endif
+
 	++ppp->dev->stats.tx_packets;
 	ppp->dev->stats.tx_bytes += skb->len - 2;
 
@@ -1223,6 +1553,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	if (ppp->flags & SC_LOOP_TRAFFIC) {
 		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
 			goto drop;
+#if defined(CONFIG_BCM_KF_PPP)
+		if (!timestamp)
+			goto drop;
+#endif		
 		skb_queue_tail(&ppp->file.rq, skb);
 		wake_up_interruptible(&ppp->file.rwait);
 		return;
@@ -1668,11 +2002,42 @@ ppp_receive_error(struct ppp *ppp)
 		slhc_toss(ppp->vj);
 }
 
+#if defined(CONFIG_BCM_KF_PPP)
+/* 
+note: brcm_mcast_filter(...) and ppp_receive_nonmp_frame(...) are protected for SMP+Preempt safety
+by ppp_recv_lock(ppp) => spin_lock_bh(&(ppp)->rlock) and  ppp_recv_unlock(ppp) => spin_unlock_bh(&(ppp)->rlock). 
+*/
+
+static int
+brcm_mcast_filter(char *data)
+{
+	struct iphdr *encap;
+
+	encap = (struct iphdr *)(data + 2);
+	if ( ipv4_is_multicast(encap->daddr)) {
+	   if ( !ipv4_is_local_multicast(encap->daddr)) { // real mcast data
+		//printk("bcm_mcast_filer: 0x%x \n",encap->daddr);
+		return 1;		 // no timestamp
+	   }
+	   else
+		return 0;
+        }
+	else
+		return 0;
+}
+#endif
+
+
 static void
 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 {
 	struct sk_buff *ns;
 	int proto, len, npi;
+#if defined(CONFIG_BCM_KF_PPP)
+	struct sk_buff *tmp;
+	int timestamp=0;
+	unsigned char *data;
+#endif	
 
 	/*
 	 * Decompress the frame, if compressed.
@@ -1687,6 +2052,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		goto err;
 
 	proto = PPP_PROTO(skb);
+
+#if defined(CONFIG_BCM_KF_PPP)
+	if (proto == PPP_IP) {
+		data = skb->data;
+		timestamp = brcm_mcast_filter(data);
+	}
+#endif	
 	switch (proto) {
 	case PPP_VJC_COMP:
 		/* decompress VJ compressed packets */
@@ -1745,6 +2117,12 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		break;
 	}
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(IF_DEVICE, blog_ptr(skb), (void*)ppp->dev, DIR_RX, skb->len - 2 );
+	blog_unlock();
+#endif
+
 	++ppp->dev->stats.rx_packets;
 	ppp->dev->stats.rx_bytes += skb->len - 2;
 
@@ -1762,6 +2140,37 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 	} else {
 		/* network protocol frame - give it to the kernel */
 
+#if defined(CONFIG_BCM_KF_PPP)
+#ifdef CONFIG_PPP_FILTER
+		/* check if the packet passes the pass and active filters */
+		/* the filter instructions are constructed assuming
+		   a four-byte PPP header on each packet */
+		if (skb_headroom(skb) < 2) { 
+		    tmp = alloc_skb(skb->len+2,GFP_ATOMIC); 
+		    skb_reserve(tmp, 2); 
+		    memcpy(skb_put(tmp, skb->len), skb->data, skb->len); 
+		    kfree_skb(skb); 
+		    skb = tmp; 
+	   } 
+		*skb_push(skb, 2) = 0;
+		if (ppp->pass_filter
+		    && sk_run_filter(skb, ppp->pass_filter) == 0) {
+			if (ppp->debug & 1)
+				printk(KERN_DEBUG "PPP: inbound frame not passed\n");
+			kfree_skb(skb);
+			return;
+		}
+		if (!(ppp->active_filter
+		      && sk_run_filter(skb, ppp->active_filter) == 0))
+	      if (timestamp)
+			   ppp->last_recv = jiffies;
+		skb_pull(skb, 2);
+#else
+		if (timestamp)
+		   ppp->last_recv = jiffies;
+#endif /* CONFIG_PPP_FILTER */
+
+#else
 #ifdef CONFIG_PPP_FILTER
 		/* check if the packet passes the pass and active filters */
 		/* the filter instructions are constructed assuming
@@ -1788,6 +2197,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		} else
 #endif /* CONFIG_PPP_FILTER */
 			ppp->last_recv = jiffies;
+#endif /* CONFIG_BCM_KF_PPP */
 
 		if ((ppp->dev->flags & IFF_UP) == 0 ||
 		    ppp->npmode[npi] != NPMODE_PASS) {
@@ -2570,12 +2980,23 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
 	struct slcompress *vj = ppp->vj;
 
 	memset(st, 0, sizeof(*st));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	ppp_dev_collect_stats(ppp->dev);
+
+	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
+	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
+	st->p.ppp_ipackets = ppp->cstats.rx_packets;
+	st->p.ppp_ibytes = ppp->cstats.rx_bytes;
+	st->p.ppp_opackets = ppp->cstats.tx_packets;
+	st->p.ppp_obytes = ppp->cstats.tx_bytes;
+#else
 	st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
 	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
 	st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
 	st->p.ppp_opackets = ppp->dev->stats.tx_packets;
 	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
 	st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+#endif
 	if (!vj)
 		return;
 	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
@@ -2593,6 +3014,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * and for initialization.
  */
 
+#if defined(CONFIG_BCM_KF_PPP)
+/* note: ppp_create_interface(...) is protected by lock_kernel() and unlock_kernel() in ppp_unattached_ioctl(...). */
+#endif
+
 /*
  * Create a new ppp interface unit.  Fails if it can't allocate memory
  * or if there is already a unit with the requested number.
@@ -2662,7 +3087,44 @@ ppp_create_interface(struct net *net, int unit, int *retp)
 
 	/* Initialize the new ppp unit */
 	ppp->file.index = unit;
+
+#if defined(CONFIG_BCM_KF_PPP)
+   if (unit >= 0)
+   {
+      unsigned num[3]={0,0,0};
+      unsigned u=unit;
+     
+     /* req_name will beused as ifname and  for
+     * num[1] == 0:  default connection mdoe: ppp0, ppp1...
+     * num[1] == 1:  vlanMux mode: ppp0.100, ppp1.200...  
+     * num[1] == 2:  msc (multiple service mode) ppp0_1, ppp1_3...
+     * num[1] == 3:  pppoa0, pppoa1...
+     *
+     */
+      num[0] = u<<(32-(FIELD2+FIELD1+FIELD0))>>(32-FIELD0);
+      num[1] = u<<(32-(FIELD2+FIELD1))>>(32-FIELD1);
+      num[2] = u<<(32-(FIELD2))>>(32-FIELD2);
+      if (num[1] == 0)
+      {
+         sprintf(dev->name, "ppp%d", num[0]);
+      }
+      else if (num[1] == 1) /* vlan mux */
+      {
+         sprintf(dev->name, "ppp%d.%d", num[0], num[2]);
+      }
+      else if (num[1] == 2) /* msc */
+      {
+         sprintf(dev->name, "ppp%d_%d", num[0], num[2]);
+      }
+      else if (num[1] == 3) /* pppoa */
+      {
+         sprintf(dev->name, "pppoa%d", num[0]);
+      }
+   }
+#else
 	sprintf(dev->name, "ppp%d", unit);
+#endif
+
 
 	ret = register_netdev(dev);
 	if (ret != 0) {
@@ -2715,6 +3177,16 @@ static void ppp_shutdown_interface(struct ppp *ppp)
 	/* This will call dev_close() for us. */
 	ppp_lock(ppp);
 	if (!ppp->closing) {
+#if defined(CONFIG_BCM_KF_PPP)
+                int err;
+                err = netdev_path_remove(ppp->dev);
+                if(err)
+                {
+                    printk(KERN_ERR "PPP: Failed to remove %s from Interface path (%d)",
+                           ppp->dev->name, err);
+                    netdev_path_dump(ppp->dev);
+                }
+#endif
 		ppp->closing = 1;
 		ppp_unlock(ppp);
 		unregister_netdev(ppp->dev);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2fa1a9b6f498c929333e5453b4c41b94f727c507..b51a3afa78a7d04d2978ee4996d3455bc05757ff 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -483,7 +483,11 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto abort;
 
 	ph = pppoe_hdr(skb);
+#if defined(CONFIG_BCM_KF_PPP)
+	if ((ph->code != PADT_CODE) || (ph->sid))
+#else
 	if (ph->code != PADT_CODE)
+#endif /* CONFIG_BCM_KF_PPP */
 		goto abort;
 
 	pn = pppoe_pernet(dev_net(dev));
diff --git a/drivers/net/tg3_compat.h b/drivers/net/tg3_compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd34157413c7664379c378db7fbda75d7605cc59
--- /dev/null
+++ b/drivers/net/tg3_compat.h
@@ -0,0 +1,1646 @@
+/* Copyright (C) 2008-2011 Broadcom Corporation. */
+
+#include "tg3_flags.h"
+
+#if !defined(__maybe_unused)
+#define __maybe_unused  /* unimplemented */
+#endif
+
+#if !defined(__iomem)
+#define __iomem
+#endif
+
+#ifndef __acquires
+#define __acquires(x)
+#endif
+
+#ifndef __releases
+#define __releases(x)
+#endif
+
+#ifndef mmiowb
+#define mmiowb()
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef MODULE_VERSION
+#define MODULE_VERSION(version)
+#endif
+
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
+#endif
+
+#ifndef BCM_HAS_BOOL
+typedef int bool;
+#define false 0
+#define true  1
+#endif
+
+#ifndef BCM_HAS_LE32
+typedef u32 __le32;
+typedef u32 __be32;
+#endif
+
+#ifndef BCM_HAS_RESOURCE_SIZE_T
+typedef unsigned long resource_size_t;
+#endif
+
+#ifndef IRQ_RETVAL
+typedef void irqreturn_t;
+#define IRQ_RETVAL(x)
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef IRQF_SAMPLE_RANDOM
+#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM
+#endif
+
+#if (LINUX_VERSION_CODE <= 0x020600)
+#define schedule_work(x)	schedule_task(x)
+#define work_struct		tq_struct
+#define INIT_WORK(x, y, z)	INIT_TQUEUE(x, y, z)
+#endif
+
+#ifndef BCM_HAS_KZALLOC
+static inline void *kzalloc(size_t size, int flags)
+{
+	void * memptr = kmalloc(size, flags);
+	if (memptr)
+		memset(memptr, 0, size);
+
+	return memptr;
+}
+#endif
+
+#ifndef USEC_PER_SEC
+#define USEC_PER_SEC			1000000
+#endif
+
+#ifndef MSEC_PER_SEC
+#define MSEC_PER_SEC			1000
+#endif
+
+#ifndef MAX_JIFFY_OFFSET
+#define MAX_JIFFY_OFFSET		((LONG_MAX >> 1)-1)
+#endif
+
+#ifndef BCM_HAS_JIFFIES_TO_USECS
+static unsigned int inline jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	return (USEC_PER_SEC / HZ) * j;
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
+#else
+	return (j * USEC_PER_SEC) / HZ;
+#endif
+}
+#endif /* BCM_HAS_JIFFIES_TO_USECS */
+
+#ifndef BCM_HAS_USECS_TO_JIFFIES
+static unsigned long usecs_to_jiffies(const unsigned int u)
+{
+	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+	return u * (HZ / USEC_PER_SEC);
+#else
+	return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
+#endif
+}
+#endif /* BCM_HAS_USECS_TO_JIFFIES */
+
+#ifndef BCM_HAS_MSECS_TO_JIFFIES
+static unsigned long msecs_to_jiffies(const unsigned int m)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	/*
+	 * HZ is equal to or smaller than 1000, and 1000 is a nice
+	 * round multiple of HZ, divide with the factor between them,
+	 * but round upwards:
+	 */
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	/*
+	 * HZ is larger than 1000, and HZ is a nice round multiple of
+	 * 1000 - simply multiply with the factor between them.
+	 *
+	 * But first make sure the multiplication result cannot
+	 * overflow:
+	 */
+	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+
+	return m * (HZ / MSEC_PER_SEC);
+#else
+	/*
+	 * Generic case - multiply, round and divide. But first
+	 * check that if we are doing a net multiplication, that
+	 * we wouldn't overflow:
+	 */
+	if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+#endif /* BCM_HAS_MSECS_TO_JIFFIES */
+
+#ifndef BCM_HAS_MSLEEP
+static void msleep(unsigned int msecs)
+{
+	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+	while (timeout) {
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+}
+#endif /* BCM_HAS_MSLEEP */
+
+#ifndef BCM_HAS_MSLEEP_INTERRUPTIBLE
+static unsigned long msleep_interruptible(unsigned int msecs)
+{
+	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+	while (timeout) {
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+
+	return 0;
+}
+#endif /* BCM_HAS_MSLEEP_INTERRUPTIBLE */
+
+#ifndef printk_once
+#define printk_once(x...) ({			\
+	static bool tg3___print_once = false;	\
+						\
+	if (!tg3___print_once) {		\
+		tg3___print_once = true;	\
+		printk(x);			\
+	}					\
+})
+#endif
+
+#if !defined(BCM_HAS_DEV_DRIVER_STRING) || defined(__VMKLNX__)
+#define dev_driver_string(dev)	"tg3"
+#endif
+
+#ifndef BCM_HAS_DEV_NAME
+#define dev_name(dev)			""
+#endif
+
+#if defined(dev_printk) && ((LINUX_VERSION_CODE < 0x020609) || defined(__VMKLNX__))
+/*
+ * SLES 9 and VMWare do not populate the pdev->dev.bus_id string soon
+ * enough for driver use during boot.  Use our own format instead.
+ */
+#undef dev_printk
+#endif
+
+#ifndef dev_printk
+#define dev_printk(level, dev, format, arg...)	\
+	printk(level "%s %s: " format , dev_driver_string(dev) , \
+	       dev_name(dev) , ## arg)
+#endif
+
+#ifndef dev_err
+#define dev_err(dev, format, arg...)		\
+	dev_printk(KERN_ERR , dev , format , ## arg)
+#endif
+
+#ifndef dev_warn
+#define dev_warn(dev, format, arg...)		\
+	dev_printk(KERN_WARNING , dev , format , ## arg)
+#endif
+
+#ifndef BCM_HAS_PCI_IOREMAP_BAR
+static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+	resource_size_t base, size;
+
+	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+		printk(KERN_ERR
+		       "Cannot find proper PCI device base address for BAR %d.\n",
+		       bar);
+		return NULL;
+	}
+
+	base = pci_resource_start(pdev, bar);
+	size = pci_resource_len(pdev, bar);
+
+	return ioremap_nocache(base, size);
+}
+#endif
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(x) struct pci_device_id x[]
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020547)
+#define pci_set_consistent_dma_mask(pdev, mask) (0)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020600)
+#define pci_get_device(x, y, z)	pci_find_device(x, y, z)
+#define pci_get_slot(x, y)	pci_find_slot((x)->number, y)
+#define pci_dev_put(x)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020605)
+#define pci_dma_sync_single_for_cpu(pdev, map, len, dir)	\
+        pci_dma_sync_single(pdev, map, len, dir)
+#define pci_dma_sync_single_for_device(pdev, map, len, dir)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5704S_2
+#define PCI_DEVICE_ID_TIGON3_5704S_2	0x1649
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5705F
+#define PCI_DEVICE_ID_TIGON3_5705F	0x166e
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5720
+#define PCI_DEVICE_ID_TIGON3_5720	0x1658
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5721
+#define PCI_DEVICE_ID_TIGON3_5721	0x1659
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5750
+#define PCI_DEVICE_ID_TIGON3_5750	0x1676
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5751
+#define PCI_DEVICE_ID_TIGON3_5751	0x1677
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5750M
+#define PCI_DEVICE_ID_TIGON3_5750M	0x167c
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5751M
+#define PCI_DEVICE_ID_TIGON3_5751M	0x167d
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5751F
+#define PCI_DEVICE_ID_TIGON3_5751F	0x167e
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5789
+#define PCI_DEVICE_ID_TIGON3_5789	0x169d
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5753
+#define PCI_DEVICE_ID_TIGON3_5753	0x16f7
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5753M
+#define PCI_DEVICE_ID_TIGON3_5753M	0x16fd
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5753F
+#define PCI_DEVICE_ID_TIGON3_5753F	0x16fe
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5781
+#define PCI_DEVICE_ID_TIGON3_5781	0x16dd
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5752
+#define PCI_DEVICE_ID_TIGON3_5752	0x1600
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5752M
+#define PCI_DEVICE_ID_TIGON3_5752M	0x1601
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5714
+#define PCI_DEVICE_ID_TIGON3_5714	0x1668
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5714S
+#define PCI_DEVICE_ID_TIGON3_5714S	0x1669
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5780
+#define PCI_DEVICE_ID_TIGON3_5780	0x166a
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5780S
+#define PCI_DEVICE_ID_TIGON3_5780S	0x166b
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5715
+#define PCI_DEVICE_ID_TIGON3_5715	0x1678
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5715S
+#define PCI_DEVICE_ID_TIGON3_5715S	0x1679
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5756
+#define PCI_DEVICE_ID_TIGON3_5756	0x1674
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5754
+#define PCI_DEVICE_ID_TIGON3_5754	0x167a
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5754M
+#define PCI_DEVICE_ID_TIGON3_5754M	0x1672
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5755
+#define PCI_DEVICE_ID_TIGON3_5755	0x167b
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5755M
+#define PCI_DEVICE_ID_TIGON3_5755M	0x1673
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5722
+#define PCI_DEVICE_ID_TIGON3_5722	0x165a
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5786
+#define PCI_DEVICE_ID_TIGON3_5786	0x169a
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5787M
+#define PCI_DEVICE_ID_TIGON3_5787M	0x1693
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5787
+#define PCI_DEVICE_ID_TIGON3_5787	0x169b
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5787F
+#define PCI_DEVICE_ID_TIGON3_5787F	0x167f
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5906
+#define PCI_DEVICE_ID_TIGON3_5906	0x1712
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5906M
+#define PCI_DEVICE_ID_TIGON3_5906M	0x1713
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5784
+#define PCI_DEVICE_ID_TIGON3_5784	0x1698
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5764
+#define PCI_DEVICE_ID_TIGON3_5764	0x1684
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5723
+#define PCI_DEVICE_ID_TIGON3_5723	0x165b
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5761
+#define PCI_DEVICE_ID_TIGON3_5761	0x1681
+#endif
+
+#ifndef PCI_DEVICE_ID_TIGON3_5761E
+#define PCI_DEVICE_ID_TIGON3_5761E	0x1680
+#endif
+
+#ifndef PCI_DEVICE_ID_APPLE_TIGON3
+#define PCI_DEVICE_ID_APPLE_TIGON3	0x1645
+#endif
+
+#ifndef PCI_DEVICE_ID_APPLE_UNI_N_PCI15
+#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15	0x002e
+#endif
+
+#ifndef PCI_DEVICE_ID_VIA_8385_0
+#define PCI_DEVICE_ID_VIA_8385_0	0x3188
+#endif
+
+#ifndef PCI_DEVICE_ID_AMD_8131_BRIDGE
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE	0x7450
+#endif
+
+#ifndef PCI_DEVICE_ID_SERVERWORKS_EPB
+#define PCI_DEVICE_ID_SERVERWORKS_EPB	0x0103
+#endif
+
+#ifndef PCI_VENDOR_ID_ARIMA
+#define PCI_VENDOR_ID_ARIMA		0x161f
+#endif
+
+#ifndef PCI_DEVICE_ID_INTEL_PXH_0
+#define PCI_DEVICE_ID_INTEL_PXH_0	0x0329
+#endif
+
+#ifndef PCI_DEVICE_ID_INTEL_PXH_1
+#define PCI_DEVICE_ID_INTEL_PXH_1	0x032A
+#endif
+
+#ifndef PCI_D0
+typedef u32 pm_message_t;
+typedef u32 pci_power_t;
+#define PCI_D0		0
+#define PCI_D1		1
+#define PCI_D2		2
+#define PCI_D3hot	3
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL)
+#endif
+
+#ifndef DMA_40BIT_MASK
+#define DMA_40BIT_MASK ((u64) 0x000000ffffffffffULL)
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK ((u64) 0x00000000ffffffffULL)
+#endif
+
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n)  DMA_ ##n ##BIT_MASK
+#endif
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR	DECLARE_PCI_UNMAP_ADDR
+#endif
+
+#if !defined(BCM_HAS_DMA_UNMAP_ADDR)
+#define dma_unmap_addr		pci_unmap_addr
+#endif
+
+#if !defined(BCM_HAS_DMA_UNMAP_ADDR_SET)
+#define dma_unmap_addr_set	pci_unmap_addr_set
+#endif
+
+#if !defined(BCM_HAS_PCI_TARGET_STATE) && !defined(BCM_HAS_PCI_CHOOSE_STATE)
+static inline pci_power_t pci_choose_state(struct pci_dev *dev,
+					   pm_message_t state)
+{
+	return state;
+}
+#endif
+
+#ifndef BCM_HAS_PCI_PME_CAPABLE
+static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
+{
+	int pm_cap;
+	u16 caps;
+
+	pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM);
+	if (pm_cap == 0)
+		return false;
+
+	pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps);
+
+	if (caps & PCI_PM_CAP_PME_D3cold)
+		return true;
+
+	return false;
+}
+#endif /* BCM_HAS_PCI_PME_CAPABLE */
+
+#ifndef BCM_HAS_PCI_ENABLE_WAKE
+static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+{
+	int pm_cap;
+	u16 pmcsr;
+
+	pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM);
+	if (pm_cap == 0)
+		return -EIO;
+
+	pci_read_config_word(dev, pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	/* Clear PME_Status by writing 1 to it */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS;
+
+	if (enable)
+		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+	else
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+	pci_write_config_word(dev, pm_cap + PCI_PM_CTRL, pmcsr);
+
+	return 0;
+}
+#endif /* BCM_HAS_PCI_ENABLE_WAKE */
+
+#ifndef BCM_HAS_PCI_SET_POWER_STATE
+static int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+	int pm_cap;
+	u16 pmcsr;
+
+	if (state < PCI_D0 || state > PCI_D3hot)
+		return -EINVAL;
+
+	pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM);
+	if (pm_cap == 0)
+		return -EIO;
+
+	pci_read_config_word(dev, pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	pmcsr &= ~(PCI_PM_CTRL_STATE_MASK);
+	pmcsr |= state;
+
+	pci_write_config_word(dev, pm_cap + PCI_PM_CTRL, pmcsr);
+
+	msleep(10);
+
+	return 0;
+}
+#endif /* BCM_HAS_PCI_SET_POWER_STATE */
+
+#ifndef BCM_HAS_DEVICE_WAKEUP_API
+#define device_init_wakeup(dev, val)
+#define device_can_wakeup(dev) 1
+#define device_set_wakeup_enable(dev, val)
+#define device_may_wakeup(dev) 1
+#endif /* BCM_HAS_DEVICE_WAKEUP_API */
+
+
+#ifndef PCI_X_CMD_READ_2K
+#define  PCI_X_CMD_READ_2K		0x0008
+#endif
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+#ifndef PCI_EXP_LNKCTL
+#define PCI_EXP_LNKCTL 16
+#endif
+#ifndef PCI_EXP_LNKCTL_CLKREQ_EN
+#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100
+#endif
+
+#ifndef PCI_EXP_DEVCTL_NOSNOOP_EN
+#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800
+#endif
+
+#ifndef PCI_EXP_DEVCTL_RELAX_EN
+#define PCI_EXP_DEVCTL_RELAX_EN		0x0010
+#endif
+
+#ifndef PCI_EXP_DEVCTL_PAYLOAD
+#define PCI_EXP_DEVCTL_PAYLOAD		0x00e0
+#endif
+
+#ifndef PCI_EXP_DEVSTA
+#define PCI_EXP_DEVSTA          10
+#define  PCI_EXP_DEVSTA_CED     0x01
+#define  PCI_EXP_DEVSTA_NFED    0x02
+#define  PCI_EXP_DEVSTA_FED     0x04
+#define  PCI_EXP_DEVSTA_URD     0x08
+#endif
+
+#ifndef PCI_EXP_LNKSTA
+#define PCI_EXP_LNKSTA		18
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS
+#define  PCI_EXP_LNKSTA_CLS	0x000f
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define  PCI_EXP_LNKSTA_CLS_2_5GB 0x01
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define  PCI_EXP_LNKSTA_CLS_5_0GB 0x02
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW
+#define  PCI_EXP_LNKSTA_NLW	0x03f0
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define  PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef BCM_HAS_PCIE_SET_READRQ
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL		8
+#endif
+#ifndef PCI_EXP_DEVCTL_READRQ
+#define PCI_EXP_DEVCTL_READRQ	0x7000
+#endif
+static inline int pcie_set_readrq(struct pci_dev *dev, int rq)
+{
+	int cap, err = -EINVAL;
+	u16 ctl, v;
+
+	if (rq < 128 || rq > 4096 || (rq & (rq-1)))
+		goto out;
+
+	v = (ffs(rq) - 8) << 12;
+
+	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!cap)
+		goto out;
+
+	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+	if (err)
+		goto out;
+
+	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
+		ctl &= ~PCI_EXP_DEVCTL_READRQ;
+		ctl |= v;
+		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
+	}
+
+out:
+	return err;
+}
+#endif /* BCM_HAS_PCIE_SET_READRQ */
+
+#ifndef BCM_HAS_PCI_READ_VPD
+#if !defined(PCI_CAP_ID_VPD)
+#define  PCI_CAP_ID_VPD		0x03
+#endif
+#if !defined(PCI_VPD_ADDR)
+#define PCI_VPD_ADDR		2
+#endif
+#if !defined(PCI_VPD_DATA)
+#define PCI_VPD_DATA		4
+#endif
+static inline ssize_t
+pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, u8 *buf)
+{
+	int i, vpd_cap;
+
+	vpd_cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+	if (!vpd_cap)
+		return -ENODEV;
+
+	for (i = 0; i < count; i += 4) {
+		u32 tmp, j = 0;
+		__le32 v;
+		u16 tmp16;
+
+		pci_write_config_word(dev, vpd_cap + PCI_VPD_ADDR, i);
+		while (j++ < 100) {
+			pci_read_config_word(dev, vpd_cap +
+					     PCI_VPD_ADDR, &tmp16);
+			if (tmp16 & 0x8000)
+				break;
+			msleep(1);
+		}
+		if (!(tmp16 & 0x8000))
+			break;
+		/* Clamp the tail copy: count need not be a multiple of 4 */
+		pci_read_config_dword(dev, vpd_cap + PCI_VPD_DATA, &tmp);
+		v = cpu_to_le32(tmp);
+		memcpy(&buf[i], &v, count - i < sizeof(v) ? count - i : sizeof(v));
+	}
+
+	return i < count ? i : count;	/* never report more than count bytes */
+}
+#endif /* BCM_HAS_PCI_READ_VPD */
+
+#ifndef PCI_VPD_LRDT
+#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
+#define PCI_VPD_LRDT_ID(x)		(x | PCI_VPD_LRDT)
+
+/* Large Resource Data Type Tag Item Names */
+#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
+#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
+#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
+
+#define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
+#define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
+#define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
+
+/* Small Resource Data Type Tag Item Names */
+#define PCI_VPD_STIN_END		0x78	/* End */
+
+#define PCI_VPD_SRDT_END		PCI_VPD_STIN_END
+
+#define PCI_VPD_SRDT_TIN_MASK		0x78
+#define PCI_VPD_SRDT_LEN_MASK		0x07
+
+#define PCI_VPD_LRDT_TAG_SIZE		3
+#define PCI_VPD_SRDT_TAG_SIZE		1
+
+#define PCI_VPD_INFO_FLD_HDR_SIZE	3
+
+#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
+#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
+#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
+
+/**
+ * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
+ * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
+ *
+ * Returns the extracted Large Resource Data Type length.
+ */
+static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
+{
+	return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
+}
+
+/**
+ * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
+ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ *
+ * Returns the extracted Small Resource Data Type length.
+ */
+static inline u8 pci_vpd_srdt_size(const u8 *srdt)
+{
+	return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
+}
+
+/**
+ * pci_vpd_info_field_size - Extracts the information field length
+ * @info_field: Pointer to the beginning of an information field header
+ *
+ * Returns the extracted information field length.
+ */
+static inline u8 pci_vpd_info_field_size(const u8 *info_field)
+{
+	return info_field[2];
+}
+
+static int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
+{
+	int i;
+
+	for (i = off; i < len; ) {
+		u8 val = buf[i];
+
+		if (val & PCI_VPD_LRDT) {
+			/* Don't return success if the tag isn't complete */
+			if (i + PCI_VPD_LRDT_TAG_SIZE > len)
+				break;
+
+			if (val == rdt)
+				return i;
+
+			i += PCI_VPD_LRDT_TAG_SIZE +
+			     pci_vpd_lrdt_size(&buf[i]);
+		} else {
+			u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
+
+			if (tag == rdt)
+				return i;
+
+			if (tag == PCI_VPD_SRDT_END)
+				break;
+
+			i += PCI_VPD_SRDT_TAG_SIZE +
+			     pci_vpd_srdt_size(&buf[i]);
+		}
+	}
+
+	return -ENOENT;
+}
+
+static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+			      unsigned int len, const char *kw)
+{
+	int i;
+
+	for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
+		if (buf[i + 0] == kw[0] &&
+		    buf[i + 1] == kw[1])
+			return i;
+
+		i += PCI_VPD_INFO_FLD_HDR_SIZE +
+		     pci_vpd_info_field_size(&buf[i]);
+	}
+
+	return -ENOENT;
+}
+#endif
+
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
+static inline void tg3_enable_intx(struct pci_dev *pdev)
+{
+#if (LINUX_VERSION_CODE < 0x2060e)
+	u16 pci_command;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+	if (pci_command & PCI_COMMAND_INTX_DISABLE)
+		pci_write_config_word(pdev, PCI_COMMAND,
+				      pci_command & ~PCI_COMMAND_INTX_DISABLE);
+#else
+	pci_intx(pdev, 1);
+#endif
+}
+#endif /* BCM_HAS_INTX_MSI_WORKAROUND */
+
+
+#if (LINUX_VERSION_CODE >= 0x20613) || \
+    (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__))
+#define BCM_HAS_NEW_IRQ_SIG
+#endif
+
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || \
+    defined(INIT_WORK_NAR) || \
+    (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__))
+#define BCM_HAS_NEW_INIT_WORK
+#endif
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+
+#ifndef BCM_HAS_PRINT_MAC
+
+#ifndef DECLARE_MAC_BUF
+#define DECLARE_MAC_BUF(_mac) char _mac[18]
+#endif
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+
+static char *print_mac(char * buf, const u8 *addr)
+{
+	sprintf(buf, MAC_FMT,
+	        addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+	return buf;
+}
+#endif
+
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+
+#if !defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM) && \
+    !defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM)   && \
+     defined(BCM_HAS_SET_TX_CSUM)
+static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data)
+{
+	if (data)
+		dev->features |= NETIF_F_HW_CSUM;
+	else
+		dev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+
+#ifndef NETIF_F_IPV6_CSUM
+#define NETIF_F_IPV6_CSUM 16
+#define BCM_NO_IPV6_CSUM  1
+#endif
+
+#ifndef NETIF_F_GRO
+#define NETIF_F_GRO			16384
+#endif
+
+#ifdef NETIF_F_TSO
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+#ifndef NETIF_F_TSO6
+#define NETIF_F_TSO6	0
+#define BCM_NO_TSO6     1
+#endif
+#ifndef NETIF_F_TSO_ECN
+#define NETIF_F_TSO_ECN 0
+#endif
+
+#if (LINUX_VERSION_CODE < 0x2060c)
+static inline int skb_header_cloned(struct sk_buff *skb) { return 0; }
+#endif
+
+#ifndef BCM_HAS_SKB_TRANSPORT_OFFSET
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+	return (int) (skb->h.raw - skb->data);
+}
+#endif
+
+#ifndef BCM_HAS_IP_HDR
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+	return skb->nh.iph;
+}
+#endif
+
+#ifndef BCM_HAS_IP_HDRLEN
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
+{
+	return ip_hdr(skb)->ihl * 4;
+}
+#endif
+
+#ifndef BCM_HAS_TCP_HDR
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+	return skb->h.th;
+}
+#endif
+
+#ifndef BCM_HAS_TCP_OPTLEN
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+	return (tcp_hdr(skb)->doff - 5) * 4;
+}
+#endif
+
+#ifndef NETIF_F_GSO
+static struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+{
+	struct sk_buff *segs = NULL;
+	struct sk_buff *tail = NULL;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int doffset = skb->data - skb->mac.raw;
+	unsigned int offset = doffset;
+	unsigned int headroom;
+	unsigned int len;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	int err = -ENOMEM;
+	int i = 0;
+	int pos;
+
+	__skb_push(skb, doffset);
+	headroom = skb_headroom(skb);
+	pos = skb_headlen(skb);
+
+	do {
+		struct sk_buff *nskb;
+		skb_frag_t *frag;
+		int hsize;
+		int k;
+		int size;
+
+		len = skb->len - offset;
+		if (len > mss)
+			len = mss;
+
+		hsize = skb_headlen(skb) - offset;
+		if (hsize < 0)
+			hsize = 0;
+		if (hsize > len)
+			hsize = len;
+
+		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
+		if (unlikely(!nskb))
+			goto err;
+
+		if (segs)
+			tail->next = nskb;
+		else
+			segs = nskb;
+		tail = nskb;
+
+		nskb->dev = skb->dev;
+		nskb->priority = skb->priority;
+		nskb->protocol = skb->protocol;
+		nskb->dst = dst_clone(skb->dst);
+		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+		nskb->pkt_type = skb->pkt_type;
+		nskb->mac_len = skb->mac_len;
+
+		skb_reserve(nskb, headroom);
+		nskb->mac.raw = nskb->data;
+		nskb->nh.raw = nskb->data + skb->mac_len;
+		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
+		memcpy(skb_put(nskb, doffset), skb->data, doffset);
+
+		frag = skb_shinfo(nskb)->frags;
+		k = 0;
+
+		nskb->ip_summed = CHECKSUM_PARTIAL;
+		nskb->csum = skb->csum;
+		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+
+		while (pos < offset + len) {
+			BUG_ON(i >= nfrags);
+
+			*frag = skb_shinfo(skb)->frags[i];
+			get_page(frag->page);
+			size = frag->size;
+
+			if (pos < offset) {
+				frag->page_offset += offset - pos;
+				frag->size -= offset - pos;
+			}
+
+			k++;
+
+			if (pos + size <= offset + len) {
+				i++;
+				pos += size;
+			} else {
+				frag->size -= pos + size - (offset + len);
+				break;
+			}
+
+			frag++;
+		}
+
+		skb_shinfo(nskb)->nr_frags = k;
+		nskb->data_len = len - hsize;
+		nskb->len += nskb->data_len;
+		nskb->truesize += nskb->data_len;
+	} while ((offset += len) < skb->len);
+
+	return segs;
+
+err:
+	while ((skb = segs)) {
+		segs = skb->next;
+		kfree_skb(skb);	/* kfree() would leak frag page refs and the dst */
+	}
+	return ERR_PTR(err);
+}
+
+static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct tcphdr *th;
+	unsigned thlen;
+	unsigned int seq;
+	u32 delta;
+	unsigned int oldlen;
+	unsigned int len;
+
+	if (!pskb_may_pull(skb, sizeof(*th)))
+		goto out;
+
+	th = skb->h.th;
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		goto out;
+
+	if (!pskb_may_pull(skb, thlen))
+		goto out;
+
+	oldlen = (u16)~skb->len;
+	__skb_pull(skb, thlen);
+
+	segs = skb_segment(skb, features);
+	if (IS_ERR(segs))
+		goto out;
+
+	len = skb_shinfo(skb)->gso_size;
+	delta = htonl(oldlen + (thlen + len));
+
+	skb = segs;
+	th = skb->h.th;
+	seq = ntohl(th->seq);
+
+	do {
+		th->fin = th->psh = 0;
+
+		th->check = ~csum_fold((u32)((u32)th->check +
+				       (u32)delta));
+		seq += len;
+		skb = skb->next;
+		th = skb->h.th;
+
+		th->seq = htonl(seq);
+		th->cwr = 0;
+	} while (skb->next);
+
+	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+	th->check = ~csum_fold((u32)((u32)th->check +
+				(u32)delta));
+out:
+	return segs;
+}
+
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct iphdr *iph;
+	int ihl;
+	int id;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = skb->nh.iph;
+	ihl = iph->ihl * 4;
+	if (ihl < sizeof(*iph))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, ihl)))
+		goto out;
+
+	skb->h.raw = __skb_pull(skb, ihl);
+	iph = skb->nh.iph;
+	id = ntohs(iph->id);
+	segs = ERR_PTR(-EPROTONOSUPPORT);
+
+	segs = tcp_tso_segment(skb, features);
+
+	if (!segs || IS_ERR(segs))
+		goto out;
+
+	skb = segs;
+	do {
+		iph = skb->nh.iph;
+		iph->id = htons(id++);
+		iph->tot_len = htons(skb->len - skb->mac_len);
+		iph->check = 0;
+		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+	} while ((skb = skb->next));
+
+out:
+	return segs;
+}
+
+static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+
+	skb->mac.raw = skb->data;
+	skb->mac_len = skb->nh.raw - skb->data;
+	__skb_pull(skb, skb->mac_len);
+
+	segs = inet_gso_segment(skb, features);
+
+	__skb_push(skb, skb->data - skb->mac.raw);
+	return segs;
+}
+#endif /* NETIF_F_GSO */
+
+#endif /* NETIF_F_TSO */
+
+#ifndef BCM_HAS_SKB_COPY_FROM_LINEAR_DATA
+static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
+					     void *to,
+					     const unsigned int len)
+{
+	memcpy(to, skb->data, len);
+}
+#endif
+
+#if !defined(BCM_NO_TSO6) && !defined(BCM_HAS_SKB_IS_GSO_V6)
+static inline int skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif
+
+#ifndef BCM_HAS_SKB_CHECKSUM_NONE_ASSERT
+static inline void skb_checksum_none_assert(struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+}
+#endif
+
+#ifndef BCM_HAS_NETDEV_TX_T
+typedef int	netdev_tx_t;
+#endif
+
+#ifndef BCM_HAS_NETDEV_NAME
+#define netdev_name(netdev)	netdev->name
+#endif
+
+#if defined(netdev_printk) && (LINUX_VERSION_CODE < 0x020609)
+/* SLES 9.X provides their own print routines, but they are not compatible
+ * with the versions found in the latest upstream kernel.  The kernel
+ * version check above was picked out of the air as a value greater than
+ * 2.6.5-7.308, but any number that preserves this boundary should be
+ * acceptable.
+ */
+#undef netdev_printk
+#undef netdev_info
+#undef netdev_err
+#undef netdev_warn
+#endif
+
+#ifndef netdev_printk
+#define netdev_printk(level, netdev, format, args...)	\
+	dev_printk(level, tp->pdev->dev.parent,	\
+		   "%s: " format,			\
+		   netdev_name(tp->dev), ##args)
+#endif
+
+#ifndef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...)	\
+do {								\
+	if (netif_msg_##type(priv))				\
+		netdev_printk(level, (dev), fmt, ##args);	\
+} while (0)
+#endif
+
+#ifndef netif_info
+#define netif_info(priv, type, dev, fmt, args...)		\
+	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
+#endif
+
+#ifndef netdev_err
+#define netdev_err(dev, format, args...)			\
+	netdev_printk(KERN_ERR, dev, format, ##args)
+#endif
+
+#ifndef netdev_warn
+#define netdev_warn(dev, format, args...)			\
+	netdev_printk(KERN_WARNING, dev, format, ##args)
+#endif
+
+#ifndef netdev_notice
+#define netdev_notice(dev, format, args...)			\
+	netdev_printk(KERN_NOTICE, dev, format, ##args)
+#endif
+
+#ifndef netdev_info
+#define netdev_info(dev, format, args...)			\
+	netdev_printk(KERN_INFO, dev, format, ##args)
+#endif
+
+#ifndef BCM_HAS_NETIF_TX_LOCK
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->xmit_lock);
+}
+#endif /* BCM_HAS_NETIF_TX_LOCK */
+
+#if defined(BCM_HAS_STRUCT_NETDEV_QUEUE) || \
+    (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__))
+
+#define TG3_NAPI
+#define tg3_netif_rx_complete(dev, napi)	napi_complete((napi))
+#define tg3_netif_rx_schedule(dev, napi)	napi_schedule((napi))
+#define tg3_netif_rx_schedule_prep(dev, napi)	napi_schedule_prep((napi))
+
+#else  /* BCM_HAS_STRUCT_NETDEV_QUEUE */
+
+#define netdev_queue	net_device
+#define netdev_get_tx_queue(dev, i)		(dev)
+#define netif_tx_start_queue(dev)		netif_start_queue((dev))
+#define netif_tx_start_all_queues(dev)		netif_start_queue((dev))
+#define netif_tx_stop_queue(dev)		netif_stop_queue((dev))
+#define netif_tx_stop_all_queues(dev)		netif_stop_queue((dev))
+#define netif_tx_queue_stopped(dev)		netif_queue_stopped((dev))
+#define netif_tx_wake_queue(dev)		netif_wake_queue((dev))
+#define netif_tx_wake_all_queues(dev)		netif_wake_queue((dev))
+#define __netif_tx_lock(txq, procid)		netif_tx_lock((txq))
+#define __netif_tx_unlock(txq)			netif_tx_unlock((txq))
+
+#if defined(BCM_HAS_NEW_NETIF_INTERFACE)
+#define TG3_NAPI
+#define tg3_netif_rx_complete(dev, napi)	netif_rx_complete((dev), (napi))
+#define tg3_netif_rx_schedule(dev, napi)	netif_rx_schedule((dev), (napi))
+#define tg3_netif_rx_schedule_prep(dev, napi)	netif_rx_schedule_prep((dev), (napi))
+#else  /* BCM_HAS_NEW_NETIF_INTERFACE */
+#define tg3_netif_rx_complete(dev, napi)	netif_rx_complete((dev))
+#define tg3_netif_rx_schedule(dev, napi)	netif_rx_schedule((dev))
+#define tg3_netif_rx_schedule_prep(dev, napi)	netif_rx_schedule_prep((dev))
+#endif /* BCM_HAS_NEW_NETIF_INTERFACE */
+
+#endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */
+
+#if !defined(BCM_HAS_ALLOC_ETHERDEV_MQ) || !defined(TG3_NAPI)
+#define alloc_etherdev_mq(size, numqs)		alloc_etherdev((size))
+#endif
+
+#if !defined(TG3_NAPI) || !defined(BCM_HAS_VLAN_GRO_RECEIVE)
+#define vlan_gro_receive(nap, grp, tag, skb) \
+        vlan_hwaccel_receive_skb((skb), (grp), (tag))
+#endif
+
+#if !defined(TG3_NAPI) || !defined(BCM_HAS_NAPI_GRO_RECEIVE)
+#define napi_gro_receive(nap, skb) \
+        netif_receive_skb((skb))
+#endif
+
+#if !defined(BCM_HAS_SKB_GET_QUEUE_MAPPING) || !defined(TG3_NAPI)
+#define skb_get_queue_mapping(skb)		0
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020612)
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+		unsigned int length)
+{
+	struct sk_buff *skb = dev_alloc_skb(length);
+	if (skb)
+		skb->dev = dev;
+	return skb;
+}
+#endif
+
+#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x20605)
+static inline void *netdev_priv(struct net_device *dev)
+{
+	return dev->priv;
+}
+#endif
+
+#ifdef OLD_NETIF
+static inline void netif_poll_disable(struct net_device *dev)
+{
+	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+	}
+}
+
+static inline void netif_poll_enable(struct net_device *dev)
+{
+	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+static inline void netif_tx_disable(struct net_device *dev)
+{
+	spin_lock_bh(&dev->xmit_lock);
+	netif_stop_queue(dev);
+	spin_unlock_bh(&dev->xmit_lock);
+}
+#endif /* OLD_NETIF */
+
+#ifndef BCM_HAS_NETIF_SET_REAL_NUM_TX_QUEUES
+#define netif_set_real_num_tx_queues(dev, nq)	((dev)->real_num_tx_queues = (nq))
+#endif
+
+#ifndef BCM_HAS_NETIF_SET_REAL_NUM_RX_QUEUES
+#define netif_set_real_num_rx_queues(dev, nq)	0
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+
+/*
+ * Commit ID 22bedad3ce112d5ca1eaf043d4990fa2ed698c87 is the patch that
+ * undefines dmi_addr and pivots the code to use netdev_hw_addr rather
+ * than dev_mc_list.  Commit ID 6683ece36e3531fc8c75f69e7165c5f20930be88
+ * is the patch that introduces netdev_for_each_mc_addr.  Commit ID
+ * f001fde5eadd915f4858d22ed70d7040f48767cf is the patch that introduces
+ * netdev_hw_addr.  These features are presented in reverse chronological
+ * order.
+ */
+#ifdef BCM_HAS_NETDEV_HW_ADDR
+#ifdef dmi_addr
+#undef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(ha, dev) \
+	struct dev_mc_list * oldmclist; \
+	struct netdev_hw_addr foo; \
+	ha = &foo; \
+    for (oldmclist = dev->mc_list; oldmclist && memcpy(foo.addr, oldmclist->dmi_addr, 6); oldmclist = oldmclist->next)
+#endif
+#else /* BCM_HAS_NETDEV_HW_ADDR */
+struct netdev_hw_addr {
+	u8 * addr;
+	struct dev_mc_list * curr;
+};
+#undef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(ha, dev) \
+	struct netdev_hw_addr mclist; \
+	ha = &mclist; \
+    for (mclist.curr = dev->mc_list; mclist.curr && (mclist.addr = &mclist.curr->dmi_addr[0]); mclist.curr = mclist.curr->next)
+#endif /* BCM_HAS_NETDEV_HW_ADDR */
+
+#ifndef BCM_HAS_GET_STATS64
+#define rtnl_link_stats64	net_device_stats
+#endif /* BCM_HAS_GET_STATS64 */
+
+#ifndef BCM_HAS_VLAN_GROUP_SET_DEVICE
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	if (vg)
+		vg->vlan_devices[vlan_id] = dev;
+}
+#endif
+
+#ifndef ETH_SS_TEST
+#define ETH_SS_TEST  0
+#endif
+#ifndef ETH_SS_STATS
+#define ETH_SS_STATS 1
+#endif
+#ifndef ADVERTISED_Pause
+#define ADVERTISED_Pause		(1 << 13)
+#endif
+#ifndef ADVERTISED_Asym_Pause
+#define ADVERTISED_Asym_Pause		(1 << 14)
+#endif
+
+#ifndef MII_CTRL1000
+#define MII_CTRL1000			0x09
+#endif
+#ifndef MII_STAT1000
+#define MII_STAT1000			0x0a
+#endif
+#ifndef BMCR_SPEED1000
+#define BMCR_SPEED1000			0x0040
+#endif
+#ifndef ADVERTISE_1000XFULL
+#define ADVERTISE_1000XFULL		0x0020
+#endif
+#ifndef ADVERTISE_1000XHALF
+#define ADVERTISE_1000XHALF		0x0040
+#endif
+#ifndef ADVERTISE_1000XPAUSE
+#define ADVERTISE_1000XPAUSE		0x0080
+#endif
+#ifndef ADVERTISE_1000XPSE_ASYM
+#define ADVERTISE_1000XPSE_ASYM		0x0100
+#endif
+#ifndef ADVERTISE_PAUSE
+#define ADVERTISE_PAUSE_CAP		0x0400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+#define ADVERTISE_PAUSE_ASYM		0x0800
+#endif
+#ifndef LPA_1000XFULL
+#define LPA_1000XFULL			0x0020
+#endif
+#ifndef LPA_1000XHALF
+#define LPA_1000XHALF			0x0040
+#endif
+#ifndef LPA_1000XPAUSE
+#define LPA_1000XPAUSE			0x0080
+#endif
+#ifndef LPA_1000XPAUSE_ASYM
+#define LPA_1000XPAUSE_ASYM		0x0100
+#endif
+#ifndef LPA_PAUSE
+#define LPA_PAUSE_CAP			0x0400
+#endif
+#ifndef LPA_PAUSE_ASYM
+#define LPA_PAUSE_ASYM			0x0800
+#endif
+#ifndef ADVERTISE_1000HALF
+#define ADVERTISE_1000HALF		0x0100
+#endif
+#ifndef ADVERTISE_1000FULL
+#define ADVERTISE_1000FULL		0x0200
+#endif
+
+#ifndef ETHTOOL_FWVERS_LEN
+#define ETHTOOL_FWVERS_LEN 32
+#endif
+
+#ifndef MDIO_MMD_AN
+#define MDIO_MMD_AN			7
+#endif
+
+#ifndef MDIO_AN_EEE_ADV
+#define MDIO_AN_EEE_ADV			60
+#endif
+
+#ifndef MDIO_AN_EEE_ADV_100TX
+#define MDIO_AN_EEE_ADV_100TX		0x0002
+#endif
+
+#ifndef MDIO_AN_EEE_ADV_1000T
+#define MDIO_AN_EEE_ADV_1000T		0x0004
+#endif
+
+#ifndef BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX
+#ifndef FLOW_CTRL_TX
+#define FLOW_CTRL_TX	0x01
+#endif
+#ifndef FLOW_CTRL_RX
+#define FLOW_CTRL_RX	0x02
+#endif
+static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
+{
+	u8 cap = 0;
+
+	if (lcladv & ADVERTISE_PAUSE_CAP) {
+		if (lcladv & ADVERTISE_PAUSE_ASYM) {
+			if (rmtadv & LPA_PAUSE_CAP)
+				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			else if (rmtadv & LPA_PAUSE_ASYM)
+				cap = FLOW_CTRL_RX;
+		} else {
+			if (rmtadv & LPA_PAUSE_CAP)
+				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+		}
+	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
+		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
+			cap = FLOW_CTRL_TX;
+	}
+
+	return cap;
+}
+#endif /* BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX */
+
+#ifdef BCM_INCLUDE_PHYLIB_SUPPORT
+
+#ifndef PHY_ID_BCM50610
+#define PHY_ID_BCM50610			0x0143bd60
+#endif
+#ifndef PHY_ID_BCM50610M
+#define PHY_ID_BCM50610M		0x0143bd70
+#endif
+#ifndef PHY_ID_BCM50612E
+#define PHY_ID_BCM50612E		0x03625e20
+#endif
+#ifndef PHY_ID_BCMAC131
+#define PHY_ID_BCMAC131			0x0143bc70
+#endif
+#ifndef PHY_ID_BCM57780
+#define PHY_ID_BCM57780			0x03625d90
+#endif
+#ifndef PHY_BCM_OUI_MASK
+#define PHY_BCM_OUI_MASK		0xfffffc00
+#endif
+#ifndef PHY_BCM_OUI_1
+#define PHY_BCM_OUI_1			0x00206000
+#endif
+#ifndef PHY_BCM_OUI_2
+#define PHY_BCM_OUI_2			0x0143bc00
+#endif
+#ifndef PHY_BCM_OUI_3
+#define PHY_BCM_OUI_3			0x03625c00
+#endif
+
+#ifndef PHY_BRCM_STD_IBND_DISABLE
+#define PHY_BRCM_STD_IBND_DISABLE	0x00000800
+#define PHY_BRCM_EXT_IBND_RX_ENABLE	0x00001000
+#define PHY_BRCM_EXT_IBND_TX_ENABLE	0x00002000
+#endif
+
+#ifndef PHY_BRCM_RX_REFCLK_UNUSED
+#define PHY_BRCM_RX_REFCLK_UNUSED	0x00000400
+#endif
+
+#ifndef PHY_BRCM_CLEAR_RGMII_MODE
+#define PHY_BRCM_CLEAR_RGMII_MODE	0x00004000
+#endif
+
+#ifndef PHY_BRCM_DIS_TXCRXC_NOENRGY
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY	0x00008000
+#endif
+
+#ifndef BCM_HAS_MDIOBUS_ALLOC
+static struct mii_bus *mdiobus_alloc(void)
+{
+	struct mii_bus *bus;
+
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+
+	return bus;
+}
+
+void mdiobus_free(struct mii_bus *bus)
+{
+	kfree(bus);
+}
+#endif
+
+#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */
diff --git a/drivers/net/tg3_compat2.h b/drivers/net/tg3_compat2.h
new file mode 100644
index 0000000000000000000000000000000000000000..a283cdfb9f8ceb0ed4688cfb71d8d7ae47798c16
--- /dev/null
+++ b/drivers/net/tg3_compat2.h
@@ -0,0 +1,83 @@
+/* Copyright (C) 2009-2011 Broadcom Corporation. */
+
+#ifdef BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR
+#define tg3_pci_dma_mapping_error(pdev, mapping)  pci_dma_mapping_error((pdev), (mapping))
+#elif defined(BCM_HAS_PCI_DMA_MAPPING_ERROR)
+#define tg3_pci_dma_mapping_error(pdev, mapping)  pci_dma_mapping_error((mapping))
+#else
+#define tg3_pci_dma_mapping_error(pdev, mapping)  0
+#endif
+
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+static inline void tg3_5780_class_intx_workaround(struct tg3 *tp)
+{
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
+	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
+	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+		tg3_enable_intx(tp->pdev);
+#endif
+}
+
+#ifdef BCM_HAS_TXQ_TRANS_UPDATE
+#define tg3_update_trans_start(dev)
+#else
+#define tg3_update_trans_start(dev)		((dev)->trans_start = jiffies)
+#endif
+
+#ifdef __VMKLNX__
+
+/**
+ *      skb_copy_expand -       copy and expand sk_buff
+ *      @skb: buffer to copy
+ *      @newheadroom: new free bytes at head
+ *      @newtailroom: new free bytes at tail
+ *      @gfp_mask: allocation priority
+ *
+ *      Make a copy of both an &sk_buff and its data and while doing so
+ *      allocate additional space.
+ *
+ *      This is used when the caller wishes to modify the data and needs a
+ *      private copy of the data to alter as well as more space for new fields.
+ *      Returns %NULL on failure or the pointer to the buffer
+ *      on success. The returned buffer has a reference count of 1.
+ *
+ *      You must pass %GFP_ATOMIC as the allocation priority if this function
+ *      is called from an interrupt.
+ */
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+                                int newheadroom, int newtailroom,
+                                gfp_t gfp_mask)
+{
+	struct sk_buff *new_skb = skb_copy((struct sk_buff *) skb, gfp_mask);
+
+	if (new_skb == NULL)
+		return NULL;
+
+	/* Free the copy on failure instead of leaking it. */
+	if (pskb_expand_head(new_skb, newheadroom, newtailroom, gfp_mask)) {
+		kfree_skb(new_skb);
+		return NULL;
+	}
+
+	return new_skb;
+}
+
+void *memmove(void *dest, const void *src, size_t count)
+{
+	if (dest < src) {
+		return memcpy(dest, src, count);
+	} else {
+		char *p = dest + count;
+		const char *s = src + count;
+		while (count--)
+			*--p = *--s;
+	}
+	return dest;
+}
+
+
+#endif
diff --git a/drivers/net/tg3_firmware.h b/drivers/net/tg3_firmware.h
new file mode 100644
index 0000000000000000000000000000000000000000..e04c6af45f0016d8ff379da2971ff6e125b7187f
--- /dev/null
+++ b/drivers/net/tg3_firmware.h
@@ -0,0 +1,974 @@
+/* Copyright (C) 2009-2011 Broadcom Corporation. */
+
+#ifdef NETIF_F_TSO
+#define TG3_TSO_SUPPORT	1	/* kernel exposes the TSO feature flag: build TSO firmware in */
+#else
+#define TG3_TSO_SUPPORT	0	/* no NETIF_F_TSO: compile TSO support out */
+#endif
+
+#ifndef BCM_HAS_REQUEST_FIRMWARE	/* compat path: firmware images are compiled in below instead of loaded via request_firmware() */
+
+struct tg3_firmware {
+	size_t size;	/* number of bytes in @data */
+	const u8 *data;	/* raw firmware image */
+};
+
+#ifndef MODULE_FIRMWARE
+#define MODULE_FIRMWARE(x)	/* stub for kernels that lack the module-firmware tag macro */
+#endif
+
+#define TG3_FW_RELEASE_MAJOR	0x0
+#define TG3_FW_RELASE_MINOR	0x0	/* NOTE(review): "RELASE" misspelling is historical; left as-is since other code may reference it */
+#define TG3_FW_RELEASE_FIX	0x0
+#define TG3_FW_START_ADDR	0x08000000	/* start address of the 5701 image — presumably the on-chip CPU entry point */
+#define TG3_FW_TEXT_ADDR	0x08000000
+#define TG3_FW_TEXT_LEN		0x9c0
+#define TG3_FW_RODATA_ADDR	0x080009c0	/* rodata directly follows text (0x08000000 + 0x9c0) */
+#define TG3_FW_RODATA_LEN	0x60
+#define TG3_FW_DATA_ADDR	0x08000a40
+#define TG3_FW_DATA_LEN		0x20
+#define TG3_FW_SBSS_ADDR	0x08000a60
+#define TG3_FW_SBSS_LEN		0xc
+#define TG3_FW_BSS_ADDR		0x08000a70
+#define TG3_FW_BSS_LEN		0x10
+
+#define TG3_5701_RLS_FW_LEN (TG3_FW_TEXT_LEN + TG3_FW_RODATA_LEN)	/* bytes actually carried in tg3FwText[]: text + rodata only */
+
+static const u32 tg3FwText[] = {
+0x00000000, (u32)TG3_FW_TEXT_ADDR, (u32)TG3_5701_RLS_FW_LEN,
+0x00000000, 0x10000003, 0x00000000, 0x0000000d,
+0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021,
+0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
+0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021,
+0x3c100800, 0x26100034, 0x0e00021c, 0x00000000,
+0x0000000d, 0x00000000, 0x00000000, 0x00000000,
+0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c,
+0x0e00004c, 0x241b2105, 0x97850000, 0x97870002,
+0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
+0xafa00014, 0x00021400, 0x00621825, 0x00052c00,
+0xafa30010, 0x8f860010, 0x00e52825, 0x0e000060,
+0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
+0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494,
+0xaf830498, 0xaf82049c, 0x24020001, 0xaf825ce0,
+0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
+0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff,
+0xaf825404, 0x8f835400, 0x34630400, 0xaf835400,
+0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
+0x03e00008, 0xaf805400, 0x00000000, 0x00000000,
+0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
+0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
+0x24020040, 0x3c010800, 0xac220a68, 0x3c010800,
+0xac200a60, 0xac600000, 0x24630004, 0x0083102b,
+0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
+0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60,
+0x3c040800, 0x8c840a68, 0x8fab0014, 0x24430001,
+0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
+0x00004021, 0x3c010800, 0xac200a60, 0x3c020800,
+0x8c420a60, 0x3c030800, 0x8c630a64, 0x91240000,
+0x00021140, 0x00431021, 0x00481021, 0x25080001,
+0xa0440000, 0x29020008, 0x1440fff4, 0x25290001,
+0x3c020800, 0x8c420a60, 0x3c030800, 0x8c630a64,
+0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
+0xac45000c, 0xac460010, 0xac470014, 0xac4a0018,
+0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001,
+0x0a0001e3, 0x3c0a0002, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008,
+0x0a0001e3, 0x3c0a0009, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
+0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013,
+0x0a0001e3, 0x3c0a0014, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018,
+0xafb10014, 0xafb00010, 0x3c010800, 0x00220821,
+0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
+0x3c010800, 0x00220821, 0xac200a78, 0x24630001,
+0x1860fff5, 0x2442000c, 0x24110001, 0x8f906810,
+0x32020004, 0x14400005, 0x24040001, 0x3c020800,
+0x8c420a78, 0x18400003, 0x00002021, 0x0e000182,
+0x00000000, 0x32020001, 0x10400003, 0x00000000,
+0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
+0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008,
+0x27bd0020, 0x3c050800, 0x8ca50a70, 0x3c060800,
+0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
+0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010,
+0x0e000060, 0xafa00014, 0x0e00017b, 0x00002021,
+0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
+0x8f836810, 0x00821004, 0x00021027, 0x00621824,
+0x03e00008, 0xaf836810, 0x27bdffd8, 0xafbf0024,
+0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
+0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c,
+0x34028000, 0xaf825cec, 0x8e020000, 0x18400016,
+0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
+0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800,
+0x0e000201, 0xac220a74, 0x10400005, 0x00000000,
+0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
+0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0,
+0x0a0001c5, 0xafa2001c, 0x0e000201, 0x00000000,
+0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
+0x24420001, 0x3c010800, 0xac230a70, 0x3c010800,
+0xac230a74, 0x0a0001df, 0xae020000, 0x3c100800,
+0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
+0x0e000201, 0x00000000, 0x14400024, 0x00000000,
+0x8e020000, 0x3c030800, 0x8c630a70, 0x2442ffff,
+0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
+0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70,
+0x97a2001e, 0x2442ff00, 0x2c420300, 0x1440000b,
+0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
+0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060,
+0x00003821, 0x0a0001df, 0x00000000, 0xaf825cf8,
+0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
+0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024,
+0x8fb00020, 0x03e00008, 0x27bd0028, 0x27bdffe0,
+0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
+0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060,
+0xafa00014, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
+0x00031823, 0x00431024, 0x00441021, 0x00a2282b,
+0x10a00006, 0x00000000, 0x00401821, 0x8f82680c,
+0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
+0x00000000, 0x3c040800, 0x8c840000, 0x3c030800,
+0x8c630a40, 0x0064102b, 0x54400002, 0x00831023,
+0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
+0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00,
+0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
+0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
+0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
+0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0x0e00004c,
+0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
+0x00003021, 0x00003821, 0xafa00010, 0x0e000060,
+0xafa00014, 0x2402ffff, 0xaf825404, 0x3c0200aa,
+0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
+0x27bd0020, 0x00000000, 0x00000000, 0x00000000,
+0x27bdffe8, 0xafb00010, 0x24100001, 0xafbf0014,
+0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
+0x10400003, 0x00000000, 0x0e000246, 0x00000000,
+0x0a00023a, 0xaf905428, 0x8fbf0014, 0x8fb00010,
+0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
+0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8,
+0x00821024, 0x1043001e, 0x3c0500ff, 0x34a5fff8,
+0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
+0x3c010800, 0xac230a50, 0x30420008, 0x10400005,
+0x00871025, 0x8cc20000, 0x24420001, 0xacc20000,
+0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
+0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001,
+0xafa20000, 0x8fa20000, 0x8f845d0c, 0x3c030800,
+0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
+0x27bd0008, 0x03e00008, 0x00000000, 0x00000000,
+0x35373031, 0x726c7341, 0x00000000, 0x00000000,
+0x53774576, 0x656e7430, 0x00000000, 0x726c7045,
+0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x66617461, 0x6c457272, 0x00000000, 0x00000000,
+0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
+};
+
+static const struct tg3_firmware tg3_5701_fw = {
+	.data = (const u8 *)tg3FwText,	/* image bytes live in tg3FwText[] */
+	.size = TG3_5701_RLS_FW_LEN,	/* text + rodata length */
+};
+
+
+#if TG3_TSO_SUPPORT != 0	/* legacy TSO firmware is only built when TSO support is compiled in */
+
+#define TG3_TSO_FW_RELEASE_MAJOR	0x1
+#define TG3_TSO_FW_RELASE_MINOR		0x6	/* NOTE(review): "RELASE" misspelling kept to match existing references */
+#define TG3_TSO_FW_RELEASE_FIX		0x0
+#define TG3_TSO_FW_START_ADDR		0x08000000
+#define TG3_TSO_FW_TEXT_ADDR		0x08000000
+#define TG3_TSO_FW_TEXT_LEN		0x1aa0
+#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0	/* rodata directly follows text */
+#define TG3_TSO_FW_RODATA_LEN		0x60
+#define TG3_TSO_FW_DATA_ADDR		0x08001b20
+#define TG3_TSO_FW_DATA_LEN		0x30
+#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
+#define TG3_TSO_FW_SBSS_LEN		0x2c
+#define TG3_TSO_FW_BSS_ADDR		0x08001b80
+#define TG3_TSO_FW_BSS_LEN		0x894
+
+#define TG3_LGCY_TSO_FW_LEN \
+        (TG3_TSO_FW_TEXT_LEN   + \
+         TG3_TSO_FW_RODATA_LEN + \
+         0x20                  + \
+         TG3_TSO_FW_DATA_LEN)	/* 0x20 = address gap between rodata end (0x08001b00) and data start (0x08001b20) */
+
+static const u32 tg3TsoFwText[] = {
+0x00010600, (u32)TG3_TSO_FW_TEXT_ADDR, (u32)TG3_LGCY_TSO_FW_LEN,
+0x0e000003, 0x00000000, 0x08001b24, 0x00000000,
+0x10000003, 0x00000000, 0x0000000d, 0x0000000d,
+0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
+0x26100000, 0x0e000010, 0x00000000, 0x0000000d,
+0x27bdffe0, 0x3c04fefe, 0xafbf0018, 0x0e0005d8,
+0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
+0x90631b68, 0x24020002, 0x3c040800, 0x24841aac,
+0x14620003, 0x24050001, 0x3c040800, 0x24841aa0,
+0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
+0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50,
+0x8f625c90, 0x34420001, 0xaf625c90, 0x2402ffff,
+0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
+0x27bd0020, 0x00000000, 0x00000000, 0x00000000,
+0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
+0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
+0x8f706820, 0x32020100, 0x10400003, 0x00000000,
+0x0e0000bb, 0x00000000, 0x8f706820, 0x32022000,
+0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
+0x32020001, 0x10400003, 0x00000000, 0x0e0000a3,
+0x00000000, 0x3c020800, 0x90421b98, 0x14520003,
+0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
+0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014,
+0x8fb00010, 0x03e00008, 0x27bd0020, 0x27bdffe0,
+0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
+0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c,
+0xafa00014, 0x3c040800, 0x248423d8, 0xa4800000,
+0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
+0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4,
+0x3c010800, 0xac201bac, 0x3c010800, 0xac201bb8,
+0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
+0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c,
+0x8f624410, 0xac80f7a8, 0x3c010800, 0xac201b84,
+0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
+0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400,
+0x3c010800, 0xac221b90, 0x8f620068, 0x24030007,
+0x00021702, 0x10430005, 0x00000000, 0x8f620068,
+0x00021702, 0x14400004, 0x24020001, 0x3c010800,
+0x0a000097, 0xac20240c, 0xac820034, 0x3c040800,
+0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
+0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014,
+0x8fbf0018, 0x03e00008, 0x27bd0020, 0x27bdffe0,
+0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
+0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c,
+0xafa00014, 0x0e00005b, 0x00000000, 0x0e0000b4,
+0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+0x24020001, 0x8f636820, 0x00821004, 0x00021027,
+0x00621824, 0x03e00008, 0xaf636820, 0x27bdffd0,
+0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
+0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010,
+0x8f675c5c, 0x3c030800, 0x24631bbc, 0x8c620000,
+0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
+0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824,
+0xac670000, 0x00111902, 0x306300ff, 0x30e20003,
+0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
+0x3c030800, 0x90631b98, 0x3044000f, 0x14600036,
+0x00804821, 0x24020001, 0x3c010800, 0xa0221b98,
+0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
+0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4,
+0x3c010800, 0xac201bac, 0x3c010800, 0xac201bb8,
+0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
+0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff,
+0x3c010800, 0xa4222410, 0x30428000, 0x3c010800,
+0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
+0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036,
+0x3c010800, 0xac2023f4, 0x9622000a, 0x3c030800,
+0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
+0xac2023f8, 0x00021302, 0x00021080, 0x00c21021,
+0x00621821, 0x3c010800, 0xa42223d0, 0x3c010800,
+0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
+0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000,
+0x00021100, 0x3c010800, 0x00220821, 0xac311bc8,
+0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
+0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff,
+0x00021100, 0x3c010800, 0x00220821, 0xac261bd0,
+0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
+0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac,
+0x00432821, 0x3c010800, 0xac251bac, 0x9622000a,
+0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
+0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000,
+0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
+0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
+0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c,
+0x8f625c50, 0x30420002, 0x10400014, 0x00000000,
+0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
+0x3c040800, 0x94841b94, 0x01221025, 0x3c010800,
+0xa42223da, 0x24020001, 0x3c010800, 0xac221bb8,
+0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
+0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800,
+0x24c61b9c, 0x8cc20000, 0x24420001, 0xacc20000,
+0x28420080, 0x14400005, 0x00000000, 0x0e000656,
+0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800,
+0x8c421bb8, 0x10400078, 0x24020001, 0x3c050800,
+0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
+0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff,
+0x0083102a, 0x1440006c, 0x00000000, 0x14830003,
+0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
+0x00009021, 0x24d60004, 0x0060a021, 0x24d30014,
+0x8ec20000, 0x00028100, 0x3c110800, 0x02308821,
+0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
+0x00000000, 0x9628000a, 0x31020040, 0x10400005,
+0x2407180c, 0x8e22000c, 0x2407188c, 0x00021400,
+0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
+0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00,
+0x00021400, 0x00621825, 0xaca30014, 0x8ec30004,
+0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
+0x00431021, 0x0282102a, 0x14400002, 0x02b23023,
+0x00803021, 0x8e620000, 0x30c4ffff, 0x00441021,
+0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
+0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e,
+0x8e62fff4, 0x00441021, 0xae62fff4, 0x96230008,
+0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
+0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008,
+0x3242ffff, 0x14540008, 0x24020305, 0x31020080,
+0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
+0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800,
+0x8c4223f0, 0x10400003, 0x3c024b65, 0x0a0001d3,
+0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
+0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021,
+0x3242ffff, 0x0054102b, 0x1440ffa9, 0x00000000,
+0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
+0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c,
+0x0e0004c0, 0x00000000, 0x8fbf002c, 0x8fb60028,
+0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
+0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030,
+0x27bdffd0, 0xafbf0028, 0xafb30024, 0xafb20020,
+0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
+0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824,
+0x9623000e, 0x8ce20000, 0x00431021, 0xace20000,
+0x8e220010, 0x30420020, 0x14400011, 0x00809821,
+0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825,
+0xaf635c9c, 0x8f625c90, 0x30420002, 0x1040011e,
+0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
+0x10400119, 0x00000000, 0x0a00020d, 0x00000000,
+0x8e240008, 0x8e230014, 0x00041402, 0x000231c0,
+0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
+0x00031942, 0x30637800, 0x00021100, 0x24424000,
+0x00624821, 0x9522000a, 0x3084ffff, 0x30420008,
+0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
+0x14400024, 0x24c50008, 0x94c20014, 0x3c010800,
+0xa42223d0, 0x8cc40010, 0x00041402, 0x3c010800,
+0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
+0x3083ffff, 0x00431023, 0x3c010800, 0xac222408,
+0x94c2001a, 0x3c010800, 0xac262400, 0x3c010800,
+0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
+0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
+0x104000e5, 0x00000000, 0xaf635c9c, 0x8f625c90,
+0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
+0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4,
+0x00434023, 0x3103ffff, 0x2c620008, 0x1040001c,
+0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
+0x00031042, 0x1840000b, 0x00002021, 0x24e60848,
+0x00403821, 0x94a30000, 0x8cc20000, 0x24840001,
+0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
+0x24a50002, 0x31020001, 0x1040001f, 0x3c024000,
+0x3c040800, 0x248423fc, 0xa0a00001, 0x94a30000,
+0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
+0x8f626800, 0x3c030010, 0x00431024, 0x10400009,
+0x00000000, 0x94c2001a, 0x3c030800, 0x8c6323fc,
+0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
+0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800,
+0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800,
+0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
+0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000,
+0x9522000a, 0x30420010, 0x1040009b, 0x00000000,
+0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
+0x8ce40000, 0x8f626800, 0x24630030, 0x00832821,
+0x3c030010, 0x00431024, 0x1440000a, 0x00000000,
+0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
+0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800,
+0xac2323fc, 0x3c040800, 0x8c8423fc, 0x00041c02,
+0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
+0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404,
+0x3c0200ff, 0x3442fff8, 0x00628824, 0x96220008,
+0x24050001, 0x24034000, 0x000231c0, 0x00801021,
+0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800,
+0xac251b60, 0xaf635cb8, 0x8f625cb0, 0x30420002,
+0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
+0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002,
+0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
+0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
+0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a,
+0x00000000, 0x3c030800, 0x90631b98, 0x24020002,
+0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
+0x8e22001c, 0x34637654, 0x10430002, 0x24100002,
+0x24100001, 0x00c02021, 0x0e000350, 0x02003021,
+0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
+0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0,
+0x10620006, 0x00000000, 0x3c020800, 0x944223d8,
+0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
+0x248423da, 0x94820000, 0x00021400, 0xae220014,
+0x3c020800, 0x8c421bbc, 0x3c03c000, 0x3c010800,
+0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
+0x30420002, 0x10400009, 0x00000000, 0x2484f7e2,
+0x8c820000, 0x00431025, 0xaf625c5c, 0x8f625c50,
+0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
+0x24421b84, 0x8c430000, 0x24630001, 0xac430000,
+0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000c,
+0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
+0x3063000f, 0x24420001, 0x3c010800, 0xac221b40,
+0x2c620002, 0x1040fff7, 0x00000000, 0x3c024000,
+0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
+0x1440fffc, 0x00000000, 0x12600003, 0x00000000,
+0x0e0004c0, 0x00000000, 0x8fbf0028, 0x8fb30024,
+0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
+0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88,
+0x8c820000, 0x00031c02, 0x0043102b, 0x14400007,
+0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
+0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
+0x8f624444, 0x00431024, 0x1440fffd, 0x00000000,
+0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
+0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002,
+0x1440fffc, 0x00000000, 0x03e00008, 0x00000000,
+0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
+0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016,
+0x3c010800, 0xa42223d2, 0x2402002a, 0x3c010800,
+0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
+0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4,
+0x3c040800, 0x948423d4, 0x3c030800, 0x946323d2,
+0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
+0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821,
+0x3082ffff, 0x14c0001a, 0x01226021, 0x9582000c,
+0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
+0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800,
+0xac2023e8, 0x00021400, 0x00431025, 0x3c010800,
+0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
+0x95230002, 0x01e51023, 0x0043102a, 0x10400010,
+0x24020001, 0x3c010800, 0x0a000398, 0xac2223f8,
+0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
+0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0,
+0xa5820004, 0x3c020800, 0x8c421bc0, 0xa5820006,
+0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
+0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800,
+0x94421bc4, 0x004a1821, 0x3063ffff, 0x0062182b,
+0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
+0x944223d6, 0x30420009, 0x10400008, 0x00000000,
+0x9582000c, 0x3042fff6, 0xa582000c, 0x3c020800,
+0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
+0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800,
+0x944223d2, 0x00004021, 0xa520000a, 0x01e21023,
+0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
+0x00003021, 0x00401821, 0x94e20000, 0x25080001,
+0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002,
+0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
+0x00c23021, 0x00c02821, 0x00061027, 0xa522000a,
+0x00003021, 0x2527000c, 0x00004021, 0x94e20000,
+0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
+0x24e70002, 0x95220002, 0x00004021, 0x91230009,
+0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010,
+0x00621821, 0x00021042, 0x18400010, 0x00c33021,
+0x00404821, 0x94e20000, 0x24e70002, 0x00c23021,
+0x30e2007f, 0x14400006, 0x25080001, 0x8d630000,
+0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
+0x0109102a, 0x1440fff3, 0x00000000, 0x30820001,
+0x10400005, 0x00061c02, 0xa0e00001, 0x94e20000,
+0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
+0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff,
+0x24020002, 0x14c20081, 0x00000000, 0x3c020800,
+0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
+0x944223d2, 0x95230002, 0x01e21023, 0x10620077,
+0x00000000, 0x3c020800, 0x944223d2, 0x01e21023,
+0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
+0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96,
+0x00e04021, 0x00072c02, 0x00aa2021, 0x00431023,
+0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
+0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800,
+0x948423d4, 0x00453023, 0x00e02821, 0x00641823,
+0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
+0x0a00047d, 0x00623021, 0x01203821, 0x00004021,
+0x3082ffff, 0x00021042, 0x18400008, 0x00003021,
+0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
+0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02,
+0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021,
+0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
+0x2527000c, 0x00004021, 0x94e20000, 0x25080001,
+0x00c23021, 0x2d020004, 0x1440fffb, 0x24e70002,
+0x95220002, 0x00004021, 0x91230009, 0x00442023,
+0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800,
+0x948423d4, 0x00621821, 0x00c33021, 0x00061c02,
+0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
+0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2,
+0x00431021, 0x00021043, 0x18400010, 0x00003021,
+0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
+0x30e2007f, 0x14400006, 0x25080001, 0x8d630000,
+0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
+0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
+0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021,
+0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
+0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
+0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010,
+0x00e04021, 0x11400007, 0x00072c02, 0x00aa3021,
+0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
+0x00c22821, 0x00051027, 0xa522000a, 0x3c030800,
+0x946323d4, 0x3102ffff, 0x01e21021, 0x00433023,
+0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
+0x00061402, 0x00c23021, 0x00c04021, 0x00061027,
+0xa5820010, 0x3102ffff, 0x00051c00, 0x00431025,
+0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
+0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870,
+0xa5c20034, 0x3c030800, 0x246323e8, 0x8c620000,
+0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
+0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021,
+0x00431821, 0x0062102b, 0x3c010800, 0xac2423e4,
+0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
+0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020,
+0x27bdffb8, 0x3c050800, 0x24a51b96, 0xafbf0044,
+0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
+0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024,
+0xafb00020, 0x94a90000, 0x3c020800, 0x944223d0,
+0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
+0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be,
+0xa7a20016, 0x24be0022, 0x97b6001e, 0x24b3001a,
+0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
+0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021,
+0x0082202a, 0x148000b0, 0x00000000, 0x97d50818,
+0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
+0x00008821, 0x0e000625, 0x00000000, 0x00403021,
+0x14c00007, 0x00000000, 0x3c020800, 0x8c4223dc,
+0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
+0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a,
+0x31020040, 0x10400005, 0x2407180c, 0x8e02000c,
+0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
+0x54400001, 0x34e70010, 0x3c020800, 0x00511021,
+0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4,
+0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
+0x96040008, 0x3242ffff, 0x00821021, 0x0282102a,
+0x14400002, 0x02b22823, 0x00802821, 0x8e020000,
+0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
+0x26310010, 0xac820004, 0x30e2ffff, 0xac800008,
+0xa485000e, 0xac820010, 0x24020305, 0x0e0005a2,
+0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
+0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000,
+0x8e63fffc, 0x0043102a, 0x10400067, 0x00000000,
+0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
+0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005,
+0x00000000, 0x8e62082c, 0x24420001, 0x0a000596,
+0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
+0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400,
+0xacc20018, 0x3c020800, 0x00511021, 0x8c421bd0,
+0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
+0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4,
+0x96020008, 0x00432023, 0x3242ffff, 0x3083ffff,
+0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
+0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff,
+0x00441021, 0xae620000, 0xa4c5000e, 0x8e020000,
+0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
+0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821,
+0x0062102a, 0x14400006, 0x02459021, 0x8e62fff0,
+0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
+0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003,
+0x31020004, 0x10400006, 0x24020305, 0x31020080,
+0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
+0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007,
+0x3c02b49a, 0x8ee20860, 0x54400001, 0x34e70400,
+0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
+0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2,
+0x00c02021, 0x3242ffff, 0x0056102b, 0x1440ff9b,
+0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
+0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040,
+0x8fb7003c, 0x8fb60038, 0x8fb50034, 0x8fb40030,
+0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
+0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014,
+0xafb00010, 0x8f624450, 0x8f634410, 0x0a0005b1,
+0x00808021, 0x8f626820, 0x30422000, 0x10400003,
+0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450,
+0x8f634410, 0x3042ffff, 0x0043102b, 0x1440fff5,
+0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
+0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800,
+0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800,
+0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
+0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009,
+0x00000000, 0x8f626820, 0x30422000, 0x1040fff8,
+0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
+0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008,
+0x27bd0018, 0x00000000, 0x00000000, 0x00000000,
+0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
+0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804,
+0x8f634000, 0x24020b50, 0x3c010800, 0xac221b54,
+0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
+0xaf634000, 0x0e000605, 0x00808021, 0x3c010800,
+0xa0221b68, 0x304200ff, 0x24030002, 0x14430005,
+0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
+0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc,
+0x8f624434, 0x8f634438, 0x8f644410, 0x3c010800,
+0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
+0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008,
+0x27bd0018, 0x3c040800, 0x8c870000, 0x3c03aa55,
+0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
+0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa,
+0xac830000, 0x8cc20000, 0x50430001, 0x24050001,
+0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
+0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c,
+0x8f62680c, 0x1043fffe, 0x00000000, 0x24a50001,
+0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
+0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c,
+0x00031c02, 0x0043102b, 0x14400008, 0x3c038000,
+0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
+0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
+0x8f624444, 0x00431024, 0x1440fffd, 0x00000000,
+0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
+0x2442e000, 0x2c422001, 0x14400003, 0x3c024000,
+0x0a000648, 0x2402ffff, 0x00822025, 0xaf645c38,
+0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
+0x03e00008, 0x00000000, 0x8f624450, 0x3c030800,
+0x8c631b58, 0x0a000651, 0x3042ffff, 0x8f624450,
+0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
+0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821,
+0x3c040800, 0x24841af0, 0x00003021, 0x00003821,
+0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
+0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008,
+0x27bd0020, 0x00000000, 0x00000000, 0x00000000,
+0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
+0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74,
+0x24020040, 0x3c010800, 0xac221b78, 0x3c010800,
+0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
+0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
+0x00804821, 0x8faa0010, 0x3c020800, 0x8c421b70,
+0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
+0x0044102b, 0x3c010800, 0xac231b70, 0x14400003,
+0x00004021, 0x3c010800, 0xac201b70, 0x3c020800,
+0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
+0x00021140, 0x00431021, 0x00481021, 0x25080001,
+0xa0440000, 0x29020008, 0x1440fff4, 0x25290001,
+0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
+0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
+0xac45000c, 0xac460010, 0xac470014, 0xac4a0018,
+0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
+0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e,
+0x43707541, 0x00000000, 0x00000000, 0x00000000,
+0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
+0x66662a2a, 0x00000000, 0x53774576, 0x656e7430,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x66617461, 0x6c457272, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e,
+0x362e3000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const struct tg3_firmware tg3_lgcy_tso_fw = {
+	.data = (const u8 *)tg3TsoFwText,	/* legacy TSO image bytes */
+	.size = TG3_LGCY_TSO_FW_LEN,		/* text + rodata + gap + data */
+};
+
+/* 5705 needs a special version of the TSO firmware.  */
+#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
+#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* NOTE(review): "RELASE" misspelling kept to match existing references */
+#define TG3_TSO5_FW_RELEASE_FIX		0x0
+#define TG3_TSO5_FW_START_ADDR		0x00010000
+#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
+#define TG3_TSO5_FW_TEXT_LEN		0xe90
+#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90	/* rodata directly follows text */
+#define TG3_TSO5_FW_RODATA_LEN		0x50
+#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
+#define TG3_TSO5_FW_DATA_LEN		0x20
+#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
+#define TG3_TSO5_FW_SBSS_LEN		0x28
+#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
+#define TG3_TSO5_FW_BSS_LEN		0x88
+
+#define TG3_5705_TSO_FW_LEN \
+        (TG3_TSO5_FW_TEXT_LEN   + \
+         TG3_TSO5_FW_RODATA_LEN + \
+         0x20                  + \
+         TG3_TSO5_FW_DATA_LEN)	/* 0x20 = address gap between rodata end (0x00010ee0) and data start (0x00010f00) */
+
+static const u32 tg3Tso5FwText[] = {
+0x00010200, (u32)TG3_TSO5_FW_TEXT_ADDR, (u32)TG3_5705_TSO_FW_LEN,
+0x0c004003, 0x00000000, 0x00010f04, 0x00000000,
+0x10000003, 0x00000000, 0x0000000d, 0x0000000d,
+0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
+0x26100000, 0x0c004010, 0x00000000, 0x0000000d,
+0x27bdffe0, 0x3c04fefe, 0xafbf0018, 0x0c0042e8,
+0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
+0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c,
+0x14620003, 0x24050001, 0x3c040001, 0x24840e90,
+0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
+0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018,
+0x03e00008, 0x27bd0020, 0x00000000, 0x00000000,
+0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
+0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001,
+0x8f706810, 0x32020400, 0x10400007, 0x00000000,
+0x8f641008, 0x00921024, 0x14400003, 0x00000000,
+0x0c004064, 0x00000000, 0x3c020001, 0x90420f56,
+0x10510003, 0x32020200, 0x1040fff1, 0x00000000,
+0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
+0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
+0x03e00008, 0x27bd0020, 0x27bdffe0, 0x3c040001,
+0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
+0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014,
+0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
+0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
+0x03e00008, 0x27bd0020, 0x00000000, 0x00000000,
+0x3c030001, 0x24630f60, 0x90620000, 0x27bdfff0,
+0x14400003, 0x0080c021, 0x08004073, 0x00004821,
+0x3c022000, 0x03021024, 0x10400003, 0x24090002,
+0x08004073, 0xa0600000, 0x24090001, 0x00181040,
+0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
+0x3c040001, 0x00832021, 0x8c848010, 0x3c050001,
+0x24a50f7a, 0x00041402, 0xa0a20000, 0x3c010001,
+0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
+0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021,
+0x8d8c8018, 0x304200ff, 0x24420008, 0x000220c3,
+0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
+0x1040000c, 0x00003821, 0x24a6000e, 0x01602821,
+0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001,
+0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
+0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b,
+0x91060000, 0x3c020001, 0x90420f7c, 0x2503000d,
+0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
+0x00021043, 0x1840000c, 0x00002021, 0x91020001,
+0x00461023, 0x00021fc2, 0x00431021, 0x00021843,
+0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
+0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff,
+0x00622021, 0x00041402, 0x00822021, 0x3c02ffff,
+0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
+0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c,
+0x90a20000, 0x3c0c0001, 0x01836021, 0x8d8c8018,
+0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
+0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008,
+0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b,
+0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
+0x90a20000, 0x30430007, 0x24020004, 0x10620011,
+0x28620005, 0x10400005, 0x24020002, 0x10620008,
+0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
+0x1062000e, 0x000710c0, 0x080040fa, 0x00000000,
+0x00a21821, 0x9463000c, 0x004b1021, 0x080040fa,
+0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
+0x004b1021, 0x080040fa, 0xac430000, 0x00a21821,
+0x8c63000c, 0x004b2021, 0x00a21021, 0xac830000,
+0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
+0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823,
+0x3c020001, 0x90420f7b, 0x24630028, 0x01e34021,
+0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
+0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006,
+0x3c010001, 0xa4200f76, 0x3c010001, 0xa4200f72,
+0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
+0x95020004, 0x3c010001, 0x08004124, 0xa4220f70,
+0x3c020001, 0x94420f70, 0x3c030001, 0x94630f72,
+0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
+0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006,
+0x3c040001, 0x94840f72, 0x3c020001, 0x94420f70,
+0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
+0x0062182a, 0x24020002, 0x1122000b, 0x00832023,
+0x3c030001, 0x94630f78, 0x30620009, 0x10400006,
+0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
+0x30420009, 0x01425023, 0x24020001, 0x1122001b,
+0x29220002, 0x50400005, 0x24020002, 0x11200007,
+0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
+0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001,
+0x95ce0f80, 0x10800005, 0x01806821, 0x01c42021,
+0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
+0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff,
+0x00e21021, 0x0800418d, 0x00432023, 0x3c020001,
+0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
+0x00622021, 0x00807021, 0x00041027, 0x08004185,
+0xa502000a, 0x3c050001, 0x24a50f7a, 0x90a30000,
+0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
+0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80,
+0x3c020001, 0x94420f5a, 0x30e5ffff, 0x00641821,
+0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
+0x00622021, 0x00041027, 0xa502000a, 0x3c030001,
+0x90630f7c, 0x24620001, 0x14a20005, 0x00807021,
+0x01631021, 0x90420000, 0x08004185, 0x00026200,
+0x24620002, 0x14a20003, 0x306200fe, 0x004b1021,
+0x944c0000, 0x3c020001, 0x94420f82, 0x3183ffff,
+0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
+0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff,
+0x00622021, 0x00041402, 0x00822021, 0x00806821,
+0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
+0x00431025, 0x3c040001, 0x24840f72, 0xade20010,
+0x94820000, 0x3c050001, 0x94a50f76, 0x3c030001,
+0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
+0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001,
+0xa4250f76, 0x10600003, 0x24a2ffff, 0x3c010001,
+0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
+0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010,
+0x3c030001, 0x90630f56, 0x27bdffe8, 0x24020001,
+0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
+0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000,
+0x3c010001, 0xac230f64, 0x8c434008, 0x24444000,
+0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
+0x24020008, 0x3c010001, 0xa4220f68, 0x30620004,
+0x10400005, 0x24020001, 0x3c010001, 0xa0220f57,
+0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
+0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c,
+0x24020001, 0x3c010001, 0xa4200f50, 0x3c010001,
+0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
+0x1342001e, 0x00000000, 0x13400005, 0x24020003,
+0x13420067, 0x00000000, 0x080042cf, 0x00000000,
+0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
+0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff,
+0x00021bc2, 0x00031823, 0x3063003e, 0x34630036,
+0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
+0xa4240f58, 0x00832021, 0x24630030, 0x3c010001,
+0xa4240f5a, 0x3c010001, 0xa4230f5c, 0x3c060001,
+0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
+0x94840f5a, 0x00651021, 0x0044102a, 0x10400013,
+0x3c108000, 0x00a31021, 0xa4c20000, 0x3c02a000,
+0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
+0x00901024, 0x14400003, 0x00000000, 0x0c004064,
+0x00000000, 0x8f620cf4, 0x00501024, 0x104000b7,
+0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
+0x94630f50, 0x00851023, 0xa4c40000, 0x00621821,
+0x3042ffff, 0x3c010001, 0xa4230f50, 0xaf620ce8,
+0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
+0x94c30002, 0x3c020001, 0x94420f50, 0x14620012,
+0x3c028000, 0x3c108000, 0x3c02a000, 0xaf620cf4,
+0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
+0x14400003, 0x00000000, 0x0c004064, 0x00000000,
+0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000,
+0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
+0x8f641008, 0x00901024, 0x14400003, 0x00000000,
+0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
+0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
+0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021,
+0xaf620ce0, 0x3c020001, 0x8c420f64, 0xaf620ce4,
+0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
+0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823,
+0x00822023, 0x30a6ffff, 0x3083ffff, 0x00c3102b,
+0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
+0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000,
+0x3c030001, 0x94630f54, 0x00441021, 0xa4e20000,
+0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
+0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001,
+0x94420f68, 0x34630624, 0x0800427c, 0x0000d021,
+0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
+0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000,
+0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
+0x00901024, 0x14400003, 0x00000000, 0x0c004064,
+0x00000000, 0x8f620cf4, 0x00501024, 0x10400015,
+0x00000000, 0x08004283, 0x00000000, 0x3c030001,
+0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
+0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008,
+0x00901024, 0x14400003, 0x00000000, 0x0c004064,
+0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
+0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e,
+0x3c020001, 0x94420f5c, 0x00021400, 0x00c21025,
+0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
+0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
+0x0000d021, 0x00431025, 0xaf620cec, 0x080042c1,
+0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
+0x34630604, 0x00431025, 0xaf620cec, 0x3c020001,
+0x94420f5e, 0x00451021, 0x3c010001, 0xa4220f5e,
+0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
+0xa0200f56, 0x8f641008, 0x00901024, 0x14400003,
+0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
+0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
+0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
+0x27bdffe0, 0x3c040001, 0x24840ec0, 0x00002821,
+0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
+0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
+0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001,
+0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
+0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804,
+0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
+0x24020b78, 0x3c010001, 0xac220f30, 0x34630002,
+0xaf634000, 0x0c004315, 0x00808021, 0x3c010001,
+0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
+0x00000000, 0x3c020001, 0x8c420f20, 0x08004308,
+0xac5000c0, 0x3c020001, 0x8c420f20, 0xac5000bc,
+0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
+0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001,
+0xac240f24, 0x8fbf0014, 0x8fb00010, 0x03e00008,
+0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
+0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c,
+0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a,
+0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
+0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02,
+0x0043102b, 0x14400008, 0x3c038000, 0x3c040001,
+0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
+0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444,
+0x00431024, 0x1440fffd, 0x00000000, 0x8f624448,
+0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
+0x2c422001, 0x14400003, 0x3c024000, 0x08004347,
+0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30,
+0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
+0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24,
+0x08004350, 0x3042ffff, 0x8f624450, 0x3042ffff,
+0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
+0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001,
+0x24840ed0, 0x00003021, 0x00003821, 0xafbf0018,
+0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
+0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+0x3c020001, 0x3442d600, 0x3c030001, 0x3463d600,
+0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
+0x24020040, 0x3c010001, 0xac220f44, 0x3c010001,
+0xac200f3c, 0xac600000, 0x24630004, 0x0083102b,
+0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
+0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c,
+0x3c040001, 0x8c840f44, 0x8fab0014, 0x24430001,
+0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
+0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001,
+0x8c420f3c, 0x3c030001, 0x8c630f40, 0x91240000,
+0x00021140, 0x00431021, 0x00481021, 0x25080001,
+0xa0440000, 0x29020008, 0x1440fff4, 0x25290001,
+0x3c020001, 0x8c420f3c, 0x3c030001, 0x8c630f40,
+0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
+0xac45000c, 0xac460010, 0xac470014, 0xac4a0018,
+0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
+0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e,
+0x43707541, 0x00000000, 0x00000000, 0x00000000,
+0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
+0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
+0x66617461, 0x6c457272, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e,
+0x322e3000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const struct tg3_firmware tg3_5705_tso_fw = {
+	.size = TG3_5705_TSO_FW_LEN,
+	.data = (u8 *)&tg3Tso5FwText[0],
+};
+
+#endif /* TG3_TSO_SUPPORT != 0 */
+
+static int tg3_hidden_request_firmware(const struct tg3_firmware **fw,
+				       const char *name)
+{
+	*fw = 0;
+
+	if (strcmp(name, "tigon/tg3.bin") == 0)
+		*fw = &tg3_5701_fw;
+#if TG3_TSO_SUPPORT != 0
+	else if (strcmp(name, "tigon/tg3_tso.bin") == 0)
+		*fw = &tg3_lgcy_tso_fw;
+	else if (strcmp(name, "tigon/tg3_tso5.bin") == 0)
+		*fw = &tg3_5705_tso_fw;
+#endif
+
+	return *fw ? 0 : -EINVAL;
+}
+
+#define tg3_priv_request_firmware(x, y, z) tg3_hidden_request_firmware((x), (y))
+
+#define tg3_priv_release_firmware(x)
+
+#endif /* BCM_HAS_REQUEST_FIRMWARE */
diff --git a/drivers/net/tg3_flags.h b/drivers/net/tg3_flags.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa909b9224a8f6c6b64dd837ba9aac802552fb47
--- /dev/null
+++ b/drivers/net/tg3_flags.h
@@ -0,0 +1,44 @@
+#define BCM_HAS_BOOL
+#define BCM_HAS_LE32
+#define BCM_HAS_RESOURCE_SIZE_T
+#define BCM_HAS_KZALLOC
+#define BCM_HAS_JIFFIES_TO_USECS
+#define BCM_HAS_USECS_TO_JIFFIES
+#define BCM_HAS_MSECS_TO_JIFFIES
+#define BCM_HAS_MSLEEP
+#define BCM_HAS_MSLEEP_INTERRUPTIBLE
+#define BCM_HAS_SKB_COPY_FROM_LINEAR_DATA
+#define BCM_HAS_SKB_IS_GSO_V6
+#define BCM_HAS_PCI_IOREMAP_BAR
+#define BCM_HAS_PCI_READ_VPD
+#define BCM_HAS_INTX_MSI_WORKAROUND
+#define BCM_HAS_PCI_TARGET_STATE
+#define BCM_HAS_PCI_CHOOSE_STATE
+#define BCM_HAS_PCI_PME_CAPABLE
+#define BCM_HAS_PCI_ENABLE_WAKE
+#define BCM_HAS_PCI_SET_POWER_STATE
+#define BCM_HAS_DEVICE_WAKEUP_API
+#define BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR
+#define BCM_HAS_PCIE_SET_READRQ
+#define BCM_HAS_PRINT_MAC
+#define BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM
+#define BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM
+#define BCM_HAS_SET_TX_CSUM
+#define BCM_HAS_SKB_TRANSPORT_OFFSET
+#define BCM_HAS_SKB_GET_QUEUE_MAPPING
+#define BCM_HAS_IP_HDR
+#define BCM_HAS_IP_HDRLEN
+#define BCM_HAS_TCP_HDR
+#define BCM_HAS_TCP_OPTLEN
+#define BCM_HAS_STRUCT_NETDEV_QUEUE
+#define BCM_HAS_VLAN_FEATURES
+#define BCM_HAS_ALLOC_ETHERDEV_MQ
+#define BCM_HAS_NAPI_GRO_RECEIVE
+#define BCM_HAS_NETIF_TX_LOCK
+#define BCM_HAS_VLAN_GRO_RECEIVE
+#define BCM_HAS_VLAN_GROUP_SET_DEVICE
+#define BCM_HAS_DEV_DRIVER_STRING
+#define BCM_HAS_DEV_NAME
+#define BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX
+#define BCM_HAS_MDIOBUS_ALLOC
+#define BCM_HAS_DMA_DATA_DIRECTION
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a2e2d72c52a039015bce7f5b6a5b6fb8b02796df..182d66820b07e3c014d4e2812804fea432e195c7 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -2,6 +2,10 @@
 # Makefile for USB Network drivers
 #
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_USBNET)
+EXTRA_CFLAGS	+= -I$(INC_BRCMSHARED_PUB_PATH)/$(BRCM_BOARD)
+endif #BCM_KF # defined(CONFIG_BCM_KF_USBNET)
+
 obj-$(CONFIG_USB_CATC)		+= catc.o
 obj-$(CONFIG_USB_KAWETH)	+= kaweth.o
 obj-$(CONFIG_USB_PEGASUS)	+= pegasus.o
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b38db48b1ce09b380d8cdea0ea444be0d97573d0..cdea92b52eb2104e3c28139f56f8fd14d672686e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -47,6 +47,11 @@
 #include <linux/kernel.h>
 #include <linux/pm_runtime.h>
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION) && defined(CONFIG_BLOG))
+#include <linux/nbuff.h> 
+#include <linux/blog.h>
+#endif
+
 #define DRIVER_VERSION		"22-Aug-2005"
 
 
@@ -232,6 +237,30 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 		return;
 	}
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION) && defined(CONFIG_BLOG))
+	if(skb->clone_fc_head == NULL)
+	{
+		/* Make sure fcache does not expand skb->data if clone_fc_head
+		 * is not set by the dongle driver (e.g. rndis_host.c, asix.c, etc.).
+		 * We expect dongle/class drivers using fcache to set the minimum
+		 * headroom available for all packets in an aggregated skb by
+		 * calling skb_clone_headers_set() before calling usbnet_skb_return.
+		 *
+		 * Ex: rndis-based drivers have 8 bytes of spacing between 2 packets
+		 * in an aggregated skb; we can call skb_clone_headers_set(skb, 8)
+		 * in rndis_rx_fixup().
+		 * By setting this we are telling fcache or the enet driver it can
+		 * expand skb->data by up to 8 bytes. This is helpful to avoid a
+		 * packet copy in case of LAN VLANs, external switch tags, etc.
+		 *
+		 */
+		skb_clone_headers_set(skb, 0);
+	}
+	if (PKT_DONE == blog_sinit(skb, skb->dev, TYPE_ETH, 0, BLOG_USBPHY)) {
+		return;
+	}
+#endif
+
 	skb->protocol = eth_type_trans (skb, dev->net);
 	dev->net->stats.rx_packets++;
 	dev->net->stats.rx_bytes += skb->len;
@@ -243,7 +272,11 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 	if (skb_defer_rx_timestamp(skb))
 		return;
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION) && defined(CONFIG_BLOG))
+	status = netif_receive_skb(skb);
+#else
 	status = netif_rx (skb);
+#endif
 	if (status != NET_RX_SUCCESS)
 		netif_dbg(dev, rx_err, dev->net,
 			  "netif_rx status %d\n", status);
@@ -333,6 +366,10 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM96838))
+int bcm_usb_hw_align_size = 1024;
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 static void rx_complete (struct urb *urb);
@@ -345,7 +382,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	unsigned long		lockflags;
 	size_t			size = dev->rx_urb_size;
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM96838))
+	skb = __netdev_alloc_skb_ip_align(dev->net, size + bcm_usb_hw_align_size, flags);
+#else
 	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+#endif
+
 	if (!skb) {
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
@@ -353,6 +395,14 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		return -ENOMEM;
 	}
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM96838))
+    {
+        unsigned int aligned_len = (unsigned int)skb->data;
+        aligned_len = bcm_usb_hw_align_size - (aligned_len & (bcm_usb_hw_align_size - 1));
+		skb_reserve(skb, aligned_len);
+    }
+#endif
+
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
@@ -1092,6 +1142,21 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	unsigned long		flags;
 	int retval;
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION) && defined(CONFIG_BLOG))
+	if(skb)
+	{
+		struct sk_buff *orig_skb = skb;
+		skb = nbuff_xlate((pNBuff_t )skb);
+		if (skb == NULL)
+		{
+			dev->net->stats.tx_dropped++;
+			nbuff_free((pNBuff_t) orig_skb);
+			return NETDEV_TX_OK;
+		}
+		blog_emit( skb, net, TYPE_ETH, 0, BLOG_USBPHY );
+	}
+#endif
+
 	if (skb)
 		skb_tx_timestamp(skb);
 
@@ -1413,6 +1478,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
 	net->ethtool_ops = &usbnet_ethtool_ops;
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM96838))
+    printk("+++++ &bcm_usb_hw_align_size =%p \n", &bcm_usb_hw_align_size);
+#endif
 	// allow device-specific bind/init procedures
 	// NOTE net->name still not usable ...
 	if (info->bind) {
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b500840a143b08ac72e8c045fea66135535676d5..034aa4f94a090ead528a3fdc25d2b7e894bba990 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -76,6 +76,10 @@ static LIST_HEAD(link_list);
 #define POLICY_DEFAULT 0	/* BIOS default setting */
 #define POLICY_PERFORMANCE 1	/* high performance */
 #define POLICY_POWERSAVE 2	/* high power saving */
+#if defined(CONFIG_BCM_KF_POWER_SAVE)
+#define POLICY_L0SPOWERSAVE 3	/* Only do L0S */
+#define POLICY_L1POWERSAVE 4	/* Typically same savings as L1+L0s */
+#endif
 
 #ifdef CONFIG_PCIEASPM_PERFORMANCE
 static int aspm_policy = POLICY_PERFORMANCE;
@@ -89,6 +93,11 @@ static const char *policy_str[] = {
 	[POLICY_DEFAULT] = "default",
 	[POLICY_PERFORMANCE] = "performance",
 	[POLICY_POWERSAVE] = "powersave"
+#if defined(CONFIG_BCM_KF_POWER_SAVE)
+        ,
+	[POLICY_L0SPOWERSAVE] = "l0s_powersave",
+	[POLICY_L1POWERSAVE] = "l1_powersave",
+#endif
 };
 
 #define LINK_RETRAIN_TIMEOUT HZ
@@ -102,6 +111,14 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
 	case POLICY_POWERSAVE:
 		/* Enable ASPM L0s/L1 */
 		return ASPM_STATE_ALL;
+#if defined(CONFIG_BCM_KF_POWER_SAVE)
+	case POLICY_L0SPOWERSAVE:
+		/* Enable ASPM L0s */
+		return ASPM_STATE_L0S;
+	case POLICY_L1POWERSAVE:
+		/* Enable ASPM L1 */
+		return ASPM_STATE_L1;
+#endif
 	case POLICY_DEFAULT:
 		return link->aspm_default;
 	}
@@ -112,9 +129,15 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
 {
 	switch (aspm_policy) {
 	case POLICY_PERFORMANCE:
+#if defined(CONFIG_BCM_KF_POWER_SAVE)
+	case POLICY_L0SPOWERSAVE:
+#endif
 		/* Disable ASPM and Clock PM */
 		return 0;
 	case POLICY_POWERSAVE:
+#if defined(CONFIG_BCM_KF_POWER_SAVE)
+	case POLICY_L1POWERSAVE:
+#endif
 		/* Disable Clock PM */
 		return 1;
 	case POLICY_DEFAULT:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4bf71028556b995bd48c0ea9caec081445fa4234..353e9227482e8238b105351e4b62ffdf0537ea47 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1633,7 +1633,7 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
 
 	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
 	dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
-		 dev->vendor, dev->device);
+			dev->vendor, dev->device);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_0,	quirk_reroute_to_boot_interrupts_intel);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_1,	quirk_reroute_to_boot_interrupts_intel);
@@ -1676,7 +1676,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
 	pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
 
 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
-		 dev->vendor, dev->device);
+		dev->vendor, dev->device);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10, 	quirk_disable_intel_boot_interrupt);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10, 	quirk_disable_intel_boot_interrupt);
@@ -1744,10 +1744,10 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
 		 dev->vendor, dev->device);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8131_BRIDGE, 	quirk_disable_amd_813x_boot_interrupt);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8132_BRIDGE, 	quirk_disable_amd_813x_boot_interrupt);
 
 #define AMD_8111_PCI_IRQ_ROUTING	0x56
 
@@ -2833,7 +2833,7 @@ static void __devinit quirk_intel_mc_errata(struct pci_dev *dev)
 		dev_err(&dev->dev, "Error attempting to write the read "
 			"completion coalescing register.\n");
 		return;
-	}
+}
 
 	pr_info_once("Read completion coalescing disabled due to hardware "
 		     "errata relating to 256B MPS.\n");
@@ -2866,7 +2866,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
 
-
 static void do_one_fixup_debug(void (*fn)(struct pci_dev *dev), struct pci_dev *dev)
 {
 	ktime_t calltime, delta, rettime;
@@ -2917,6 +2916,7 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
+
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
 {
@@ -3028,12 +3028,12 @@ static int __init pci_apply_final_quirks(void)
 			       pci_dfl_cache_line_size << 2);
 			pci_cache_line_size = pci_dfl_cache_line_size;
 		}
-	}
+                }
 	if (!pci_cache_line_size) {
 		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
 		       cls << 2, pci_dfl_cache_line_size << 2);
 		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
-	}
+        }
 
 	return 0;
 }
@@ -3083,7 +3083,7 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
 	msleep(100);
 
 	return 0;
-}
+        }
 
 #define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
 
@@ -3105,7 +3105,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 		    (i->device == dev->device ||
 		     i->device == (u16)PCI_ANY_ID))
 			return i->reset(dev, probe);
-	}
+}
 
 	return -ENOTTY;
 }
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index f8d818abf98caf4dc8275a65e18695c41c639537..3080e0b70cb9b66b46e45d6d1de9d32890eca0be 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -1,10 +1,20 @@
 menu "Remoteproc drivers (EXPERIMENTAL)"
 
+if !BCM_KF_ANDROID
 # REMOTEPROC gets selected by whoever wants it
 config REMOTEPROC
 	tristate
 	depends on EXPERIMENTAL
 	select FW_CONFIG
+endif
+if BCM_KF_ANDROID
+# REMOTEPROC gets selected by whoever wants it
+config REMOTEPROC
+	tristate
+	depends on EXPERIMENTAL
+	select FW_CONFIG
+	select VIRTIO
+endif
 
 config OMAP_REMOTEPROC
 	tristate "OMAP remoteproc support"
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index b48c24f7d21aacbae95775e0f47e841de5015f66..3572dc962012a5502629a66f7db3c9d0a616d96b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -34,6 +34,10 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+#include <linux/bcm_realtime.h>
+#endif
+
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
@@ -435,6 +439,14 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 			shost->host_no, PTR_ERR(shost->ehandler));
 		goto fail_kfree;
 	}
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+    /*convert the thread to realtime RR thread */
+    {
+        struct sched_param param;
+        param.sched_priority = BCM_RTPRIO_DATA;
+        sched_setscheduler(shost->ehandler, SCHED_RR, &param);
+    }
+#endif
 
 	scsi_proc_hostdir_add(shost->hostt);
 	return shost;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3d8f662e4fe9a52d0adc9b9b8d5b4fcb06cdd3a9..3a545c5c0e778bdb679338dafa71977c7abb8806 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1154,6 +1154,24 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
  * which are wrappers around this core asynchronous primitive.)
  */
 int spi_async(struct spi_device *spi, struct spi_message *message)
+#if defined(CONFIG_BCM_KF_SPI)
+{
+	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	/* Holding the spinlock and disabling irqs for the duration of the transfer is problematic
+	 * -- the controller driver manages the locking, so call __spi_async without the lock. */
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	if (master->bus_lock_flag){
+		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+		return -EBUSY;
+        }
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return __spi_async(spi, message);
+}
+#else
 {
 	struct spi_master *master = spi->master;
 	int ret;
@@ -1170,6 +1188,7 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
 
 	return ret;
 }
+#endif
 EXPORT_SYMBOL_GPL(spi_async);
 
 /**
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index eb1dee26bda3662972d9e9e6e4595402ba775ee5..4aa7d1749c86f61a0886a52ab754c9f5f3182f80 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -38,6 +38,21 @@ config ANDROID_RAM_CONSOLE
 	select ANDROID_PERSISTENT_RAM
 	default n
 
+config PERSISTENT_TRACER
+	bool "Persistent function tracer"
+	depends on HAVE_FUNCTION_TRACER
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select FUNCTION_TRACER
+	select ANDROID_PERSISTENT_RAM
+	help
+	  persistent_trace traces function calls into a persistent ram
+	  buffer that can be decoded and dumped after reboot through
+	  /sys/kernel/debug/persistent_trace.  It can be used to
+	  determine what function was last called before a reset or
+	  panic.
+
+	  If unsure, say N.
+
 config ANDROID_TIMED_OUTPUT
 	bool "Timed output class driver"
 	default y
@@ -53,8 +68,19 @@ config ANDROID_LOW_MEMORY_KILLER
 	---help---
 	  Register processes to be killed when memory is low
 
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+	bool "Android Low Memory Killer: detect oom_adj values"
+	depends on ANDROID_LOW_MEMORY_KILLER
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	---help---
+	  Detect oom_adj values written to
+	  /sys/module/lowmemorykiller/parameters/adj and convert them
+	  to oom_score_adj values.
+
 source "drivers/staging/android/switch/Kconfig"
 
+if !BCM_KF_ANDROID
 config ANDROID_INTF_ALARM
 	bool "Android alarm driver"
 	depends on RTC_CLASS
@@ -80,6 +106,19 @@ config ANDROID_ALARM_OLDDRV_COMPAT
 	  Provides preprocessor alias to aid compatability with
 	  older out-of-tree drivers that use the Android Alarm
 	  in-kernel API. This will be removed eventually.
+endif
+
+if BCM_KF_ANDROID
+config ANDROID_INTF_ALARM_DEV
+	bool "Android alarm driver"
+	depends on RTC_CLASS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+	  Also exports the alarm interface to user-space.
+endif
 
 endif # if ANDROID
 
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 9b6c9ed91f6900549afb1544b607d7ccc1d0b315..e53d8d2b6be785c009ce5757b88db33026dde0b5 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,3 +1,9 @@
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+ccflags-y += -I$(src)			# needed for trace events
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
@@ -7,5 +13,17 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
 obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
 obj-$(CONFIG_ANDROID_SWITCH)		+= switch/
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
+obj-$(CONFIG_PERSISTENT_TRACER)		+= trace_persistent.o
+
+CFLAGS_REMOVE_trace_persistent.o = -pg
+else
+obj-$(CONFIG_ANDROID_INTF_ALARM)	+= alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_ANDROID_INTF_ALARM)	+= alarm.o
 obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
index 3d986ce50b9935bf9c56efe7b714b30d85a8ecd9..1502801ff731c335035f3f92896859ae1333c0d6 100644
--- a/drivers/staging/android/persistent_ram.c
+++ b/drivers/staging/android/persistent_ram.c
@@ -407,11 +407,11 @@ struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
 		if (buffer_size(prz) > prz->buffer_size ||
 		    buffer_start(prz) > buffer_size(prz))
 			pr_info("persistent_ram: found existing invalid buffer,"
-				" size %ld, start %ld\n",
+				" size %zu, start %zu\n",
 			       buffer_size(prz), buffer_start(prz));
 		else {
 			pr_info("persistent_ram: found existing buffer,"
-				" size %ld, start %ld\n",
+				" size %zu, start %zu\n",
 			       buffer_size(prz), buffer_start(prz));
 			persistent_ram_save_old(prz);
 		}
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..65204a269478a86237376c7c9fd66cef5d4c344c
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,20 @@
+if BCM_KF_ANDROID
+menuconfig SWITCH
+	tristate "Switch class support"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Say Y here to enable switch class support. This allows
+	  monitoring switches by userspace via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+	tristate "GPIO Swith support"
+	depends on GENERIC_GPIO
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
+endif
+
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..4d6ca1cf31a60a3918acea6499f6bd9957f63cc7
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,6 @@
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+# Switch Class Driver
+obj-$(CONFIG_SWITCH)		+= switch_class.o
+obj-$(CONFIG_SWITCH_GPIO)	+= switch_gpio.o
+
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 830cd62d84925e548cd7480b2d6fb3a4e582a08d..0994bb2c949c58f7de1ccea37769c60def252586 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -388,3 +388,4 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
 	  If the number you specify is not a valid byte channel handle, then
 	  there simply will be no early console output.  This is true also
 	  if you don't boot under a hypervisor at all.
+
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 94b6eda87afdd8a7a5ac761eddd04a9b618d77bd..6a1d6ebb65589f95d09826e859c72411b7726a91 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1987,7 +1987,15 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
 				tty->ops->flush_chars(tty);
 		} else {
 			while (nr > 0) {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+				/*CVE-2014-0196*/
+				mutex_lock(&tty->output_lock);
+#endif
 				c = tty->ops->write(tty, b, nr);
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)				
+				/*CVE-2014-0196*/
+				mutex_unlock(&tty->output_lock);
+#endif
 				if (c < 0) {
 					retval = c;
 					goto break_out;
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 0db379ed9f86432e9075b26ddb7cbd922404af59..2c4e799604c4f89477380f392b9a6605dc1e4b48 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -17,6 +17,9 @@
  */
 
 #if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+static char sysrq_start_char='^';
+#endif
 #define SUPPORT_SYSRQ
 #endif
 
@@ -1437,6 +1440,20 @@ serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
 			else if (lsr & UART_LSR_FE)
 				flag = TTY_FRAME;
 		}
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ) && defined(SUPPORT_SYSRQ)
+		/*
+		* Simple hack for substituting a regular ASCII char as the break
+		* char for the start of the Magic Sysrq sequence.  This duplicates
+		* some of the code in uart_handle_break() in serial_core.h
+		*/
+		if (up->port.sysrq == 0)
+		{
+			if (ch == sysrq_start_char) {
+				up->port.sysrq = jiffies + HZ*5;
+				goto ignore_char;
+			}
+		}
+#endif
 		if (uart_handle_sysrq_char(port, ch))
 			goto ignore_char;
 
@@ -1691,6 +1708,9 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
 	struct irq_info *i;
 	struct hlist_node *n;
 	struct hlist_head *h;
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+	i = NULL;
+#endif
 
 	mutex_lock(&hash_mutex);
 
@@ -3351,3 +3371,4 @@ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
 MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
 #endif
 MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
+
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3d569cd68f58d5a6550fc37bb87f6d571eeb5de3..d90226eb5663accde1a548000902e139e4eb61c8 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1655,8 +1655,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	/* Set baud rate */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM63138_SIM)
+	writew(0, port->membase + UART011_FBRD);
+	writew(1, port->membase + UART011_IBRD);
+#else
 	writew(quot & 0x3f, port->membase + UART011_FBRD);
 	writew(quot >> 6, port->membase + UART011_IBRD);
+#endif
 
 	/*
 	 * ----------v----------v----------v----------v-----
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 246b823c1b27f7f2847fdcf08798b2f669102039..01cad50bd4db26de6857f8772e5f79094372e102 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1739,6 +1739,9 @@ struct baud_rates {
 };
 
 static const struct baud_rates baud_rates[] = {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM63138_SIM)
+	{3000000, B3000000},
+#endif
 	{ 921600, B921600 },
 	{ 460800, B460800 },
 	{ 230400, B230400 },
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 05728894a88c23330e7b57eb8994f50477fc1b73..ce57a26008e15cf4ac85572696ea03ee9b268bba 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -199,12 +199,23 @@ static struct sysrq_key_op sysrq_showlocks_op = {
 #define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
 #endif
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_BCM_KF_CHAR_SYSRQ)
 static DEFINE_SPINLOCK(show_lock);
 
 static void showacpu(void *dummy)
 {
 	unsigned long flags;
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	struct pt_regs *regs = get_irq_regs();
+
+	printk("=>entered showacpu on CPU %d: (idle=%d)\n",
+	       smp_processor_id(), idle_cpu(smp_processor_id()));
+
+	if (regs) {
+		printk(KERN_INFO "=>calling show_regs:\n");
+		show_regs(regs);
+	}
+#endif
 
 	/* Idle CPUs have no interesting backtrace. */
 	if (idle_cpu(smp_processor_id()))
@@ -212,7 +223,18 @@ static void showacpu(void *dummy)
 
 	spin_lock_irqsave(&show_lock, flags);
 	printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	printk(KERN_INFO "=>calling show_stack, current=%p ", current);
+	if (current)
+		printk(" (comm=%s)\n", current->comm);
+	else
+		printk("\n");
+
+	show_stack(current, NULL);
+#else
 	show_stack(NULL, NULL);
+#endif
+
 	spin_unlock_irqrestore(&show_lock, flags);
 }
 
@@ -225,6 +247,21 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key)
 {
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+
+
+	showacpu(NULL);
+
+#ifdef CONFIG_SMP
+	{
+		int othercpu;
+		othercpu = (0 == smp_processor_id()) ? 1 : 0;
+		printk("=== Now call other CPU (%d)\n", othercpu);
+		smp_call_function_single(othercpu, showacpu, (void *) 0xeeee, 0);
+	}
+#endif /* CONFIG_SMP */
+
+#else /* CONFIG_BCM_KF_CHAR_SYSRQ */
 	/*
 	 * Fall back to the workqueue based printing if the
 	 * backtrace printing did not succeed or the
@@ -239,6 +276,7 @@ static void sysrq_handle_showallcpus(int key)
 		}
 		schedule_work(&sysrq_showallcpus);
 	}
+#endif /* CONFIG_BCM_KF_CHAR_SYSRQ */
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
@@ -398,6 +436,85 @@ static struct sysrq_key_op sysrq_unrt_op = {
 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
 };
 
+#ifdef CONFIG_BCM_KF_CHAR_SYSRQ
+
+/*
+ * This function is a "safe" function to call at anytime.  It should only
+ * read data that do not require a lock and will not cause exceptions,
+ * i.e. reading statistics but not dereferencing pointers.  It should not
+ * assume interrupts are working on any of the CPU's.
+ * The 'L' option is also good.
+ */
+static void sysrq_handle_cpu_summary(int key)
+{
+	int cpu;
+	int max_cpu=1;
+#ifdef CONFIG_SMP
+	max_cpu=2;
+#endif
+
+
+	printk("CPU summary invoked on cpu %d\n", smp_processor_id());
+	for (cpu=0; cpu < max_cpu; cpu++) {
+		if (cpu_online(cpu)) {
+			printk("  cpu %d is online.\n", cpu);
+		}
+		else {
+			printk("  WARNING: cpu %d is offline!\n", cpu);
+		}
+	}
+	printk("\n\n");
+	// dump interrupt statistics
+	// print other CPU info that does not require getting a lock or
+	// interrupts to be enabled on that CPU
+}
+
+static struct sysrq_key_op sysrq_cpu_summary_op = {
+	.handler	= sysrq_handle_cpu_summary,
+	.help_msg	= "BRCM: show summary status on all CPUs(A)",
+	.action_msg	= "CPU Summary Status",
+	.enable_mask	= 0x4000,   /* typically all flags will be enabled */
+};
+
+
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+
+static char intrs_output_buf[1024];
+
+static void sysrq_handle_show_intrs(int key)
+{
+	struct seq_file intrs_seq_file;
+	loff_t pos=0;
+
+	// Just initialize the parts of seq_file that seq_printf needs
+	memset(&intrs_seq_file, 0, sizeof(intrs_seq_file));
+	intrs_seq_file.buf = intrs_output_buf;
+	intrs_seq_file.size = sizeof(intrs_output_buf);
+
+	/*
+	 * Leverage the show_interrupts() function in kernel/mips/irq.c to dump
+	 * to our buffer.
+	 */
+	while (pos <= NR_IRQS)
+	{
+		memset(intrs_output_buf, 0, sizeof(intrs_output_buf));
+		intrs_seq_file.count = 0;
+		show_interrupts(&intrs_seq_file, &pos);
+		printk("%s", intrs_output_buf);
+		pos++;
+	}
+}
+
+static struct sysrq_key_op sysrq_show_intrs_op = {
+	.handler	= sysrq_handle_show_intrs,
+	.help_msg	= "BRCM: show interrupt counts on all CPUs(Y)",
+	.action_msg	= "Interrupt Counts",
+	.enable_mask	= 0x4000,   /* typically all flags will be enabled */
+};
+
+#endif /* CONFIG_BCM_KF_CHAR_SYSRQ */
+
 /* Key Operations table and lock */
 static DEFINE_SPINLOCK(sysrq_key_table_lock);
 
@@ -413,11 +530,15 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
 	&sysrq_loglevel_op,		/* 8 */
 	&sysrq_loglevel_op,		/* 9 */
 
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	&sysrq_cpu_summary_op,	/* a*/
+#else
 	/*
 	 * a: Don't use for system provided sysrqs, it is handled specially on
 	 * sparc and will never arrive.
 	 */
 	NULL,				/* a */
+#endif
 	&sysrq_reboot_op,		/* b */
 	&sysrq_crash_op,		/* c & ibm_emac driver debug */
 	&sysrq_showlocks_op,		/* d */
@@ -433,7 +554,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
 	NULL,				/* j */
 #endif
 	&sysrq_SAK_op,			/* k */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_BCM_KF_CHAR_SYSRQ)
 	&sysrq_showallcpus_op,		/* l */
 #else
 	NULL,				/* l */
@@ -453,8 +574,12 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
 	&sysrq_showstate_blocked_op,	/* w */
 	/* x: May be registered on ppc/powerpc for xmon */
 	NULL,				/* x */
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	&sysrq_show_intrs_op,	/* y */
+#else
 	/* y: May be registered on sparc64 for global register dump */
 	NULL,				/* y */
+#endif
 	&sysrq_ftrace_dump_op,		/* z */
 };
 
@@ -467,6 +592,10 @@ static int sysrq_key_table_key2index(int key)
 		retval = key - '0';
 	else if ((key >= 'a') && (key <= 'z'))
 		retval = key + 10 - 'a';
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	else if ((key >= 'A') && (key <= 'Z'))
+		retval = key + 10 - 'A';
+#endif
 	else
 		retval = -1;
 	return retval;
@@ -502,6 +631,10 @@ void __handle_sysrq(int key, bool check_mask)
 	int i;
 	unsigned long flags;
 
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+	printk("SysRq: intr handled on CPU %d\n", smp_processor_id());
+#endif
+
 	spin_lock_irqsave(&sysrq_key_table_lock, flags);
 	/*
 	 * Raise the apparent loglevel to maximum so that the sysrq header
@@ -542,6 +675,9 @@ void __handle_sysrq(int key, bool check_mask)
 			}
 		}
 		printk("\n");
+#if defined(CONFIG_BCM_KF_CHAR_SYSRQ)
+		printk("All commands are case insensitive.\n");
+#endif
 		console_loglevel = orig_log_level;
 	}
 	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e727b876726c99b30bd3c82e76d3f6cac2f1a414..e1712122548b045d7bb70d376c77e71064e22e21 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -849,6 +849,13 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 			clear_port_feature(hub->hdev, port1,
 					USB_PORT_FEAT_C_ENABLE);
 		}
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+		if (portchange & USB_PORT_STAT_C_RESET) {
+			need_debounce_delay = true;
+			clear_port_feature(hub->hdev, port1,
+					USB_PORT_FEAT_C_RESET);
+		}
+#endif
 		if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
 				hub_is_superspeed(hub->hdev)) {
 			need_debounce_delay = true;
@@ -3691,6 +3698,29 @@ static void hub_events(void)
 				clear_port_feature(hdev, i,
 					USB_PORT_FEAT_C_CONNECTION);
 				connect_change = 1;
+#if (defined(CONFIG_BCM_KF_USB_STORAGE) && defined(CONFIG_BCM96318))
+				/*fix for 6318 EHCI data corruption */
+				{
+					static volatile unsigned *usbh_simctrl_reg_ptr = (void *)0xb0005220;
+					static volatile unsigned *ehci_portstatus_reg_ptr = (void *)0xb0005054;
+					static int ohci_memreq_disabled = 0;
+					int ehci_device_present;
+
+					ehci_device_present = (*ehci_portstatus_reg_ptr & 0x1);/*check connect status */
+
+
+					if(!ohci_memreq_disabled && ehci_device_present)
+					{
+						*usbh_simctrl_reg_ptr |= (0x2); /*set bit 1 - USBH_OHCI_MEM_REQ_DIS */  
+						ohci_memreq_disabled =1;
+					}
+					else if(ohci_memreq_disabled && !ehci_device_present)
+					{
+						*usbh_simctrl_reg_ptr &= ~(0x2); /*reset bit 1 - USBH_OHCI_MEM_REQ_DIS */  
+						ohci_memreq_disabled =0;
+					}
+				}
+#endif
 			}
 
 			if (portchange & USB_PORT_STAT_C_ENABLE) {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index ef116a55afa43476c4acbbf9f457642889ea522c..9af67154f13eb85a8012655b0049efbe6ea475f5 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1547,6 +1547,11 @@ static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
 		   alt->desc.bInterfaceProtocol))
 		return -ENOMEM;
 
+#if defined(CONFIG_BCM_KF_USB_HOSTS)
+	if (add_uevent_var(env, "USBDEVNAME=%s", dev_name(dev)))
+		return -ENOMEM;
+#endif
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 2633f7595116991f0fad156f646972c459e19f2e..9b72928493aad2e5102302189fa847f48a5a0ebe 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -846,6 +846,16 @@ config USB_G_PRINTER
 	  For more information, see Documentation/usb/gadget_printer.txt
 	  which includes sample code for accessing the device file.
 
+config USB_G_ANDROID
+	boolean "Android Composite Gadget"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  The Android Composite Gadget supports multiple USB
+	  functions: adb, acm, mass storage, mtp, accessory
+	  and rndis.
+	  Each function can be configured and enabled/disabled
+	  dynamically from userspace through a sysfs interface.
+
 config USB_CDC_COMPOSITE
 	tristate "CDC Composite Device (Ethernet and ACM)"
 	depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b7f6eefc3927677bc0c870fa2212a757fcfcfa0e..c402b35e7b28c0e894f36faf14327a04c272f742 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -52,6 +52,11 @@ g_nokia-y			:= nokia.o
 g_webcam-y			:= webcam.o
 g_ncm-y				:= ncm.o
 g_acm_ms-y			:= acm_ms.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+g_android-y			:= android.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
 obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
@@ -71,3 +76,8 @@ obj-$(CONFIG_USB_G_NOKIA)	+= g_nokia.o
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
 obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
 obj-$(CONFIG_USB_G_ACM_MS)	+= g_acm_ms.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_USB_G_ANDROID)	+= g_android.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f788eb86707c0e29d44e455d18b86b0ef6bd203c..93467e8bd8deb9a3d8aa67e1aa96cddf6fd7ea89 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -28,7 +28,7 @@ config USB_XHCI_HCD
 	  module will be called xhci-hcd.
 
 config USB_XHCI_PLATFORM
-	tristate
+	tristate "xHCI platform support"
 	depends on USB_XHCI_HCD
 
 config USB_XHCI_HCD_DEBUGGING
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index bb73df6597bb9bf1d93edf795dc92dd8c5c99144..c002fde6228fc9c511f088add55be93b1fe3236c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -708,6 +708,13 @@ static int ehci_init(struct usb_hcd *hcd)
 		temp &= ~(3 << 2);
 		temp |= (EHCI_TUNE_FLS << 2);
 	}
+#if (defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX))
+    /* Some DSL chips(63268,6828 )have a LPM support for HOST, but when a device 
+       with out an LPM support is connected, the device is not detected properly.
+       so disable LPM support in SW for all DSL chips by default
+     */
+
+#else
 	if (HCC_LPM(hcc_params)) {
 		/* support link power management EHCI 1.1 addendum */
 		ehci_dbg(ehci, "support lpm\n");
@@ -719,6 +726,7 @@ static int ehci_init(struct usb_hcd *hcd)
 		}
 		temp |= hird << 24;
 	}
+#endif
 	ehci->command = temp;
 
 	/* Accept arbitrarily long scatter-gather lists */
@@ -843,7 +851,6 @@ static int __maybe_unused ehci_setup (struct usb_hcd *hcd)
 }
 
 /*-------------------------------------------------------------------------*/
-
 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c5e9e4a76f148d4eed0c4785cf46fb38d143a074..dc920dccdce5f8f82ebfc1ad68c563d54f3778ff 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -908,6 +908,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
 
 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 {
+
+#if (defined(CONFIG_BCM_KF_MIPS_BCM963XX) && defined(CONFIG_MIPS_BCM963XX))
+    /*as 963xx chips fake USB controllers as PCI, just return from here*/
+	if(pdev->vendor == PCI_VENDOR_ID_BROADCOM)
+		return;
+#endif
 	/* Skip Netlogic mips SoC's internal PCI USB controller.
 	 * This device does not need/support EHCI/OHCI handoff
 	 */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6b908249b019d9f3421bae170866dcbb04c55db2..235965d9441c4d93ef9867521bf84bd3234cb506 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -55,7 +55,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
 	if (cycle_state == 0) {
 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
-			seg->trbs[i].link.control |= TRB_CYCLE;
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+		seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+#else
+		seg->trbs[i].link.control |= TRB_CYCLE;
+#endif
 	}
 	seg->dma = dma;
 	seg->next = NULL;
@@ -301,7 +305,11 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		if (cycle_state == 0) {
 			for (i = 0; i < TRBS_PER_SEGMENT; i++)
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+				seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+#else
 				seg->trbs[i].link.control |= TRB_CYCLE;
+#endif
 		}
 		/* All endpoint rings have link TRBs */
 		xhci_link_segments(xhci, seg, seg->next, type);
@@ -1659,7 +1667,11 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 		if (!buf)
 			goto fail_sp5;
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+		xhci->scratchpad->sp_array[i] = cpu_to_le64(dma);
+#else
 		xhci->scratchpad->sp_array[i] = dma;
+#endif
 		xhci->scratchpad->sp_buffers[i] = buf;
 		xhci->scratchpad->sp_dma_buffers[i] = dma;
 	}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 203ba315c72a276f166e43885e30f5a68f197d96..9b7975c6784e730bb264a5e10fd3e124de412d63 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1279,7 +1279,11 @@ static void handle_device_notification(struct xhci_hcd *xhci,
 	u32 slot_id;
 	struct usb_device *udev;
 
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
+#else
 	slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
+#endif
 	if (!xhci->devs[slot_id]) {
 		xhci_warn(xhci, "Device Notification event for "
 				"unused slot %u\n", slot_id);
@@ -2787,12 +2791,18 @@ static u32 xhci_td_remainder(unsigned int remainder)
 		return (remainder >> 10) << 17;
 }
 
+/* IGNORE_BCM_KF_EXCEPTION */
+/* The changes made to this file are backported from 3.7 kernel,
+ * I'm adding exception instead on wrapping changes with BCM_XXX
+ * as these are not broadcom specific changes
+ */
+
 /*
- * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
- * the TD (*not* including this TRB).
+ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
+ * packets remaining in the TD (*not* including this TRB).
  *
  * Total TD packet count = total_packet_count =
- *     roundup(TD size in bytes / wMaxPacketSize)
+ *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
  *
  * Packets transferred up to and including this TRB = packets_transferred =
  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
@@ -2800,15 +2810,16 @@ static u32 xhci_td_remainder(unsigned int remainder)
  * TD size = total_packet_count - packets_transferred
  *
  * It must fit in bits 21:17, so it can't be bigger than 31.
+ * The last TRB in a TD must have the TD size set to zero.
  */
-
 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
-		unsigned int total_packet_count, struct urb *urb)
+		unsigned int total_packet_count, struct urb *urb,
+		unsigned int num_trbs_left)
 {
 	int packets_transferred;
 
 	/* One TRB with a zero-length data packet. */
-	if (running_total == 0 && trb_buff_len == 0)
+	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
 		return 0;
 
 	/* All the TRB queueing functions don't count the current TRB in
@@ -2817,7 +2828,9 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 	packets_transferred = (running_total + trb_buff_len) /
 		usb_endpoint_maxp(&urb->ep->desc);
 
-	return xhci_td_remainder(total_packet_count - packets_transferred);
+	if ((total_packet_count - packets_transferred) > 31)
+		return 31 << 17;
+	return (total_packet_count - packets_transferred) << 17;
 }
 
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
@@ -2844,7 +2857,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_mapped_sgs;
-	total_packet_count = roundup(urb->transfer_buffer_length,
+	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
 			usb_endpoint_maxp(&urb->ep->desc));
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
@@ -2927,7 +2940,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 					running_total);
 		} else {
 			remainder = xhci_v1_0_td_remainder(running_total,
-					trb_buff_len, total_packet_count, urb);
+					trb_buff_len, total_packet_count, urb,
+					num_trbs - 1);
 		}
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
@@ -3035,7 +3049,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_cycle = ep_ring->cycle_state;
 
 	running_total = 0;
-	total_packet_count = roundup(urb->transfer_buffer_length,
+	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
 			usb_endpoint_maxp(&urb->ep->desc));
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
@@ -3081,7 +3095,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 					running_total);
 		} else {
 			remainder = xhci_v1_0_td_remainder(running_total,
-					trb_buff_len, total_packet_count, urb);
+					trb_buff_len, total_packet_count, urb,
+					num_trbs - 1);
 		}
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
@@ -3344,7 +3359,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		addr = start_addr + urb->iso_frame_desc[i].offset;
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
-		total_packet_count = roundup(td_len,
+		total_packet_count = DIV_ROUND_UP(td_len,
 				usb_endpoint_maxp(&urb->ep->desc));
 		/* A zero-length transfer still involves at least one packet. */
 		if (total_packet_count == 0)
@@ -3421,7 +3436,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			} else {
 				remainder = xhci_v1_0_td_remainder(
 						running_total, trb_buff_len,
-						total_packet_count, urb);
+						total_packet_count, urb,
+						(trbs_per_td - j - 1));
 			}
 			length_field = TRB_LEN(trb_buff_len) |
 				remainder |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7beed536e6b9aa9587a802ce5e499fc0e56e32cb..41d859934670e8d0def25c1668d467836e150021 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -327,6 +327,8 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 	return;
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+#ifdef CONFIG_PM
 static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 {
 	int i;
@@ -336,6 +338,18 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 			synchronize_irq(xhci->msix_entries[i].vector);
 	}
 }
+#endif
+#else
+static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+	int i;
+
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+}
+#endif
 
 static int xhci_try_enable_msi(struct usb_hcd *hcd)
 {
@@ -1732,8 +1746,13 @@ static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
 	 * (bit 1).  The default control endpoint is added during the Address
 	 * Device command and is never removed until the slot is disabled.
 	 */
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+#else
 	valid_add_flags = ctrl_ctx->add_flags >> 2;
 	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+#endif
 
 	/* Use hweight32 to count the number of ones in the add flags, or
 	 * number of endpoints added.  Don't count endpoints that are changed
@@ -1751,8 +1770,13 @@ static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
 	u32 valid_drop_flags;
 
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+#if defined(CONFIG_BCM_KF_MIPS_BCM963XX) || defined(CONFIG_BCM_KF_ARM_BCM963XX) 
+	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+#else
 	valid_add_flags = ctrl_ctx->add_flags >> 2;
 	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+#endif
 
 	return hweight32(valid_drop_flags) -
 		hweight32(valid_add_flags & valid_drop_flags);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 5c87db06b598164f7b803fe25175dceabd0e8a77..e88660e7ec0d3dc21b04037ffd29aee50b2d43f1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -12,6 +12,15 @@ config USB_OTG_UTILS
 	  Select this to make sure the build includes objects from
 	  the OTG infrastructure directory.
 
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 if USB || USB_GADGET
 
 #
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 41aa5098b139973a4b7e961b7227d2718690eb05..fe04090a659130aa0edec484c33962ed25c7b10f 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -7,6 +7,11 @@ ccflags-$(CONFIG_USB_GADGET_DEBUG)	+= -DDEBUG
 
 # infrastructure
 obj-$(CONFIG_USB_OTG_UTILS)	+= otg.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_USB_OTG_WAKELOCK)	+= otg-wakelock.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 # transceiver drivers
 obj-$(CONFIG_USB_GPIO_VBUS)	+= gpio_vbus.o
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 7691c866637be6be835442396b1f545da5a7f09c..5b2f0bb1ac9367e512793de655a65b8e219996ce 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -201,6 +201,7 @@ config USB_STORAGE_ENE_UB6250
 	  To compile this driver as a module, choose M here: the
 	  module will be called ums-eneub6250.
 
+if !BCM_KF_ANDROID
 config USB_UAS
 	tristate "USB Attached SCSI"
 	depends on USB && SCSI
@@ -213,6 +214,21 @@ config USB_UAS
 	  say 'Y' or 'M' here and the kernel will use the right driver.
 
 	  If you compile this driver as a module, it will be named uas.
+endif
+if BCM_KF_ANDROID
+config USB_UAS
+	tristate "USB Attached SCSI"
+	depends on USB && SCSI && BROKEN
+	help
+	  The USB Attached SCSI protocol is supported by some USB
+	  storage devices.  It permits higher performance by supporting
+	  multiple outstanding commands.
+
+	  If you don't know whether you have a UAS device, it is safe to
+	  say 'Y' or 'M' here and the kernel will use the right driver.
+
+	  If you compile this driver as a module, it will be named uas.
+endif
 
 config USB_LIBUSUAL
 	bool "The shared table of common (or usual) storage devices"
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 11418da9bc0927492b820383ca433f309897e2fe..0d9640534e05d4e136c0e59609eebd6449e97e29 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -563,8 +563,13 @@ struct scsi_host_template usb_stor_host_template = {
 	/* lots of sg segments can be handled */
 	.sg_tablesize =			SCSI_MAX_SG_CHAIN_SEGMENTS,
 
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+	/* limit the total size of a transfer to 256 KB */
+	.max_sectors =                  512,
+#else
 	/* limit the total size of a transfer to 120 KB */
 	.max_sectors =                  240,
+#endif
 
 	/* merge commands... this seems to help performance, but
 	 * periodically someone should test to see which setting is more
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 2653e73db6233eabcf83f527019451820157e65d..d92c4c6a044e53855f4ef1ba8b9a7fdcb2cfa8a8 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -59,6 +59,10 @@
 #include <linux/mutex.h>
 #include <linux/utsname.h>
 
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+#include <linux/bcm_realtime.h>
+#endif
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -769,6 +773,14 @@ static int usb_stor_acquire_resources(struct us_data *us)
 				"Unable to start control thread\n");
 		return PTR_ERR(th);
 	}
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+    /*convert the thread to realtime RR thread */
+    {
+        struct sched_param param;
+        param.sched_priority = BCM_RTPRIO_DATA;
+        sched_setscheduler(th, SCHED_RR, &param);
+    }
+#endif
 	us->ctl_thread = th;
 
 	return 0;
diff --git a/fs/Kconfig b/fs/Kconfig
index f95ae3a027f38dd3173d5c766eeeb0ecda58d2be..7b5ad49dd2c1e11e662004f4def36e152c08e600 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -67,6 +67,7 @@ source "fs/quota/Kconfig"
 
 source "fs/autofs4/Kconfig"
 source "fs/fuse/Kconfig"
+source "fs/overlayfs/Kconfig"
 
 config CUSE
 	tristate "Character device in Userspace support"
@@ -203,6 +204,9 @@ source "fs/hfsplus/Kconfig"
 source "fs/befs/Kconfig"
 source "fs/bfs/Kconfig"
 source "fs/efs/Kconfig"
+if BCM_KF_ANDROID
+source "fs/yaffs2/Kconfig"
+endif
 source "fs/jffs2/Kconfig"
 # UBIFS File system configuration
 source "fs/ubifs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index 2fb977934673812c52e2aa9e7ed0a392e5181a34..04e537636cf4daa765b8cb2168b07b79149c879e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_QNX6FS_FS)		+= qnx6/
 obj-$(CONFIG_AUTOFS4_FS)	+= autofs4/
 obj-$(CONFIG_ADFS_FS)		+= adfs/
 obj-$(CONFIG_FUSE_FS)		+= fuse/
+obj-$(CONFIG_OVERLAYFS_FS)	+= overlayfs/
 obj-$(CONFIG_UDF_FS)		+= udf/
 obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
 obj-$(CONFIG_OMFS_FS)		+= omfs/
@@ -125,3 +126,10 @@ obj-$(CONFIG_GFS2_FS)           += gfs2/
 obj-y				+= exofs/ # Multiple modules
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+# Patched by YAFFS
+obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 68954937a071abd9ce4a67b6614880a4073d1554..c54ea903a169dfaeb387e26c6b7c536c57c7de1e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -544,6 +544,13 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
 	s->s_blocksize = path.dentry->d_sb->s_blocksize;
 	s->s_magic = ECRYPTFS_SUPER_MAGIC;
+	s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
+
+	rc = -EINVAL;
+	if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n");
+		goto out_free;
+	}
 
 	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
 	rc = PTR_ERR(inode);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3407a62590d62259f2c6bc224580bec11c55a8ec..62d0d4d6175d7cdd77395624e20e5a21b4498b99 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1141,7 +1141,11 @@ static void ext4_update_super(struct super_block *sb,
 	struct ext4_new_group_data *group_data = flex_gd->groups;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
-	int i, ret;
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+	int i;
+#else
+	int i ,ret;
+#endif
 
 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
 	/*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 75e7c1f3a08015dddd314801686faa24188cc055..d458e209c0f17d34bf5acf8589a4113b332d21f0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -143,6 +143,10 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
 	return ret;
 }
 
+#if defined(CONFIG_BCM_KF_OCF)
+EXPORT_SYMBOL(sys_dup);
+#endif // CONFIG_BCM_KF_OCF
+
 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
 
 static int setfl(int fd, struct file * filp, unsigned long arg)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 8e6381a14265025764c69d183f17ef7446939d3d..de781e0212b60cfb4ec0faee5ac418e91ca919d7 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2037,7 +2037,11 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
 {
 	spin_lock(&fc->lock);
 	if (RB_EMPTY_NODE(&ff->polled_node)) {
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+		struct rb_node **link, *parent = NULL;
+#else
 		struct rb_node **link, *parent;
+#endif
 
 		link = fuse_find_polled_node(fc, ff->kh, &parent);
 		BUG_ON(*link);
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
index 6ae169cd8faa44e92eda4f87dbaf30cc2e68c405..90c8b77cab00ed1bea1e2ab31a03c60e7a4b317f 100644
--- a/fs/jffs2/Kconfig
+++ b/fs/jffs2/Kconfig
@@ -139,6 +139,15 @@ config JFFS2_LZO
 	  This feature was added in July, 2007. Say 'N' if you need
 	  compatibility with older bootloaders or kernels.
 
+config JFFS2_LZMA
+	bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
+	select LZMA_COMPRESS
+	select LZMA_DECOMPRESS
+	depends on JFFS2_FS
+	default n
+	help
+	  JFFS2 wrapper to the LZMA C SDK
+
 config JFFS2_RTIME
 	bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
 	depends on JFFS2_FS
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index 60e5d49ca03e774fd3d02c72cdddebe7fb9eeb56..23ba6efd1f3ff2b2a31254e2150a759928497f65 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN)	+= compr_rubin.o
 jffs2-$(CONFIG_JFFS2_RTIME)	+= compr_rtime.o
 jffs2-$(CONFIG_JFFS2_ZLIB)	+= compr_zlib.o
 jffs2-$(CONFIG_JFFS2_LZO)	+= compr_lzo.o
+jffs2-$(CONFIG_JFFS2_LZMA)      += compr_lzma.o
 jffs2-$(CONFIG_JFFS2_SUMMARY)   += summary.o
+
+CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index a3750f902adcbae8f8cabbad066f9d629b805155..81dc7cabe5970b2cd1720888ec3761c28b20a63f 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -114,6 +114,16 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
 	dbg_fsbuild("scanned flash completely\n");
 	jffs2_dbg_dump_block_lists_nolock(c);
 
+	if (c->flags & (1 << 7)) {
+                printk("%s(): unlocking the mtd device... ", __func__); 
+                mtd_unlock(c->mtd, 0, c->mtd->size); 
+                printk("done.\n"); 
+ 
+                printk("%s(): erasing all blocks after the end marker... ", __func__); 
+                jffs2_erase_pending_blocks(c, -1); 
+                printk("done.\n"); 
+	}
+
 	dbg_fsbuild("pass 1 starting\n");
 	c->flags |= JFFS2_SB_FLAG_BUILDING;
 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
@@ -309,8 +319,8 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
 	   trying to GC to make more space. It'll be a fruitless task */
 	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
 
-	dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
-		    c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
+	dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
 	dbg_fsbuild("Blocks required to allow deletion:    %d (%d KiB)\n",
 		  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
 	dbg_fsbuild("Blocks required to allow writes:      %d (%d KiB)\n",
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 4849a4c9a0e24f1b8b4202630d68e491eac113dc..36c25d21ff389173c8ddf0491188bcc4c27da4e1 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -156,6 +156,14 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 	uint32_t orig_slen, orig_dlen;
 	uint32_t best_slen=0, best_dlen=0;
 
+#if defined(CONFIG_BCM_KF_JFFS)
+	if( (f->inocache->flags & INO_FLAGS_COMPR_NONE) == INO_FLAGS_COMPR_NONE )
+	{
+		ret = JFFS2_COMPR_NONE;
+		goto out;
+	}
+#endif
+
 	if (c->mount_opts.override_compr)
 		mode = c->mount_opts.compr;
 	else
@@ -241,6 +249,10 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 		pr_err("unknown compression mode\n");
 	}
 
+#if defined(CONFIG_BCM_KF_JFFS)
+out:
+#endif
+
 	if (ret == JFFS2_COMPR_NONE) {
 		*cpage_out = data_in;
 		*datalen = *cdatalen;
@@ -378,6 +390,10 @@ int __init jffs2_compressors_init(void)
 #ifdef CONFIG_JFFS2_LZO
 	jffs2_lzo_init();
 #endif
+#ifdef CONFIG_JFFS2_LZMA
+        jffs2_lzma_init();
+#endif
+
 /* Setting default compression mode */
 #ifdef CONFIG_JFFS2_CMODE_NONE
 	jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -401,6 +417,9 @@ int __init jffs2_compressors_init(void)
 int jffs2_compressors_exit(void)
 {
 /* Unregistering compressors */
+#ifdef CONFIG_JFFS2_LZMA
+        jffs2_lzma_exit();
+#endif
 #ifdef CONFIG_JFFS2_LZO
 	jffs2_lzo_exit();
 #endif
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 5e91d578f4ed858c2ac575f375619000c5c6814c..32db2e1ec610da17f621e8b8cb3c4e8a71203656 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -29,9 +29,9 @@
 #define JFFS2_DYNRUBIN_PRIORITY  20
 #define JFFS2_LZARI_PRIORITY     30
 #define JFFS2_RTIME_PRIORITY     50
-#define JFFS2_ZLIB_PRIORITY      60
-#define JFFS2_LZO_PRIORITY       80
-
+#define JFFS2_LZMA_PRIORITY      70
+#define JFFS2_ZLIB_PRIORITY      80
+#define JFFS2_LZO_PRIORITY       90
 
 #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
 #define JFFS2_DYNRUBIN_DISABLED  /*	   for decompression */
@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
 int jffs2_lzo_init(void);
 void jffs2_lzo_exit(void);
 #endif
+#ifdef CONFIG_JFFS2_LZMA
+int jffs2_lzma_init(void);
+void jffs2_lzma_exit(void);
+#endif
 
 #endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_lzma.c b/fs/jffs2/compr_lzma.c
new file mode 100644
index 0000000000000000000000000000000000000000..0fe3b75d7dcc345fcec3e141a630858be236bb33
--- /dev/null
+++ b/fs/jffs2/compr_lzma.c
@@ -0,0 +1,128 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * JFFS2 wrapper to the LZMA C SDK
+ *
+ */
+
+#include <linux/lzma.h>
+#include "compr.h"
+
+#ifdef __KERNEL__
+	static DEFINE_MUTEX(deflate_mutex);
+#endif
+
+CLzmaEncHandle *p;
+Byte propsEncoded[LZMA_PROPS_SIZE];
+SizeT propsSize = sizeof(propsEncoded);
+
+STATIC void lzma_free_workspace(void)
+{
+	LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
+}
+
+STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
+{
+	if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
+	{
+		PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
+		return -ENOMEM;
+	}
+
+	if (LzmaEnc_SetProps(p, props) != SZ_OK)
+	{
+		lzma_free_workspace();
+		return -1;
+	}
+	
+	if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
+	{
+		lzma_free_workspace();
+		return -1;
+	}
+
+        return 0;
+}
+
+STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
+			      uint32_t *sourcelen, uint32_t *dstlen)
+{
+	SizeT compress_size = (SizeT)(*dstlen);
+	int ret;
+
+	#ifdef __KERNEL__
+		mutex_lock(&deflate_mutex);
+	#endif
+
+	ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
+		0, NULL, &lzma_alloc, &lzma_alloc);
+
+	#ifdef __KERNEL__
+		mutex_unlock(&deflate_mutex);
+	#endif
+
+	if (ret != SZ_OK)
+		return -1;
+
+	*dstlen = (uint32_t)compress_size;
+
+	return 0;
+}
+
+STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
+				 uint32_t srclen, uint32_t destlen)
+{
+	int ret;
+	SizeT dl = (SizeT)destlen;
+	SizeT sl = (SizeT)srclen;
+	ELzmaStatus status;
+	
+	ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
+		propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
+
+	if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
+		return -1;
+
+	return 0;
+}
+
+static struct jffs2_compressor jffs2_lzma_comp = {
+	.priority = JFFS2_LZMA_PRIORITY,
+	.name = "lzma",
+	.compr = JFFS2_COMPR_LZMA,
+	.compress = &jffs2_lzma_compress,
+	.decompress = &jffs2_lzma_decompress,
+	.disabled = 0,
+};
+
+int INIT jffs2_lzma_init(void)
+{
+        int ret;
+	CLzmaEncProps props;
+	LzmaEncProps_Init(&props);
+
+        props.dictSize = LZMA_BEST_DICT(0x2000);
+        props.level = LZMA_BEST_LEVEL;
+        props.lc = LZMA_BEST_LC;
+        props.lp = LZMA_BEST_LP;
+        props.pb = LZMA_BEST_PB;
+        props.fb = LZMA_BEST_FB;
+
+	ret = lzma_alloc_workspace(&props);
+        if (ret < 0)
+                return ret;
+
+	ret = jffs2_register_compressor(&jffs2_lzma_comp);
+	if (ret)
+		lzma_free_workspace();
+	
+        return ret;
+}
+
+void jffs2_lzma_exit(void)
+{
+	jffs2_unregister_compressor(&jffs2_lzma_comp);
+	lzma_free_workspace();
+}
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 16a5047903a6ef3a89e031e61e4a9044fa9b99c9..406d9cc84ba8d99b7520b4ee931d9c010f9e5bea 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
 				unsigned char *cpage_out,
 				uint32_t *sourcelen, uint32_t *dstlen)
 {
-	short positions[256];
+	unsigned short positions[256];
 	int outpos = 0;
 	int pos=0;
 
@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
 				  unsigned char *cpage_out,
 				  uint32_t srclen, uint32_t destlen)
 {
-	short positions[256];
+	unsigned short positions[256];
 	int outpos = 0;
 	int pos=0;
 
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 4a6cf289be248cbfaedc60dea23afc84fcbbbf44..2d69e981f1f7fdad4f8d1ae5c9be95d4a3f086ac 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -32,6 +32,9 @@ static void jffs2_erase_callback(struct erase_info *);
 static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
 static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+#if defined(CONFIG_MTD_BRCMNAND) && defined(CONFIG_BCM_KF_MTD_BCMNAND) && defined(CONFIG_BCM_KF_NAND)
+extern int g_nand_nop;
+#endif
 
 static void jffs2_erase_block(struct jffs2_sb_info *c,
 			      struct jffs2_eraseblock *jeb)
@@ -72,9 +75,49 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	instr->len = c->sector_size;
 	instr->callback = jffs2_erase_callback;
 	instr->priv = (unsigned long)(&instr[1]);
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
 	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
 	((struct erase_priv_struct *)instr->priv)->c = c;
+#if defined(CONFIG_MTD_BRCMNAND) && defined(CONFIG_BCM_KF_MTD_BCMNAND) && defined(CONFIG_BCM_KF_NAND)
+	if (g_nand_nop == 1)
+	{ /* check if block is empty first, may not have to erase. must do this at the time of erasing a block
+	and cannot put this in jffs2_check_nand_cleanmarker since that routine is called often, even when not
+	attempting to erase the block and even in non writeable JFFS2 partition. Do this only for NOP=1 device since
+	device is not allowed to have write to spare area only (JFFS2 clean marker inserted after erase) and thus
+	there will be an erase attempt at every boot when JFFS2 checks for erased blocks and the missing clean
+	marker. Do not do erase check for NOP > 1 devices since they are allowed to have a JFFS2 clean marker
+	inserted and by not doing the erase check this will save time */
+		struct mtd_oob_ops ops;
+		loff_t page_offset;
+		int i, dirty = 0;
+		unsigned char buf[c->mtd->oobsize];
+
+		for (page_offset = 0; !dirty && (page_offset < c->mtd->erasesize); page_offset += c->mtd->writesize)
+		{ // check to see that ECC is empty to determine if page is erased
+			ops.mode = MTD_OPS_RAW;
+			ops.ooblen = c->mtd->oobsize;
+			ops.oobbuf = buf;
+			ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+			ops.datbuf = NULL;
+
+			i = mtd_read_oob(c->mtd, jeb->offset + page_offset, &ops);
+
+			if (i || ops.oobretlen != ops.ooblen)
+				dirty = 1;
+
+			for (i = 0; !dirty && (i < c->mtd->oobsize); i++)
+				if (buf[i] != 0xFF)
+					dirty = 1;
+		}
+		if (!dirty)
+		{
+			instr->state = MTD_ERASE_DONE;
+			jffs2_erase_callback(instr);
+			return;
+		}
+	}
+#endif
 
 	ret = mtd_erase(c->mtd, instr);
 	if (!ret)
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index db3889ba8818dd473f37ba3b9e0b888e48eef708..d398e1efb895601dff83d0b484057e0600eac92d 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -138,33 +138,70 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode ri;
+	uint32_t alloc_len = 0;
+#endif
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
 	int ret = 0;
 
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	jffs2_dbg(1, "%s()\n", __func__);
+
+	if (pageofs > inode->i_size) {
+		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&f->sem);
+#endif
 	pg = grab_cache_page_write_begin(mapping, index, flags);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	if (!pg)
+#else
+	if (!pg) {
+		if (alloc_len)
+			jffs2_complete_reservation(c);
+		mutex_unlock(&f->sem);
+#endif
 		return -ENOMEM;
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	}
+#endif
 	*pagep = pg;
 
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	jffs2_dbg(1, "%s()\n", __func__);
 
 	if (pageofs > inode->i_size) {
+#else
+	if (alloc_len) {
+#endif
 		/* Make new hole frag from old EOF to new page */
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 		struct jffs2_raw_inode ri;
+#endif
 		struct jffs2_full_dnode *fn;
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		uint32_t alloc_len;
+#endif
 
 		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
 			  (unsigned int)inode->i_size, pageofs);
 
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
 					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 		if (ret)
 			goto out_page;
 
 		mutex_lock(&f->sem);
+#endif
 		memset(&ri, 0, sizeof(ri));
 
 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -191,7 +228,9 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 		if (IS_ERR(fn)) {
 			ret = PTR_ERR(fn);
 			jffs2_complete_reservation(c);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 			mutex_unlock(&f->sem);
+#endif
 			goto out_page;
 		}
 		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -206,12 +245,16 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 			jffs2_mark_node_obsolete(c, fn->raw);
 			jffs2_free_full_dnode(fn);
 			jffs2_complete_reservation(c);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 			mutex_unlock(&f->sem);
+#endif
 			goto out_page;
 		}
 		jffs2_complete_reservation(c);
 		inode->i_size = pageofs;
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		mutex_unlock(&f->sem);
+#endif
 	}
 
 	/*
@@ -220,18 +263,28 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	 * case of a short-copy.
 	 */
 	if (!PageUptodate(pg)) {
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		mutex_lock(&f->sem);
+#endif
 		ret = jffs2_do_readpage_nolock(inode, pg);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		mutex_unlock(&f->sem);
+#endif
 		if (ret)
 			goto out_page;
 	}
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	mutex_unlock(&f->sem);
+#endif
 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
 	return ret;
 
 out_page:
 	unlock_page(pg);
 	page_cache_release(pg);
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	mutex_unlock(&f->sem);
+#endif
 	return ret;
 }
 
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5a2dec2b064c945aba23acac93dcee2820019d68..271df2281c37c0025d4ed5a11270bb2d38d5c076 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -903,6 +903,11 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
 
 		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
 
+#if defined(CONFIG_BCM_KF_JFFS)
+			if (!raw)
+				break;
+#endif
+
 			cond_resched();
 
 			/* We only care about obsolete ones */
@@ -923,7 +928,20 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
 
 			/* This is an obsolete node belonging to the same directory, and it's of the right
 			   length. We need to take a closer look...*/
+ #if defined(CONFIG_BCM_KF_JFFS)
+            /* The lock, erase_free_sem, needs to be unlocked here in order to prevent a possible
+             * deadlock. Without doing this, the following condition can occur.
+             * thread 1: brcmnand_erase => brcmnand_get_device (gets and holds chip_lock) =>
+             *           jffs2_erase_pending_blocks (blocks trying to get erase_free_sem)
+             * thread 2: jffs2_garbage_collect_deletion_dirent (gets and holds erase_free_sem) =>
+             *           brcmnand_get_device (blocks trying to get chip_lock)
+             */
+			mutex_unlock(&c->erase_free_sem);
+			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
+			mutex_lock(&c->erase_free_sem);
+#else
 			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
+#endif
 			if (ret) {
 				pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
 					__func__, ret, ref_offset(raw));
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 55a0c1dceadfddcf990b8fdbcfec015fc75fab32..44dca1f041c5cbc2b057e3eb7e13840c3edd25d7 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -126,6 +126,10 @@ struct jffs2_sb_info {
 	struct jffs2_inodirty *wbuf_inodes;
 	struct rw_semaphore wbuf_sem;	/* Protects the write buffer */
 
+	struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+	int wbuf_queued;                /* non-zero delayed work is queued */
+	spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and wbuf_queued */
+
 	unsigned char *oobbuf;
 	int oobavail; /* How many bytes are available for JFFS2 in OOB */
 #endif
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index e4619b00f7c5dec65669f0b1dd9acdf135392185..1705ac3fa3696eca69df06b7cfc66cf524771527 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -202,6 +202,10 @@ struct jffs2_inode_cache {
 #define INOCACHE_HASHSIZE_MIN 128
 #define INOCACHE_HASHSIZE_MAX 1024
 
+#if defined(CONFIG_BCM_KF_JFFS)
+#define INO_FLAGS_COMPR_NONE		0x80
+#endif
+
 #define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size)
 
 /*
@@ -231,7 +235,7 @@ struct jffs2_tmp_dnode_info
 	uint32_t version;
 	uint32_t data_crc;
 	uint32_t partial_crc;
-	uint16_t csize;
+	uint32_t csize;
 	uint16_t overlapped;
 };
 
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 6784d1e7a7eb3440b7e7707a4659f79e8cec7433..052dda7cf22d82ce778e211d0bde75c3a3cbb9b1 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -139,6 +139,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 					spin_unlock(&c->erase_completion_lock);
 
 					schedule();
+					remove_wait_queue(&c->erase_wait, &wait);
 				} else
 					spin_unlock(&c->erase_completion_lock);
 			} else if (ret)
@@ -169,20 +170,25 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
 			   uint32_t *len, uint32_t sumsize)
 {
-	int ret = -EAGAIN;
+	int ret;
 	minsize = PAD(minsize);
 
 	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
 
-	spin_lock(&c->erase_completion_lock);
-	while(ret == -EAGAIN) {
+	while (true) {
+		spin_lock(&c->erase_completion_lock);
 		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
 		if (ret) {
 			jffs2_dbg(1, "%s(): looping, ret is %d\n",
 				  __func__, ret);
 		}
+		spin_unlock(&c->erase_completion_lock);
+
+		if (ret == -EAGAIN)
+			cond_resched();
+		else
+			break;
 	}
-	spin_unlock(&c->erase_completion_lock);
 	if (!ret)
 		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
 
@@ -375,14 +381,23 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 			spin_unlock(&c->erase_completion_lock);
 
 			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 			if (ret)
 				return ret;
+#else
+
+#endif
 			/* Just lock it again and continue. Nothing much can change because
 			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
 			   we hold c->erase_completion_lock in the majority of this function...
 			   but that's a question for another (more caffeine-rich) day. */
 			spin_lock(&c->erase_completion_lock);
 
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+			if (ret)
+				return ret;
+
+#endif
 			waste = jeb->free_size;
 			jffs2_link_node_ref(c, jeb,
 					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 1cd3aec9d9ae282dd31226d0717aaf69a55f414d..dbc1d4b28f8e48ae5716f7f2e7da98c25cd597f0 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 #define jffs2_ubivol(c) (0)
 #define jffs2_ubivol_setup(c) (0)
 #define jffs2_ubivol_cleanup(c) do {} while (0)
+#define jffs2_dirty_trigger(c) do {} while (0)
 
 #else /* NAND and/or ECC'd NOR support present */
 
@@ -108,6 +109,8 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 
 #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
 
+#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
 #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
 
 /* wbuf.c */
@@ -135,14 +138,10 @@ void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
 #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+void jffs2_dirty_trigger(struct jffs2_sb_info *c);
 
 #endif /* WRITEBUFFER */
 
-static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
-{
-	OFNI_BS_2SFFJ(c)->s_dirt = 1;
-}
-
 /* background.c */
 int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
 void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index dc0437e8476322aaff40dc01737dcc2cabdc6976..c4dbc7dd847bb675b987e388f2c3979ac4d41c61 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1329,6 +1329,12 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
 	if (f->inocache->state == INO_STATE_READING)
 		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
 
+#if defined(CONFIG_BCM_KF_JFFS)
+    /* Set a "not compressed" flag so the inode does not get compressed when moved. */
+    if( latest_node->compr == JFFS2_COMPR_NONE )
+        f->inocache->flags |= INO_FLAGS_COMPR_NONE;
+#endif
+
 	return 0;
 }
 
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7654e87b042869ef43aff269a10e88a4088d59c3..59347b5393875cca3067bb4d114806fadcd590cb 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -148,8 +148,11 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 		/* reset summary info for next eraseblock scan */
 		jffs2_sum_reset_collected(s);
 
-		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
-						buf_size, s);
+		if (c->flags & (1 << 7))
+			ret = BLK_STATE_ALLFF;
+		else
+			ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+							buf_size, s);
 
 		if (ret < 0)
 			goto out;
@@ -556,6 +559,17 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
 			return err;
 	}
 
+	if ((buf[0] == 0xde) &&
+		(buf[1] == 0xad) &&
+		(buf[2] == 0xc0) &&
+		(buf[3] == 0xde)) {
+		/* end of filesystem. erase everything after this point */
+		printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+		c->flags |= (1 << 7);
+
+		return BLK_STATE_ALLFF;
+	}
+
 	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
 	ofs = 0;
 	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f9916f312bd81e3590fde1c92a025458cb64ab11..c452f66af317d326e2269faac4c4860c4378cb5f 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -63,21 +63,6 @@ static void jffs2_i_init_once(void *foo)
 	inode_init_once(&f->vfs_inode);
 }
 
-static void jffs2_write_super(struct super_block *sb)
-{
-	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-
-	lock_super(sb);
-	sb->s_dirt = 0;
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		jffs2_dbg(1, "%s()\n", __func__);
-		jffs2_flush_wbuf_gc(c, 0);
-	}
-
-	unlock_super(sb);
-}
-
 static const char *jffs2_compr_name(unsigned int compr)
 {
 	switch (compr) {
@@ -113,7 +98,9 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-	jffs2_write_super(sb);
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+	cancel_delayed_work_sync(&c->wbuf_dwork);
+#endif
 
 	mutex_lock(&c->alloc_sem);
 	jffs2_flush_wbuf_pad(c);
@@ -251,7 +238,6 @@ static const struct super_operations jffs2_super_operations =
 	.alloc_inode =	jffs2_alloc_inode,
 	.destroy_inode =jffs2_destroy_inode,
 	.put_super =	jffs2_put_super,
-	.write_super =	jffs2_write_super,
 	.statfs =	jffs2_statfs,
 	.remount_fs =	jffs2_remount_fs,
 	.evict_inode =	jffs2_evict_inode,
@@ -319,9 +305,6 @@ static void jffs2_put_super (struct super_block *sb)
 
 	jffs2_dbg(2, "%s()\n", __func__);
 
-	if (sb->s_dirt)
-		jffs2_write_super(sb);
-
 	mutex_lock(&c->alloc_sem);
 	jffs2_flush_wbuf_pad(c);
 	mutex_unlock(&c->alloc_sem);
@@ -380,7 +363,34 @@ static int __init init_jffs2_fs(void)
 #ifdef CONFIG_JFFS2_SUMMARY
 	       " (SUMMARY) "
 #endif
-	       " © 2001-2006 Red Hat, Inc.\n");
+#ifdef CONFIG_JFFS2_ZLIB
+	       " (ZLIB)"
+#endif
+#ifdef CONFIG_JFFS2_LZO
+	       " (LZO)"
+#endif
+#ifdef CONFIG_JFFS2_LZMA
+	       " (LZMA)"
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+	       " (RTIME)"
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+	       " (RUBIN)"
+#endif
+#ifdef  CONFIG_JFFS2_CMODE_NONE
+	       " (CMODE_NONE)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_PRIORITY
+	       " (CMODE_PRIORITY)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_SIZE
+	       " (CMODE_SIZE)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
+	       " (CMODE_FAVOURLZO)"
+#endif
+	       " (c) 2001-2006 Red Hat, Inc.\n");
 
 	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
 					     sizeof(struct jffs2_inode_info),
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 74d9be19df3f1fff1d7defdc7824c90240a302f6..2eb191574922d5775c4afffc9d34ca7867365ebf 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -20,6 +20,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
+#include <linux/writeback.h>
 
 #include "nodelist.h"
 
@@ -85,7 +86,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
 {
 	struct jffs2_inodirty *new;
 
-	/* Mark the superblock dirty so that kupdated will flush... */
+	/* Schedule delayed write-buffer write-out */
 	jffs2_dirty_trigger(c);
 
 	if (jffs2_wbuf_pending_for_ino(c, ino))
@@ -1043,10 +1044,18 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
 	ops.datbuf = NULL;
 
 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
+#if 0 // !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	if (ret || ops.oobretlen != ops.ooblen) {
+#else
+	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+#endif
 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
+#if 0 // !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		if (!ret)
+#else
+		if (!ret || mtd_is_bitflip(ret))
+#endif
 			ret = -EIO;
 		return ret;
 	}
@@ -1085,10 +1094,18 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
 	ops.datbuf = NULL;
 
 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
+#if 0 // !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 	if (ret || ops.oobretlen != ops.ooblen) {
+#else
+	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+#endif
 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
+#if 0 // !defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 		if (!ret)
+#else
+		if (!ret || mtd_is_bitflip(ret))
+#endif
 			ret = -EIO;
 		return ret;
 	}
@@ -1148,6 +1165,47 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
 	return 1;
 }
 
+static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+
+	dwork = container_of(work, struct delayed_work, work);
+	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
+}
+
+static void delayed_wbuf_sync(struct work_struct *work)
+{
+	struct jffs2_sb_info *c = work_to_sb(work);
+	struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+	spin_lock(&c->wbuf_dwork_lock);
+	c->wbuf_queued = 0;
+	spin_unlock(&c->wbuf_dwork_lock);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		jffs2_dbg(1, "%s()\n", __func__);
+		jffs2_flush_wbuf_gc(c, 0);
+	}
+}
+
+void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+{
+	struct super_block *sb = OFNI_BS_2SFFJ(c);
+	unsigned long delay;
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	spin_lock(&c->wbuf_dwork_lock);
+	if (!c->wbuf_queued) {
+		jffs2_dbg(1, "%s()\n", __func__);
+		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+		queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+		c->wbuf_queued = 1;
+	}
+	spin_unlock(&c->wbuf_dwork_lock);
+}
+
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 {
 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
@@ -1169,6 +1227,8 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 
 	/* Initialise write buffer */
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 	c->wbuf_pagesize = c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1207,8 +1267,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
 
 	/* Initialize write buffer */
 	init_rwsem(&c->wbuf_sem);
-
-
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 	c->wbuf_pagesize =  c->mtd->erasesize;
 
 	/* Find a suitable c->sector_size
@@ -1267,6 +1327,9 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
 
 	/* Initialize write buffer */
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
 	c->wbuf_pagesize = c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1299,6 +1362,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
 		return 0;
 
 	init_rwsem(&c->wbuf_sem);
+	spin_lock_init(&c->wbuf_dwork_lock);
+	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
 	c->wbuf_pagesize =  c->mtd->writesize;
 	c->wbuf_ofs = 0xFFFFFFFF;
diff --git a/fs/namespace.c b/fs/namespace.c
index 02f02eabfb532e7c1752cbd6817f7a56171c3081..0cd18df95fdfb7286be6150a67e2de1559c850bb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1330,6 +1330,24 @@ void drop_collected_mounts(struct vfsmount *mnt)
 	release_mounts(&umount_list);
 }
 
+struct vfsmount *clone_private_mount(struct path *path)
+{
+	struct mount *old_mnt = real_mount(path->mnt);
+	struct mount *new_mnt;
+
+	if (IS_MNT_UNBINDABLE(old_mnt))
+		return ERR_PTR(-EINVAL);
+
+	down_read(&namespace_sem);
+	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+	up_read(&namespace_sem);
+	if (!new_mnt)
+		return ERR_PTR(-ENOMEM);
+
+	return &new_mnt->mnt;
+}
+EXPORT_SYMBOL_GPL(clone_private_mount);
+
 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
 		   struct vfsmount *root)
 {
diff --git a/fs/open.c b/fs/open.c
index cf1d34fc5e690265f621b12b09a9ab879edf0499..8dab3e00c2a5f9060ff5b4be4d73ff75cb232827 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -644,24 +644,24 @@ static inline int __get_file_write_access(struct inode *inode,
 	return error;
 }
 
-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-					struct file *f,
-					int (*open)(struct inode *, struct file *),
-					const struct cred *cred)
+static struct file *__dentry_open(struct path *path, struct file *f,
+				  int (*open)(struct inode *, struct file *),
+				  const struct cred *cred)
 {
 	static const struct file_operations empty_fops = {};
 	struct inode *inode;
 	int error;
 
+	path_get(path);
 	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
 				FMODE_PREAD | FMODE_PWRITE;
 
 	if (unlikely(f->f_flags & O_PATH))
 		f->f_mode = FMODE_PATH;
 
-	inode = dentry->d_inode;
+	inode = path->dentry->d_inode;
 	if (f->f_mode & FMODE_WRITE) {
-		error = __get_file_write_access(inode, mnt);
+		error = __get_file_write_access(inode, path->mnt);
 		if (error)
 			goto cleanup_file;
 		if (!special_file(inode->i_mode))
@@ -669,8 +669,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 	}
 
 	f->f_mapping = inode->i_mapping;
-	f->f_path.dentry = dentry;
-	f->f_path.mnt = mnt;
+	f->f_path = *path;
 	f->f_pos = 0;
 	file_sb_list_add(f, inode->i_sb);
 
@@ -727,7 +726,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 			 * here, so just reset the state.
 			 */
 			file_reset_write(f);
-			mnt_drop_write(mnt);
+			mnt_drop_write(path->mnt);
 		}
 	}
 	file_sb_list_del(f);
@@ -735,8 +734,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 	f->f_path.mnt = NULL;
 cleanup_file:
 	put_filp(f);
-	dput(dentry);
-	mntput(mnt);
+	path_put(path);
 	return ERR_PTR(error);
 }
 
@@ -762,14 +760,14 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
 		int (*open)(struct inode *, struct file *))
 {
+	struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
 	const struct cred *cred = current_cred();
 
 	if (IS_ERR(nd->intent.open.file))
 		goto out;
 	if (IS_ERR(dentry))
 		goto out_err;
-	nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
-					     nd->intent.open.file,
+	nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
 					     open, cred);
 out:
 	return nd->intent.open.file;
@@ -797,11 +795,9 @@ struct file *nameidata_to_filp(struct nameidata *nd)
 	nd->intent.open.file = NULL;
 
 	/* Has the filesystem initialised the file for us? */
-	if (filp->f_path.dentry == NULL) {
-		path_get(&nd->path);
-		filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
-				     NULL, cred);
-	}
+	if (filp->f_path.dentry == NULL)
+		filp = vfs_open(&nd->path, filp, cred);
+
 	return filp;
 }
 
@@ -812,27 +808,48 @@ struct file *nameidata_to_filp(struct nameidata *nd)
 struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
 			 const struct cred *cred)
 {
-	int error;
 	struct file *f;
+	struct file *ret;
+	struct path path = { .dentry = dentry, .mnt = mnt };
 
 	validate_creds(cred);
 
 	/* We must always pass in a valid mount pointer. */
 	BUG_ON(!mnt);
 
-	error = -ENFILE;
+	ret = ERR_PTR(-ENFILE);
 	f = get_empty_filp();
-	if (f == NULL) {
-		dput(dentry);
-		mntput(mnt);
-		return ERR_PTR(error);
+	if (f != NULL) {
+		f->f_flags = flags;
+		ret = vfs_open(&path, f, cred);
 	}
+	path_put(&path);
 
-	f->f_flags = flags;
-	return __dentry_open(dentry, mnt, f, NULL, cred);
+	return ret;
 }
 EXPORT_SYMBOL(dentry_open);
 
+/**
+ * vfs_open - open the file at the given path
+ * @path: path to open
+ * @filp: newly allocated file with f_flags initialized
+ * @cred: credentials to use
+ *
+ * Open the file.  If successful, the returned file will have acquired
+ * an additional reference for path.
+ */
+struct file *vfs_open(struct path *path, struct file *filp,
+		      const struct cred *cred)
+{
+	struct inode *inode = path->dentry->d_inode;
+
+	if (inode->i_op->open)
+		return inode->i_op->open(path->dentry, filp, cred);
+	else
+		return __dentry_open(path, filp, NULL, cred);
+}
+EXPORT_SYMBOL(vfs_open);
+
 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
 {
 	struct fdtable *fdt = files_fdtable(files);
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..c4517da01fa929d4e472c8f2a8eeb811ad7c729c
--- /dev/null
+++ b/fs/overlayfs/Kconfig
@@ -0,0 +1,4 @@
+config OVERLAYFS_FS
+	tristate "Overlay filesystem support"
+	help
+	  Add support for overlay filesystem.
diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..8f91889480d0515011c3ebe64beef3660ea82b28
--- /dev/null
+++ b/fs/overlayfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the overlay filesystem.
+#
+
+obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
+
+overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
new file mode 100644
index 0000000000000000000000000000000000000000..87dbeee0a14e99af647f6ea05387812b7837f0cd
--- /dev/null
+++ b/fs/overlayfs/copy_up.c
@@ -0,0 +1,385 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/splice.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include "overlayfs.h"
+
+#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
+
+static int ovl_copy_up_xattr(struct dentry *old, struct dentry *new)
+{
+	ssize_t list_size, size;
+	char *buf, *name, *value;
+	int error;
+
+	if (!old->d_inode->i_op->getxattr ||
+	    !new->d_inode->i_op->getxattr)
+		return 0;
+
+	list_size = vfs_listxattr(old, NULL, 0);
+	if (list_size <= 0) {
+		if (list_size == -EOPNOTSUPP)
+			return 0;
+		return list_size;
+	}
+
+	buf = kzalloc(list_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	error = -ENOMEM;
+	value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
+	if (!value)
+		goto out;
+
+	list_size = vfs_listxattr(old, buf, list_size);
+	if (list_size <= 0) {
+		error = list_size;
+		goto out_free_value;
+	}
+
+	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+		size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
+		if (size <= 0) {
+			error = size;
+			goto out_free_value;
+		}
+		error = vfs_setxattr(new, name, value, size, 0);
+		if (error)
+			goto out_free_value;
+	}
+
+out_free_value:
+	kfree(value);
+out:
+	kfree(buf);
+	return error;
+}
+
+static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
+{
+	struct file *old_file;
+	struct file *new_file;
+	int error = 0;
+
+	if (len == 0)
+		return 0;
+
+	old_file = ovl_path_open(old, O_RDONLY);
+	if (IS_ERR(old_file))
+		return PTR_ERR(old_file);
+
+	new_file = ovl_path_open(new, O_WRONLY);
+	if (IS_ERR(new_file)) {
+		error = PTR_ERR(new_file);
+		goto out_fput;
+	}
+
+	/* FIXME: copy up sparse files efficiently */
+	while (len) {
+		loff_t offset = new_file->f_pos;
+		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
+		long bytes;
+
+		if (len < this_len)
+			this_len = len;
+
+		if (signal_pending_state(TASK_KILLABLE, current)) {
+			error = -EINTR;
+			break;
+		}
+
+		bytes = do_splice_direct(old_file, &offset, new_file, this_len,
+				 SPLICE_F_MOVE);
+		if (bytes <= 0) {
+			error = bytes;
+			break;
+		}
+
+		len -= bytes;
+	}
+
+	fput(new_file);
+out_fput:
+	fput(old_file);
+	return error;
+}
+
+static char *ovl_read_symlink(struct dentry *realdentry)
+{
+	int res;
+	char *buf;
+	struct inode *inode = realdentry->d_inode;
+	mm_segment_t old_fs;
+
+	res = -EINVAL;
+	if (!inode->i_op->readlink)
+		goto err;
+
+	res = -ENOMEM;
+	buf = (char *) __get_free_page(GFP_KERNEL);
+	if (!buf)
+		goto err;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	/* The cast to a user pointer is valid due to the set_fs() */
+	res = inode->i_op->readlink(realdentry,
+				    (char __user *)buf, PAGE_SIZE - 1);
+	set_fs(old_fs);
+	if (res < 0) {
+		free_page((unsigned long) buf);
+		goto err;
+	}
+	buf[res] = '\0';
+
+	return buf;
+
+err:
+	return ERR_PTR(res);
+}
+
+static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
+{
+	struct iattr attr = {
+		.ia_valid =
+		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
+		.ia_atime = stat->atime,
+		.ia_mtime = stat->mtime,
+	};
+
+	return notify_change(upperdentry, &attr);
+}
+
+static int ovl_set_mode(struct dentry *upperdentry, umode_t mode)
+{
+	struct iattr attr = {
+		.ia_valid = ATTR_MODE,
+		.ia_mode = mode,
+	};
+
+	return notify_change(upperdentry, &attr);
+}
+
+static int ovl_copy_up_locked(struct dentry *upperdir, struct dentry *dentry,
+			      struct path *lowerpath, struct kstat *stat,
+			      const char *link)
+{
+	int err;
+	struct path newpath;
+	umode_t mode = stat->mode;
+
+	/* Can't properly set mode on creation because of the umask */
+	stat->mode &= S_IFMT;
+
+	ovl_path_upper(dentry, &newpath);
+	WARN_ON(newpath.dentry);
+	newpath.dentry = ovl_upper_create(upperdir, dentry, stat, link);
+	if (IS_ERR(newpath.dentry))
+		return PTR_ERR(newpath.dentry);
+
+	if (S_ISREG(stat->mode)) {
+		err = ovl_copy_up_data(lowerpath, &newpath, stat->size);
+		if (err)
+			goto err_remove;
+	}
+
+	err = ovl_copy_up_xattr(lowerpath->dentry, newpath.dentry);
+	if (err)
+		goto err_remove;
+
+	mutex_lock(&newpath.dentry->d_inode->i_mutex);
+	if (!S_ISLNK(stat->mode))
+		err = ovl_set_mode(newpath.dentry, mode);
+	if (!err)
+		err = ovl_set_timestamps(newpath.dentry, stat);
+	mutex_unlock(&newpath.dentry->d_inode->i_mutex);
+	if (err)
+		goto err_remove;
+
+	ovl_dentry_update(dentry, newpath.dentry);
+
+	/*
+	 * Easiest way to get rid of the lower dentry reference is to
+	 * drop this dentry.  This is neither needed nor possible for
+	 * directories.
+	 */
+	if (!S_ISDIR(stat->mode))
+		d_drop(dentry);
+
+	return 0;
+
+err_remove:
+	if (S_ISDIR(stat->mode))
+		vfs_rmdir(upperdir->d_inode, newpath.dentry);
+	else
+		vfs_unlink(upperdir->d_inode, newpath.dentry);
+
+	dput(newpath.dentry);
+
+	return err;
+}
+
+/*
+ * Copy up a single dentry
+ *
+ * Directory renames are only allowed on "pure upper" (already created on
+ * upper filesystem, never copied up).  Directories which are on lower or
+ * are merged may not be renamed.  For these -EXDEV is returned and
+ * userspace has to deal with it.  This means, when copying up a
+ * directory we can rely on it and ancestors being stable.
+ *
+ * Non-directory renames start with copy up of source if necessary.  The
+ * actual rename will only proceed once the copy up was successful.  Copy
+ * up uses upper parent i_mutex for exclusion.  Since rename can change
+ * d_parent it is possible that the copy up will lock the old parent.  At
+ * that point the file will have already been copied up anyway.
+ */
+static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+			   struct path *lowerpath, struct kstat *stat)
+{
+	int err;
+	struct kstat pstat;
+	struct path parentpath;
+	struct dentry *upperdir;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+	char *link = NULL;
+
+	ovl_path_upper(parent, &parentpath);
+	upperdir = parentpath.dentry;
+
+	err = vfs_getattr(parentpath.mnt, parentpath.dentry, &pstat);
+	if (err)
+		return err;
+
+	if (S_ISLNK(stat->mode)) {
+		link = ovl_read_symlink(lowerpath->dentry);
+		if (IS_ERR(link))
+			return PTR_ERR(link);
+	}
+
+	err = -ENOMEM;
+	override_cred = prepare_creds();
+	if (!override_cred)
+		goto out_free_link;
+
+	override_cred->fsuid = stat->uid;
+	override_cred->fsgid = stat->gid;
+	/*
+	 * CAP_SYS_ADMIN for copying up extended attributes
+	 * CAP_DAC_OVERRIDE for create
+	 * CAP_FOWNER for chmod, timestamp update
+	 * CAP_FSETID for chmod
+	 * CAP_MKNOD for mknod
+	 */
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+	cap_raise(override_cred->cap_effective, CAP_FOWNER);
+	cap_raise(override_cred->cap_effective, CAP_FSETID);
+	cap_raise(override_cred->cap_effective, CAP_MKNOD);
+	old_cred = override_creds(override_cred);
+
+	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
+	if (ovl_path_type(dentry) != OVL_PATH_LOWER) {
+		err = 0;
+	} else {
+		err = ovl_copy_up_locked(upperdir, dentry, lowerpath,
+					 stat, link);
+		if (!err) {
+			/* Restore timestamps on parent (best effort) */
+			ovl_set_timestamps(upperdir, &pstat);
+		}
+	}
+
+	mutex_unlock(&upperdir->d_inode->i_mutex);
+
+	revert_creds(old_cred);
+	put_cred(override_cred);
+
+out_free_link:
+	if (link)
+		free_page((unsigned long) link);
+
+	return err;
+}
+
+int ovl_copy_up(struct dentry *dentry)
+{
+	int err;
+
+	err = 0;
+	while (!err) {
+		struct dentry *next;
+		struct dentry *parent;
+		struct path lowerpath;
+		struct kstat stat;
+		enum ovl_path_type type = ovl_path_type(dentry);
+
+		if (type != OVL_PATH_LOWER)
+			break;
+
+		next = dget(dentry);
+		/* find the topmost dentry not yet copied up */
+		for (;;) {
+			parent = dget_parent(next);
+
+			type = ovl_path_type(parent);
+			if (type != OVL_PATH_LOWER)
+				break;
+
+			dput(next);
+			next = parent;
+		}
+
+		ovl_path_lower(next, &lowerpath);
+		err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
+		if (!err)
+			err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
+
+		dput(parent);
+		dput(next);
+	}
+
+	return err;
+}
+
+/* Optimize by not copying up the file first and truncating later */
+int ovl_copy_up_truncate(struct dentry *dentry, loff_t size)
+{
+	int err;
+	struct kstat stat;
+	struct path lowerpath;
+	struct dentry *parent = dget_parent(dentry);
+
+	err = ovl_copy_up(parent);
+	if (err)
+		goto out_dput_parent;
+
+	ovl_path_lower(dentry, &lowerpath);
+	err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
+	if (err)
+		goto out_dput_parent;
+
+	if (size < stat.size)
+		stat.size = size;
+
+	err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
+
+out_dput_parent:
+	dput(parent);
+	return err;
+}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
new file mode 100644
index 0000000000000000000000000000000000000000..00aa6d99cb666d5c4437602337eb8dfbd0e22713
--- /dev/null
+++ b/fs/overlayfs/dir.c
@@ -0,0 +1,597 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include <linux/cred.h>
+#include "overlayfs.h"
+
+static const char *ovl_whiteout_symlink = "(overlay-whiteout)";
+
+static int ovl_whiteout(struct dentry *upperdir, struct dentry *dentry)
+{
+	int err;
+	struct dentry *newdentry;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+
+	/* FIXME: recheck lower dentry to see if whiteout is really needed */
+
+	err = -ENOMEM;
+	override_cred = prepare_creds();
+	if (!override_cred)
+		goto out;
+
+	/*
+	 * CAP_SYS_ADMIN for setxattr
+	 * CAP_DAC_OVERRIDE for symlink creation
+	 * CAP_FOWNER for unlink in sticky directory
+	 */
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+	cap_raise(override_cred->cap_effective, CAP_FOWNER);
+	override_cred->fsuid = 0;
+	override_cred->fsgid = 0;
+	old_cred = override_creds(override_cred);
+
+	newdentry = lookup_one_len(dentry->d_name.name, upperdir,
+				   dentry->d_name.len);
+	err = PTR_ERR(newdentry);
+	if (IS_ERR(newdentry))
+		goto out_put_cred;
+
+	/* Has just been removed within the same locked region */
+	WARN_ON(newdentry->d_inode);
+
+	err = vfs_symlink(upperdir->d_inode, newdentry, ovl_whiteout_symlink);
+	if (err)
+		goto out_dput;
+
+	ovl_dentry_version_inc(dentry->d_parent);
+
+	err = vfs_setxattr(newdentry, ovl_whiteout_xattr, "y", 1, 0);
+	if (err)
+		vfs_unlink(upperdir->d_inode, newdentry);
+
+out_dput:
+	dput(newdentry);
+out_put_cred:
+	revert_creds(old_cred);
+	put_cred(override_cred);
+out:
+	if (err) {
+		/*
+		 * There's no way to recover from failure to whiteout.
+		 * What should we do?  Log a big fat error and... ?
+		 */
+		printk(KERN_ERR "overlayfs: ERROR - failed to whiteout '%s'\n",
+		       dentry->d_name.name);
+	}
+
+	return err;
+}
+
+static struct dentry *ovl_lookup_create(struct dentry *upperdir,
+					struct dentry *template)
+{
+	int err;
+	struct dentry *newdentry;
+	struct qstr *name = &template->d_name;
+
+	newdentry = lookup_one_len(name->name, upperdir, name->len);
+	if (IS_ERR(newdentry))
+		return newdentry;
+
+	if (newdentry->d_inode) {
+		const struct cred *old_cred;
+		struct cred *override_cred;
+
+		/* No need to check whiteout if lower parent is non-existent */
+		err = -EEXIST;
+		if (!ovl_dentry_lower(template->d_parent))
+			goto out_dput;
+
+		if (!S_ISLNK(newdentry->d_inode->i_mode))
+			goto out_dput;
+
+		err = -ENOMEM;
+		override_cred = prepare_creds();
+		if (!override_cred)
+			goto out_dput;
+
+		/*
+		 * CAP_SYS_ADMIN for getxattr
+		 * CAP_FOWNER for unlink in sticky directory
+		 */
+		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+		cap_raise(override_cred->cap_effective, CAP_FOWNER);
+		old_cred = override_creds(override_cred);
+
+		err = -EEXIST;
+		if (ovl_is_whiteout(newdentry))
+			err = vfs_unlink(upperdir->d_inode, newdentry);
+
+		revert_creds(old_cred);
+		put_cred(override_cred);
+		if (err)
+			goto out_dput;
+
+		dput(newdentry);
+		newdentry = lookup_one_len(name->name, upperdir, name->len);
+		if (IS_ERR(newdentry)) {
+			ovl_whiteout(upperdir, template);
+			return newdentry;
+		}
+
+		/*
+		 * Whiteout has just been successfully removed, parent
+		 * i_mutex is still held, there's no way the lookup
+		 * could return positive.
+		 */
+		WARN_ON(newdentry->d_inode);
+	}
+
+	return newdentry;
+
+out_dput:
+	dput(newdentry);
+	return ERR_PTR(err);
+}
+
+struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
+				struct kstat *stat, const char *link)
+{
+	int err;
+	struct dentry *newdentry;
+	struct inode *dir = upperdir->d_inode;
+
+	newdentry = ovl_lookup_create(upperdir, dentry);
+	if (IS_ERR(newdentry))
+		goto out;
+
+	switch (stat->mode & S_IFMT) {
+	case S_IFREG:
+		err = vfs_create(dir, newdentry, stat->mode, NULL);
+		break;
+
+	case S_IFDIR:
+		err = vfs_mkdir(dir, newdentry, stat->mode);
+		break;
+
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
+		err = vfs_mknod(dir, newdentry, stat->mode, stat->rdev);
+		break;
+
+	case S_IFLNK:
+		err = vfs_symlink(dir, newdentry, link);
+		break;
+
+	default:
+		err = -EPERM;
+	}
+	if (err) {
+		if (ovl_dentry_is_opaque(dentry))
+			ovl_whiteout(upperdir, dentry);
+		dput(newdentry);
+		newdentry = ERR_PTR(err);
+	} else if (WARN_ON(!newdentry->d_inode)) {
+		/*
+		 * Not quite sure if non-instantiated dentry is legal or not.
+		 * VFS doesn't seem to care so check and warn here.
+		 */
+		dput(newdentry);
+		newdentry = ERR_PTR(-ENOENT);
+	}
+
+out:
+	return newdentry;
+
+}
+
+static int ovl_set_opaque(struct dentry *upperdentry)
+{
+	int err;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+
+	override_cred = prepare_creds();
+	if (!override_cred)
+		return -ENOMEM;
+
+	/* CAP_SYS_ADMIN for setxattr of "trusted" namespace */
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	old_cred = override_creds(override_cred);
+	err = vfs_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+	revert_creds(old_cred);
+	put_cred(override_cred);
+
+	return err;
+}
+
+static int ovl_remove_opaque(struct dentry *upperdentry)
+{
+	int err;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+
+	override_cred = prepare_creds();
+	if (!override_cred)
+		return -ENOMEM;
+
+	/* CAP_SYS_ADMIN for removexattr of "trusted" namespace */
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	old_cred = override_creds(override_cred);
+	err = vfs_removexattr(upperdentry, ovl_opaque_xattr);
+	revert_creds(old_cred);
+	put_cred(override_cred);
+
+	return err;
+}
+
+static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
+			 struct kstat *stat)
+{
+	int err;
+	enum ovl_path_type type;
+	struct path realpath;
+
+	type = ovl_path_real(dentry, &realpath);
+	err = vfs_getattr(realpath.mnt, realpath.dentry, stat);
+	if (err)
+		return err;
+
+	stat->dev = dentry->d_sb->s_dev;
+	stat->ino = dentry->d_inode->i_ino;
+
+	/*
+	 * It's probably not worth it to count subdirs to get the
+	 * correct link count.  nlink=1 seems to pacify 'find' and
+	 * other utilities.
+	 */
+	if (type == OVL_PATH_MERGE)
+		stat->nlink = 1;
+
+	return 0;
+}
+
+static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
+			     const char *link)
+{
+	int err;
+	struct dentry *newdentry;
+	struct dentry *upperdir;
+	struct inode *inode;
+	struct kstat stat = {
+		.mode = mode,
+		.rdev = rdev,
+	};
+
+	err = -ENOMEM;
+	inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
+	if (!inode)
+		goto out;
+
+	err = ovl_copy_up(dentry->d_parent);
+	if (err)
+		goto out_iput;
+
+	upperdir = ovl_dentry_upper(dentry->d_parent);
+	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
+
+	newdentry = ovl_upper_create(upperdir, dentry, &stat, link);
+	err = PTR_ERR(newdentry);
+	if (IS_ERR(newdentry))
+		goto out_unlock;
+
+	ovl_dentry_version_inc(dentry->d_parent);
+	if (ovl_dentry_is_opaque(dentry) && S_ISDIR(mode)) {
+		err = ovl_set_opaque(newdentry);
+		if (err) {
+			vfs_rmdir(upperdir->d_inode, newdentry);
+			ovl_whiteout(upperdir, dentry);
+			goto out_dput;
+		}
+	}
+	ovl_dentry_update(dentry, newdentry);
+	d_instantiate(dentry, inode);
+	inode = NULL;
+	newdentry = NULL;
+	err = 0;
+
+out_dput:
+	dput(newdentry);
+out_unlock:
+	mutex_unlock(&upperdir->d_inode->i_mutex);
+out_iput:
+	iput(inode);
+out:
+	return err;
+}
+
+static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+			struct nameidata *nd)
+{
+	return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
+}
+
+static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
+}
+
+static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+		     dev_t rdev)
+{
+	return ovl_create_object(dentry, mode, rdev, NULL);
+}
+
+static int ovl_symlink(struct inode *dir, struct dentry *dentry,
+			 const char *link)
+{
+	return ovl_create_object(dentry, S_IFLNK, 0, link);
+}
+
+static int ovl_do_remove(struct dentry *dentry, bool is_dir)
+{
+	int err;
+	enum ovl_path_type type;
+	struct path realpath;
+	struct dentry *upperdir;
+
+	err = ovl_copy_up(dentry->d_parent);
+	if (err)
+		return err;
+
+	upperdir = ovl_dentry_upper(dentry->d_parent);
+	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
+	type = ovl_path_real(dentry, &realpath);
+	if (type != OVL_PATH_LOWER) {
+		err = -ESTALE;
+		if (realpath.dentry->d_parent != upperdir)
+			goto out_d_drop;
+
+		/* FIXME: create whiteout up front and rename to target */
+
+		if (is_dir)
+			err = vfs_rmdir(upperdir->d_inode, realpath.dentry);
+		else
+			err = vfs_unlink(upperdir->d_inode, realpath.dentry);
+		if (err)
+			goto out_d_drop;
+
+		ovl_dentry_version_inc(dentry->d_parent);
+	}
+
+	if (type != OVL_PATH_UPPER || ovl_dentry_is_opaque(dentry))
+		err = ovl_whiteout(upperdir, dentry);
+
+	/*
+	 * Keeping this dentry hashed would mean having to release
+	 * upperpath/lowerpath, which could only be done if we are the
+	 * sole user of this dentry.  Too tricky...  Just unhash for
+	 * now.
+	 */
+out_d_drop:
+	d_drop(dentry);
+	mutex_unlock(&upperdir->d_inode->i_mutex);
+
+	return err;
+}
+
+static int ovl_unlink(struct inode *dir, struct dentry *dentry)
+{
+	return ovl_do_remove(dentry, false);
+}
+
+
+static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int err;
+	enum ovl_path_type type;
+
+	type = ovl_path_type(dentry);
+	if (type != OVL_PATH_UPPER) {
+		err = ovl_check_empty_and_clear(dentry, type);
+		if (err)
+			return err;
+	}
+
+	return ovl_do_remove(dentry, true);
+}
+
+static int ovl_link(struct dentry *old, struct inode *newdir,
+		    struct dentry *new)
+{
+	int err;
+	struct dentry *olddentry;
+	struct dentry *newdentry;
+	struct dentry *upperdir;
+
+	err = ovl_copy_up(old);
+	if (err)
+		goto out;
+
+	err = ovl_copy_up(new->d_parent);
+	if (err)
+		goto out;
+
+	upperdir = ovl_dentry_upper(new->d_parent);
+	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
+	newdentry = ovl_lookup_create(upperdir, new);
+	err = PTR_ERR(newdentry);
+	if (IS_ERR(newdentry))
+		goto out_unlock;
+
+	olddentry = ovl_dentry_upper(old);
+	err = vfs_link(olddentry, upperdir->d_inode, newdentry);
+	if (!err) {
+		if (WARN_ON(!newdentry->d_inode)) {
+			dput(newdentry);
+			err = -ENOENT;
+			goto out_unlock;
+		}
+
+		ovl_dentry_version_inc(new->d_parent);
+		ovl_dentry_update(new, newdentry);
+
+		ihold(old->d_inode);
+		d_instantiate(new, old->d_inode);
+	} else {
+		if (ovl_dentry_is_opaque(new))
+			ovl_whiteout(upperdir, new);
+		dput(newdentry);
+	}
+out_unlock:
+	mutex_unlock(&upperdir->d_inode->i_mutex);
+out:
+	return err;
+
+}
+
+static int ovl_rename(struct inode *olddir, struct dentry *old,
+			struct inode *newdir, struct dentry *new)
+{
+	int err;
+	enum ovl_path_type old_type;
+	enum ovl_path_type new_type;
+	struct dentry *old_upperdir;
+	struct dentry *new_upperdir;
+	struct dentry *olddentry;
+	struct dentry *newdentry;
+	struct dentry *trap;
+	bool old_opaque;
+	bool new_opaque;
+	bool new_create = false;
+	bool is_dir = S_ISDIR(old->d_inode->i_mode);
+
+	/* Don't copy up directory trees */
+	old_type = ovl_path_type(old);
+	if (old_type != OVL_PATH_UPPER && is_dir)
+		return -EXDEV;
+
+	if (new->d_inode) {
+		new_type = ovl_path_type(new);
+
+		if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+			if (ovl_dentry_lower(old)->d_inode ==
+			    ovl_dentry_lower(new)->d_inode)
+				return 0;
+		}
+		if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+			if (ovl_dentry_upper(old)->d_inode ==
+			    ovl_dentry_upper(new)->d_inode)
+				return 0;
+		}
+
+		if (new_type != OVL_PATH_UPPER &&
+		    S_ISDIR(new->d_inode->i_mode)) {
+			err = ovl_check_empty_and_clear(new, new_type);
+			if (err)
+				return err;
+		}
+	} else {
+		new_type = OVL_PATH_UPPER;
+	}
+
+	err = ovl_copy_up(old);
+	if (err)
+		return err;
+
+	err = ovl_copy_up(new->d_parent);
+	if (err)
+		return err;
+
+	old_upperdir = ovl_dentry_upper(old->d_parent);
+	new_upperdir = ovl_dentry_upper(new->d_parent);
+
+	trap = lock_rename(new_upperdir, old_upperdir);
+
+	olddentry = ovl_dentry_upper(old);
+	newdentry = ovl_dentry_upper(new);
+	if (newdentry) {
+		dget(newdentry);
+	} else {
+		new_create = true;
+		newdentry = ovl_lookup_create(new_upperdir, new);
+		err = PTR_ERR(newdentry);
+		if (IS_ERR(newdentry))
+			goto out_unlock;
+	}
+
+	err = -ESTALE;
+	if (olddentry->d_parent != old_upperdir)
+		goto out_dput;
+	if (newdentry->d_parent != new_upperdir)
+		goto out_dput;
+	if (olddentry == trap)
+		goto out_dput;
+	if (newdentry == trap)
+		goto out_dput;
+
+	old_opaque = ovl_dentry_is_opaque(old);
+	new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER;
+
+	if (is_dir && !old_opaque && new_opaque) {
+		err = ovl_set_opaque(olddentry);
+		if (err)
+			goto out_dput;
+	}
+
+	err = vfs_rename(old_upperdir->d_inode, olddentry,
+			 new_upperdir->d_inode, newdentry);
+
+	if (err) {
+		if (new_create && ovl_dentry_is_opaque(new))
+			ovl_whiteout(new_upperdir, new);
+		if (is_dir && !old_opaque && new_opaque)
+			ovl_remove_opaque(olddentry);
+		goto out_dput;
+	}
+
+	if (old_type != OVL_PATH_UPPER || old_opaque)
+		err = ovl_whiteout(old_upperdir, old);
+	if (is_dir && old_opaque && !new_opaque)
+		ovl_remove_opaque(olddentry);
+
+	if (old_opaque != new_opaque)
+		ovl_dentry_set_opaque(old, new_opaque);
+
+	ovl_dentry_version_inc(old->d_parent);
+	ovl_dentry_version_inc(new->d_parent);
+
+out_dput:
+	dput(newdentry);
+out_unlock:
+	unlock_rename(new_upperdir, old_upperdir);
+	return err;
+}
+
+const struct inode_operations ovl_dir_inode_operations = {
+	.lookup		= ovl_lookup,
+	.mkdir		= ovl_mkdir,
+	.symlink	= ovl_symlink,
+	.unlink		= ovl_unlink,
+	.rmdir		= ovl_rmdir,
+	.rename		= ovl_rename,
+	.link		= ovl_link,
+	.setattr	= ovl_setattr,
+	.create		= ovl_create,
+	.mknod		= ovl_mknod,
+	.permission	= ovl_permission,
+	.getattr	= ovl_dir_getattr,
+	.setxattr	= ovl_setxattr,
+	.getxattr	= ovl_getxattr,
+	.listxattr	= ovl_listxattr,
+	.removexattr	= ovl_removexattr,
+};
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
new file mode 100644
index 0000000000000000000000000000000000000000..c220ea745f24ebe2e69ba91ccf9609555ae66c9e
--- /dev/null
+++ b/fs/overlayfs/inode.c
@@ -0,0 +1,384 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include "overlayfs.h"
+
+int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct dentry *upperdentry;
+	int err;
+
+	if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry))
+		err = ovl_copy_up_truncate(dentry, attr->ia_size);
+	else
+		err = ovl_copy_up(dentry);
+	if (err)
+		return err;
+
+	upperdentry = ovl_dentry_upper(dentry);
+
+	if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+		attr->ia_valid &= ~ATTR_MODE;
+
+	mutex_lock(&upperdentry->d_inode->i_mutex);
+	err = notify_change(upperdentry, attr);
+	mutex_unlock(&upperdentry->d_inode->i_mutex);
+
+	return err;
+}
+
+static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
+			 struct kstat *stat)
+{
+	struct path realpath;
+
+	ovl_path_real(dentry, &realpath);
+	return vfs_getattr(realpath.mnt, realpath.dentry, stat);
+}
+
+int ovl_permission(struct inode *inode, int mask)
+{
+	struct ovl_entry *oe;
+	struct dentry *alias = NULL;
+	struct inode *realinode;
+	struct dentry *realdentry;
+	bool is_upper;
+	int err;
+
+	if (S_ISDIR(inode->i_mode)) {
+		oe = inode->i_private;
+	} else if (mask & MAY_NOT_BLOCK) {
+		return -ECHILD;
+	} else {
+		/*
+		 * For non-directories find an alias and get the info
+		 * from there.
+		 */
+		spin_lock(&inode->i_lock);
+		if (WARN_ON(list_empty(&inode->i_dentry))) {
+			spin_unlock(&inode->i_lock);
+			return -ENOENT;
+		}
+		alias = list_entry(inode->i_dentry.next,
+				   struct dentry, d_alias);
+		dget(alias);
+		spin_unlock(&inode->i_lock);
+		oe = alias->d_fsdata;
+	}
+
+	realdentry = ovl_entry_real(oe, &is_upper);
+
+	/* Careful in RCU walk mode */
+	realinode = ACCESS_ONCE(realdentry->d_inode);
+	if (!realinode) {
+		WARN_ON(!(mask & MAY_NOT_BLOCK));
+		err = -ENOENT;
+		goto out_dput;
+	}
+
+	if (mask & MAY_WRITE) {
+		umode_t mode = realinode->i_mode;
+
+		/*
+		 * Writes will always be redirected to upper layer, so
+		 * ignore lower layer being read-only.
+		 *
+		 * If the overlay itself is read-only then proceed
+		 * with the permission check, don't return EROFS.
+		 * This will only happen if this is the lower layer of
+		 * another overlayfs.
+		 *
+		 * If upper fs becomes read-only after the overlay was
+		 * constructed return EROFS to prevent modification of
+		 * upper layer.
+		 */
+		err = -EROFS;
+		if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
+		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
+			goto out_dput;
+
+		/*
+		 * Nobody gets write access to an immutable file.
+		 */
+		err = -EACCES;
+		if (IS_IMMUTABLE(realinode))
+			goto out_dput;
+	}
+
+	if (realinode->i_op->permission)
+		err = realinode->i_op->permission(realinode, mask);
+	else
+		err = generic_permission(realinode, mask);
+out_dput:
+	dput(alias);
+	return err;
+}
+
+
+struct ovl_link_data {
+	struct dentry *realdentry;
+	void *cookie;
+};
+
+static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	void *ret;
+	struct dentry *realdentry;
+	struct inode *realinode;
+
+	realdentry = ovl_dentry_real(dentry);
+	realinode = realdentry->d_inode;
+
+	if (WARN_ON(!realinode->i_op->follow_link))
+		return ERR_PTR(-EPERM);
+
+	ret = realinode->i_op->follow_link(realdentry, nd);
+	if (IS_ERR(ret))
+		return ret;
+
+	if (realinode->i_op->put_link) {
+		struct ovl_link_data *data;
+
+		data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
+		if (!data) {
+			realinode->i_op->put_link(realdentry, nd, ret);
+			return ERR_PTR(-ENOMEM);
+		}
+		data->realdentry = realdentry;
+		data->cookie = ret;
+
+		return data;
+	} else {
+		return NULL;
+	}
+}
+
+static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
+{
+	struct inode *realinode;
+	struct ovl_link_data *data = c;
+
+	if (!data)
+		return;
+
+	realinode = data->realdentry->d_inode;
+	realinode->i_op->put_link(data->realdentry, nd, data->cookie);
+	kfree(data);
+}
+
+static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+	struct path realpath;
+	struct inode *realinode;
+
+	ovl_path_real(dentry, &realpath);
+	realinode = realpath.dentry->d_inode;
+
+	if (!realinode->i_op->readlink)
+		return -EINVAL;
+
+	touch_atime(&realpath);
+
+	return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
+}
+
+
+static bool ovl_is_private_xattr(const char *name)
+{
+	return strncmp(name, "trusted.overlay.", 16) == 0;
+}
+
+int ovl_setxattr(struct dentry *dentry, const char *name,
+		 const void *value, size_t size, int flags)
+{
+	int err;
+	struct dentry *upperdentry;
+
+	if (ovl_is_private_xattr(name))
+		return -EPERM;
+
+	err = ovl_copy_up(dentry);
+	if (err)
+		return err;
+
+	upperdentry = ovl_dentry_upper(dentry);
+	return  vfs_setxattr(upperdentry, name, value, size, flags);
+}
+
+ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
+		     void *value, size_t size)
+{
+	if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
+	    ovl_is_private_xattr(name))
+		return -ENODATA;
+
+	return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
+}
+
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+	ssize_t res;
+	int off;
+
+	res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
+	if (res <= 0 || size == 0)
+		return res;
+
+	if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
+		return res;
+
+	/* filter out private xattrs */
+	for (off = 0; off < res;) {
+		char *s = list + off;
+		size_t slen = strlen(s) + 1;
+
+		BUG_ON(off + slen > res);
+
+		if (ovl_is_private_xattr(s)) {
+			res -= slen;
+			memmove(s, s + slen, res - off);
+		} else {
+			off += slen;
+		}
+	}
+
+	return res;
+}
+
+int ovl_removexattr(struct dentry *dentry, const char *name)
+{
+	int err;
+	struct path realpath;
+	enum ovl_path_type type;
+
+	if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
+	    ovl_is_private_xattr(name))
+		return -ENODATA;
+
+	type = ovl_path_real(dentry, &realpath);
+	if (type == OVL_PATH_LOWER) {
+		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
+		if (err < 0)
+			return err;
+
+		err = ovl_copy_up(dentry);
+		if (err)
+			return err;
+
+		ovl_path_upper(dentry, &realpath);
+	}
+
+	return vfs_removexattr(realpath.dentry, name);
+}
+
+static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
+				  struct dentry *realdentry)
+{
+	if (type != OVL_PATH_LOWER)
+		return false;
+
+	if (special_file(realdentry->d_inode->i_mode))
+		return false;
+
+	if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
+		return false;
+
+	return true;
+}
+
+static struct file *ovl_open(struct dentry *dentry, struct file *file,
+			     const struct cred *cred)
+{
+	int err;
+	struct path realpath;
+	enum ovl_path_type type;
+
+	type = ovl_path_real(dentry, &realpath);
+	if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
+		if (file->f_flags & O_TRUNC)
+			err = ovl_copy_up_truncate(dentry, 0);
+		else
+			err = ovl_copy_up(dentry);
+		if (err)
+			return ERR_PTR(err);
+
+		ovl_path_upper(dentry, &realpath);
+	}
+
+	return vfs_open(&realpath, file, cred);
+}
+
+static const struct inode_operations ovl_file_inode_operations = {
+	.setattr	= ovl_setattr,
+	.permission	= ovl_permission,
+	.getattr	= ovl_getattr,
+	.setxattr	= ovl_setxattr,
+	.getxattr	= ovl_getxattr,
+	.listxattr	= ovl_listxattr,
+	.removexattr	= ovl_removexattr,
+	.open		= ovl_open,
+};
+
+static const struct inode_operations ovl_symlink_inode_operations = {
+	.setattr	= ovl_setattr,
+	.follow_link	= ovl_follow_link,
+	.put_link	= ovl_put_link,
+	.readlink	= ovl_readlink,
+	.getattr	= ovl_getattr,
+	.setxattr	= ovl_setxattr,
+	.getxattr	= ovl_getxattr,
+	.listxattr	= ovl_listxattr,
+	.removexattr	= ovl_removexattr,
+};
+
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+			    struct ovl_entry *oe)
+{
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	mode &= S_IFMT;
+
+	inode->i_ino = get_next_ino();
+	inode->i_mode = mode;
+	inode->i_flags |= S_NOATIME | S_NOCMTIME;
+
+	switch (mode) {
+	case S_IFDIR:
+		inode->i_private = oe;
+		inode->i_op = &ovl_dir_inode_operations;
+		inode->i_fop = &ovl_dir_operations;
+		break;
+
+	case S_IFLNK:
+		inode->i_op = &ovl_symlink_inode_operations;
+		break;
+
+	case S_IFREG:
+	case S_IFSOCK:
+	case S_IFBLK:
+	case S_IFCHR:
+	case S_IFIFO:
+		inode->i_op = &ovl_file_inode_operations;
+		break;
+
+	default:
+		WARN(1, "illegal file type: %i\n", mode);
+		/* drop the reference from new_inode() to avoid leaking it */
+		iput(inode);
+		inode = NULL;
+	}
+
+	return inode;
+}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..1dd05f76604b7758cc8033f09dbb6f7b29a3f8b8
--- /dev/null
+++ b/fs/overlayfs/overlayfs.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct ovl_entry;
+
+enum ovl_path_type {
+	OVL_PATH_UPPER,
+	OVL_PATH_MERGE,
+	OVL_PATH_LOWER,
+};
+
+extern const char *ovl_opaque_xattr;
+extern const char *ovl_whiteout_xattr;
+extern const struct dentry_operations ovl_dentry_operations;
+
+enum ovl_path_type ovl_path_type(struct dentry *dentry);
+u64 ovl_dentry_version_get(struct dentry *dentry);
+void ovl_dentry_version_inc(struct dentry *dentry);
+void ovl_path_upper(struct dentry *dentry, struct path *path);
+void ovl_path_lower(struct dentry *dentry, struct path *path);
+enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+struct dentry *ovl_dentry_upper(struct dentry *dentry);
+struct dentry *ovl_dentry_lower(struct dentry *dentry);
+struct dentry *ovl_dentry_real(struct dentry *dentry);
+struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
+bool ovl_dentry_is_opaque(struct dentry *dentry);
+void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
+bool ovl_is_whiteout(struct dentry *dentry);
+void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+			  struct nameidata *nd);
+struct file *ovl_path_open(struct path *path, int flags);
+
+struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
+				struct kstat *stat, const char *link);
+
+/* readdir.c */
+extern const struct file_operations ovl_dir_operations;
+int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type);
+
+/* inode.c */
+int ovl_setattr(struct dentry *dentry, struct iattr *attr);
+int ovl_permission(struct inode *inode, int mask);
+int ovl_setxattr(struct dentry *dentry, const char *name,
+		 const void *value, size_t size, int flags);
+ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
+		     void *value, size_t size);
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
+int ovl_removexattr(struct dentry *dentry, const char *name);
+
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+			    struct ovl_entry *oe);
+/* dir.c */
+extern const struct inode_operations ovl_dir_inode_operations;
+
+/* copy_up.c */
+int ovl_copy_up(struct dentry *dentry);
+int ovl_copy_up_truncate(struct dentry *dentry, loff_t size);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
new file mode 100644
index 0000000000000000000000000000000000000000..0797efbc7be48413700e58b9b9cb9a907775bd54
--- /dev/null
+++ b/fs/overlayfs/readdir.c
@@ -0,0 +1,566 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/file.h>
+#include <linux/xattr.h>
+#include <linux/rbtree.h>
+#include <linux/security.h>
+#include <linux/cred.h>
+#include "overlayfs.h"
+
+struct ovl_cache_entry {
+	const char *name;
+	unsigned int len;
+	unsigned int type;
+	u64 ino;
+	bool is_whiteout;
+	struct list_head l_node;
+	struct rb_node node;
+};
+
+struct ovl_readdir_data {
+	struct rb_root *root;
+	struct list_head *list;
+	struct list_head *middle;
+	struct dentry *dir;
+	int count;
+	int err;
+};
+
+struct ovl_dir_file {
+	bool is_real;
+	bool is_cached;
+	struct list_head cursor;
+	u64 cache_version;
+	struct list_head cache;
+	struct file *realfile;
+};
+
+static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
+{
+	return container_of(n, struct ovl_cache_entry, node);
+}
+
+static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
+						    const char *name, int len)
+{
+	struct rb_node *node = root->rb_node;
+	int cmp;
+
+	while (node) {
+		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
+
+		cmp = strncmp(name, p->name, len);
+		if (cmp > 0)
+			node = p->node.rb_right;
+		else if (cmp < 0 || len < p->len)
+			node = p->node.rb_left;
+		else
+			return p;
+	}
+
+	return NULL;
+}
+
+static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+						   u64 ino, unsigned int d_type)
+{
+	struct ovl_cache_entry *p;
+
+	p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL);
+	if (p) {
+		char *name_copy = (char *) (p + 1);
+		memcpy(name_copy, name, len);
+		name_copy[len] = '\0';
+		p->name = name_copy;
+		p->len = len;
+		p->type = d_type;
+		p->ino = ino;
+		p->is_whiteout = false;
+	}
+
+	return p;
+}
+
+static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
+				  const char *name, int len, u64 ino,
+				  unsigned int d_type)
+{
+	struct rb_node **newp = &rdd->root->rb_node;
+	struct rb_node *parent = NULL;
+	struct ovl_cache_entry *p;
+
+	while (*newp) {
+		int cmp;
+		struct ovl_cache_entry *tmp;
+
+		parent = *newp;
+		tmp = ovl_cache_entry_from_node(*newp);
+		cmp = strncmp(name, tmp->name, len);
+		if (cmp > 0)
+			newp = &tmp->node.rb_right;
+		else if (cmp < 0 || len < tmp->len)
+			newp = &tmp->node.rb_left;
+		else
+			return 0;
+	}
+
+	p = ovl_cache_entry_new(name, len, ino, d_type);
+	if (p == NULL)
+		return -ENOMEM;
+
+	list_add_tail(&p->l_node, rdd->list);
+	rb_link_node(&p->node, parent, newp);
+	rb_insert_color(&p->node, rdd->root);
+
+	return 0;
+}
+
+static int ovl_fill_lower(void *buf, const char *name, int namelen,
+			    loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct ovl_readdir_data *rdd = buf;
+	struct ovl_cache_entry *p;
+
+	rdd->count++;
+	p = ovl_cache_entry_find(rdd->root, name, namelen);
+	if (p) {
+		list_move_tail(&p->l_node, rdd->middle);
+	} else {
+		p = ovl_cache_entry_new(name, namelen, ino, d_type);
+		if (p == NULL)
+			rdd->err = -ENOMEM;
+		else
+			list_add_tail(&p->l_node, rdd->middle);
+	}
+
+	return rdd->err;
+}
+
+static void ovl_cache_free(struct list_head *list)
+{
+	struct ovl_cache_entry *p;
+	struct ovl_cache_entry *n;
+
+	list_for_each_entry_safe(p, n, list, l_node)
+		kfree(p);
+
+	INIT_LIST_HEAD(list);
+}
+
+static int ovl_fill_upper(void *buf, const char *name, int namelen,
+			  loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct ovl_readdir_data *rdd = buf;
+
+	rdd->count++;
+	return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
+}
+
+static inline int ovl_dir_read(struct path *realpath,
+			       struct ovl_readdir_data *rdd, filldir_t filler)
+{
+	struct file *realfile;
+	int err;
+
+	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
+	if (IS_ERR(realfile))
+		return PTR_ERR(realfile);
+
+	do {
+		rdd->count = 0;
+		rdd->err = 0;
+		err = vfs_readdir(realfile, filler, rdd);
+		if (err >= 0)
+			err = rdd->err;
+	} while (!err && rdd->count);
+	fput(realfile);
+
+	return err;
+}
+
+static void ovl_dir_reset(struct file *file)
+{
+	struct ovl_dir_file *od = file->private_data;
+	enum ovl_path_type type = ovl_path_type(file->f_path.dentry);
+
+	if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) {
+		list_del_init(&od->cursor);
+		ovl_cache_free(&od->cache);
+		od->is_cached = false;
+	}
+	WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
+	if (od->is_real && type == OVL_PATH_MERGE) {
+		fput(od->realfile);
+		od->realfile = NULL;
+		od->is_real = false;
+	}
+}
+
+static int ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
+{
+	struct ovl_cache_entry *p;
+	struct dentry *dentry;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+
+	override_cred = prepare_creds();
+	if (!override_cred) {
+		ovl_cache_free(rdd->list);
+		return -ENOMEM;
+	}
+
+	/*
+	 * CAP_SYS_ADMIN for getxattr
+	 * CAP_DAC_OVERRIDE for lookup
+	 */
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+	old_cred = override_creds(override_cred);
+
+	mutex_lock(&rdd->dir->d_inode->i_mutex);
+	list_for_each_entry(p, rdd->list, l_node) {
+		if (p->type != DT_LNK)
+			continue;
+
+		dentry = lookup_one_len(p->name, rdd->dir, p->len);
+		if (IS_ERR(dentry))
+			continue;
+
+		p->is_whiteout = ovl_is_whiteout(dentry);
+		dput(dentry);
+	}
+	mutex_unlock(&rdd->dir->d_inode->i_mutex);
+
+	revert_creds(old_cred);
+	put_cred(override_cred);
+
+	return 0;
+}
+
+static inline int ovl_dir_read_merged(struct path *upperpath,
+				      struct path *lowerpath,
+				      struct ovl_readdir_data *rdd)
+{
+	int err;
+	struct rb_root root = RB_ROOT;
+	struct list_head middle;
+
+	rdd->root = &root;
+	if (upperpath->dentry) {
+		rdd->dir = upperpath->dentry;
+		err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
+		if (err)
+			goto out;
+
+		err = ovl_dir_mark_whiteouts(rdd);
+		if (err)
+			goto out;
+	}
+	/*
+	 * Insert lowerpath entries before upperpath ones, this allows
+	 * offsets to be reasonably constant
+	 */
+	list_add(&middle, rdd->list);
+	rdd->middle = &middle;
+	err = ovl_dir_read(lowerpath, rdd, ovl_fill_lower);
+	list_del(&middle);
+out:
+	rdd->root = NULL;
+
+	return err;
+}
+
+static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
+{
+	struct list_head *l;
+	loff_t off;
+
+	l = od->cache.next;
+	for (off = 0; off < pos; off++) {
+		if (l == &od->cache)
+			break;
+		l = l->next;
+	}
+	list_move_tail(&od->cursor, l);
+}
+
+static int ovl_readdir(struct file *file, void *buf, filldir_t filler)
+{
+	struct ovl_dir_file *od = file->private_data;
+	int res;
+
+	if (!file->f_pos)
+		ovl_dir_reset(file);
+
+	if (od->is_real) {
+		res = vfs_readdir(od->realfile, filler, buf);
+		file->f_pos = od->realfile->f_pos;
+
+		return res;
+	}
+
+	if (!od->is_cached) {
+		struct path lowerpath;
+		struct path upperpath;
+		struct ovl_readdir_data rdd = { .list = &od->cache };
+
+		ovl_path_lower(file->f_path.dentry, &lowerpath);
+		ovl_path_upper(file->f_path.dentry, &upperpath);
+
+		res = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
+		if (res) {
+			ovl_cache_free(rdd.list);
+			return res;
+		}
+
+		od->cache_version = ovl_dentry_version_get(file->f_path.dentry);
+		od->is_cached = true;
+
+		ovl_seek_cursor(od, file->f_pos);
+	}
+
+	while (od->cursor.next != &od->cache) {
+		int over;
+		loff_t off;
+		struct ovl_cache_entry *p;
+
+		p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node);
+		off = file->f_pos;
+		if (!p->is_whiteout) {
+			over = filler(buf, p->name, p->len, off, p->ino,
+				      p->type);
+			if (over)
+				break;
+		}
+		file->f_pos++;
+		list_move(&od->cursor, &p->l_node);
+	}
+
+	return 0;
+}
+
+static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+	loff_t res;
+	struct ovl_dir_file *od = file->private_data;
+
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
+	if (!file->f_pos)
+		ovl_dir_reset(file);
+
+	if (od->is_real) {
+		res = vfs_llseek(od->realfile, offset, origin);
+		file->f_pos = od->realfile->f_pos;
+	} else {
+		res = -EINVAL;
+
+		switch (origin) {
+		case SEEK_CUR:
+			offset += file->f_pos;
+			break;
+		case SEEK_SET:
+			break;
+		default:
+			goto out_unlock;
+		}
+		if (offset < 0)
+			goto out_unlock;
+
+		if (offset != file->f_pos) {
+			file->f_pos = offset;
+			if (od->is_cached)
+				ovl_seek_cursor(od, offset);
+		}
+		res = offset;
+	}
+out_unlock:
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+
+	return res;
+}
+
+static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
+			 int datasync)
+{
+	struct ovl_dir_file *od = file->private_data;
+
+	/* May need to reopen directory if it got copied up */
+	if (!od->realfile) {
+		struct path upperpath;
+
+		ovl_path_upper(file->f_path.dentry, &upperpath);
+		od->realfile = ovl_path_open(&upperpath, O_RDONLY);
+		if (IS_ERR(od->realfile))
+			return PTR_ERR(od->realfile);
+	}
+
+	return vfs_fsync_range(od->realfile, start, end, datasync);
+}
+
+static int ovl_dir_release(struct inode *inode, struct file *file)
+{
+	struct ovl_dir_file *od = file->private_data;
+
+	list_del(&od->cursor);
+	ovl_cache_free(&od->cache);
+	if (od->realfile)
+		fput(od->realfile);
+	kfree(od);
+
+	return 0;
+}
+
+static int ovl_dir_open(struct inode *inode, struct file *file)
+{
+	struct path realpath;
+	struct file *realfile;
+	struct ovl_dir_file *od;
+	enum ovl_path_type type;
+
+	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	type = ovl_path_real(file->f_path.dentry, &realpath);
+	realfile = ovl_path_open(&realpath, file->f_flags);
+	if (IS_ERR(realfile)) {
+		kfree(od);
+		return PTR_ERR(realfile);
+	}
+	INIT_LIST_HEAD(&od->cache);
+	INIT_LIST_HEAD(&od->cursor);
+	od->is_cached = false;
+	od->realfile = realfile;
+	od->is_real = (type != OVL_PATH_MERGE);
+	file->private_data = od;
+
+	return 0;
+}
+
+const struct file_operations ovl_dir_operations = {
+	.read		= generic_read_dir,
+	.open		= ovl_dir_open,
+	.readdir	= ovl_readdir,
+	.llseek		= ovl_dir_llseek,
+	.fsync		= ovl_dir_fsync,
+	.release	= ovl_dir_release,
+};
+
+static int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
+{
+	int err;
+	struct path lowerpath;
+	struct path upperpath;
+	struct ovl_cache_entry *p;
+	struct ovl_readdir_data rdd = { .list = list };
+
+	ovl_path_upper(dentry, &upperpath);
+	ovl_path_lower(dentry, &lowerpath);
+
+	err = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
+	if (err)
+		return err;
+
+	err = 0;
+
+	list_for_each_entry(p, list, l_node) {
+		if (p->is_whiteout)
+			continue;
+
+		if (p->name[0] == '.') {
+			if (p->len == 1)
+				continue;
+			if (p->len == 2 && p->name[1] == '.')
+				continue;
+		}
+		err = -ENOTEMPTY;
+		break;
+	}
+
+	return err;
+}
+
+static int ovl_remove_whiteouts(struct dentry *dir, struct list_head *list)
+{
+	struct path upperpath;
+	struct dentry *upperdir;
+	struct ovl_cache_entry *p;
+	const struct cred *old_cred;
+	struct cred *override_cred;
+	int err;
+
+	ovl_path_upper(dir, &upperpath);
+	upperdir = upperpath.dentry;
+
+	override_cred = prepare_creds();
+	if (!override_cred)
+		return -ENOMEM;
+
+	/*
+	 * CAP_DAC_OVERRIDE for lookup and unlink
+	 * CAP_SYS_ADMIN for setxattr of "trusted" namespace
+	 * CAP_FOWNER for unlink in sticky directory
+	 */
+	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+	cap_raise(override_cred->cap_effective, CAP_FOWNER);
+	old_cred = override_creds(override_cred);
+
+	err = vfs_setxattr(upperdir, ovl_opaque_xattr, "y", 1, 0);
+	if (err)
+		goto out_revert_creds;
+
+	mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
+	list_for_each_entry(p, list, l_node) {
+		struct dentry *dentry;
+		int ret;
+
+		if (!p->is_whiteout)
+			continue;
+
+		dentry = lookup_one_len(p->name, upperdir, p->len);
+		if (IS_ERR(dentry)) {
+			printk(KERN_WARNING
+			    "overlayfs: failed to lookup whiteout %.*s: %li\n",
+			    p->len, p->name, PTR_ERR(dentry));
+			continue;
+		}
+		ret = vfs_unlink(upperdir->d_inode, dentry);
+		dput(dentry);
+		if (ret)
+			printk(KERN_WARNING
+			    "overlayfs: failed to unlink whiteout %.*s: %i\n",
+			    p->len, p->name, ret);
+	}
+	mutex_unlock(&upperdir->d_inode->i_mutex);
+
+out_revert_creds:
+	revert_creds(old_cred);
+	put_cred(override_cred);
+
+	return err;
+}
+
+int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type)
+{
+	int err;
+	LIST_HEAD(list);
+
+	err = ovl_check_empty_dir(dentry, &list);
+	if (!err && type == OVL_PATH_MERGE)
+		err = ovl_remove_whiteouts(dentry, &list);
+
+	ovl_cache_free(&list);
+
+	return err;
+}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
new file mode 100644
index 0000000000000000000000000000000000000000..24bdcc54fcb042d5421955b7a7b195d4de635da5
--- /dev/null
+++ b/fs/overlayfs/super.c
@@ -0,0 +1,664 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cred.h>
+#include <linux/sched.h>
+#include "overlayfs.h"
+
+MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
+MODULE_DESCRIPTION("Overlay filesystem");
+MODULE_LICENSE("GPL");
+
+struct ovl_config {
+	char *lowerdir;
+	char *upperdir;
+};
+
+/* private information held for overlayfs's superblock */
+struct ovl_fs {
+	struct vfsmount *upper_mnt;
+	struct vfsmount *lower_mnt;
+	/* pathnames of lower and upper dirs, for show_options */
+	struct ovl_config config;
+};
+
+/* private information held for every overlayfs dentry */
+struct ovl_entry {
+	/*
+	 * Keep "double reference" on upper dentries, so that
+	 * d_delete() doesn't think it's OK to reset d_inode to NULL.
+	 */
+	struct dentry *__upperdentry;
+	struct dentry *lowerdentry;
+	union {
+		struct {
+			u64 version;
+			bool opaque;
+		};
+		struct rcu_head rcu;
+	};
+};
+
+const char *ovl_whiteout_xattr = "trusted.overlay.whiteout";
+const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+
+
+enum ovl_path_type ovl_path_type(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	if (oe->__upperdentry) {
+		if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode))
+			return OVL_PATH_MERGE;
+		else
+			return OVL_PATH_UPPER;
+	} else {
+		return OVL_PATH_LOWER;
+	}
+}
+
+static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
+{
+	struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
+	smp_read_barrier_depends();
+	return upperdentry;
+}
+
+void ovl_path_upper(struct dentry *dentry, struct path *path)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	path->mnt = ofs->upper_mnt;
+	path->dentry = ovl_upperdentry_dereference(oe);
+}
+
+void ovl_path_lower(struct dentry *dentry, struct path *path)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	path->mnt = ofs->lower_mnt;
+	path->dentry = oe->lowerdentry;
+}
+
+enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
+{
+
+	enum ovl_path_type type = ovl_path_type(dentry);
+
+	if (type == OVL_PATH_LOWER)
+		ovl_path_lower(dentry, path);
+	else
+		ovl_path_upper(dentry, path);
+
+	return type;
+}
+
+struct dentry *ovl_dentry_upper(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return ovl_upperdentry_dereference(oe);
+}
+
+struct dentry *ovl_dentry_lower(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return oe->lowerdentry;
+}
+
+struct dentry *ovl_dentry_real(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	struct dentry *realdentry;
+
+	realdentry = ovl_upperdentry_dereference(oe);
+	if (!realdentry)
+		realdentry = oe->lowerdentry;
+
+	return realdentry;
+}
+
+struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
+{
+	struct dentry *realdentry;
+
+	realdentry = ovl_upperdentry_dereference(oe);
+	if (realdentry) {
+		*is_upper = true;
+	} else {
+		realdentry = oe->lowerdentry;
+		*is_upper = false;
+	}
+	return realdentry;
+}
+
+bool ovl_dentry_is_opaque(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	return oe->opaque;
+}
+
+void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	oe->opaque = opaque;
+}
+
+void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex));
+	WARN_ON(oe->__upperdentry);
+	BUG_ON(!upperdentry->d_inode);
+	smp_wmb();
+	oe->__upperdentry = dget(upperdentry);
+}
+
+void ovl_dentry_version_inc(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+	oe->version++;
+}
+
+u64 ovl_dentry_version_get(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+	return oe->version;
+}
+
+bool ovl_is_whiteout(struct dentry *dentry)
+{
+	int res;
+	char val;
+
+	if (!dentry)
+		return false;
+	if (!dentry->d_inode)
+		return false;
+	if (!S_ISLNK(dentry->d_inode->i_mode))
+		return false;
+
+	res = vfs_getxattr(dentry, ovl_whiteout_xattr, &val, 1);
+	if (res == 1 && val == 'y')
+		return true;
+
+	return false;
+}
+
+static bool ovl_is_opaquedir(struct dentry *dentry)
+{
+	int res;
+	char val;
+
+	if (!S_ISDIR(dentry->d_inode->i_mode))
+		return false;
+
+	res = vfs_getxattr(dentry, ovl_opaque_xattr, &val, 1);
+	if (res == 1 && val == 'y')
+		return true;
+
+	return false;
+}
+
+static void ovl_entry_free(struct rcu_head *head)
+{
+	struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu);
+	kfree(oe);
+}
+
+static void ovl_dentry_release(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	if (oe) {
+		dput(oe->__upperdentry);
+		dput(oe->__upperdentry);
+		dput(oe->lowerdentry);
+		call_rcu(&oe->rcu, ovl_entry_free);
+	}
+}
+
+const struct dentry_operations ovl_dentry_operations = {
+	.d_release = ovl_dentry_release,
+};
+
+static struct ovl_entry *ovl_alloc_entry(void)
+{
+	return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+}
+
+static inline struct dentry *ovl_lookup_real(struct dentry *dir,
+					     struct qstr *name)
+{
+	struct dentry *dentry;
+
+	mutex_lock(&dir->d_inode->i_mutex);
+	dentry = lookup_one_len(name->name, dir, name->len);
+	mutex_unlock(&dir->d_inode->i_mutex);
+
+	if (IS_ERR(dentry)) {
+		if (PTR_ERR(dentry) == -ENOENT)
+			dentry = NULL;
+	} else if (!dentry->d_inode) {
+		dput(dentry);
+		dentry = NULL;
+	}
+	return dentry;
+}
+
+/*
+ * Core lookup: look up this dentry's name in the parent's upper and/or
+ * lower directories, decide how the layers combine (whiteouts, opaque
+ * directories, a non-directory hiding the lower entry), and attach an
+ * ovl_entry — plus an overlay inode for positive entries — to @dentry.
+ *
+ * Returns 0 on success (including a negative entry) or a -ve errno.
+ */
+static int ovl_do_lookup(struct dentry *dentry)
+{
+	struct ovl_entry *oe;
+	struct dentry *upperdir;
+	struct dentry *lowerdir;
+	struct dentry *upperdentry = NULL;
+	struct dentry *lowerdentry = NULL;
+	struct inode *inode = NULL;
+	int err;
+
+	err = -ENOMEM;
+	oe = ovl_alloc_entry();
+	if (!oe)
+		goto out;
+
+	upperdir = ovl_dentry_upper(dentry->d_parent);
+	lowerdir = ovl_dentry_lower(dentry->d_parent);
+
+	if (upperdir) {
+		upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
+		err = PTR_ERR(upperdentry);
+		if (IS_ERR(upperdentry))
+			goto out_put_dir;
+
+		/*
+		 * Whiteout/opaque markers only matter when there is a
+		 * lower layer to hide; only symlinks and directories are
+		 * checked for them here.
+		 */
+		if (lowerdir && upperdentry &&
+		    (S_ISLNK(upperdentry->d_inode->i_mode) ||
+		     S_ISDIR(upperdentry->d_inode->i_mode))) {
+			const struct cred *old_cred;
+			struct cred *override_cred;
+
+			err = -ENOMEM;
+			override_cred = prepare_creds();
+			if (!override_cred)
+				goto out_dput_upper;
+
+			/* CAP_SYS_ADMIN needed for getxattr */
+			cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
+			old_cred = override_creds(override_cred);
+
+			if (ovl_is_opaquedir(upperdentry)) {
+				oe->opaque = true;
+			} else if (ovl_is_whiteout(upperdentry)) {
+				/* whiteout: present the entry as negative */
+				dput(upperdentry);
+				upperdentry = NULL;
+				oe->opaque = true;
+			}
+			revert_creds(old_cred);
+			put_cred(override_cred);
+		}
+	}
+	/* an opaque upper entry hides the lower layer: skip its lookup */
+	if (lowerdir && !oe->opaque) {
+		lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
+		err = PTR_ERR(lowerdentry);
+		if (IS_ERR(lowerdentry))
+			goto out_dput_upper;
+	}
+
+	/* only two directories merge; any non-dir pairing hides the lower */
+	if (lowerdentry && upperdentry &&
+	    (!S_ISDIR(upperdentry->d_inode->i_mode) ||
+	     !S_ISDIR(lowerdentry->d_inode->i_mode))) {
+		dput(lowerdentry);
+		lowerdentry = NULL;
+		oe->opaque = true;
+	}
+
+	/* positive entry: back it with an overlay inode */
+	if (lowerdentry || upperdentry) {
+		struct dentry *realdentry;
+
+		realdentry = upperdentry ? upperdentry : lowerdentry;
+		err = -ENOMEM;
+		inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
+				      oe);
+		if (!inode)
+			goto out_dput;
+	}
+
+	/*
+	 * NOTE(review): the lookup references are transferred to oe;
+	 * __upperdentry takes an extra dget() which is balanced by the
+	 * double dput() in ovl_dentry_release().
+	 */
+	if (upperdentry)
+		oe->__upperdentry = dget(upperdentry);
+
+	if (lowerdentry)
+		oe->lowerdentry = lowerdentry;
+
+	dentry->d_fsdata = oe;
+	dentry->d_op = &ovl_dentry_operations;
+	d_add(dentry, inode);
+
+	return 0;
+
+out_dput:
+	dput(lowerdentry);
+out_dput_upper:
+	dput(upperdentry);
+out_put_dir:
+	kfree(oe);
+out:
+	return err;
+}
+
+/* i_op->lookup: defer to ovl_do_lookup() and map its status code. */
+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+			  struct nameidata *nd)
+{
+	int err;
+
+	err = ovl_do_lookup(dentry);
+	return err ? ERR_PTR(err) : NULL;
+}
+
+/*
+ * Open the real file at @path with @flags using the caller's creds.
+ * The extra path reference taken here is handed to dentry_open().
+ * NOTE(review): assumes dentry_open() consumes the dentry/mnt
+ * references on failure too — confirm for this kernel version.
+ */
+struct file *ovl_path_open(struct path *path, int flags)
+{
+	path_get(path);
+	return dentry_open(path->dentry, path->mnt, flags, current_cred());
+}
+
+/*
+ * put_super: undo ovl_fill_super() — drop the write access held on
+ * the upper mount (r/w mounts only), put both cloned mounts, and free
+ * the option strings and the ovl_fs itself.
+ */
+static void ovl_put_super(struct super_block *sb)
+{
+	struct ovl_fs *ufs = sb->s_fs_info;
+
+	if (!(sb->s_flags & MS_RDONLY))
+		mnt_drop_write(ufs->upper_mnt);
+
+	mntput(ufs->upper_mnt);
+	mntput(ufs->lower_mnt);
+
+	kfree(ufs->config.lowerdir);
+	kfree(ufs->config.upperdir);
+	kfree(ufs);
+}
+
+/*
+ * remount: the overlay holds write access on the upper mount while it
+ * is mounted r/w, so toggling MS_RDONLY must acquire or release that
+ * write access accordingly.
+ */
+static int ovl_remount_fs(struct super_block *sb, int *flagsp, char *data)
+{
+	struct ovl_fs *ufs = sb->s_fs_info;
+	int flags = *flagsp;
+
+	/* Nothing to do unless the read-only state actually changes. */
+	if (!((flags ^ sb->s_flags) & MS_RDONLY))
+		return 0;
+
+	/* Going read-write: take write access on the upper layer. */
+	if (!(flags & MS_RDONLY))
+		return mnt_want_write(ufs->upper_mnt);
+
+	/* Going read-only: release it. */
+	mnt_drop_write(ufs->upper_mnt);
+	return 0;
+}
+
+/**
+ * ovl_statfs
+ * @dentry: The overlayfs dentry whose filesystem is queried
+ * @buf: The struct kstatfs to fill in with stats
+ *
+ * Get the filesystem statistics.  As writes always target the upper layer
+ * filesystem pass the statfs to the same filesystem.
+ */
+static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct dentry *root_dentry = dentry->d_sb->s_root;
+	struct path path;
+	ovl_path_upper(root_dentry, &path);
+
+	/* not every fs implements statfs */
+	if (!path.dentry->d_sb->s_op->statfs)
+		return -ENOSYS;
+	return path.dentry->d_sb->s_op->statfs(path.dentry, buf);
+}
+
+/**
+ * ovl_show_options
+ *
+ * Prints the mount options for a given superblock.
+ * Returns zero; does not fail.
+ */
+static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+{
+	struct ovl_fs *ufs = dentry->d_sb->s_fs_info;
+
+	seq_printf(m, ",lowerdir=%s,upperdir=%s",
+		   ufs->config.lowerdir, ufs->config.upperdir);
+	return 0;
+}
+
+/* Superblock operations shared by all overlayfs instances. */
+static const struct super_operations ovl_super_operations = {
+	.put_super	= ovl_put_super,
+	.remount_fs	= ovl_remount_fs,
+	.statfs		= ovl_statfs,
+	.show_options	= ovl_show_options,
+};
+
+/* Mount-option tokens recognised by ovl_parse_opt(). */
+enum {
+	Opt_lowerdir,
+	Opt_upperdir,
+	Opt_err,
+};
+
+static const match_table_t ovl_tokens = {
+	{Opt_lowerdir,			"lowerdir=%s"},
+	{Opt_upperdir,			"upperdir=%s"},
+	{Opt_err,			NULL}
+};
+
+/*
+ * Parse the comma-separated mount options in @opt into @config.
+ *
+ * "lowerdir=" and "upperdir=" may each appear more than once; the last
+ * occurrence wins (the earlier strdup is freed).  Returns 0 or a -ve
+ * errno.  On failure, strings already duplicated into @config are NOT
+ * freed here — the caller must dispose of them.
+ */
+static int ovl_parse_opt(char *opt, struct ovl_config *config)
+{
+	char *p;
+
+	config->upperdir = NULL;
+	config->lowerdir = NULL;
+
+	while ((p = strsep(&opt, ",")) != NULL) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, ovl_tokens, args);
+		switch (token) {
+		case Opt_upperdir:
+			kfree(config->upperdir);
+			config->upperdir = match_strdup(&args[0]);
+			if (!config->upperdir)
+				return -ENOMEM;
+			break;
+
+		case Opt_lowerdir:
+			kfree(config->lowerdir);
+			config->lowerdir = match_strdup(&args[0]);
+			if (!config->lowerdir)
+				return -ENOMEM;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Fill an overlayfs super block: parse the options, resolve and
+ * validate both layer directories, clone private mounts of each, take
+ * write access on the upper layer (unless r/o) and create the root
+ * dentry.
+ *
+ * Fixes over the previous version: a parse failure now runs through
+ * the config-string frees (ovl_parse_opt() may have strdup'd one
+ * option before failing), and a failed ovl_alloc_entry() returns
+ * -ENOMEM instead of the stale -EINVAL.
+ */
+static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct path lowerpath;
+	struct path upperpath;
+	struct inode *root_inode;
+	struct dentry *root_dentry;
+	struct ovl_entry *oe;
+	struct ovl_fs *ufs;
+	int err;
+
+	err = -ENOMEM;
+	ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL);
+	if (!ufs)
+		goto out;
+
+	err = ovl_parse_opt((char *) data, &ufs->config);
+	if (err)
+		goto out_free_config;	/* options may be partially strdup'd */
+
+	err = -EINVAL;
+	if (!ufs->config.upperdir || !ufs->config.lowerdir) {
+		printk(KERN_ERR "overlayfs: missing upperdir or lowerdir\n");
+		goto out_free_config;
+	}
+
+	err = -ENOMEM;
+	oe = ovl_alloc_entry();
+	if (oe == NULL)
+		goto out_free_config;
+
+	err = kern_path(ufs->config.upperdir, LOOKUP_FOLLOW, &upperpath);
+	if (err)
+		goto out_free_oe;
+
+	err = kern_path(ufs->config.lowerdir, LOOKUP_FOLLOW, &lowerpath);
+	if (err)
+		goto out_put_upperpath;
+
+	err = -ENOTDIR;
+	if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
+	    !S_ISDIR(lowerpath.dentry->d_inode->i_mode))
+		goto out_put_lowerpath;
+
+	/* the overlay nests on top of both layers; bound the stacking depth */
+	sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
+				lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
+
+	err = -EINVAL;
+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		printk(KERN_ERR "overlayfs: maximum fs stacking depth exceeded\n");
+		goto out_put_lowerpath;
+	}
+
+	ufs->upper_mnt = clone_private_mount(&upperpath);
+	err = PTR_ERR(ufs->upper_mnt);
+	if (IS_ERR(ufs->upper_mnt)) {
+		printk(KERN_ERR "overlayfs: failed to clone upperpath\n");
+		goto out_put_lowerpath;
+	}
+
+	ufs->lower_mnt = clone_private_mount(&lowerpath);
+	err = PTR_ERR(ufs->lower_mnt);
+	if (IS_ERR(ufs->lower_mnt)) {
+		printk(KERN_ERR "overlayfs: failed to clone lowerpath\n");
+		goto out_put_upper_mnt;
+	}
+
+	/*
+	 * Make lower_mnt R/O.  That way fchmod/fchown on lower file
+	 * will fail instead of modifying lower fs.
+	 */
+	ufs->lower_mnt->mnt_flags |= MNT_READONLY;
+
+	/* If the upper fs is r/o, we mark overlayfs r/o too */
+	if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+		sb->s_flags |= MS_RDONLY;
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		err = mnt_want_write(ufs->upper_mnt);
+		if (err)
+			goto out_put_lower_mnt;
+	}
+
+	err = -ENOMEM;
+	root_inode = ovl_new_inode(sb, S_IFDIR, oe);
+	if (!root_inode)
+		goto out_drop_write;
+
+	root_dentry = d_make_root(root_inode);
+	if (!root_dentry)
+		goto out_drop_write;
+
+	/*
+	 * Only the mount references are dropped here: the dentry
+	 * references from kern_path() are kept for oe (the extra dget()
+	 * on the upper dentry is balanced by the double dput() in
+	 * ovl_dentry_release()).
+	 */
+	mntput(upperpath.mnt);
+	mntput(lowerpath.mnt);
+
+	oe->__upperdentry = dget(upperpath.dentry);
+	oe->lowerdentry = lowerpath.dentry;
+
+	root_dentry->d_fsdata = oe;
+	root_dentry->d_op = &ovl_dentry_operations;
+
+	sb->s_op = &ovl_super_operations;
+	sb->s_root = root_dentry;
+	sb->s_fs_info = ufs;
+
+	return 0;
+
+out_drop_write:
+	if (!(sb->s_flags & MS_RDONLY))
+		mnt_drop_write(ufs->upper_mnt);
+out_put_lower_mnt:
+	mntput(ufs->lower_mnt);
+out_put_upper_mnt:
+	mntput(ufs->upper_mnt);
+out_put_lowerpath:
+	path_put(&lowerpath);
+out_put_upperpath:
+	path_put(&upperpath);
+out_free_oe:
+	kfree(oe);
+out_free_config:
+	kfree(ufs->config.lowerdir);
+	kfree(ufs->config.upperdir);
+	kfree(ufs);
+out:
+	return err;
+}
+
+/* mount: overlayfs has no backing device of its own. */
+static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
+				const char *dev_name, void *raw_data)
+{
+	return mount_nodev(fs_type, flags, raw_data, ovl_fill_super);
+}
+
+/* Filesystem type registration record for "overlayfs". */
+static struct file_system_type ovl_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "overlayfs",
+	.mount		= ovl_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+/* Module init: register the overlayfs filesystem type. */
+static int __init ovl_init(void)
+{
+	return register_filesystem(&ovl_fs_type);
+}
+
+/* Module exit: unregister the overlayfs filesystem type. */
+static void __exit ovl_exit(void)
+{
+	unregister_filesystem(&ovl_fs_type);
+}
+
+module_init(ovl_init);
+module_exit(ovl_exit);
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index c1c729335924803f92e5530d4197b1eb3bd4907c..e5970e28a2bbcc61e21c1ceb7dac5cf0058b2a8e 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -28,3 +28,6 @@ proc-$(CONFIG_PROC_VMCORE)	+= vmcore.o
 proc-$(CONFIG_PROC_DEVICETREE)	+= proc_devtree.o
 proc-$(CONFIG_PRINTK)	+= kmsg.o
 proc-$(CONFIG_PROC_PAGE_MONITOR)	+= page.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_PROC_BCM)
+proc-$(CONFIG_BCM_KF_PROC_BCM)	+= proc_brcm.o
+endif # BCM_KF
\ No newline at end of file
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9fc77b412ac4a00f8c8ad8881e20f538581b18a7..6c86ff8f4fb5df24c4492dd67dac9efe1355a056 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1267,6 +1267,130 @@ static const struct file_operations proc_pid_sched_operations = {
 
 #endif
 
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+
+/*
+ * Dump the per-task Broadcom scheduler-audit counters of @p into @m,
+ * one "name=value" line per field (backs /proc/<pid>/bcm_schedaudit).
+ */
+void proc_schedaudit_show_task(struct task_struct *p, struct seq_file *m)
+{
+	seq_printf(m, "trig_latency=%u\n", p->bcm_saudit.trig_latency);
+	seq_printf(m, "trig_runtime=%u\n", p->bcm_saudit.trig_runtime);
+	seq_printf(m, "trig_printk=%u\n", p->bcm_saudit.trig_printk);
+	seq_printf(m, "conforming_latency=%u\n", p->bcm_saudit.conforming_latency);
+	seq_printf(m, "conforming_runtime=%u\n", p->bcm_saudit.conforming_runtime);
+	seq_printf(m, "latency_violations=%u\n", p->bcm_saudit.latency_violations);
+	seq_printf(m, "runtime_violations=%u\n", p->bcm_saudit.runtime_violations);
+	seq_printf(m, "max_latency=%u\n", p->bcm_saudit.max_latency);
+	seq_printf(m, "max_runtime=%u\n", p->bcm_saudit.max_runtime);
+}
+EXPORT_SYMBOL(proc_schedaudit_show_task);
+
+/*
+ * Update the per-task scheduler-audit settings of @p.
+ *
+ * @setindex selects the operation: 0 clears all accumulated counters,
+ * 1/2/3 store the matching trigger value; any other index is a no-op.
+ */
+void proc_schedaudit_set_task(struct task_struct *p, uint32_t setindex,
+          uint32_t trig_latency, uint32_t trig_runtime, uint32_t trig_printk)
+{
+	switch (setindex) {
+	case 0:
+		p->bcm_saudit.conforming_latency = 0;
+		p->bcm_saudit.conforming_runtime = 0;
+		p->bcm_saudit.latency_violations = 0;
+		p->bcm_saudit.runtime_violations = 0;
+		p->bcm_saudit.max_latency = 0;
+		p->bcm_saudit.max_runtime = 0;
+		break;
+	case 1:
+		p->bcm_saudit.trig_latency = trig_latency;
+		break;
+	case 2:
+		p->bcm_saudit.trig_runtime = trig_runtime;
+		break;
+	case 3:
+		p->bcm_saudit.trig_printk = trig_printk;
+		break;
+	}
+}
+EXPORT_SYMBOL(proc_schedaudit_set_task);
+#endif
+
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+/*
+ * Print out various scheduling related per-task fields:
+ */
+/* seq_file show: print the audit counters of the task behind @inode. */
+static int schedaudit_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+	proc_schedaudit_show_task(p, m);
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+/*
+ * Write handler for /proc/<pid>/bcm_schedaudit.
+ *
+ * Accepts "reset", "trig_latency=<n>", "trig_runtime=<n>" or
+ * "trig_printk=<n>"; anything else is ignored with a warning.
+ *
+ * Fix: copy only what the caller wrote.  The previous code always read
+ * sizeof(kbuf)-1 bytes from user space regardless of @count, so a
+ * short user buffer (e.g. `echo reset`) could fault with -EFAULT.
+ */
+static ssize_t
+schedaudit_write(struct file *file, const char __user *buf,
+                 size_t count, loff_t *offset)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct task_struct *p;
+	uint32_t setindex=0;
+	uint32_t trig_latency=0;
+	uint32_t trig_runtime=0;
+	uint32_t trig_printk=0;
+	char kbuf[100]={0};
+	size_t len;
+
+	/* kbuf is zero-initialised, so it stays NUL-terminated. */
+	len = count;
+	if (len > sizeof(kbuf) - 1)
+		len = sizeof(kbuf) - 1;
+	if (copy_from_user(kbuf, buf, len))
+		return -EFAULT;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	if (!strncmp(kbuf, "reset", 5)) {
+		setindex=0;
+	} else if (!strncmp(kbuf, "trig_latency=", 13)) {
+		setindex=1;
+		trig_latency=simple_strtol(&kbuf[13], NULL, 0);
+	} else if (!strncmp(kbuf, "trig_runtime=", 13)) {
+		setindex=2;
+		trig_runtime=simple_strtol(&kbuf[13], NULL, 0);
+	} else if (!strncmp(kbuf, "trig_printk=", 12)) {
+		setindex=3;
+		trig_printk=simple_strtol(&kbuf[12], NULL, 0);
+	} else {
+		printk(KERN_WARNING "invalid input, ignored\n");
+		setindex = 999;	/* sentinel: do not apply anything */
+	}
+
+	if (setindex < 999)
+		proc_schedaudit_set_task(p, setindex,
+                                 trig_latency, trig_runtime, trig_printk);
+
+	put_task_struct(p);
+
+	return count;
+}
+
+/*
+ * Open handler: single_open() stores its @data argument in
+ * seq_file->private, which is exactly what the previous
+ * open-then-assign sequence did by hand.
+ */
+static int schedaudit_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, schedaudit_show, inode);
+}
+
+/* File operations for /proc/<pid>/bcm_schedaudit (single-shot seq_file). */
+static const struct file_operations proc_pid_schedaudit_operations = {
+	.open		= schedaudit_open,
+	.read		= seq_read,
+	.write		= schedaudit_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif  /* CONFIG_BCM_KF_SCHEDAUDIT */
+
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -2963,6 +3087,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+	REG("bcm_schedaudit",  S_IRUGO|S_IWUSR, proc_pid_schedaudit_operations),
+#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
@@ -3324,6 +3451,9 @@ static const struct pid_entry tid_base_stuff[] = {
 	INF("limits",	 S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+#endif
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+	REG("bcm_schedaudit",  S_IRUGO|S_IWUSR, proc_pid_schedaudit_operations),
 #endif
 	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
index 82676e3fcd1d096c74a9b5bca2f7f29335b336be..f68cc57aa10c243bae8b7d20cd4e21ec12a64fb7 100644
--- a/fs/proc/cmdline.c
+++ b/fs/proc/cmdline.c
@@ -3,9 +3,12 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+/* Command line args run-time added by Broadcom drivers */
+extern char board_command_line[];
+
 static int cmdline_proc_show(struct seq_file *m, void *v)
 {
-	seq_printf(m, "%s\n", saved_command_line);
+	seq_printf(m, "%s %s\n", saved_command_line, board_command_line);
 	return 0;
 }
 
diff --git a/fs/proc/proc_brcm.c b/fs/proc/proc_brcm.c
new file mode 100644
index 0000000000000000000000000000000000000000..621a39584903a94a46de67ba47a25aeee4c9372e
--- /dev/null
+++ b/fs/proc/proc_brcm.c
@@ -0,0 +1,506 @@
+/*
+ *
+    <:copyright-BRCM:2011:DUAL/GPL:standard
+    
+       Copyright (c) 2011 Broadcom Corporation
+       All Rights Reserved
+    
+    Unless you and Broadcom execute a separate written software license
+    agreement governing use of this software, this software is licensed
+    to you under the terms of the GNU General Public License version 2
+    (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+    with the following added to such license:
+    
+       As a special exception, the copyright holders of this software give
+       you permission to link this software with independent modules, and
+       to copy and distribute the resulting executable under terms of your
+       choice, provided that you also meet, for each linked independent
+       module, the terms and conditions of the license of that module.
+       An independent module is a module which is not derived from this
+       software.  The special exception does not apply to any modifications
+       of the software.
+    
+    Not withstanding the above, under no circumstances may you combine
+    this software in any way with any other Broadcom software provided
+    under a license other than the GPL, without Broadcom's express prior
+    written consent.
+    
+    :> 
+*/
+
+/************************************************************
+    proc_brcm.c
+
+    procfs entries like proc/shrinkmem, proc/brcm/pagewalk and proc/brcm/cstat
+
+     9/27/2006  Xi Wang      Created  
+   11/12/2008  Xi Wang      Updated for 2.6.21
+
+
+ ************************************************************/
+
+#include <generated/autoconf.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/mman.h>
+#include <linux/proc_fs.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/init.h>
+//#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
+#include <linux/times.h>
+#include <linux/profile.h>
+#include <linux/blkdev.h>
+#include <linux/hugetlb.h>
+#include <linux/jiffies.h>
+#include <linux/sysrq.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/div64.h>
+
+
+//extern int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len);
+int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
+{
+	int avail;
+
+	/* Legacy read_proc bookkeeping: signal EOF once all buffered
+	 * data (len bytes) fits inside the window [off, off + count). */
+	if (len <= off + count)
+		*eof = 1;
+
+	/* Hand back the slice starting at the caller's offset, clamped
+	 * to the requested count and never negative. */
+	*start = page + off;
+	avail = len - off;
+	if (avail > count)
+		avail = count;
+	if (avail < 0)
+		avail = 0;
+	return avail;
+}
+
+
+#ifdef CONFIG_BCM_CSTAT
+
+
+/* Sampling period for the cache-statistics timer (currently 1 s). */
+#define PERF_C_INTERVAL (HZ*1)
+/* Rates are reported in units of 1000 events ("k/sec"). */
+#define DIV 1000
+/* When defined, also report instruction/cycle counts (counters 2/3). */
+#define N_INST
+/* Counters count down from this value; delta = reset - current. */
+#define COUNTER_RESET_V 0xffffffffu
+
+/* Memory-mapped performance-counter block.
+ * NOTE(review): the base address is chip-specific — confirm per SoC. */
+#define BRCM_PERFREG_BASE 0xff420000
+typedef struct {
+    unsigned global_ctrl;
+    unsigned ctrl[2];
+    unsigned donottouch[1];
+    unsigned counters[4];
+} PerformanceControl;
+#define BRCM_PERF ((volatile PerformanceControl *) BRCM_PERFREG_BASE)
+
+/* Timer driving periodic sampling; interval in seconds (0 = stopped). */
+struct timer_list cachestat_timer;
+int cachestat_interval = 0;
+
+/* Forward declarations (unused in this file as shown). */
+void static brcm_perf_timer_func(unsigned long data);
+static int perf_counters_proc(char *page, char **start, off_t off, int count, int *eof, void *data);
+
+
+/*
+ * Periodic sampler: read the four down-counting performance counters,
+ * print per-second rates, then reprogram the counter block for the
+ * next sample.  Round-robins over two hardware threads (tp) and, per
+ * thread, two event sets (item 0: instruction cache, item 1: data
+ * cache); instruction/cycle counts are printed every tick (N_INST).
+ *
+ * NOTE(review): the CP0 $22 sel 2 accesses and the ctrl register
+ * values are chip-specific magic — confirm against the SoC manual.
+ */
+static void cachestat_timer_func(unsigned long data)
+{
+    static int tp = 0;
+    static int item = 0;
+    register unsigned long temp;
+    int i,ratio;
+    unsigned tdiv = cachestat_interval*DIV;
+    unsigned counters[4];
+    
+    /* delta = reset - current (counters count down); re-arm them */
+    for (i=0;i<4;i++) {
+        counters[i] = COUNTER_RESET_V - BRCM_PERF->counters[i];
+        BRCM_PERF->counters[i]=COUNTER_RESET_V;
+    }
+    
+    if (item == 0) {   
+        printk("TP %d instruction miss %uk/sec\n", tp, counters[0]/tdiv);
+        printk("TP %d instruction hit %uk/sec\n", tp, counters[1]/tdiv);
+        ratio = (counters[0]+counters[1])? counters[0]*1000/(counters[0]+counters[1]) : 0;
+        printk("TP %d miss ratio %u\n", tp, ratio);
+    }
+
+    if (item == 1) {   
+        printk("TP %d data miss %uk/sec\n", tp, counters[0]/tdiv);
+        printk("TP %d data hit %uk/sec\n", tp, counters[1]/tdiv);
+        ratio = (counters[0]+counters[1])? counters[0]*1000/(counters[0]+counters[1]) : 0;
+        printk("TP %d miss ratio %u\n", tp, ratio);
+    }
+
+#if defined(N_INST)
+    printk("TP %d number of instructions %uk/sec\n", tp, counters[2]/tdiv);
+    printk("TP %d number of cycles %uk/sec\n", tp, counters[3]/tdiv);
+#endif
+
+    /* advance the (tp, item) round-robin state for the next sample */
+    if (tp >= 1) {
+        printk("\n");
+        tp = 0;
+        if (item >= 1) {
+            item = 0;
+        }
+        else {            
+            item++;
+        }
+    }
+    else {
+        tp++;
+    }
+    
+    /* select which hardware thread the counter block observes next */
+    if (tp ==0) {
+        asm("mfc0 %0,$22,2" : "=d" (temp));
+        temp &= 0x3fffffff;
+        temp |= 0x00000000;
+        asm("mtc0 %0,$22,2" :: "d" (temp));
+    }
+    else {
+        asm("mfc0 %0,$22,2" : "=d" (temp));
+        temp &= 0x3fffffff;
+        temp |= 0x40000000;
+        asm("mtc0 %0,$22,2" :: "d" (temp));
+    }    
+
+    /* program the event selectors for the upcoming sample window */
+    if (item == 0) {
+        BRCM_PERF->global_ctrl = 0x0;
+        BRCM_PERF->global_ctrl = 0x80000018;
+        if (tp == 0) {
+            BRCM_PERF->ctrl[0] = 0x80188014;
+        }
+        else {
+            BRCM_PERF->ctrl[0] = 0xa018a014;
+        }
+    }
+    
+    if (item == 1) {
+        BRCM_PERF->global_ctrl = 0x0;
+        BRCM_PERF->global_ctrl = 0x80000011;
+        if (tp == 0) {
+            BRCM_PERF->ctrl[0] = 0x80288024;
+        }
+        else {
+            BRCM_PERF->ctrl[0] = 0xa028a024;
+        }
+    }
+
+#if defined(N_INST)
+    if (tp ==0) {
+        BRCM_PERF->ctrl[1] = 0x80488044;
+    }
+    else {
+        BRCM_PERF->ctrl[1] = 0xa048a044;
+    }
+#endif
+
+    /* schedule the next sample */
+    cachestat_timer.expires = jiffies+cachestat_interval*HZ;
+    add_timer(&cachestat_timer);
+}
+
+
+/* Reset the counters, arm the sampling timer (first fire in 1 s) and
+ * enable the counter block. */
+static void cachestat_start()
+{
+    int i;
+
+    printk("Starting cache performance counters..\n\n");
+    
+    init_timer(&cachestat_timer);
+    cachestat_timer.expires = jiffies+HZ;
+    cachestat_timer.data = 0;
+    cachestat_timer.function = cachestat_timer_func;
+
+    for (i=0;i<4;i++) {
+        BRCM_PERF->counters[i]=COUNTER_RESET_V;
+    }
+
+    BRCM_PERF->global_ctrl = 0x80000018;
+    BRCM_PERF->global_ctrl = 0x80000011;
+
+    add_timer(&cachestat_timer);
+}
+
+/* Stop sampling; del_timer_sync() waits for a running handler to end. */
+static void cachestat_stop()
+{
+    del_timer_sync(&cachestat_timer);
+    printk("Cache performance counting stopped..\n");
+}
+
+/* /proc/brcm/cstat read: report the current sampling interval (secs). */
+static int cachestat_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	int len=0;
+
+    len += sprintf(page, "%d\n", cachestat_interval);
+
+	return proc_calc_metrics(page, start, off, count, eof, len);
+
+}
+
+
+/*
+ * /proc/brcm/cstat write: "<n>" sets the sampling interval in seconds;
+ * non-zero (re)starts sampling, 0 stops it.
+ *
+ * Fix: reject count == sizeof(ibuf) as well — the NUL terminator below
+ * needs a spare byte, and the previous '>' test allowed ibuf[20], one
+ * element past the end of the buffer.
+ */
+static int cachestat_write_proc(struct file *file, const char *buf, unsigned long count, void *data)
+{
+    char ibuf[20];
+    int arg;
+
+    if (count < 1 || count >= sizeof(ibuf)) {
+        return -EFAULT;
+    }
+    if (copy_from_user(ibuf, buf, count)) {
+        return -EFAULT;
+    }
+    ibuf[count] = 0;
+
+    if (sscanf(ibuf, "%d\n", &arg) == 1) {
+        if (arg >= 0) {
+            if (arg && !cachestat_interval) {
+                /* transition stopped -> running */
+                cachestat_interval = arg;
+                cachestat_start();
+            }
+            else if (!arg && cachestat_interval) {
+                /* transition running -> stopped */
+                cachestat_interval = arg;
+                cachestat_stop();
+            }
+            else {
+                /* interval change (or still stopped) */
+                cachestat_interval = arg;
+            }
+        }
+        return count;
+    }
+    return -EFAULT;
+}
+
+#endif
+
+#if defined(CONFIG_BRCM_OLT_FPGA_RESTORE)
+/* These functions save and restore the state of the olt fpga */
+/*
+ * Reading /proc/brcm/olt_fpga_restore saves the FPGA's PCI config
+ * state; returns 0 on success, -EFAULT when the device is absent.
+ *
+ * Fix: pci_get_device() takes a reference on the returned device, and
+ * the previous code never dropped it — add the missing pci_dev_put().
+ */
+static int olt_fpga_restore_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	char *op;
+	struct pci_dev *dev;
+	op = page + off;
+	*op = '\0';
+	*eof = 1;
+	dev = pci_get_device(0x1172, 0x0004, NULL);
+	if (dev != NULL) {
+		printk("found fpga\n");
+		printk("save fpga config\n");
+		(void)pci_save_state(dev);
+		pci_dev_put(dev);	/* drop the ref from pci_get_device() */
+	} else {
+		printk("no fpga\n");
+        	return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Writing anything to /proc/brcm/olt_fpga_restore restores the FPGA's
+ * previously saved PCI config state; -EFAULT when the device is absent.
+ *
+ * Fixes: drop the reference taken by pci_get_device() (previously
+ * leaked), and straighten the misleading indentation of the restore
+ * statements (behaviour is unchanged).
+ */
+static int olt_fpga_restore_write_proc(struct file *file, const char *buf, unsigned long count, void *data)
+{
+	struct pci_dev *dev;
+	dev = pci_get_device(0x1172, 0x0004, NULL);
+	if (dev != NULL) {
+		printk("found fpga\n");
+	} else {
+		printk("no fpga\n");
+        	return -EFAULT;
+	}
+	printk("restore fpga config\n");
+	(void)pci_restore_state(dev);
+	pci_dev_put(dev);	/* drop the ref from pci_get_device() */
+	return count;
+}
+
+#endif
+
+#if 0
+/* Dead debug helper (compiled out): dumps CP0/CMT/RAC registers via a
+ * read_proc.  Kept for reference; see the matching #if 0 registration
+ * in proc_brcm_init(). */
+static int cp0regs_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+    register unsigned long temp;
+    unsigned long *mips_core_base = NULL;
+	int len=0;
+
+    len += sprintf(page+len, "Running on processor %d\n", smp_processor_id());
+
+    len += sprintf(page+len, "Status = %x\n", __read_32bit_c0_register($12, 0));
+    len += sprintf(page+len, "Cause = %x\n", __read_32bit_c0_register($13, 0));
+
+    len += sprintf(page+len, "BRCM Config_0 = %x\n", __read_32bit_c0_register($22, 0));
+    len += sprintf(page+len, "CMT Interrupt = %x\n", __read_32bit_c0_register($22, 1));
+    len += sprintf(page+len, "CMT Control = %x\n", __read_32bit_c0_register($22, 2));
+    len += sprintf(page+len, "CMT Local = %x\n", __read_32bit_c0_register($22, 3));
+    len += sprintf(page+len, "BRCM Config_1 = %x\n", __read_32bit_c0_register($22, 5));
+
+    temp = __read_32bit_c0_register($22, 6);
+    mips_core_base =(unsigned long *) (temp & 0xfffc0000);
+    len += sprintf(page+len, "Core Base = %x\n", temp);
+    len += sprintf(page+len, "RAC Config (%x) = %x\n", mips_core_base, *mips_core_base);
+    len += sprintf(page+len, "RAC Range (%x) = %x\n", mips_core_base+1, *(mips_core_base+1));
+    len += sprintf(page+len, "RAC Config1 (%x) = %x\n", mips_core_base+2, *(mips_core_base+2));
+    len += sprintf(page+len, "LMB (%x) = %x\n", mips_core_base+7, *(mips_core_base+7));
+    
+    len += sprintf(page+len, "\n");
+    
+    *(page+len) = 0;
+    len++;
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+#endif
+
+
+#ifdef CONFIG_BRCM_VMTOOLS
+
+
+#define ALLPAGES (1024*1024)
+
+extern int pagewalk(char *print);
+extern int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len);
+extern int meminfo_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data);
+
+
+/* /proc/brcm/shrinkmem: force page reclaim, then append meminfo. */
+static int shrinkmem_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+    int len;
+    
+    len = sprintf(page, "\nTry to free as many pages as possible (most of pages left will be data pages), then show memory info.\n\n");
+
+    shrink_all_memory(ALLPAGES);
+
+    len += meminfo_read_proc(page+len, start, off, count, eof, data);
+
+    return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+
+/* /proc/brcm/pagewalk: dump usage of every occupied page (via printk). */
+static int pagewalk_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+    int len;
+
+    printk("\nList all occupied memory pages in the system and show what they are used for. ");
+    printk("(output generated by printk - i.e. not for file operations)\n\n");
+
+    len = pagewalk(page);
+
+    return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+
+/* /proc/brcm/shrinkpagewalk: reclaim as much as possible, then run the
+ * pagewalk dump (output via printk). */
+static int shrinkpagewalk_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	int len;
+
+    printk("\nTry to free as many pages as possible (most of pages left will be data pages), then:\n");
+    printk("List all occupied memory pages in the system and show what they are used for. ");
+    printk("(output generated by printk - i.e. not for file operations)\n\n");
+
+    shrink_all_memory(ALLPAGES);
+
+    len = pagewalk(page);
+
+    return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
+#endif
+
+
+/****************************************************************************
+ * This section is for reporting kernel configs
+ ****************************************************************************/
+
+
+/* 0/1 snapshots of selected build-time config options, filled in by
+ * bcm_kernel_config_init() and exported so modules can query them. */
+int bcm_kernel_config_smp=0;
+int bcm_kernel_config_preempt=0;
+int bcm_kernel_config_debug_spinlock=0;
+int bcm_kernel_config_debug_mutexes=0;
+
+EXPORT_SYMBOL(bcm_kernel_config_smp);
+EXPORT_SYMBOL(bcm_kernel_config_preempt);
+EXPORT_SYMBOL(bcm_kernel_config_debug_spinlock);
+EXPORT_SYMBOL(bcm_kernel_config_debug_mutexes);
+
+/* seq_file show: emit one "CONFIG_*=<0|1>" line per recorded option. */
+static int bcm_kernel_config_show(struct seq_file *m, void *v)
+{
+	static const struct {
+		const char *name;
+		const int *value;
+	} cfg[] = {
+		{ "CONFIG_SMP",            &bcm_kernel_config_smp },
+		{ "CONFIG_PREEMPT",        &bcm_kernel_config_preempt },
+		{ "CONFIG_DEBUG_SPINLOCK", &bcm_kernel_config_debug_spinlock },
+		{ "CONFIG_DEBUG_MUTEXES",  &bcm_kernel_config_debug_mutexes },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg); i++)
+		seq_printf(m, "%s=%d\n", cfg[i].name, *cfg[i].value);
+	return 0;
+}
+
+/* Open handler for /proc/brcm/kernel_config (single-shot seq_file). */
+static int bcm_kernel_config_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bcm_kernel_config_show, NULL);
+}
+
+/* File operations for /proc/brcm/kernel_config. */
+static const struct file_operations proc_kernel_config_operations = {
+	.open		= bcm_kernel_config_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	/*
+	 * Must pair single_open() with single_release(): plain
+	 * seq_release() would leak the seq_operations that
+	 * single_open() allocates, once per open.
+	 */
+	.release	= single_release,
+};
+
+/* Record the build-time config flags, log them, and publish them via
+ * /proc/brcm/kernel_config.  Always returns 0. */
+static int __init bcm_kernel_config_init(struct proc_dir_entry *pentry)
+{
+
+#ifdef CONFIG_SMP
+	bcm_kernel_config_smp=1;
+#endif
+#ifdef CONFIG_PREEMPT
+	bcm_kernel_config_preempt=1;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	bcm_kernel_config_debug_spinlock=1;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+	bcm_kernel_config_debug_mutexes=1;
+#endif
+
+	printk(KERN_INFO "--Kernel Config--\n");
+	printk(KERN_INFO "  SMP=%d\n", bcm_kernel_config_smp);
+	printk(KERN_INFO "  PREEMPT=%d\n", bcm_kernel_config_preempt);
+	printk(KERN_INFO "  DEBUG_SPINLOCK=%d\n", bcm_kernel_config_debug_spinlock);
+	printk(KERN_INFO "  DEBUG_MUTEXES=%d\n", bcm_kernel_config_debug_mutexes);
+
+	proc_create("kernel_config", 0, pentry, &proc_kernel_config_operations);
+	return 0;
+}
+
+
+/****************************************************************************
+ * Entry point from proc_root_init
+ ****************************************************************************/
+/*
+ * Register all /proc/brcm entries under @pentry.
+ *
+ * Fix: create_proc_entry() can return NULL (e.g. on OOM); the previous
+ * code dereferenced the result unconditionally.
+ */
+void __init proc_brcm_init(struct proc_dir_entry *pentry)
+{
+
+    struct proc_dir_entry *entry __attribute ((unused));
+
+#ifdef CONFIG_BRCM_VMTOOLS
+    create_proc_read_entry("shrinkmem", 0, pentry, shrinkmem_read_proc, NULL);
+    create_proc_read_entry("pagewalk", 0, pentry, pagewalk_read_proc, NULL);
+    create_proc_read_entry("shrinkpagewalk", 0, pentry, shrinkpagewalk_read_proc, NULL);
+#endif
+
+#ifdef CONFIG_BCM_CSTAT
+    entry = create_proc_entry("cstat", 0, pentry);
+    if (entry) {
+        entry->read_proc = cachestat_read_proc;
+        entry->write_proc = cachestat_write_proc;
+    }
+#endif
+
+#if defined(CONFIG_BRCM_OLT_FPGA_RESTORE)
+    entry = create_proc_entry("olt_fpga_restore", 0, pentry);
+    if (entry) {
+        entry->read_proc = olt_fpga_restore_read_proc;
+        entry->write_proc = olt_fpga_restore_write_proc;
+    }
+#endif
+
+#if 0
+    create_proc_read_entry("cp0regs", 0, pentry, cp0regs_read_proc, NULL);
+#endif
+
+    bcm_kernel_config_init(pentry);
+}
+
diff --git a/fs/proc/root.c b/fs/proc/root.c
index eed44bfc85db7c6ea420233dd09c2e5b9b5f0350..09ba539469d97ac57bf4c2149196bece8f126f57 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -22,6 +22,12 @@
 
 #include "internal.h"
 
+#if defined(CONFIG_BCM_KF_PROC_BCM)
+struct proc_dir_entry *proc_brcm;
+extern void proc_brcm_init(struct proc_dir_entry *pentry);
+#endif
+
+
 static int proc_test_super(struct super_block *sb, void *data)
 {
 	return sb->s_fs_info == data;
@@ -38,12 +44,18 @@ static int proc_set_super(struct super_block *sb, void *data)
 }
 
 enum {
-	Opt_gid, Opt_hidepid, Opt_err,
+	Opt_gid, Opt_hidepid, Opt_err, 
+#if defined(CONFIG_BCM_KF_PROC_DEFAULT)
+	Opt_default
+#endif
 };
 
 static const match_table_t tokens = {
 	{Opt_hidepid, "hidepid=%u"},
 	{Opt_gid, "gid=%u"},
+#if defined(CONFIG_BCM_KF_PROC_DEFAULT)
+	{Opt_default, "defaults"},
+#endif
 	{Opt_err, NULL},
 };
 
@@ -78,6 +90,10 @@ static int proc_parse_options(char *options, struct pid_namespace *pid)
 			}
 			pid->hide_pid = option;
 			break;
+#if defined(CONFIG_BCM_KF_PROC_DEFAULT)
+		case Opt_default:
+			break;
+#endif
 		default:
 			pr_err("proc: unrecognized mount option \"%s\" "
 			       "or missing value\n", p);
@@ -189,6 +205,12 @@ void __init proc_root_init(void)
 	proc_device_tree_init();
 #endif
 	proc_mkdir("bus", NULL);
+
+#if defined(CONFIG_BCM_KF_PROC_BCM)
+	proc_brcm = proc_mkdir("brcm", NULL);
+	proc_brcm_init(proc_brcm);
+#endif
+
 	proc_sys_init();
 }
 
diff --git a/fs/splice.c b/fs/splice.c
index 5cac690f810348eb726e5ed65992abd634f2ad35..2f70fe1ad6a005dca3e85dad7a5f323ac8dc915e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -32,6 +32,14 @@
 #include <linux/gfp.h>
 #include <linux/socket.h>
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+#include <net/sock.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)
+#include <linux/bcm_m2mdma.h>
+#endif
+
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
  * a vm helper function, it's already simplified quite a bit by the
@@ -1303,6 +1311,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
 
 	return ret;
 }
+EXPORT_SYMBOL(do_splice_direct);
 
 static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
 			       struct pipe_inode_info *opipe,
@@ -1383,6 +1392,199 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 	return -EINVAL;
 }
 
+# if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+/* copy data directly from socket to file (pagecache) */
+
+static ssize_t do_splice_from_socket(struct file *file, struct socket *sock,
+				     loff_t __user *off_out, size_t count)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct inode	*inode = mapping->host;
+	loff_t pos, start_pos;
+	int count_tmp, copied_bytes;
+	int err = 0;
+	int idx;		
+	int cPagesAllocated = 0;
+	struct recvfile_ctl_blk *rv_cb;
+	struct kvec *iov;
+	struct msghdr msg;
+	long rcvtimeo;
+	int ret;
+
+	if(count > MAX_PAGES_PER_RECVFILE * PAGE_SIZE) {
+		printk(KERN_WARNING "%s: count(%u) exceeds maxinum\n", __func__, count);
+		return -EINVAL;
+	}
+
+	if(off_out){
+		if(copy_from_user(&start_pos, off_out, sizeof(loff_t)))
+			return -EFAULT;
+	} else{
+		return -EINVAL;
+	}
+
+	pos = start_pos;
+
+	rv_cb = kmalloc(MAX_PAGES_PER_RECVFILE * sizeof(struct recvfile_ctl_blk), GFP_KERNEL);
+	if(!rv_cb){
+		printk(KERN_WARNING "%s:memory allocation for rcv_cb failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	iov = kmalloc(MAX_PAGES_PER_RECVFILE * sizeof(struct kvec), GFP_KERNEL);
+	if(!iov){
+		kfree(rv_cb);
+		printk(KERN_WARNING "%s:memory allocation for iov failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&inode->i_mutex);
+
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+	if (err != 0 || count == 0)
+		goto done;
+
+	file_remove_suid(file);
+	file_update_time(file);	
+
+	count_tmp = count;
+	do {
+		unsigned long bytes;	/* Bytes to write to page */
+		unsigned long offset;	/* Offset into pagecache page */
+		struct page *pageP;
+		void *fsdata;
+
+		offset = (pos & (PAGE_CACHE_SIZE - 1));
+		bytes = PAGE_CACHE_SIZE - offset;
+		if (bytes > count_tmp)
+			bytes = count_tmp;
+
+		ret = mapping->a_ops->write_begin(file, mapping, pos, bytes,
+				AOP_FLAG_UNINTERRUPTIBLE,
+				&pageP, &fsdata);
+
+		if (unlikely(ret)) {
+			err = ret;
+			for(idx = 0; idx < cPagesAllocated; idx++) {
+				kunmap(rv_cb[idx].rv_page);
+				ret = mapping->a_ops->write_end(file, mapping,
+						rv_cb[idx].rv_pos,
+						rv_cb[idx].rv_count,
+						0,
+						rv_cb[idx].rv_page,
+						rv_cb[idx].rv_fsdata);
+			}
+			goto done;
+		}
+		rv_cb[cPagesAllocated].rv_page = pageP;
+		rv_cb[cPagesAllocated].rv_pos = pos;
+		rv_cb[cPagesAllocated].rv_count = bytes;
+		rv_cb[cPagesAllocated].rv_fsdata = fsdata;
+		iov[cPagesAllocated].iov_base = kmap(pageP) + offset;
+		iov[cPagesAllocated].iov_len = bytes;
+		cPagesAllocated++;
+		count_tmp -= bytes;
+		pos += bytes;
+	} while (count_tmp);
+
+	/* IOV is ready, receive the data from socket now */
+	msg.msg_name = NULL;
+	msg.msg_namelen = 0;
+	msg.msg_iov = (struct iovec *)&iov[0];
+	msg.msg_iovlen = cPagesAllocated ;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags = MSG_KERNSPACE;
+	rcvtimeo = sock->sk->sk_rcvtimeo;    
+	sock->sk->sk_rcvtimeo = 8 * HZ;
+
+	ret = kernel_recvmsg(sock, &msg, &iov[0], cPagesAllocated, count,
+			MSG_WAITALL | MSG_NOCATCHSIG);
+
+	sock->sk->sk_rcvtimeo = rcvtimeo;
+
+	if(unlikely(ret != count)){
+		if( ret < 0){
+			err = -EPIPE;
+			//err = ret;
+			count = 0;
+		}
+		else{
+			/* we have read some data from socket */
+			count = ret;
+		}
+	}
+	else
+	{
+		err = 0;
+	}
+
+	/* adjust the pagecache page lengths based on the amount of data copied;
+	 * truncate the pages which were not used
+	 */
+	count_tmp = count;
+
+	for(idx=0; idx < cPagesAllocated;idx++) {
+
+		if(count_tmp)
+		{
+			//flush_dcache_page(rv_cb[idx].rv_page);
+			copied_bytes = min(rv_cb[idx].rv_count, (unsigned int)count_tmp);
+			count_tmp -= copied_bytes;
+		}
+		else
+		{
+			copied_bytes = 0;
+		}
+
+		kunmap(rv_cb[idx].rv_page);
+		ret = mapping->a_ops->write_end(file, mapping,
+				rv_cb[idx].rv_pos,
+				rv_cb[idx].rv_count,
+				copied_bytes,
+				rv_cb[idx].rv_page,
+				rv_cb[idx].rv_fsdata);
+
+		if (unlikely(ret < 0))
+		{
+			printk(KERN_WARNING"%s: write_end fail,ret = %d\n", __func__, ret);
+		}
+
+#if !(defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA))
+		/* when M2M DMA is used there is no need to flush, as the cache is not used */
+		flush_dcache_page(rv_cb[idx].rv_page);
+#endif
+	}
+
+	if(count)
+	{
+		balance_dirty_pages_ratelimited_nr(mapping, cPagesAllocated);
+	}
+
+	/* fix pos based on returned bytes from recvmsg */
+	pos = start_pos + count;
+	if (off_out && copy_to_user(off_out, &pos, sizeof(loff_t)))
+		ret = -EFAULT;
+
+done:
+	current->backing_dev_info = NULL;
+	mutex_unlock(&inode->i_mutex);
+
+	kfree(rv_cb);
+	kfree(iov);
+
+	if(err)
+		return err;
+	else 
+		return count;
+}
+#endif
+
 /*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
@@ -1696,6 +1898,61 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
 		return 0;
 
 	error = -EBADF;
+    
+
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+    /* if input is socket & output is file try to copy from socket to file directly */ 
+	{
+		struct socket *sock = NULL;
+		/* check if fd_in is a socket */
+		sock = sockfd_lookup(fd_in, (int *)&error);
+		if (sock) {
+			out = NULL;
+			if (!sock->sk)
+				goto done;
+
+			out = fget_light(fd_out, &fput_out);
+
+			if (out) {
+
+				struct pipe_inode_info *opipe;
+
+				opipe = get_pipe_info(out);
+				if(opipe)
+				{
+					/* output is a pipe; go through regular processing */
+					printk("out_fd is a pipe\n");
+					goto regular_proc;
+				}
+
+				if (!(out->f_mode & FMODE_WRITE))
+					goto done;
+
+
+				if((out->f_op && out->f_op->splice_write))
+				{
+					error = do_splice_from_socket(out, sock, off_out, len);
+				}
+				else
+				{
+					/* splice from socket->file not supported */
+					error = -EBADF;
+				}
+			}       
+done:
+			if(out)
+				fput_light(out, fput_out);
+			fput(sock->file);
+			return error;
+
+regular_proc:
+			if(out)
+				fput_light(out, fput_out);
+			fput(sock->file);
+		}
+	}
+#endif
+
 	in = fget_light(fd_in, &fput_in);
 	if (in) {
 		if (in->f_mode & FMODE_READ) {
diff --git a/fs/squashfs/lzma/LzmaDecode.c b/fs/squashfs/lzma/LzmaDecode.c
new file mode 100644
index 0000000000000000000000000000000000000000..cb8345377ea8a0d14e305073620f3c8d80c66288
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaDecode.c
@@ -0,0 +1,584 @@
+/*
+  LzmaDecode.c
+  LZMA Decoder (optimized for Speed version)
+  
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this Code, expressly permits you to 
+  statically or dynamically link your Code (or bind by name) to the 
+  interfaces of this file without subjecting your linked Code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include "LzmaDecode.h"
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_READ_BYTE (*Buffer++)
+
+#define RC_INIT2 Code = 0; Range = 0xFFFFFFFF; \
+  { int i; for(i = 0; i < 5; i++) { RC_TEST; Code = (Code << 8) | RC_READ_BYTE; }}
+
+#ifdef _LZMA_IN_CB
+
+#define RC_TEST { if (Buffer == BufferLim) \
+  { SizeT size; int result = InCallback->Read(InCallback, &Buffer, &size); if (result != LZMA_RESULT_OK) return result; \
+  BufferLim = Buffer + size; if (size == 0) return LZMA_RESULT_DATA_ERROR; }}
+
+#define RC_INIT Buffer = BufferLim = 0; RC_INIT2
+
+#else
+
+#define RC_TEST { if (Buffer == BufferLim) return LZMA_RESULT_DATA_ERROR; }
+
+#define RC_INIT(buffer, bufferSize) Buffer = buffer; BufferLim = buffer + bufferSize; RC_INIT2
+ 
+#endif
+
+#define RC_NORMALIZE if (Range < kTopValue) { RC_TEST; Range <<= 8; Code = (Code << 8) | RC_READ_BYTE; }
+
+#define IfBit0(p) RC_NORMALIZE; bound = (Range >> kNumBitModelTotalBits) * *(p); if (Code < bound)
+#define UpdateBit0(p) Range = bound; *(p) += (kBitModelTotal - *(p)) >> kNumMoveBits;
+#define UpdateBit1(p) Range -= bound; Code -= bound; *(p) -= (*(p)) >> kNumMoveBits;
+
+#define RC_GET_BIT2(p, mi, A0, A1) IfBit0(p) \
+  { UpdateBit0(p); mi <<= 1; A0; } else \
+  { UpdateBit1(p); mi = (mi + mi) + 1; A1; } 
+  
+#define RC_GET_BIT(p, mi) RC_GET_BIT2(p, mi, ; , ;)               
+
+#define RangeDecoderBitTreeDecode(probs, numLevels, res) \
+  { int i = numLevels; res = 1; \
+  do { CProb *p = probs + res; RC_GET_BIT(p, res) } while(--i != 0); \
+  res -= (1 << numLevels); }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols) 
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size)
+{
+  unsigned char prop0;
+  if (size < LZMA_PROPERTIES_SIZE)
+    return LZMA_RESULT_DATA_ERROR;
+  prop0 = propsData[0];
+  if (prop0 >= (9 * 5 * 5))
+    return LZMA_RESULT_DATA_ERROR;
+  {
+    for (propsRes->pb = 0; prop0 >= (9 * 5); propsRes->pb++, prop0 -= (9 * 5));
+    for (propsRes->lp = 0; prop0 >= 9; propsRes->lp++, prop0 -= 9);
+    propsRes->lc = prop0;
+    /*
+    unsigned char remainder = (unsigned char)(prop0 / 9);
+    propsRes->lc = prop0 % 9;
+    propsRes->pb = remainder / 5;
+    propsRes->lp = remainder % 5;
+    */
+  }
+
+  #ifdef _LZMA_OUT_READ
+  {
+    int i;
+    propsRes->DictionarySize = 0;
+    for (i = 0; i < 4; i++)
+      propsRes->DictionarySize += (UInt32)(propsData[1 + i]) << (i * 8);
+    if (propsRes->DictionarySize == 0)
+      propsRes->DictionarySize = 1;
+  }
+  #endif
+  return LZMA_RESULT_OK;
+}
+
+#define kLzmaStreamWasFinishedId (-1)
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *InCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed)
+{
+  CProb *p = vs->Probs;
+  SizeT nowPos = 0;
+  Byte previousByte = 0;
+  UInt32 posStateMask = (1 << (vs->Properties.pb)) - 1;
+  UInt32 literalPosMask = (1 << (vs->Properties.lp)) - 1;
+  int lc = vs->Properties.lc;
+
+  #ifdef _LZMA_OUT_READ
+  
+  UInt32 Range = vs->Range;
+  UInt32 Code = vs->Code;
+  #ifdef _LZMA_IN_CB
+  const Byte *Buffer = vs->Buffer;
+  const Byte *BufferLim = vs->BufferLim;
+  #else
+  const Byte *Buffer = inStream;
+  const Byte *BufferLim = inStream + inSize;
+  #endif
+  int state = vs->State;
+  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+  int len = vs->RemainLen;
+  UInt32 globalPos = vs->GlobalPos;
+  UInt32 distanceLimit = vs->DistanceLimit;
+
+  Byte *dictionary = vs->Dictionary;
+  UInt32 dictionarySize = vs->Properties.DictionarySize;
+  UInt32 dictionaryPos = vs->DictionaryPos;
+
+  Byte tempDictionary[4];
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+  if (len == kLzmaStreamWasFinishedId)
+    return LZMA_RESULT_OK;
+
+  if (dictionarySize == 0)
+  {
+    dictionary = tempDictionary;
+    dictionarySize = 1;
+    tempDictionary[0] = vs->TempDictionary[0];
+  }
+
+  if (len == kLzmaNeedInitId)
+  {
+    {
+      UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+      UInt32 i;
+      for (i = 0; i < numProbs; i++)
+        p[i] = kBitModelTotal >> 1; 
+      rep0 = rep1 = rep2 = rep3 = 1;
+      state = 0;
+      globalPos = 0;
+      distanceLimit = 0;
+      dictionaryPos = 0;
+      dictionary[dictionarySize - 1] = 0;
+      #ifdef _LZMA_IN_CB
+      RC_INIT;
+      #else
+      RC_INIT(inStream, inSize);
+      #endif
+    }
+    len = 0;
+  }
+  while(len != 0 && nowPos < outSize)
+  {
+    UInt32 pos = dictionaryPos - rep0;
+    if (pos >= dictionarySize)
+      pos += dictionarySize;
+    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+    if (++dictionaryPos == dictionarySize)
+      dictionaryPos = 0;
+    len--;
+  }
+  if (dictionaryPos == 0)
+    previousByte = dictionary[dictionarySize - 1];
+  else
+    previousByte = dictionary[dictionaryPos - 1];
+
+  #else /* if !_LZMA_OUT_READ */
+
+  int state = 0;
+  UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+  int len = 0;
+  const Byte *Buffer;
+  const Byte *BufferLim;
+  UInt32 Range;
+  UInt32 Code;
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+
+  {
+    UInt32 i;
+    UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+    for (i = 0; i < numProbs; i++)
+      p[i] = kBitModelTotal >> 1;
+  }
+  
+  #ifdef _LZMA_IN_CB
+  RC_INIT;
+  #else
+  RC_INIT(inStream, inSize);
+  #endif
+
+  #endif /* _LZMA_OUT_READ */
+
+  while(nowPos < outSize)
+  {
+    CProb *prob;
+    UInt32 bound;
+    int posState = (int)(
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & posStateMask);
+
+    prob = p + IsMatch + (state << kNumPosBitsMax) + posState;
+    IfBit0(prob)
+    {
+      int symbol = 1;
+      UpdateBit0(prob)
+      prob = p + Literal + (LZMA_LIT_SIZE * 
+        (((
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+      if (state >= kNumLitStates)
+      {
+        int matchByte;
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        matchByte = dictionary[pos];
+        #else
+        matchByte = outStream[nowPos - rep0];
+        #endif
+        do
+        {
+          int bit;
+          CProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & 0x100);
+          probLit = prob + 0x100 + bit + symbol;
+          RC_GET_BIT2(probLit, symbol, if (bit != 0) break, if (bit == 0) break)
+        }
+        while (symbol < 0x100);
+      }
+      while (symbol < 0x100)
+      {
+        CProb *probLit = prob + symbol;
+        RC_GET_BIT(probLit, symbol)
+      }
+      previousByte = (Byte)symbol;
+
+      outStream[nowPos++] = previousByte;
+      #ifdef _LZMA_OUT_READ
+      if (distanceLimit < dictionarySize)
+        distanceLimit++;
+
+      dictionary[dictionaryPos] = previousByte;
+      if (++dictionaryPos == dictionarySize)
+        dictionaryPos = 0;
+      #endif
+      if (state < 4) state = 0;
+      else if (state < 10) state -= 3;
+      else state -= 6;
+    }
+    else             
+    {
+      UpdateBit1(prob);
+      prob = p + IsRep + state;
+      IfBit0(prob)
+      {
+        UpdateBit0(prob);
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        state = state < kNumLitStates ? 0 : 3;
+        prob = p + LenCoder;
+      }
+      else
+      {
+        UpdateBit1(prob);
+        prob = p + IsRepG0 + state;
+        IfBit0(prob)
+        {
+          UpdateBit0(prob);
+          prob = p + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IfBit0(prob)
+          {
+            #ifdef _LZMA_OUT_READ
+            UInt32 pos;
+            #endif
+            UpdateBit0(prob);
+            
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit == 0)
+            #else
+            if (nowPos == 0)
+            #endif
+              return LZMA_RESULT_DATA_ERROR;
+            
+            state = state < kNumLitStates ? 9 : 11;
+            #ifdef _LZMA_OUT_READ
+            pos = dictionaryPos - rep0;
+            if (pos >= dictionarySize)
+              pos += dictionarySize;
+            previousByte = dictionary[pos];
+            dictionary[dictionaryPos] = previousByte;
+            if (++dictionaryPos == dictionarySize)
+              dictionaryPos = 0;
+            #else
+            previousByte = outStream[nowPos - rep0];
+            #endif
+            outStream[nowPos++] = previousByte;
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit < dictionarySize)
+              distanceLimit++;
+            #endif
+
+            continue;
+          }
+          else
+          {
+            UpdateBit1(prob);
+          }
+        }
+        else
+        {
+          UInt32 distance;
+          UpdateBit1(prob);
+          prob = p + IsRepG1 + state;
+          IfBit0(prob)
+          {
+            UpdateBit0(prob);
+            distance = rep1;
+          }
+          else 
+          {
+            UpdateBit1(prob);
+            prob = p + IsRepG2 + state;
+            IfBit0(prob)
+            {
+              UpdateBit0(prob);
+              distance = rep2;
+            }
+            else
+            {
+              UpdateBit1(prob);
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        state = state < kNumLitStates ? 8 : 11;
+        prob = p + RepLenCoder;
+      }
+      {
+        int numBits, offset;
+        CProb *probLen = prob + LenChoice;
+        IfBit0(probLen)
+        {
+          UpdateBit0(probLen);
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          numBits = kLenNumLowBits;
+        }
+        else
+        {
+          UpdateBit1(probLen);
+          probLen = prob + LenChoice2;
+          IfBit0(probLen)
+          {
+            UpdateBit0(probLen);
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            numBits = kLenNumMidBits;
+          }
+          else
+          {
+            UpdateBit1(probLen);
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            numBits = kLenNumHighBits;
+          }
+        }
+        RangeDecoderBitTreeDecode(probLen, numBits, len);
+        len += offset;
+      }
+
+      if (state < 4)
+      {
+        int posSlot;
+        state += kNumLitStates;
+        prob = p + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << 
+            kNumPosSlotBits);
+        RangeDecoderBitTreeDecode(prob, kNumPosSlotBits, posSlot);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+          rep0 = (2 | ((UInt32)posSlot & 1));
+          if (posSlot < kEndPosModelIndex)
+          {
+            rep0 <<= numDirectBits;
+            prob = p + SpecPos + rep0 - posSlot - 1;
+          }
+          else
+          {
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              RC_NORMALIZE
+              Range >>= 1;
+              rep0 <<= 1;
+              if (Code >= Range)
+              {
+                Code -= Range;
+                rep0 |= 1;
+              }
+            }
+            while (--numDirectBits != 0);
+            prob = p + Align;
+            rep0 <<= kNumAlignBits;
+            numDirectBits = kNumAlignBits;
+          }
+          {
+            int i = 1;
+            int mi = 1;
+            do
+            {
+              CProb *prob3 = prob + mi;
+              RC_GET_BIT2(prob3, mi, ; , rep0 |= i);
+              i <<= 1;
+            }
+            while(--numDirectBits != 0);
+          }
+        }
+        else
+          rep0 = posSlot;
+        if (++rep0 == (UInt32)(0))
+        {
+          /* it's for stream version */
+          len = kLzmaStreamWasFinishedId;
+          break;
+        }
+      }
+
+      len += kMatchMinLen;
+      #ifdef _LZMA_OUT_READ
+      if (rep0 > distanceLimit) 
+      #else
+      if (rep0 > nowPos)
+      #endif
+        return LZMA_RESULT_DATA_ERROR;
+
+      #ifdef _LZMA_OUT_READ
+      if (dictionarySize - distanceLimit > (UInt32)len)
+        distanceLimit += len;
+      else
+        distanceLimit = dictionarySize;
+      #endif
+
+      do
+      {
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        previousByte = dictionary[pos];
+        dictionary[dictionaryPos] = previousByte;
+        if (++dictionaryPos == dictionarySize)
+          dictionaryPos = 0;
+        #else
+        previousByte = outStream[nowPos - rep0];
+        #endif
+        len--;
+        outStream[nowPos++] = previousByte;
+      }
+      while(len != 0 && nowPos < outSize);
+    }
+  }
+  RC_NORMALIZE;
+
+  #ifdef _LZMA_OUT_READ
+  vs->Range = Range;
+  vs->Code = Code;
+  vs->DictionaryPos = dictionaryPos;
+  vs->GlobalPos = globalPos + (UInt32)nowPos;
+  vs->DistanceLimit = distanceLimit;
+  vs->Reps[0] = rep0;
+  vs->Reps[1] = rep1;
+  vs->Reps[2] = rep2;
+  vs->Reps[3] = rep3;
+  vs->State = state;
+  vs->RemainLen = len;
+  vs->TempDictionary[0] = tempDictionary[0];
+  #endif
+
+  #ifdef _LZMA_IN_CB
+  vs->Buffer = Buffer;
+  vs->BufferLim = BufferLim;
+  #else
+  *inSizeProcessed = (SizeT)(Buffer - inStream);
+  #endif
+  *outSizeProcessed = nowPos;
+  return LZMA_RESULT_OK;
+}
diff --git a/fs/squashfs/lzma/LzmaDecode.h b/fs/squashfs/lzma/LzmaDecode.h
new file mode 100644
index 0000000000000000000000000000000000000000..2870eeb9c9c1150d05a8dafe4c0b9101cb55050a
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaDecode.h
@@ -0,0 +1,113 @@
+/* 
+  LzmaDecode.h
+  LZMA Decoder interface
+
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this code, expressly permits you to 
+  statically or dynamically link your code (or bind by name) to the 
+  interfaces of this file without subjecting your linked code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+#include "LzmaTypes.h"
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs, 
+   but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb UInt16
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+  int (*Read)(void *object, const unsigned char **buffer, SizeT *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_PROPERTIES_SIZE 5
+
+typedef struct _CLzmaProperties
+{
+  int lc;
+  int lp;
+  int pb;
+  #ifdef _LZMA_OUT_READ
+  UInt32 DictionarySize;
+  #endif
+}CLzmaProperties;
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size);
+
+#define LzmaGetNumProbs(Properties) (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((Properties)->lc + (Properties)->lp)))
+
+#define kLzmaNeedInitId (-2)
+
+typedef struct _CLzmaDecoderState
+{
+  CLzmaProperties Properties;
+  CProb *Probs;
+
+  #ifdef _LZMA_IN_CB
+  const unsigned char *Buffer;
+  const unsigned char *BufferLim;
+  #endif
+
+  #ifdef _LZMA_OUT_READ
+  unsigned char *Dictionary;
+  UInt32 Range;
+  UInt32 Code;
+  UInt32 DictionaryPos;
+  UInt32 GlobalPos;
+  UInt32 DistanceLimit;
+  UInt32 Reps[4];
+  int State;
+  int RemainLen;
+  unsigned char TempDictionary[4];
+  #endif
+} CLzmaDecoderState;
+
+#ifdef _LZMA_OUT_READ
+#define LzmaDecoderInit(vs) { (vs)->RemainLen = kLzmaNeedInitId; }
+#endif
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *inCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed);
+
+#endif
diff --git a/fs/squashfs/lzma/LzmaDecodeSize.c b/fs/squashfs/lzma/LzmaDecodeSize.c
new file mode 100644
index 0000000000000000000000000000000000000000..a3a5eb9d1f4382d37aec7db48ec3b8a4315c21dc
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaDecodeSize.c
@@ -0,0 +1,712 @@
+/*
+  LzmaDecodeSize.c
+  LZMA Decoder (optimized for Size version)
+  
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this code, expressly permits you to 
+  statically or dynamically link your code (or bind by name) to the 
+  interfaces of this file without subjecting your linked code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include "LzmaDecode.h"
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+typedef struct _CRangeDecoder
+{
+  const Byte *Buffer;
+  const Byte *BufferLim;
+  UInt32 Range;
+  UInt32 Code;
+  #ifdef _LZMA_IN_CB
+  ILzmaInCallback *InCallback;
+  int Result;
+  #endif
+  int ExtraBytes;
+} CRangeDecoder;
+
+Byte RangeDecoderReadByte(CRangeDecoder *rd)
+{
+  if (rd->Buffer == rd->BufferLim)
+  {
+    #ifdef _LZMA_IN_CB
+    SizeT size;
+    rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
+    rd->BufferLim = rd->Buffer + size;
+    if (size == 0)
+    #endif
+    {
+      rd->ExtraBytes = 1;
+      return 0xFF;
+    }
+  }
+  return (*rd->Buffer++);
+}
+
+/* #define ReadByte (*rd->Buffer++) */
+#define ReadByte (RangeDecoderReadByte(rd))
+
+void RangeDecoderInit(CRangeDecoder *rd
+  #ifndef _LZMA_IN_CB
+    , const Byte *stream, SizeT bufferSize
+  #endif
+    )
+{
+  int i;
+  #ifdef _LZMA_IN_CB
+  rd->Buffer = rd->BufferLim = 0;
+  #else
+  rd->Buffer = stream;
+  rd->BufferLim = stream + bufferSize;
+  #endif
+  rd->ExtraBytes = 0;
+  rd->Code = 0;
+  rd->Range = (0xFFFFFFFF);
+  for(i = 0; i < 5; i++)
+    rd->Code = (rd->Code << 8) | ReadByte;
+}
+
+#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;        
+#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
+#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
+
+UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
+{
+  RC_INIT_VAR
+  UInt32 result = 0;
+  int i;
+  for (i = numTotalBits; i != 0; i--)
+  {
+    /* UInt32 t; */
+    range >>= 1;
+
+    result <<= 1;
+    if (code >= range)
+    {
+      code -= range;
+      result |= 1;
+    }
+    /*
+    t = (code - range) >> 31;
+    t &= 1;
+    code -= range & (t - 1);
+    result = (result + result) | (1 - t);
+    */
+    RC_NORMALIZE
+  }
+  RC_FLUSH_VAR
+  return result;
+}
+
+int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
+{
+  UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
+  if (rd->Code < bound)
+  {
+    rd->Range = bound;
+    *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
+    if (rd->Range < kTopValue)
+    {
+      rd->Code = (rd->Code << 8) | ReadByte;
+      rd->Range <<= 8;
+    }
+    return 0;
+  }
+  else
+  {
+    rd->Range -= bound;
+    rd->Code -= bound;
+    *prob -= (*prob) >> kNumMoveBits;
+    if (rd->Range < kTopValue)
+    {
+      rd->Code = (rd->Code << 8) | ReadByte;
+      rd->Range <<= 8;
+    }
+    return 1;
+  }
+}
+
+#define RC_GET_BIT2(prob, mi, A0, A1) \
+  UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
+  if (code < bound) \
+    { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
+  else \
+    { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
+  RC_NORMALIZE
+
+#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)               
+
+int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+  int mi = 1;
+  int i;
+  #ifdef _LZMA_LOC_OPT
+  RC_INIT_VAR
+  #endif
+  for(i = numLevels; i != 0; i--)
+  {
+    #ifdef _LZMA_LOC_OPT
+    CProb *prob = probs + mi;
+    RC_GET_BIT(prob, mi)
+    #else
+    mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
+    #endif
+  }
+  #ifdef _LZMA_LOC_OPT
+  RC_FLUSH_VAR
+  #endif
+  return mi - (1 << numLevels);
+}
+
+int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+  int mi = 1;
+  int i;
+  int symbol = 0;
+  #ifdef _LZMA_LOC_OPT
+  RC_INIT_VAR
+  #endif
+  for(i = 0; i < numLevels; i++)
+  {
+    #ifdef _LZMA_LOC_OPT
+    CProb *prob = probs + mi;
+    RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
+    #else
+    int bit = RangeDecoderBitDecode(probs + mi, rd);
+    mi = mi + mi + bit;
+    symbol |= (bit << i);
+    #endif
+  }
+  #ifdef _LZMA_LOC_OPT
+  RC_FLUSH_VAR
+  #endif
+  return symbol;
+}
+
+Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
+{ 
+  int symbol = 1;
+  #ifdef _LZMA_LOC_OPT
+  RC_INIT_VAR
+  #endif
+  do
+  {
+    #ifdef _LZMA_LOC_OPT
+    CProb *prob = probs + symbol;
+    RC_GET_BIT(prob, symbol)
+    #else
+    symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+    #endif
+  }
+  while (symbol < 0x100);
+  #ifdef _LZMA_LOC_OPT
+  RC_FLUSH_VAR
+  #endif
+  return symbol;
+}
+
+/* Decode one literal byte in "matched" mode: while the decoded bits agree
+ * with the corresponding bits of matchByte (the byte at the last match
+ * distance), a second probability table (offset by 0x100 + matchBit<<8)
+ * is used; after the first mismatch decoding falls back to the plain
+ * literal table.  Returns the decoded byte. */
+Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
+{ 
+  int symbol = 1; /* sentinel-prefixed accumulator, done when >= 0x100 */
+  #ifdef _LZMA_LOC_OPT
+  RC_INIT_VAR
+  #endif
+  do
+  {
+    int bit;
+    int matchBit = (matchByte >> 7) & 1; /* next bit of the match byte */
+    matchByte <<= 1;
+    #ifdef _LZMA_LOC_OPT
+    {
+      CProb *prob = probs + 0x100 + (matchBit << 8) + symbol;
+      RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
+    }
+    #else
+    bit = RangeDecoderBitDecode(probs + 0x100 + (matchBit << 8) + symbol, rd);
+    symbol = (symbol << 1) | bit;
+    #endif
+    if (matchBit != bit)
+    {
+      /* Mismatch: finish the remaining bits with the normal literal table. */
+      while (symbol < 0x100)
+      {
+        #ifdef _LZMA_LOC_OPT
+        CProb *prob = probs + symbol;
+        RC_GET_BIT(prob, symbol)
+        #else
+        symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+        #endif
+      }
+      break;
+    }
+  }
+  while (symbol < 0x100);
+  #ifdef _LZMA_LOC_OPT
+  RC_FLUSH_VAR
+  #endif
+  return symbol;
+}
+
+/* Length coder: a length is encoded as one of three bit-trees (low/mid/high)
+ * selected by up to two choice bits; low and mid trees are additionally
+ * indexed by the position state. */
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+/* Offsets of the sub-coders inside one length-coder probability array. */
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols) 
+
+/* Decode a match length (relative to kMatchMinLen) from the length coder
+ * at p[], indexed by posState.  Choice bits select the low (0..7),
+ * mid (8..15) or high (16..271) length range. */
+int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
+{
+  if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
+    return RangeDecoderBitTreeDecode(p + LenLow +
+        (posState << kLenNumLowBits), kLenNumLowBits, rd);
+  if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
+    return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
+        (posState << kLenNumMidBits), kLenNumMidBits, rd);
+  return kLenNumLowSymbols + kLenNumMidSymbols + 
+      RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
+}
+
+/* LZMA state machine and probability-model layout.  The constants below
+ * are offsets into the single CProb array (vs->Probs); Literal must equal
+ * LZMA_BASE_SIZE from the public header, which is checked at compile time. */
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+/* Offsets of the individual probability groups inside vs->Probs. */
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+/* Compile-time assertion: layout must match the size published in the header. */
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+/* Parse the LZMA properties header: byte 0 packs lc/lp/pb as
+ * (pb * 5 + lp) * 9 + lc (so it must be < 9*5*5); with _LZMA_OUT_READ,
+ * bytes 1..4 hold the dictionary size, little-endian.
+ * Returns LZMA_RESULT_OK or LZMA_RESULT_DATA_ERROR. */
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size)
+{
+  unsigned char prop0;
+  if (size < LZMA_PROPERTIES_SIZE)
+    return LZMA_RESULT_DATA_ERROR;
+  prop0 = propsData[0];
+  if (prop0 >= (9 * 5 * 5))
+    return LZMA_RESULT_DATA_ERROR;
+  {
+    /* Unpack by repeated subtraction instead of division (see the
+       equivalent division form kept below for reference). */
+    for (propsRes->pb = 0; prop0 >= (9 * 5); propsRes->pb++, prop0 -= (9 * 5));
+    for (propsRes->lp = 0; prop0 >= 9; propsRes->lp++, prop0 -= 9);
+    propsRes->lc = prop0;
+    /*
+    unsigned char remainder = (unsigned char)(prop0 / 9);
+    propsRes->lc = prop0 % 9;
+    propsRes->pb = remainder / 5;
+    propsRes->lp = remainder % 5;
+    */
+  }
+
+  #ifdef _LZMA_OUT_READ
+  {
+    int i;
+    propsRes->DictionarySize = 0;
+    for (i = 0; i < 4; i++)
+      propsRes->DictionarySize += (UInt32)(propsData[1 + i]) << (i * 8);
+    if (propsRes->DictionarySize == 0)
+      propsRes->DictionarySize = 1; /* avoid a zero-sized dictionary */
+  }
+  #endif
+  return LZMA_RESULT_OK;
+}
+
+#define kLzmaStreamWasFinishedId (-1) /* RemainLen value: end-of-stream marker seen */
+
+/*
+ * Decode LZMA-compressed data into outStream (at most outSize bytes),
+ * reading input either from inStream/inSize or via InCallback
+ * (_LZMA_IN_CB).  With _LZMA_OUT_READ the full decoder state is
+ * restored from *vs on entry and written back on exit, so decoding can
+ * be resumed across calls using a circular dictionary buffer; without
+ * it the whole output must fit in one call and outStream itself serves
+ * as the match history.  Returns LZMA_RESULT_OK or
+ * LZMA_RESULT_DATA_ERROR; *inSizeProcessed / *outSizeProcessed report
+ * how much was consumed/produced.
+ */
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *InCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed)
+{
+  CProb *p = vs->Probs;
+  SizeT nowPos = 0;
+  Byte previousByte = 0;
+  UInt32 posStateMask = (1 << (vs->Properties.pb)) - 1;
+  UInt32 literalPosMask = (1 << (vs->Properties.lp)) - 1;
+  int lc = vs->Properties.lc;
+  CRangeDecoder rd;
+
+  #ifdef _LZMA_OUT_READ
+  
+  /* Resume from the state saved by the previous call. */
+  int state = vs->State;
+  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+  int len = vs->RemainLen;
+  UInt32 globalPos = vs->GlobalPos;
+  UInt32 distanceLimit = vs->DistanceLimit;
+
+  Byte *dictionary = vs->Dictionary;
+  UInt32 dictionarySize = vs->Properties.DictionarySize;
+  UInt32 dictionaryPos = vs->DictionaryPos;
+
+  Byte tempDictionary[4];
+
+  rd.Range = vs->Range;
+  rd.Code = vs->Code;
+  #ifdef _LZMA_IN_CB
+  rd.InCallback = InCallback;
+  rd.Buffer = vs->Buffer;
+  rd.BufferLim = vs->BufferLim;
+  #else
+  rd.Buffer = inStream;
+  rd.BufferLim = inStream + inSize;
+  #endif
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+  if (len == kLzmaStreamWasFinishedId)
+    return LZMA_RESULT_OK; /* stream already finished on an earlier call */
+
+  if (dictionarySize == 0)
+  {
+    /* DictionarySize == 0 means "no dictionary": use a 1-byte stand-in. */
+    dictionary = tempDictionary;
+    dictionarySize = 1;
+    tempDictionary[0] = vs->TempDictionary[0];
+  }
+
+  if (len == kLzmaNeedInitId)
+  {
+    /* First call: reset all probabilities and the range decoder. */
+    {
+      UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+      UInt32 i;
+      for (i = 0; i < numProbs; i++)
+        p[i] = kBitModelTotal >> 1; 
+      rep0 = rep1 = rep2 = rep3 = 1;
+      state = 0;
+      globalPos = 0;
+      distanceLimit = 0;
+      dictionaryPos = 0;
+      dictionary[dictionarySize - 1] = 0;
+      RangeDecoderInit(&rd
+          #ifndef _LZMA_IN_CB
+          , inStream, inSize
+          #endif
+          );
+      #ifdef _LZMA_IN_CB
+      if (rd.Result != LZMA_RESULT_OK)
+        return rd.Result;
+      #endif
+      if (rd.ExtraBytes != 0)
+        return LZMA_RESULT_DATA_ERROR;
+    }
+    len = 0;
+  }
+  /* Flush match bytes left over from the previous call (RemainLen > 0). */
+  while(len != 0 && nowPos < outSize)
+  {
+    UInt32 pos = dictionaryPos - rep0;
+    if (pos >= dictionarySize)
+      pos += dictionarySize; /* wrap around the circular dictionary */
+    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+    if (++dictionaryPos == dictionarySize)
+      dictionaryPos = 0;
+    len--;
+  }
+  if (dictionaryPos == 0)
+    previousByte = dictionary[dictionarySize - 1];
+  else
+    previousByte = dictionary[dictionaryPos - 1];
+
+  #ifdef _LZMA_IN_CB
+  rd.Result = LZMA_RESULT_OK;
+  #endif
+  rd.ExtraBytes = 0;
+
+  #else /* if !_LZMA_OUT_READ */
+
+  int state = 0;
+  UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+  int len = 0;
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+
+  {
+    UInt32 i;
+    UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+    for (i = 0; i < numProbs; i++)
+      p[i] = kBitModelTotal >> 1;
+  }
+  
+  #ifdef _LZMA_IN_CB
+  rd.InCallback = InCallback;
+  #endif
+  RangeDecoderInit(&rd
+      #ifndef _LZMA_IN_CB
+      , inStream, inSize
+      #endif
+      );
+
+  #ifdef _LZMA_IN_CB
+  if (rd.Result != LZMA_RESULT_OK)
+    return rd.Result;
+  #endif
+  if (rd.ExtraBytes != 0)
+    return LZMA_RESULT_DATA_ERROR;
+
+  #endif /* _LZMA_OUT_READ */
+
+
+  /* Main decode loop: each iteration emits one literal byte or one match. */
+  while(nowPos < outSize)
+  {
+    int posState = (int)(
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & posStateMask);
+    #ifdef _LZMA_IN_CB
+    if (rd.Result != LZMA_RESULT_OK)
+      return rd.Result;
+    #endif
+    if (rd.ExtraBytes != 0)
+      return LZMA_RESULT_DATA_ERROR; /* decoder read past end of input */
+    if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
+    {
+      /* Literal: pick the table from position and previous byte context. */
+      CProb *probs = p + Literal + (LZMA_LIT_SIZE * 
+        (((
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+      if (state >= kNumLitStates)
+      {
+        /* After a match: decode the literal against the match byte. */
+        Byte matchByte;
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        matchByte = dictionary[pos];
+        #else
+        matchByte = outStream[nowPos - rep0];
+        #endif
+        previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
+      }
+      else
+        previousByte = LzmaLiteralDecode(probs, &rd);
+      outStream[nowPos++] = previousByte;
+      #ifdef _LZMA_OUT_READ
+      if (distanceLimit < dictionarySize)
+        distanceLimit++;
+
+      dictionary[dictionaryPos] = previousByte;
+      if (++dictionaryPos == dictionarySize)
+        dictionaryPos = 0;
+      #endif
+      /* Literal state transition. */
+      if (state < 4) state = 0;
+      else if (state < 10) state -= 3;
+      else state -= 6;
+    }
+    else             
+    {
+      if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
+      {
+        /* Repeated-distance match (one of rep0..rep3). */
+        if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
+        {
+          if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
+          {
+            /* "Short rep": copy a single byte from distance rep0. */
+            #ifdef _LZMA_OUT_READ
+            UInt32 pos;
+            #endif
+      
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit == 0)
+            #else
+            if (nowPos == 0)
+            #endif
+              return LZMA_RESULT_DATA_ERROR;
+
+            state = state < 7 ? 9 : 11;
+            #ifdef _LZMA_OUT_READ
+            pos = dictionaryPos - rep0;
+            if (pos >= dictionarySize)
+              pos += dictionarySize;
+            previousByte = dictionary[pos];
+            dictionary[dictionaryPos] = previousByte;
+            if (++dictionaryPos == dictionarySize)
+              dictionaryPos = 0;
+            #else
+            previousByte = outStream[nowPos - rep0];
+            #endif
+            outStream[nowPos++] = previousByte;
+
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit < dictionarySize)
+              distanceLimit++;
+            #endif
+            continue;
+          }
+        }
+        else
+        {
+          /* Select rep1/rep2/rep3 and rotate it to the front. */
+          UInt32 distance;
+          if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
+            distance = rep1;
+          else 
+          {
+            if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
+              distance = rep2;
+            else
+            {
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
+        state = state < 7 ? 8 : 11;
+      }
+      else
+      {
+        /* New match: decode length, then distance from the pos-slot model. */
+        int posSlot;
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        state = state < 7 ? 7 : 10;
+        len = LzmaLenDecode(p + LenCoder, &rd, posState);
+        posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << 
+            kNumPosSlotBits), kNumPosSlotBits, &rd);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+          rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
+          if (posSlot < kEndPosModelIndex)
+          {
+            /* Small distances: low bits come from the SpecPos model. */
+            rep0 += RangeDecoderReverseBitTreeDecode(
+                p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
+          }
+          else
+          {
+            /* Large distances: middle bits are raw, low 4 from Align model. */
+            rep0 += RangeDecoderDecodeDirectBits(&rd, 
+                numDirectBits - kNumAlignBits) << kNumAlignBits;
+            rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
+          }
+        }
+        else
+          rep0 = posSlot;
+        if (++rep0 == (UInt32)(0))
+        {
+          /* it's for stream version */
+          len = kLzmaStreamWasFinishedId;
+          break;
+        }
+      }
+
+      len += kMatchMinLen;
+      #ifdef _LZMA_OUT_READ
+      if (rep0 > distanceLimit) 
+      #else
+      if (rep0 > nowPos)
+      #endif
+        return LZMA_RESULT_DATA_ERROR; /* distance reaches before stream start */
+
+      #ifdef _LZMA_OUT_READ
+      if (dictionarySize - distanceLimit > (UInt32)len)
+        distanceLimit += len;
+      else
+        distanceLimit = dictionarySize;
+      #endif
+
+      /* Copy the match; if output fills first, the rest of len is
+         carried over to the next call via vs->RemainLen. */
+      do
+      {
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        previousByte = dictionary[pos];
+        dictionary[dictionaryPos] = previousByte;
+        if (++dictionaryPos == dictionarySize)
+          dictionaryPos = 0;
+        #else
+        previousByte = outStream[nowPos - rep0];
+        #endif
+        len--;
+        outStream[nowPos++] = previousByte;
+      }
+      while(len != 0 && nowPos < outSize);
+    }
+  }
+
+
+  #ifdef _LZMA_OUT_READ
+  /* Save the decoder state so the next call can resume. */
+  vs->Range = rd.Range;
+  vs->Code = rd.Code;
+  vs->DictionaryPos = dictionaryPos;
+  vs->GlobalPos = globalPos + (UInt32)nowPos;
+  vs->DistanceLimit = distanceLimit;
+  vs->Reps[0] = rep0;
+  vs->Reps[1] = rep1;
+  vs->Reps[2] = rep2;
+  vs->Reps[3] = rep3;
+  vs->State = state;
+  vs->RemainLen = len;
+  vs->TempDictionary[0] = tempDictionary[0];
+  #endif
+
+  #ifdef _LZMA_IN_CB
+  vs->Buffer = rd.Buffer;
+  vs->BufferLim = rd.BufferLim;
+  #else
+  *inSizeProcessed = (SizeT)(rd.Buffer - inStream);
+  #endif
+  *outSizeProcessed = nowPos;
+  return LZMA_RESULT_OK;
+}
diff --git a/fs/squashfs/lzma/LzmaStateDecode.c b/fs/squashfs/lzma/LzmaStateDecode.c
new file mode 100644
index 0000000000000000000000000000000000000000..5dc8f0e2b4df9d9c195fed6d962c5bfa5a144033
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaStateDecode.c
@@ -0,0 +1,521 @@
+/*
+  LzmaStateDecode.c
+  LZMA Decoder (State version)
+  
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this Code, expressly permits you to 
+  statically or dynamically link your Code (or bind by name) to the 
+  interfaces of this file without subjecting your linked Code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include "LzmaStateDecode.h"
+
+/* Binary range coder primitives (macro form).  These expand in a scope
+ * that declares Code, Range, bound and Buffer as locals. */
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_READ_BYTE (*Buffer++)
+
+/* Initialize: skip 1 byte then load 4 code bytes (5 reads total). */
+#define RC_INIT Code = 0; Range = 0xFFFFFFFF; \
+  { int i; for(i = 0; i < 5; i++) { Code = (Code << 8) | RC_READ_BYTE; }}
+
+/* Renormalize when Range drops below 2^24 by shifting in one input byte. */
+#define RC_NORMALIZE if (Range < kTopValue) { Range <<= 8; Code = (Code << 8) | RC_READ_BYTE; }
+
+/* Decode one adaptive bit: IfBit0 tests for 0, UpdateBit0/1 commit the
+ * range split and move the probability toward the observed bit. */
+#define IfBit0(p) RC_NORMALIZE; bound = (Range >> kNumBitModelTotalBits) * *(p); if (Code < bound)
+#define UpdateBit0(p) Range = bound; *(p) += (kBitModelTotal - *(p)) >> kNumMoveBits;
+#define UpdateBit1(p) Range -= bound; Code -= bound; *(p) -= (*(p)) >> kNumMoveBits;
+
+/* Decode one tree bit into mi, running A0 on a 0 bit and A1 on a 1 bit. */
+#define RC_GET_BIT2(p, mi, A0, A1) IfBit0(p) \
+  { UpdateBit0(p); mi <<= 1; A0; } else \
+  { UpdateBit1(p); mi = (mi + mi) + 1; A1; } 
+  
+#define RC_GET_BIT(p, mi) RC_GET_BIT2(p, mi, ; , ;)               
+
+/* MSB-first bit-tree decode of numLevels bits into res. */
+#define RangeDecoderBitTreeDecode(probs, numLevels, res) \
+  { int i = numLevels; res = 1; \
+  do { CProb *p = probs + res; RC_GET_BIT(p, res) } while(--i != 0); \
+  res -= (1 << numLevels); }
+
+
+/* Length-coder and probability-model layout constants — same model as
+ * LzmaDecode.c; Literal must equal LZMA_BASE_SIZE (checked below). */
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols) 
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+/* Offsets of the probability groups inside vs->Probs. */
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+/* kRequiredInBufferSize = number of required input bytes for worst case: 
+   longest match with longest distance.
+   kLzmaInBufferSize must be larger than kRequiredInBufferSize 
+   23 bits = 2 (match select) + 10 (len) + 6 (distance) + 4(align) + 1 (RC_NORMALIZE)
+*/
+
+#define kRequiredInBufferSize ((23 * (kNumBitModelTotalBits - kNumMoveBits + 1) + 26 + 9) / 8)
+
+#define kLzmaStreamWasFinishedId (-1) /* RemainLen value: end-of-stream marker seen */
+
+/* Parse the 5-byte LZMA properties header: byte 0 packs lc/lp/pb as
+ * (pb * 5 + lp) * 9 + lc; bytes 1..4 hold the dictionary size,
+ * little-endian.  Returns LZMA_RESULT_OK or LZMA_RESULT_DATA_ERROR. */
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size)
+{
+  unsigned char prop0;
+  if (size < LZMA_PROPERTIES_SIZE)
+    return LZMA_RESULT_DATA_ERROR;
+  prop0 = propsData[0];
+  if (prop0 >= (9 * 5 * 5))
+    return LZMA_RESULT_DATA_ERROR;
+  {
+    /* Unpack by repeated subtraction (division-free form). */
+    for (propsRes->pb = 0; prop0 >= (9 * 5); propsRes->pb++, prop0 -= (9 * 5));
+    for (propsRes->lp = 0; prop0 >= 9; propsRes->lp++, prop0 -= 9);
+    propsRes->lc = prop0;
+    /*
+    unsigned char remainder = (unsigned char)(prop0 / 9);
+    propsRes->lc = prop0 % 9;
+    propsRes->pb = remainder / 5;
+    propsRes->lp = remainder % 5;
+    */
+  }
+
+  {
+    int i;
+    propsRes->DictionarySize = 0;
+    for (i = 0; i < 4; i++)
+      propsRes->DictionarySize += (UInt32)(propsData[1 + i]) << (i * 8);
+    if (propsRes->DictionarySize == 0)
+      propsRes->DictionarySize = 1; /* avoid a zero-sized dictionary */
+    return LZMA_RESULT_OK;
+  }
+}
+
+/*
+ * Buffered, resumable LZMA decoder (state version).  Input bytes are
+ * staged into vs->Buffer so that each decoding step always has the
+ * worst-case number of input bytes (kRequiredInBufferSize) available;
+ * full decoder state is saved back into *vs before returning, so the
+ * caller may feed the compressed stream in arbitrary-sized chunks.
+ * finishDecoding != 0 means no more input follows inStream[inSize-1].
+ * Returns LZMA_RESULT_OK or LZMA_RESULT_DATA_ERROR.
+ */
+int LzmaDecode(
+    CLzmaDecoderState *vs,
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed,
+    int finishDecoding)
+{
+  UInt32 Range = vs->Range;
+  UInt32 Code = vs->Code;
+
+  unsigned char *Buffer = vs->Buffer;
+  int BufferSize = vs->BufferSize; /* don't change it to unsigned int */
+  CProb *p = vs->Probs;
+
+  int state = vs->State;
+  unsigned char previousByte;
+  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+  SizeT nowPos = 0;
+  UInt32 posStateMask = (1 << (vs->Properties.pb)) - 1;
+  UInt32 literalPosMask = (1 << (vs->Properties.lp)) - 1;
+  int lc = vs->Properties.lc;
+  int len = vs->RemainLen;
+  UInt32 globalPos = vs->GlobalPos;
+  UInt32 distanceLimit = vs->DistanceLimit;
+
+  unsigned char *dictionary = vs->Dictionary;
+  UInt32 dictionarySize = vs->Properties.DictionarySize;
+  UInt32 dictionaryPos = vs->DictionaryPos;
+
+  unsigned char tempDictionary[4];
+
+  (*inSizeProcessed) = 0;
+  (*outSizeProcessed) = 0;
+  if (len == kLzmaStreamWasFinishedId)
+    return LZMA_RESULT_OK; /* stream already finished on an earlier call */
+
+  if (dictionarySize == 0)
+  {
+    /* DictionarySize == 0 means "no dictionary": use a 1-byte stand-in. */
+    dictionary = tempDictionary;
+    dictionarySize = 1;
+    tempDictionary[0] = vs->TempDictionary[0];
+  }
+
+  if (len == kLzmaNeedInitId)
+  {
+    /* First call: accumulate at least 5 bytes for RC_INIT, then reset
+       all probabilities and the range coder. */
+    while (inSize > 0 && BufferSize < kLzmaInBufferSize)
+    {
+      Buffer[BufferSize++] = *inStream++;
+      (*inSizeProcessed)++;
+      inSize--;
+    }
+    if (BufferSize < 5)
+    {
+      vs->BufferSize = BufferSize;
+      return finishDecoding ? LZMA_RESULT_DATA_ERROR : LZMA_RESULT_OK;
+    }
+    {
+      UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+      UInt32 i;
+      for (i = 0; i < numProbs; i++)
+        p[i] = kBitModelTotal >> 1; 
+      rep0 = rep1 = rep2 = rep3 = 1;
+      state = 0;
+      globalPos = 0;
+      distanceLimit = 0;
+      dictionaryPos = 0;
+      dictionary[dictionarySize - 1] = 0;
+      RC_INIT;
+    }
+    len = 0;
+  }
+  /* Flush match bytes left over from the previous call (RemainLen > 0). */
+  while(len != 0 && nowPos < outSize)
+  {
+    UInt32 pos = dictionaryPos - rep0;
+    if (pos >= dictionarySize)
+      pos += dictionarySize; /* wrap around the circular dictionary */
+    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+    if (++dictionaryPos == dictionarySize)
+      dictionaryPos = 0;
+    len--;
+  }
+  if (dictionaryPos == 0)
+    previousByte = dictionary[dictionarySize - 1];
+  else
+    previousByte = dictionary[dictionaryPos - 1];
+
+  /* Main decode loop: each iteration refills the staging buffer if
+     needed, then emits one literal byte or one match. */
+  for (;;)
+  {
+    int bufferPos = (int)(Buffer - vs->Buffer);
+    if (BufferSize - bufferPos < kRequiredInBufferSize)
+    {
+      /* Compact unread bytes to the front and top up from inStream. */
+      int i;
+      BufferSize -= bufferPos;
+      if (BufferSize < 0)
+        return LZMA_RESULT_DATA_ERROR;
+      for (i = 0; i < BufferSize; i++)
+        vs->Buffer[i] = Buffer[i];
+      Buffer = vs->Buffer;
+      while (inSize > 0 && BufferSize < kLzmaInBufferSize)
+      {
+        Buffer[BufferSize++] = *inStream++;
+        (*inSizeProcessed)++;
+        inSize--;
+      }
+      if (BufferSize < kRequiredInBufferSize && !finishDecoding)
+        break; /* need more input; resume on the next call */
+    }
+    if (nowPos >= outSize)
+      break;
+    {
+    CProb *prob;
+    UInt32 bound;
+    int posState = (int)((nowPos + globalPos) & posStateMask);
+
+    prob = p + IsMatch + (state << kNumPosBitsMax) + posState;
+    IfBit0(prob)
+    {
+      /* Literal: table chosen by position and previous-byte context. */
+      int symbol = 1;
+      UpdateBit0(prob)
+      prob = p + Literal + (LZMA_LIT_SIZE * 
+        ((((nowPos + globalPos)& literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+      if (state >= kNumLitStates)
+      {
+        /* After a match: decode against the byte at distance rep0. */
+        int matchByte;
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        matchByte = dictionary[pos];
+        do
+        {
+          int bit;
+          CProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & 0x100);
+          probLit = prob + 0x100 + bit + symbol;
+          RC_GET_BIT2(probLit, symbol, if (bit != 0) break, if (bit == 0) break)
+        }
+        while (symbol < 0x100);
+      }
+      while (symbol < 0x100)
+      {
+        CProb *probLit = prob + symbol;
+        RC_GET_BIT(probLit, symbol)
+      }
+      previousByte = (unsigned char)symbol;
+
+      outStream[nowPos++] = previousByte;
+      if (distanceLimit < dictionarySize)
+        distanceLimit++;
+
+      dictionary[dictionaryPos] = previousByte;
+      if (++dictionaryPos == dictionarySize)
+        dictionaryPos = 0;
+      /* Literal state transition. */
+      if (state < 4) state = 0;
+      else if (state < 10) state -= 3;
+      else state -= 6;
+    }
+    else             
+    {
+      UpdateBit1(prob);
+      prob = p + IsRep + state;
+      IfBit0(prob)
+      {
+        /* New match: shift the rep distances; distance decoded below. */
+        UpdateBit0(prob);
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        state = state < kNumLitStates ? 0 : 3;
+        prob = p + LenCoder;
+      }
+      else
+      {
+        /* Repeated-distance match (one of rep0..rep3). */
+        UpdateBit1(prob);
+        prob = p + IsRepG0 + state;
+        IfBit0(prob)
+        {
+          UpdateBit0(prob);
+          prob = p + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IfBit0(prob)
+          {
+            /* "Short rep": copy a single byte from distance rep0. */
+            UInt32 pos;
+            UpdateBit0(prob);
+            if (distanceLimit == 0)
+              return LZMA_RESULT_DATA_ERROR;
+            if (distanceLimit < dictionarySize)
+              distanceLimit++;
+            state = state < kNumLitStates ? 9 : 11;
+            pos = dictionaryPos - rep0;
+            if (pos >= dictionarySize)
+              pos += dictionarySize;
+            previousByte = dictionary[pos];
+            dictionary[dictionaryPos] = previousByte;
+            if (++dictionaryPos == dictionarySize)
+              dictionaryPos = 0;
+            outStream[nowPos++] = previousByte;
+            continue;
+          }
+          else
+          {
+            UpdateBit1(prob);
+          }
+        }
+        else
+        {
+          /* Select rep1/rep2/rep3 and rotate it to the front. */
+          UInt32 distance;
+          UpdateBit1(prob);
+          prob = p + IsRepG1 + state;
+          IfBit0(prob)
+          {
+            UpdateBit0(prob);
+            distance = rep1;
+          }
+          else 
+          {
+            UpdateBit1(prob);
+            prob = p + IsRepG2 + state;
+            IfBit0(prob)
+            {
+              UpdateBit0(prob);
+              distance = rep2;
+            }
+            else
+            {
+              UpdateBit1(prob);
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        state = state < kNumLitStates ? 8 : 11;
+        prob = p + RepLenCoder;
+      }
+      {
+        /* Decode the match length from the selected length coder. */
+        int numBits, offset;
+        CProb *probLen = prob + LenChoice;
+        IfBit0(probLen)
+        {
+          UpdateBit0(probLen);
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          numBits = kLenNumLowBits;
+        }
+        else
+        {
+          UpdateBit1(probLen);
+          probLen = prob + LenChoice2;
+          IfBit0(probLen)
+          {
+            UpdateBit0(probLen);
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            numBits = kLenNumMidBits;
+          }
+          else
+          {
+            UpdateBit1(probLen);
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            numBits = kLenNumHighBits;
+          }
+        }
+        RangeDecoderBitTreeDecode(probLen, numBits, len);
+        len += offset;
+      }
+
+      if (state < 4)
+      {
+        /* New match: decode the distance (pos slot + extra bits). */
+        int posSlot;
+        state += kNumLitStates;
+        prob = p + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << 
+            kNumPosSlotBits);
+        RangeDecoderBitTreeDecode(prob, kNumPosSlotBits, posSlot);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+          rep0 = (2 | ((UInt32)posSlot & 1));
+          if (posSlot < kEndPosModelIndex)
+          {
+            /* Small distances: low bits from the SpecPos model. */
+            rep0 <<= numDirectBits;
+            prob = p + SpecPos + rep0 - posSlot - 1;
+          }
+          else
+          {
+            /* Large distances: middle bits raw, low 4 from Align model. */
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              RC_NORMALIZE
+              Range >>= 1;
+              rep0 <<= 1;
+              if (Code >= Range)
+              {
+                Code -= Range;
+                rep0 |= 1;
+              }
+            }
+            while (--numDirectBits != 0);
+            prob = p + Align;
+            rep0 <<= kNumAlignBits;
+            numDirectBits = kNumAlignBits;
+          }
+          {
+            /* Reverse bit-tree decode of the remaining low bits. */
+            int i = 1;
+            int mi = 1;
+            do
+            {
+              CProb *prob3 = prob + mi;
+              RC_GET_BIT2(prob3, mi, ; , rep0 |= i);
+              i <<= 1;
+            }
+            while(--numDirectBits != 0);
+          }
+        }
+        else
+          rep0 = posSlot;
+        if (++rep0 == (UInt32)(0))
+        {
+          /* it's for stream version */
+          len = kLzmaStreamWasFinishedId;
+          break;
+        }
+      }
+
+      len += kMatchMinLen;
+      if (rep0 > distanceLimit) 
+        return LZMA_RESULT_DATA_ERROR; /* distance reaches before stream start */
+      if (dictionarySize - distanceLimit > (UInt32)len)
+        distanceLimit += len;
+      else
+        distanceLimit = dictionarySize;
+
+      /* Copy the match; leftover len resumes via vs->RemainLen. */
+      do
+      {
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        previousByte = dictionary[pos];
+        dictionary[dictionaryPos] = previousByte;
+        if (++dictionaryPos == dictionarySize)
+          dictionaryPos = 0;
+        len--;
+        outStream[nowPos++] = previousByte;
+      }
+      while(len != 0 && nowPos < outSize);
+    }
+    }
+  }
+  RC_NORMALIZE;
+
+  /* Save the decoder state so the next call can resume. */
+  BufferSize -= (int)(Buffer - vs->Buffer);
+  if (BufferSize < 0)
+    return LZMA_RESULT_DATA_ERROR;
+  {
+    int i;
+    for (i = 0; i < BufferSize; i++)
+      vs->Buffer[i] = Buffer[i];
+  }
+  vs->BufferSize = BufferSize;
+  vs->Range = Range;
+  vs->Code = Code;
+  vs->DictionaryPos = dictionaryPos;
+  vs->GlobalPos = (UInt32)(globalPos + nowPos);
+  vs->DistanceLimit = distanceLimit;
+  vs->Reps[0] = rep0;
+  vs->Reps[1] = rep1;
+  vs->Reps[2] = rep2;
+  vs->Reps[3] = rep3;
+  vs->State = state;
+  vs->RemainLen = len;
+  vs->TempDictionary[0] = tempDictionary[0];
+
+  (*outSizeProcessed) = nowPos;
+  return LZMA_RESULT_OK;
+}
diff --git a/fs/squashfs/lzma/LzmaStateDecode.h b/fs/squashfs/lzma/LzmaStateDecode.h
new file mode 100644
index 0000000000000000000000000000000000000000..26490d6156570aef2c095f52cfa9fd38185ccdb8
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaStateDecode.h
@@ -0,0 +1,96 @@
+/* 
+  LzmaStateDecode.h
+  LZMA Decoder interface (State version)
+
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this code, expressly permits you to 
+  statically or dynamically link your code (or bind by name) to the 
+  interfaces of this file without subjecting your linked code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMASTATEDECODE_H
+#define __LZMASTATEDECODE_H
+
+#include "LzmaTypes.h"
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs, 
+   but memory usage will be doubled in that case */
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb UInt16
+#endif
+
+/* Return codes for LzmaDecode / LzmaDecodeProperties. */
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+
+/* Probability-array sizing: base model plus per-context literal tables. */
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_PROPERTIES_SIZE 5
+
+/* Decoded form of the 5-byte LZMA properties header. */
+typedef struct _CLzmaProperties
+{
+  int lc; /* number of literal context bits */
+  int lp; /* number of literal position bits */
+  int pb; /* number of position bits */
+  UInt32 DictionarySize;
+}CLzmaProperties;
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size);
+
+/* Number of CProb slots the caller must allocate for vs->Probs. */
+#define LzmaGetNumProbs(lzmaProps) (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((lzmaProps)->lc + (lzmaProps)->lp)))
+
+#define kLzmaInBufferSize 64   /* don't change it. it must be larger than kRequiredInBufferSize */
+
+#define kLzmaNeedInitId (-2)
+
+/* Complete resumable decoder state; Probs and Dictionary are allocated
+   by the caller (see LzmaGetNumProbs and Properties.DictionarySize). */
+typedef struct _CLzmaDecoderState
+{
+  CLzmaProperties Properties;
+  CProb *Probs;
+  unsigned char *Dictionary;
+
+  unsigned char Buffer[kLzmaInBufferSize]; /* staging buffer for input bytes */
+  int BufferSize;
+
+  UInt32 Range;
+  UInt32 Code;
+  UInt32 DictionaryPos;
+  UInt32 GlobalPos;
+  UInt32 DistanceLimit;
+  UInt32 Reps[4];
+  int State;
+  int RemainLen;  /* -2: decoder needs internal initialization
+                     -1: stream was finished, 
+                      0: ok
+                    > 0: need to write RemainLen bytes as match Reps[0],
+                  */
+  unsigned char TempDictionary[4];  /* it's required when DictionarySize = 0 */
+} CLzmaDecoderState;
+
+/* Reset a decoder state so the next LzmaDecode call reinitializes it. */
+#define LzmaDecoderInit(vs) { (vs)->RemainLen = kLzmaNeedInitId; (vs)->BufferSize = 0; }
+
+/* LzmaDecode: decoding from input stream to output stream.
+  If finishDecoding != 0, then there are no more bytes in input stream
+  after inStream[inSize - 1]. */
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    const unsigned char *inStream, SizeT inSize,  SizeT *inSizeProcessed,
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed,
+    int finishDecoding);
+
+#endif
diff --git a/fs/squashfs/lzma/LzmaStateTest.c b/fs/squashfs/lzma/LzmaStateTest.c
new file mode 100644
index 0000000000000000000000000000000000000000..5df4e4386750f4e33a53eab1675e4b056f97b723
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaStateTest.c
@@ -0,0 +1,195 @@
+/* 
+LzmaStateTest.c
+Test application for LZMA Decoder (State version)
+
+This file written and distributed to public domain by Igor Pavlov.
+This file is part of LZMA SDK 4.26 (2005-08-02)
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "LzmaStateDecode.h"
+
+const char *kCantReadMessage = "Can not read input file";
+const char *kCantWriteMessage = "Can not write output file";
+const char *kCantAllocateMessage = "Can not allocate memory";
+
+#define kInBufferSize (1 << 15)
+#define kOutBufferSize (1 << 15)
+
+unsigned char g_InBuffer[kInBufferSize];
+unsigned char g_OutBuffer[kOutBufferSize];
+
+size_t MyReadFile(FILE *file, void *data, size_t size)
+  { return fread(data, 1, size, file); }
+
+int MyReadFileAndCheck(FILE *file, void *data, size_t size)
+  { return (MyReadFile(file, data, size) == size); }
+
+int PrintError(char *buffer, const char *message)
+{
+  sprintf(buffer + strlen(buffer), "\nError: ");
+  sprintf(buffer + strlen(buffer), "%s", message);
+  return 1;
+}
+
+int main3(FILE *inFile, FILE *outFile, char *rs)
+{
+  /* We use two 32-bit integers to construct 64-bit integer for file size.
+     You can remove outSizeHigh, if you don't need >= 4GB supporting,
+     or you can use UInt64 outSize, if your compiler supports 64-bit integers*/
+  UInt32 outSize = 0;
+  UInt32 outSizeHigh = 0; 
+  
+  int waitEOS = 1; 
+  /* waitEOS = 1, if there is no uncompressed size in headers, 
+   so decoder will wait EOS (End of Stream Marker) in compressed stream */
+
+  int i;
+  int res = 0;
+  CLzmaDecoderState state;  /* it's about 140 bytes structure, if int is 32-bit */
+  unsigned char properties[LZMA_PROPERTIES_SIZE];
+  SizeT inAvail = 0;
+  unsigned char *inBuffer = 0;
+
+  if (sizeof(UInt32) < 4)
+    return PrintError(rs, "LZMA decoder needs correct UInt32");
+
+  /* Read LZMA properties for compressed stream */
+
+  if (!MyReadFileAndCheck(inFile, properties, sizeof(properties)))
+    return PrintError(rs, kCantReadMessage);
+
+  /* Read uncompressed size */
+  
+  for (i = 0; i < 8; i++)
+  {
+    unsigned char b;
+    if (!MyReadFileAndCheck(inFile, &b, 1))
+      return PrintError(rs, kCantReadMessage);
+    if (b != 0xFF)
+      waitEOS = 0;
+    if (i < 4)
+      outSize += (UInt32)(b) << (i * 8);
+    else
+      outSizeHigh += (UInt32)(b) << ((i - 4) * 8);
+  }
+
+  /* Decode LZMA properties and allocate memory */
+  
+  if (LzmaDecodeProperties(&state.Properties, properties, LZMA_PROPERTIES_SIZE) != LZMA_RESULT_OK)
+    return PrintError(rs, "Incorrect stream properties");
+  state.Probs = (CProb *)malloc(LzmaGetNumProbs(&state.Properties) * sizeof(CProb));
+  if (state.Probs == 0)
+    return PrintError(rs, kCantAllocateMessage);
+  
+  if (state.Properties.DictionarySize == 0)
+    state.Dictionary = 0;
+  else
+  {
+    state.Dictionary = (unsigned char *)malloc(state.Properties.DictionarySize);
+    if (state.Dictionary == 0)
+    {
+      free(state.Probs);
+      return PrintError(rs, kCantAllocateMessage);
+    }
+  }
+  
+  /* Decompress */
+  
+  LzmaDecoderInit(&state);
+  
+  do
+  {
+    SizeT inProcessed, outProcessed;
+    int finishDecoding;
+    UInt32 outAvail = kOutBufferSize;
+    if (!waitEOS && outSizeHigh == 0 && outAvail > outSize)
+      outAvail = outSize;
+    if (inAvail == 0)
+    {
+      inAvail = (SizeT)MyReadFile(inFile, g_InBuffer, kInBufferSize);
+      inBuffer = g_InBuffer;
+    }
+    finishDecoding = (inAvail == 0);
+    res = LzmaDecode(&state,
+        inBuffer, inAvail, &inProcessed,
+        g_OutBuffer, outAvail, &outProcessed,
+        finishDecoding);
+    if (res != 0)
+    {
+      sprintf(rs + strlen(rs), "\nDecoding error = %d\n", res);
+      res = 1;
+      break;
+    }
+    inAvail -= inProcessed;
+    inBuffer += inProcessed;
+    
+    if (outFile != 0)  
+      if (fwrite(g_OutBuffer, 1, outProcessed, outFile) != outProcessed)
+      {
+        PrintError(rs, kCantWriteMessage);
+        res = 1;
+        break;
+      }
+      
+    if (outSize < outProcessed)
+      outSizeHigh--;
+    outSize -= (UInt32)outProcessed;
+    outSize &= 0xFFFFFFFF;
+
+    if (outProcessed == 0 && finishDecoding)
+    {
+      if (!waitEOS && (outSize != 0 || outSizeHigh != 0))
+        res = 1;
+      break;
+    }
+  }
+  while ((outSize != 0 && outSizeHigh == 0) || outSizeHigh != 0  || waitEOS);
+
+  free(state.Dictionary);
+  free(state.Probs);
+  return res;
+}
+
+int main2(int numArgs, const char *args[], char *rs)
+{
+  FILE *inFile = 0;
+  FILE *outFile = 0;
+  int res;
+
+  sprintf(rs + strlen(rs), "\nLZMA Decoder 4.26 Copyright (c) 1999-2005 Igor Pavlov  2005-08-02\n");
+  if (numArgs < 2 || numArgs > 3)
+  {
+    sprintf(rs + strlen(rs), "\nUsage:  lzmadec file.lzma [outFile]\n");
+    return 1;
+  }
+
+  inFile = fopen(args[1], "rb");
+  if (inFile == 0)
+    return PrintError(rs, "Can not open input file");
+
+  if (numArgs > 2)
+  {
+    outFile = fopen(args[2], "wb+");
+    if (outFile == 0)
+      return PrintError(rs, "Can not open output file");
+  }
+
+  res = main3(inFile, outFile, rs);
+
+  if (outFile != 0)
+    fclose(outFile);
+  fclose(inFile);
+  return res;
+}
+
+int main(int numArgs, const char *args[])
+{
+  char rs[800] = { 0 };
+  int res = main2(numArgs, args, rs);
+  printf("%s", rs);
+  return res;
+}
diff --git a/fs/squashfs/lzma/LzmaTest.c b/fs/squashfs/lzma/LzmaTest.c
new file mode 100644
index 0000000000000000000000000000000000000000..f95a753b1809d6a68baf40834a27c6cd4582a80f
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaTest.c
@@ -0,0 +1,342 @@
+/* 
+LzmaTest.c
+Test application for LZMA Decoder
+
+This file written and distributed to public domain by Igor Pavlov.
+This file is part of LZMA SDK 4.26 (2005-08-05)
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "LzmaDecode.h"
+
+const char *kCantReadMessage = "Can not read input file";
+const char *kCantWriteMessage = "Can not write output file";
+const char *kCantAllocateMessage = "Can not allocate memory";
+
+size_t MyReadFile(FILE *file, void *data, size_t size)
+{ 
+  if (size == 0)
+    return 0;
+  return fread(data, 1, size, file); 
+}
+
+int MyReadFileAndCheck(FILE *file, void *data, size_t size)
+  { return (MyReadFile(file, data, size) == size);} 
+
+size_t MyWriteFile(FILE *file, const void *data, size_t size)
+{ 
+  if (size == 0)
+    return 0;
+  return fwrite(data, 1, size, file); 
+}
+
+int MyWriteFileAndCheck(FILE *file, const void *data, size_t size)
+  { return (MyWriteFile(file, data, size) == size); }
+
+#ifdef _LZMA_IN_CB
+#define kInBufferSize (1 << 15)
+typedef struct _CBuffer
+{
+  ILzmaInCallback InCallback;
+  FILE *File;
+  unsigned char Buffer[kInBufferSize];
+} CBuffer;
+
+int LzmaReadCompressed(void *object, const unsigned char **buffer, SizeT *size)
+{
+  CBuffer *b = (CBuffer *)object;
+  *buffer = b->Buffer;
+  *size = (SizeT)MyReadFile(b->File, b->Buffer, kInBufferSize);
+  return LZMA_RESULT_OK;
+}
+CBuffer g_InBuffer;
+
+#endif
+
+#ifdef _LZMA_OUT_READ
+#define kOutBufferSize (1 << 15)
+unsigned char g_OutBuffer[kOutBufferSize];
+#endif
+
+int PrintError(char *buffer, const char *message)
+{
+  sprintf(buffer + strlen(buffer), "\nError: ");
+  sprintf(buffer + strlen(buffer), "%s", message);
+  return 1;
+}
+
+int main3(FILE *inFile, FILE *outFile, char *rs)
+{
+  /* We use two 32-bit integers to construct 64-bit integer for file size.
+     You can remove outSizeHigh, if you don't need >= 4GB supporting,
+     or you can use UInt64 outSize, if your compiler supports 64-bit integers*/
+  UInt32 outSize = 0;
+  UInt32 outSizeHigh = 0;
+  #ifndef _LZMA_OUT_READ
+  SizeT outSizeFull;
+  unsigned char *outStream;
+  #endif
+  
+  int waitEOS = 1; 
+  /* waitEOS = 1, if there is no uncompressed size in headers, 
+   so decoder will wait EOS (End of Stream Marker) in compressed stream */
+
+  #ifndef _LZMA_IN_CB
+  SizeT compressedSize;
+  unsigned char *inStream;
+  #endif
+
+  CLzmaDecoderState state;  /* it's about 24-80 bytes structure, if int is 32-bit */
+  unsigned char properties[LZMA_PROPERTIES_SIZE];
+
+  int res;
+
+  #ifdef _LZMA_IN_CB
+  g_InBuffer.File = inFile;
+  #endif
+
+  if (sizeof(UInt32) < 4)
+    return PrintError(rs, "LZMA decoder needs correct UInt32");
+
+  #ifndef _LZMA_IN_CB
+  {
+    long length;
+    fseek(inFile, 0, SEEK_END);
+    length = ftell(inFile);
+    fseek(inFile, 0, SEEK_SET);
+    if ((long)(SizeT)length != length)
+      return PrintError(rs, "Too big compressed stream");
+    compressedSize = (SizeT)(length - (LZMA_PROPERTIES_SIZE + 8));
+  }
+  #endif
+
+  /* Read LZMA properties for compressed stream */
+
+  if (!MyReadFileAndCheck(inFile, properties, sizeof(properties)))
+    return PrintError(rs, kCantReadMessage);
+
+  /* Read uncompressed size */
+
+  {
+    int i;
+    for (i = 0; i < 8; i++)
+    {
+      unsigned char b;
+      if (!MyReadFileAndCheck(inFile, &b, 1))
+        return PrintError(rs, kCantReadMessage);
+      if (b != 0xFF)
+        waitEOS = 0;
+      if (i < 4)
+        outSize += (UInt32)(b) << (i * 8);
+      else
+        outSizeHigh += (UInt32)(b) << ((i - 4) * 8);
+    }
+    
+    #ifndef _LZMA_OUT_READ
+    if (waitEOS)
+      return PrintError(rs, "Stream with EOS marker is not supported");
+    outSizeFull = (SizeT)outSize;
+    if (sizeof(SizeT) >= 8)
+      outSizeFull |= (((SizeT)outSizeHigh << 16) << 16);
+    else if (outSizeHigh != 0 || (UInt32)(SizeT)outSize != outSize)
+      return PrintError(rs, "Too big uncompressed stream");
+    #endif
+  }
+
+  /* Decode LZMA properties and allocate memory */
+  
+  if (LzmaDecodeProperties(&state.Properties, properties, LZMA_PROPERTIES_SIZE) != LZMA_RESULT_OK)
+    return PrintError(rs, "Incorrect stream properties");
+  state.Probs = (CProb *)malloc(LzmaGetNumProbs(&state.Properties) * sizeof(CProb));
+
+  #ifdef _LZMA_OUT_READ
+  if (state.Properties.DictionarySize == 0)
+    state.Dictionary = 0;
+  else
+    state.Dictionary = (unsigned char *)malloc(state.Properties.DictionarySize);
+  #else
+  if (outSizeFull == 0)
+    outStream = 0;
+  else
+    outStream = (unsigned char *)malloc(outSizeFull);
+  #endif
+
+  #ifndef _LZMA_IN_CB
+  if (compressedSize == 0)
+    inStream = 0;
+  else
+    inStream = (unsigned char *)malloc(compressedSize);
+  #endif
+
+  if (state.Probs == 0 
+    #ifdef _LZMA_OUT_READ
+    || (state.Dictionary == 0 && state.Properties.DictionarySize != 0)
+    #else
+    || (outStream == 0 && outSizeFull != 0)
+    #endif
+    #ifndef _LZMA_IN_CB
+    || (inStream == 0 && compressedSize != 0)
+    #endif
+    )
+  {
+    free(state.Probs);
+    #ifdef _LZMA_OUT_READ
+    free(state.Dictionary);
+    #else
+    free(outStream);
+    #endif
+    #ifndef _LZMA_IN_CB
+    free(inStream);
+    #endif
+    return PrintError(rs, kCantAllocateMessage);
+  }
+
+  /* Decompress */
+
+  #ifdef _LZMA_IN_CB
+  g_InBuffer.InCallback.Read = LzmaReadCompressed;
+  #else
+  if (!MyReadFileAndCheck(inFile, inStream, compressedSize))
+    return PrintError(rs, kCantReadMessage);
+  #endif
+
+  #ifdef _LZMA_OUT_READ
+  {
+    #ifndef _LZMA_IN_CB
+    SizeT inAvail = compressedSize;
+    const unsigned char *inBuffer = inStream;
+    #endif
+    LzmaDecoderInit(&state);
+    do
+    {
+      #ifndef _LZMA_IN_CB
+      SizeT inProcessed;
+      #endif
+      SizeT outProcessed;
+      SizeT outAvail = kOutBufferSize;
+      if (!waitEOS && outSizeHigh == 0 && outAvail > outSize)
+        outAvail = (SizeT)outSize;
+      res = LzmaDecode(&state,
+        #ifdef _LZMA_IN_CB
+        &g_InBuffer.InCallback,
+        #else
+        inBuffer, inAvail, &inProcessed,
+        #endif
+        g_OutBuffer, outAvail, &outProcessed);
+      if (res != 0)
+      {
+        sprintf(rs + strlen(rs), "\nDecoding error = %d\n", res);
+        res = 1;
+        break;
+      }
+      #ifndef _LZMA_IN_CB
+      inAvail -= inProcessed;
+      inBuffer += inProcessed;
+      #endif
+      
+      if (outFile != 0)  
+        if (!MyWriteFileAndCheck(outFile, g_OutBuffer, (size_t)outProcessed))
+        {
+          PrintError(rs, kCantWriteMessage);
+          res = 1;
+          break;
+        }
+        
+      if (outSize < outProcessed)
+        outSizeHigh--;
+      outSize -= (UInt32)outProcessed;
+      outSize &= 0xFFFFFFFF;
+        
+      if (outProcessed == 0)
+      {
+        if (!waitEOS && (outSize != 0 || outSizeHigh != 0))
+          res = 1;
+        break;
+      }
+    }
+    while ((outSize != 0 && outSizeHigh == 0) || outSizeHigh != 0  || waitEOS);
+  }
+
+  #else
+  {
+    #ifndef _LZMA_IN_CB
+    SizeT inProcessed;
+    #endif
+    SizeT outProcessed;
+    res = LzmaDecode(&state,
+      #ifdef _LZMA_IN_CB
+      &g_InBuffer.InCallback,
+      #else
+      inStream, compressedSize, &inProcessed,
+      #endif
+      outStream, outSizeFull, &outProcessed);
+    if (res != 0)
+    {
+      sprintf(rs + strlen(rs), "\nDecoding error = %d\n", res);
+      res = 1;
+    }
+    else if (outFile != 0)
+    {
+      if (!MyWriteFileAndCheck(outFile, outStream, (size_t)outProcessed))
+      {
+        PrintError(rs, kCantWriteMessage);
+        res = 1;
+      }
+    }
+  }
+  #endif
+
+  free(state.Probs);
+  #ifdef _LZMA_OUT_READ
+  free(state.Dictionary);
+  #else
+  free(outStream);
+  #endif
+  #ifndef _LZMA_IN_CB
+  free(inStream);
+  #endif
+  return res;
+}
+
+int main2(int numArgs, const char *args[], char *rs)
+{
+  FILE *inFile = 0;
+  FILE *outFile = 0;
+  int res;
+
+  sprintf(rs + strlen(rs), "\nLZMA Decoder 4.26 Copyright (c) 1999-2005 Igor Pavlov  2005-08-05\n");
+  if (numArgs < 2 || numArgs > 3)
+  {
+    sprintf(rs + strlen(rs), "\nUsage:  lzmadec file.lzma [outFile]\n");
+    return 1;
+  }
+
+  inFile = fopen(args[1], "rb");
+  if (inFile == 0)
+    return PrintError(rs, "Can not open input file");
+
+  if (numArgs > 2)
+  {
+    outFile = fopen(args[2], "wb+");
+    if (outFile == 0)
+      return PrintError(rs, "Can not open output file");
+  }
+
+  res = main3(inFile, outFile, rs);
+
+  if (outFile != 0)
+    fclose(outFile);
+  fclose(inFile);
+  return res;
+}
+
+int main(int numArgs, const char *args[])
+{
+  char rs[800] = { 0 };
+  int res = main2(numArgs, args, rs);
+  printf("%s", rs);
+  return res;
+}
diff --git a/fs/squashfs/lzma/LzmaTypes.h b/fs/squashfs/lzma/LzmaTypes.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c27290757c4f7cd959e9ed9e577ebfb4a883127
--- /dev/null
+++ b/fs/squashfs/lzma/LzmaTypes.h
@@ -0,0 +1,45 @@
+/* 
+LzmaTypes.h 
+
+Types for LZMA Decoder
+
+This file written and distributed to public domain by Igor Pavlov.
+This file is part of LZMA SDK 4.40 (2006-05-01)
+*/
+
+#ifndef __LZMATYPES_H
+#define __LZMATYPES_H
+
+#ifndef _7ZIP_BYTE_DEFINED
+#define _7ZIP_BYTE_DEFINED
+typedef unsigned char Byte;
+#endif 
+
+#ifndef _7ZIP_UINT16_DEFINED
+#define _7ZIP_UINT16_DEFINED
+typedef unsigned short UInt16;
+#endif 
+
+#ifndef _7ZIP_UINT32_DEFINED
+#define _7ZIP_UINT32_DEFINED
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef unsigned long UInt32;
+#else
+typedef unsigned int UInt32;
+#endif
+#endif 
+
+/* #define _LZMA_NO_SYSTEM_SIZE_T */
+/* You can use it, if you don't want <stddef.h> */
+
+#ifndef _7ZIP_SIZET_DEFINED
+#define _7ZIP_SIZET_DEFINED
+#ifdef _LZMA_NO_SYSTEM_SIZE_T
+typedef UInt32 SizeT;
+#else
+#include <stddef.h>
+typedef size_t SizeT;
+#endif
+#endif
+
+#endif
diff --git a/fs/squashfs/lzma/testflags.c b/fs/squashfs/lzma/testflags.c
new file mode 100644
index 0000000000000000000000000000000000000000..5bac6b10b29513f278274acead5c540dd4a71f1c
--- /dev/null
+++ b/fs/squashfs/lzma/testflags.c
@@ -0,0 +1,5 @@
+#ifdef _LZMA_PROB32
+-D_LZMA_PROB32
+#else
+-U_LZMA_PROB32
+#endif
diff --git a/fs/squashfs/lzma/uncomp.c b/fs/squashfs/lzma/uncomp.c
new file mode 100644
index 0000000000000000000000000000000000000000..dd44534d739f86fd0efd063e1483e59870ae3295
--- /dev/null
+++ b/fs/squashfs/lzma/uncomp.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2006-2008 Junjiro Okajima
+ * Copyright (C) 2006-2008 Tomas Matejicek, slax.org
+ *
+ * LICENSE follows the described one in lzma.txt.
+ */
+
+/* $Id: uncomp.c,v 1.7 2008-03-12 16:58:34 jro Exp $ */
+
+/* extract some parts from lzma443/C/7zip/Compress/LZMA_C/LzmaTest.c */
+
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <pthread.h>
+#define unlikely(x)		__builtin_expect(!!(x), 0)
+#define BUG_ON(x)		assert(!(x))
+/* sqlzma buffers are always larger than a page. true? */
+#define kmalloc(sz,gfp)		malloc(sz)
+#define kfree(p)		free(p)
+#define zlib_inflate(s, f)	inflate(s, f)
+#define zlib_inflateInit(s)	inflateInit(s)
+#define zlib_inflateReset(s)	inflateReset(s)
+#define zlib_inflateEnd(s)	inflateEnd(s)
+#else
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(b)	WARN_ON(b)
+#endif
+#endif /* __KERNEL__ */
+
+#include "../sqlzma.h"
+#include "LzmaDecode.h"
+
+static int LzmaUncompress(struct sqlzma_un *un)
+{
+	int err, i, ret;
+	SizeT outSize, inProcessed, outProcessed, srclen;
+	/* it's about 24-80 bytes structure, if int is 32-bit */
+	CLzmaDecoderState state;
+	unsigned char *dst, *src, a[8];
+	struct sized_buf *sbuf;
+
+	/* Decode LZMA properties and allocate memory */
+	err = -EINVAL;
+	src = (void *)un->un_cmbuf;
+	ret = LzmaDecodeProperties(&state.Properties, src,
+				   LZMA_PROPERTIES_SIZE);
+	src += LZMA_PROPERTIES_SIZE;
+	if (unlikely(ret != LZMA_RESULT_OK))
+		goto out;
+	i = LzmaGetNumProbs(&state.Properties);
+	if (unlikely(i <= 0))
+		i = 1;
+	i *= sizeof(CProb);
+	sbuf = un->un_a + SQUN_PROB;
+	if (unlikely(sbuf->sz < i)) {
+		if (sbuf->buf && sbuf->buf != un->un_prob)
+			kfree(sbuf->buf);
+#ifdef __KERNEL__
+		printk("%s:%d: %d --> %d\n", __func__, __LINE__, sbuf->sz, i);
+#else
+		printf("%d --> %d\n", sbuf->sz, i);
+#endif
+		err = -ENOMEM;
+		sbuf->sz = 0;
+		sbuf->buf = kmalloc(i, GFP_ATOMIC);
+		if (unlikely(!sbuf->buf))
+			goto out;
+		sbuf->sz = i;
+	}
+	state.Probs = (void *)sbuf->buf;
+
+	/* Read uncompressed size */
+	memcpy(a, src, sizeof(a));
+	src += sizeof(a);
+	outSize = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
+
+	err = -EINVAL;
+	dst = un->un_resbuf;
+	if (unlikely(!dst || outSize > un->un_reslen))
+		goto out;
+	un->un_reslen = outSize;
+	srclen = un->un_cmlen - (src - un->un_cmbuf);
+
+	/* Decompress */
+	err = LzmaDecode(&state, src, srclen, &inProcessed, dst, outSize,
+			 &outProcessed);
+	if (unlikely(err))
+		err = -EINVAL;
+
+ out:
+#ifndef __KERNEL__
+	if (err)
+		fprintf(stderr, "err %d\n", err);
+#endif
+	return err;
+}
+
+int sqlzma_un(struct sqlzma_un *un, struct sized_buf *src,
+	      struct sized_buf *dst)
+{
+	int err, by_lzma = 1;
+	if (un->un_lzma && is_lzma(*src->buf)) {
+		un->un_cmbuf = src->buf;
+		un->un_cmlen = src->sz;
+		un->un_resbuf = dst->buf;
+		un->un_reslen = dst->sz;
+
+		/* this library is thread-safe */
+		err = LzmaUncompress(un);
+		goto out;
+	}
+
+	by_lzma = 0;
+
+#if defined(CONFIG_MIPS_BRCM)
+    /* The FS is compressed with LZMA for BRCM: do not use zlib */
+    printk("%s: ZLIB decompression is not supported\n", __func__);
+    err = -EINVAL;
+#else
+	err = zlib_inflateReset(&un->un_stream);
+	if (unlikely(err != Z_OK))
+		goto out;
+	un->un_stream.next_in = src->buf;
+	un->un_stream.avail_in = src->sz;
+	un->un_stream.next_out = dst->buf;
+	un->un_stream.avail_out = dst->sz;
+	err = zlib_inflate(&un->un_stream, Z_FINISH);
+	if (err == Z_STREAM_END)
+		err = 0;
+#endif 
+ out:
+	if (unlikely(err)) {
+#ifdef __KERNEL__
+		WARN_ON_ONCE(1);
+#else
+		char a[64] = "ZLIB ";
+		if (by_lzma) {
+			strcpy(a, "LZMA ");
+#ifdef _REENTRANT
+			strerror_r(err, a + 5, sizeof(a) - 5);
+#else
+			strncat(a, strerror(err), sizeof(a) - strlen(a) - 1);
+#endif
+		} else
+			strncat(a, zError(err), sizeof(a) - strlen(a) - 1);
+		fprintf(stderr, "%s: %.*s\n", __func__, (int)sizeof(a), a);
+#endif
+	}
+	return err;
+}
+
+int sqlzma_init(struct sqlzma_un *un, int do_lzma, unsigned int res_sz)
+{
+	int err;
+
+	err = -ENOMEM;
+	un->un_lzma = do_lzma;
+	memset(un->un_a, 0, sizeof(un->un_a));
+	un->un_a[SQUN_PROB].buf = un->un_prob;
+	un->un_a[SQUN_PROB].sz = sizeof(un->un_prob);
+	if (res_sz) {
+		un->un_a[SQUN_RESULT].buf = kmalloc(res_sz, GFP_KERNEL);
+		if (unlikely(!un->un_a[SQUN_RESULT].buf))
+			return err;
+		un->un_a[SQUN_RESULT].sz = res_sz;
+	}
+
+	un->un_stream.next_in = NULL;
+	un->un_stream.avail_in = 0;
+#if defined(CONFIG_MIPS_BRCM)
+	/* The FS is compressed with LZMA for BRCM: do not use zlib */
+	un->un_stream.workspace = NULL;
+	err = 0;
+#else
+#ifdef __KERNEL__
+	un->un_stream.workspace = kmalloc(zlib_inflate_workspacesize(),
+					  GFP_KERNEL);
+	if (unlikely(!un->un_stream.workspace))
+		return err;
+#else
+	un->un_stream.opaque = NULL;
+	un->un_stream.zalloc = Z_NULL;
+	un->un_stream.zfree = Z_NULL;
+#endif
+	err = zlib_inflateInit(&un->un_stream);
+	if (unlikely(err == Z_MEM_ERROR))
+		return -ENOMEM;
+#endif
+	BUG_ON(err);
+	return err;
+}
+
+void sqlzma_fin(struct sqlzma_un *un)
+{
+	int i;
+	for (i = 0; i < SQUN_LAST; i++)
+		if (un->un_a[i].buf && un->un_a[i].buf != un->un_prob)
+			kfree(un->un_a[i].buf);
+#if defined(CONFIG_MIPS_BRCM)
+    /* The FS is compressed with LZMA for BRCM: do not use zlib */
+#else        
+	BUG_ON(zlib_inflateEnd(&un->un_stream) != Z_OK);
+#endif
+}
+
+#ifdef __KERNEL__
+EXPORT_SYMBOL(sqlzma_un);
+EXPORT_SYMBOL(sqlzma_init);
+EXPORT_SYMBOL(sqlzma_fin);
+
+#if 0
+static int __init sqlzma_init(void)
+{
+	return 0;
+}
+
+static void __exit sqlzma_exit(void)
+{
+}
+
+module_init(sqlzma_init);
+module_exit(sqlzma_exit);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junjiro Okajima <sfjro at users dot sf dot net>");
+MODULE_VERSION("$Id: uncomp.c,v 1.7 2008-03-12 16:58:34 jro Exp $");
+MODULE_DESCRIPTION("LZMA uncompress for squashfs. "
+		   "Some functions for squashfs to support LZMA and "
+		   "a tiny wrapper for LzmaDecode.c in LZMA SDK from www.7-zip.org.");
+#endif
diff --git a/fs/squashfs/sqlzma.h b/fs/squashfs/sqlzma.h
new file mode 100644
index 0000000000000000000000000000000000000000..876fad5db3aeac797c4798b4ac98e9b33c223b4f
--- /dev/null
+++ b/fs/squashfs/sqlzma.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2006-2008 Junjiro Okajima
+ * Copyright (C) 2006-2008 Tomas Matejicek, slax.org
+ *
+ * LICENSE follows the described one in lzma.
+ */
+
+/* $Id: sqlzma.h,v 1.20 2008-03-12 16:58:34 jro Exp $ */
+
+#ifndef __sqlzma_h__
+#define __sqlzma_h__
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#include <string.h>
+#include <zlib.h>
+#ifdef _REENTRANT
+#include <pthread.h>
+#endif
+#else
+#include <linux/zlib.h>
+#endif
+#define _7ZIP_BYTE_DEFINED
+
+/*
+ * detect the compression method automatically by the first byte of compressed
+ * data.
+ * according to rfc1950, the first byte of zlib compression must be 0x?8.
+ */
+#define is_lzma(c)	(c == 0x5d)
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __KERNEL__
+/* for mksquashfs only */
+struct sqlzma_opts {
+	unsigned int	try_lzma;
+	unsigned int 	dicsize;
+};
+int sqlzma_cm(struct sqlzma_opts *opts, z_stream *stream, Bytef *next_in,
+	      uInt avail_in, Bytef *next_out, uInt avail_out);
+#endif
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Three patterns for sqlzma uncompression. very dirty code.
+ * - kernel space (squashfs kernel module)
+ * - user space with pthread (mksquashfs)
+ * - user space without pthread (unsquashfs)
+ */
+
+struct sized_buf {
+	unsigned int	sz;
+	unsigned char	*buf;
+};
+
+enum {SQUN_PROB, SQUN_RESULT, SQUN_LAST};
+struct sqlzma_un {
+	int			un_lzma;
+	struct sized_buf	un_a[SQUN_LAST];
+	unsigned char           un_prob[31960]; /* unlzma 64KB - 1MB */
+	z_stream		un_stream;
+#define un_cmbuf	un_stream.next_in
+#define un_cmlen	un_stream.avail_in
+#define un_resbuf	un_stream.next_out
+#define un_resroom	un_stream.avail_out
+#define un_reslen	un_stream.total_out
+};
+
+int sqlzma_init(struct sqlzma_un *un, int do_lzma, unsigned int res_sz);
+int sqlzma_un(struct sqlzma_un *un, struct sized_buf *src,
+	      struct sized_buf *dst);
+void sqlzma_fin(struct sqlzma_un *un);
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+};
+#endif
+#endif
diff --git a/fs/squashfs/sqmagic.h b/fs/squashfs/sqmagic.h
new file mode 100644
index 0000000000000000000000000000000000000000..580624ffade0455253444249c0e11ead618e76db
--- /dev/null
+++ b/fs/squashfs/sqmagic.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2006 Junjiro Okajima
+ * Copyright (C) 2006 Tomas Matejicek, slax.org
+ *
+ * LICENSE must follow the one in squashfs.
+ */
+
+/* $Id: sqmagic.h,v 1.2 2006-11-27 03:54:58 jro Exp $ */
+
+#ifndef __sqmagic_h__
+#define __sqmagic_h__
+
+/* see SQUASHFS_MAGIC in squashfs_fs.h */
+#define SQUASHFS_MAGIC_LZMA		0x71736873
+#define SQUASHFS_MAGIC_LZMA_SWAP	0x73687371
+
+#endif
diff --git a/fs/squashfs/squashfs_swap.c b/fs/squashfs/squashfs_swap.c
new file mode 100644
index 0000000000000000000000000000000000000000..ed1c1cc8310a0f2b4645c91ab0e9d290d4ae69e7
--- /dev/null
+++ b/fs/squashfs/squashfs_swap.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_swap.c
+ */
+
+ void swap_le16(unsigned short *src, unsigned short *dest)
+{
+	unsigned char *s = (unsigned char *) src;
+	unsigned char *d = (unsigned char *) dest;
+
+	d[0] = s[1];
+	d[1] = s[0];
+}
+
+
+void swap_le32(unsigned int *src, unsigned int *dest)
+{
+	unsigned char *s = (unsigned char *) src;
+	unsigned char *d = (unsigned char *) dest;
+
+	d[0] = s[3];
+	d[1] = s[2];
+	d[2] = s[1];
+	d[3] = s[0];
+}
+
+
+void swap_le64(long long *src, long long *dest)
+{
+	unsigned char *s = (unsigned char *) src;
+	unsigned char *d = (unsigned char *) dest;
+
+	d[0] = s[7];
+	d[1] = s[6];
+	d[2] = s[5];
+	d[3] = s[4];
+	d[4] = s[3];
+	d[5] = s[2];
+	d[6] = s[1];
+	d[7] = s[0];
+}
+
+
diff --git a/fs/squashfs/squashfs_swap.h b/fs/squashfs/squashfs_swap.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1367669eee3edac34007b11b275e098348cdff8
--- /dev/null
+++ b/fs/squashfs/squashfs_swap.h
@@ -0,0 +1,64 @@
+#ifndef SQUASHFS_SWAP_H
+#define SQUASHFS_SWAP_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_swap.h
+ */
+
+/*
+ * macros to convert each structure from big endian to little endian
+ */
+
+extern void swap_le16(unsigned short *, unsigned short *);
+extern void swap_le32(unsigned int *, unsigned int *);
+extern void swap_le64(long long *, long long *);
+
+#define SWAP_LE16(d, s, field)	swap_le16(&((s)->field), &((d)->field))
+#define SWAP_LE32(d, s, field)	swap_le32(&((s)->field), &((d)->field))
+#define SWAP_LE64(d, s, field)	swap_le64(&((s)->field), &((d)->field))
+
+#define _SQUASHFS_SWAP_SUPER_BLOCK(s, d, SWAP_FUNC) {\
+	SWAP_FUNC##32(s, d, s_magic);\
+	SWAP_FUNC##32(s, d, inodes);\
+	SWAP_FUNC##32(s, d, mkfs_time);\
+	SWAP_FUNC##32(s, d, block_size);\
+	SWAP_FUNC##32(s, d, fragments);\
+	SWAP_FUNC##16(s, d, compression);\
+	SWAP_FUNC##16(s, d, block_log);\
+	SWAP_FUNC##16(s, d, flags);\
+	SWAP_FUNC##16(s, d, no_ids);\
+	SWAP_FUNC##16(s, d, s_major);\
+	SWAP_FUNC##16(s, d, s_minor);\
+	SWAP_FUNC##64(s, d, root_inode);\
+	SWAP_FUNC##64(s, d, bytes_used);\
+	SWAP_FUNC##64(s, d, id_table_start);\
+	SWAP_FUNC##64(s, d, xattr_table_start);\
+	SWAP_FUNC##64(s, d, inode_table_start);\
+	SWAP_FUNC##64(s, d, directory_table_start);\
+	SWAP_FUNC##64(s, d, fragment_table_start);\
+	SWAP_FUNC##64(s, d, lookup_table_start);\
+}
+
+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d)	\
+			_SQUASHFS_SWAP_SUPER_BLOCK(s, d, SWAP_LE)
+
+#endif
+
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index f8b0160da2da8be993d8fe93c246c9bdd1dd35aa..3b3667e75f0dbecffe255611d2529e36c53c6800 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -5,8 +5,10 @@ config UBIFS_FS
 	select CRYPTO if UBIFS_FS_ADVANCED_COMPR
 	select CRYPTO if UBIFS_FS_LZO
 	select CRYPTO if UBIFS_FS_ZLIB
+	select CRYPTO if UBIFS_FS_LZMA
 	select CRYPTO_LZO if UBIFS_FS_LZO
 	select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+	select CRYPTO_LZMA if UBIFS_FS_LZMA
 	depends on MTD_UBI
 	help
 	  UBIFS is a file system for flash devices which works on top of UBI.
@@ -42,6 +44,14 @@ config UBIFS_FS_ZLIB
 	help
 	  Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
 
+config UBIFS_FS_LZMA
+	bool "LZMA compression support" if UBIFS_FS_ADVANCED_COMPR
+	depends on UBIFS_FS
+	default y
+	help
+	   LZMA compressor is generally slower than zlib and lzo but compresses
+	   better. Say 'Y' if unsure.
+
 # Debugging-related stuff
 config UBIFS_FS_DEBUG
 	bool "Enable debugging support"
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index fb3b5c813a30da7fe5654435f941d39e950729c4..b2ca12fd593bf10c408c543e98df0f2afead8b7b 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -164,17 +164,12 @@ static int do_commit(struct ubifs_info *c)
 	if (err)
 		goto out;
 	err = ubifs_orphan_end_commit(c);
-	if (err)
-		goto out;
-	old_ltail_lnum = c->ltail_lnum;
-	err = ubifs_log_end_commit(c, new_ltail_lnum);
 	if (err)
 		goto out;
 	err = dbg_check_old_index(c, &zroot);
 	if (err)
 		goto out;
 
-	mutex_lock(&c->mst_mutex);
 	c->mst_node->cmt_no      = cpu_to_le64(c->cmt_no);
 	c->mst_node->log_lnum    = cpu_to_le32(new_ltail_lnum);
 	c->mst_node->root_lnum   = cpu_to_le32(zroot.lnum);
@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
 		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
 	else
 		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
-	err = ubifs_write_master(c);
-	mutex_unlock(&c->mst_mutex);
+
+	old_ltail_lnum = c->ltail_lnum;
+	err = ubifs_log_end_commit(c, new_ltail_lnum);
 	if (err)
 		goto out;
 
diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c
index 11e4132f314acfcabc610cf97f4c4dd9e52b768d..c1aba2c04e82365dd6158154217fc8044db16ca7 100644
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -71,6 +71,22 @@ static struct ubifs_compressor zlib_compr = {
 };
 #endif
 
+#ifdef CONFIG_UBIFS_FS_LZMA
+static DEFINE_MUTEX(lzma_mutex);
+
+static struct ubifs_compressor lzma_compr = {
+	.compr_type = UBIFS_COMPR_LZMA,
+	.comp_mutex = &lzma_mutex,
+	.name = "lzma",
+	.capi_name = "lzma",
+};
+#else
+static struct ubifs_compressor lzma_compr = {
+	.compr_type = UBIFS_COMPR_LZMA,
+	.name = "lzma",
+};
+#endif
+
 /* All UBIFS compressors */
 struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
 
@@ -233,9 +249,15 @@ int __init ubifs_compressors_init(void)
 	if (err)
 		goto out_lzo;
 
+	err = compr_init(&lzma_compr);
+	if (err)
+		goto out_zlib;
+
 	ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
 	return 0;
 
+out_zlib:
+	compr_exit(&zlib_compr);
 out_lzo:
 	compr_exit(&lzo_compr);
 	return err;
@@ -248,4 +270,5 @@ void ubifs_compressors_exit(void)
 {
 	compr_exit(&lzo_compr);
 	compr_exit(&zlib_compr);
+	compr_exit(&lzma_compr);
 }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ec9f1870ab7f083d1a9c5084ad3cbf8110259754..25c472bb8ab5c49ff0522ad5f0c541ca8f0e2480 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
 static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
 	int err, over = 0;
+	loff_t pos = file->f_pos;
 	struct qstr nm;
 	union ubifs_key key;
 	struct ubifs_dent_node *dent;
 	struct inode *dir = file->f_path.dentry->d_inode;
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
 
-	if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+	if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
 		/*
 		 * The directory was seek'ed to a senseless position or there
 		 * are no more entries.
 		 */
 		return 0;
 
+	if (file->f_version == 0) {
+		/*
+		 * The file was seek'ed, which means that @file->private_data
+		 * is now invalid. This may also be just the first
+		 * 'ubifs_readdir()' invocation, in which case
+		 * @file->private_data is NULL, and the below code is
+		 * basically a no-op.
+		 */
+		kfree(file->private_data);
+		file->private_data = NULL;
+	}
+
+	/*
+	 * 'generic_file_llseek()' unconditionally sets @file->f_version to
+	 * zero, and we use this for detecting whether the file was seek'ed.
+	 */
+	file->f_version = 1;
+
 	/* File positions 0 and 1 correspond to "." and ".." */
-	if (file->f_pos == 0) {
+	if (pos == 0) {
 		ubifs_assert(!file->private_data);
 		over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
 		if (over)
 			return 0;
-		file->f_pos = 1;
+		file->f_pos = pos = 1;
 	}
 
-	if (file->f_pos == 1) {
+	if (pos == 1) {
 		ubifs_assert(!file->private_data);
 		over = filldir(dirent, "..", 2, 1,
 			       parent_ino(file->f_path.dentry), DT_DIR);
@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 			goto out;
 		}
 
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 	}
 
@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 	if (!dent) {
 		/*
 		 * The directory was seek'ed to and is now readdir'ed.
-		 * Find the entry corresponding to @file->f_pos or the
-		 * closest one.
+		 * Find the entry corresponding to @pos or the closest one.
 		 */
-		dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+		dent_key_init_hash(c, &key, dir->i_ino, pos);
 		nm.name = NULL;
 		dent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(dent)) {
 			err = PTR_ERR(dent);
 			goto out;
 		}
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 	}
 
@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 			     ubifs_inode(dir)->creat_sqnum);
 
 		nm.len = le16_to_cpu(dent->nlen);
-		over = filldir(dirent, dent->name, nm.len, file->f_pos,
+		over = filldir(dirent, dent->name, nm.len, pos,
 			       le64_to_cpu(dent->inum),
 			       vfs_dent_type(dent->type));
 		if (over)
@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 		}
 
 		kfree(file->private_data);
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 		cond_resched();
+
+		if (file->f_version == 0)
+			/*
+			 * The file was seek'ed meanwhile, let's return and start
+			 * reading direntries from the new position on the next
+			 * invocation.
+			 */
+			return 0;
 	}
 
 out:
@@ -456,15 +482,13 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 
 	kfree(file->private_data);
 	file->private_data = NULL;
+	/* 2 is a special value indicating that there are no more direntries */
 	file->f_pos = 2;
 	return 0;
 }
 
-/* If a directory is seeked, we have to free saved readdir() state */
 static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
 {
-	kfree(file->private_data);
-	file->private_data = NULL;
 	return generic_file_llseek(file, offset, origin);
 }
 
@@ -977,7 +1001,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
 			.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
 	struct timespec time;
-	unsigned int saved_nlink;
+	unsigned int uninitialized_var(saved_nlink);
 
 	/*
 	 * Budget request settings: deletion direntry, new direntry, removing
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5c8f6dc1d28bfdc978a552dc0689b71d15256b7b..d0d5a3841c98960e99cd68cbdb01342d7808fa4b 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1575,6 +1575,12 @@ const struct inode_operations ubifs_symlink_inode_operations = {
 	.follow_link = ubifs_follow_link,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
+#ifdef CONFIG_UBIFS_FS_XATTR
+	.setxattr    = ubifs_setxattr,
+	.getxattr    = ubifs_getxattr,
+	.listxattr   = ubifs_listxattr,
+	.removexattr = ubifs_removexattr,
+#endif
 };
 
 const struct file_operations ubifs_file_operations = {
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 2559d174e0040a45578fcc2e7612522a08e99a2f..6407dec22368ce30779997b3d34520290a912f86 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -681,8 +681,21 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
 	if (!lprops) {
 		lprops = ubifs_fast_find_freeable(c);
 		if (!lprops) {
+#if 0 //!defined(CONFIG_BCM_KF_ANDROID) || !defined(CONFIG_BCM_ANDROID)
 			ubifs_assert(c->freeable_cnt == 0);
 			if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+#else
+			/*
+			 * The first condition means the following: go scan the
+			 * LPT if there are uncategorized lprops, which means
+			 * there may be freeable LEBs there (UBIFS does not
+			 * store the information about freeable LEBs in the
+			 * master node).
+			 */
+			if (c->in_a_category_cnt != c->main_lebs ||
+			    c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+				ubifs_assert(c->freeable_cnt == 0);
+#endif
 				lprops = scan_for_leb_for_idx(c);
 				if (IS_ERR(lprops)) {
 					err = PTR_ERR(lprops);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 2f438ab2e7a2730f2dfff7498ad8c3301f425479..b5de7b4686024a543ce6b363422a128dbecb62fa 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -553,7 +553,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
 	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
 		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
-	ubifs_assert(dir_ui->data_len == 0);
+	if (!xent)
+		ubifs_assert(dir_ui->data_len == 0);
 	ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));
 
 	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
@@ -573,6 +574,13 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 	aligned_dlen = ALIGN(dlen, 8);
 	aligned_ilen = ALIGN(ilen, 8);
 	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
+	if (xent) {
+		/*
+		 * Make sure to account for dir_ui->data_len in
+		 * length calculation in case there is extended attribute.
+		 */
+		len += dir_ui->data_len;
+	}
 	dent = kmalloc(len, GFP_NOFS);
 	if (!dent)
 		return -ENOMEM;
@@ -649,7 +657,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
 	ino_key_init(c, &ino_key, dir->i_ino);
 	ino_offs += aligned_ilen;
-	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
+	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
+			    UBIFS_INO_NODE_SZ + dir_ui->data_len);
 	if (err)
 		goto out_ro;
 
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index f9fd068d1ae0971ea7a27a821d65c3164b1ad26c..843beda25767b5318be0db2f888bfbfd583a4c1f 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -110,10 +110,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
 	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
 	t = (long long)c->ltail_lnum * c->leb_size;
 
-	if (h >= t)
+	if (h > t)
 		return c->log_bytes - h + t;
-	else
+	else if (h != t)
 		return t - h;
+	else if (c->lhead_lnum != c->ltail_lnum)
+		return 0;
+	else
+		return c->log_bytes;
 }
 
 /**
@@ -453,9 +457,9 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
  * @ltail_lnum: new log tail LEB number
  *
  * This function is called on when the commit operation was finished. It
- * moves log tail to new position and unmaps LEBs which contain obsolete data.
- * Returns zero in case of success and a negative error code in case of
- * failure.
+ * moves log tail to new position and updates the master node so that it stores
+ * the new log tail LEB number. Returns zero in case of success and a negative
+ * error code in case of failure.
  */
 int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
 {
@@ -483,7 +487,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
 	spin_unlock(&c->buds_lock);
 
 	err = dbg_check_bud_bytes(c);
+	if (err)
+		goto out;
 
+	err = ubifs_write_master(c);
+
+out:
 	mutex_unlock(&c->log_mutex);
 	return err;
 }
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index f8a181e647ccfb4bb12d08ce3dac798e6bc7822c..d1c78049c54f80e06cd1a1333bdf2ac85a1f2189 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -302,6 +302,10 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
 	}
 	lprops->flags &= ~LPROPS_CAT_MASK;
 	lprops->flags |= cat;
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	c->in_a_category_cnt += 1;
+	ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
+#endif
 }
 
 /**
@@ -334,6 +338,11 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
 	default:
 		ubifs_assert(0);
 	}
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+
+	c->in_a_category_cnt -= 1;
+	ubifs_assert(c->in_a_category_cnt >= 0);
+#endif
 }
 
 /**
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 278c2382e8c21a7daafaea11c63caf9663c30d52..bb9f48107815fb9b22825aca357fc358004853de 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
  * ubifs_write_master - write master node.
  * @c: UBIFS file-system description object
  *
- * This function writes the master node. The caller has to take the
- * @c->mst_mutex lock before calling this function. Returns zero in case of
- * success and a negative error code in case of failure. The master node is
- * written twice to enable recovery.
+ * This function writes the master node. Returns zero in case of success and a
+ * negative error code in case of failure. The master node is written twice to
+ * enable recovery.
  */
 int ubifs_write_master(struct ubifs_info *c)
 {
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index c542c73cfa3c5e83210badd78c8c1acdc719ab28..f9c90b552452e98bab5f15a27e62e7225e9ed281 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -130,13 +130,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 		else if (inum > o->inum)
 			p = p->rb_right;
 		else {
-			if (o->dnext) {
+			if (o->del) {
 				spin_unlock(&c->orphan_lock);
 				dbg_gen("deleted twice ino %lu",
 					(unsigned long)inum);
 				return;
 			}
 			if (o->cnext) {
+				o->del = 1;
 				o->dnext = c->orph_dnext;
 				c->orph_dnext = o;
 				spin_unlock(&c->orphan_lock);
@@ -447,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
 		orphan = dnext;
 		dnext = orphan->dnext;
 		ubifs_assert(!orphan->new);
+		ubifs_assert(orphan->del);
 		rb_erase(&orphan->rb, &c->orph_tree);
 		list_del(&orphan->list);
 		c->tot_orphans -= 1;
@@ -536,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
 	rb_link_node(&orphan->rb, parent, p);
 	rb_insert_color(&orphan->rb, &c->orph_tree);
 	list_add_tail(&orphan->list, &c->orph_list);
+	orphan->del = 1;
 	orphan->dnext = c->orph_dnext;
 	c->orph_dnext = orphan;
 	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 9e1d05666fed5d1ad03589995f8ecb3f78ec7b82..e0a7a764a903a7af615f8399101cf50b2cb0495e 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
 			freed = ubifs_destroy_tnc_subtree(znode);
 			atomic_long_sub(freed, &ubifs_clean_zn_cnt);
 			atomic_long_sub(freed, &c->clean_zn_cnt);
-			ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
 			total_freed += freed;
 			znode = zprev;
 		}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 76e4e0566ad6c260069a87f56c253f7e9a17727b..07abcc15fbe804162b9412e0fe9bc824cb38e4ab 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1047,6 +1047,8 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
 				c->mount_opts.compr_type = UBIFS_COMPR_LZO;
 			else if (!strcmp(name, "zlib"))
 				c->mount_opts.compr_type = UBIFS_COMPR_ZLIB;
+			else if (!strcmp(name, "lzma"))
+				c->mount_opts.compr_type = UBIFS_COMPR_LZMA;
 			else {
 				ubifs_err("unknown compressor \"%s\"", name);
 				kfree(name);
@@ -1582,6 +1584,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 	c->remounting_rw = 1;
 	c->ro_mount = 0;
 
+	if (c->space_fixup) {
+		err = ubifs_fixup_free_space(c);
+		if (err)
+			return err;
+	}
+
 	err = check_free_space(c);
 	if (err)
 		goto out;
@@ -1698,12 +1706,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 		err = dbg_check_space_info(c);
 	}
 
-	if (c->space_fixup) {
-		err = ubifs_fixup_free_space(c);
-		if (err)
-			goto out;
-	}
-
 	mutex_unlock(&c->umount_mutex);
 	return err;
 
@@ -1984,7 +1986,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
 		mutex_init(&c->lp_mutex);
 		mutex_init(&c->tnc_mutex);
 		mutex_init(&c->log_mutex);
-		mutex_init(&c->mst_mutex);
 		mutex_init(&c->umount_mutex);
 		mutex_init(&c->bu_mutex);
 		mutex_init(&c->write_reserve_mutex);
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h
index e24380cf46ed4b0506dcc2f2e9258f04e4367a28..8db7e8d3412eb0de1a02626a1d47150ee3f997b6 100644
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -332,12 +332,14 @@ enum {
  * UBIFS_COMPR_NONE: no compression
  * UBIFS_COMPR_LZO: LZO compression
  * UBIFS_COMPR_ZLIB: ZLIB compression
+ * UBIFS_COMPR_LZMA: LZMA compression
  * UBIFS_COMPR_TYPES_CNT: count of supported compression types
  */
 enum {
 	UBIFS_COMPR_NONE,
 	UBIFS_COMPR_LZO,
 	UBIFS_COMPR_ZLIB,
+	UBIFS_COMPR_LZMA,
 	UBIFS_COMPR_TYPES_CNT,
 };
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 93d59aceaaef99b454107b8f70d73c41cecf8a20..272604faf3036c8877fcfca1ff40a9dc2c02eb33 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -905,6 +905,7 @@ struct ubifs_budget_req {
  * @dnext: next orphan to delete
  * @inum: inode number
  * @new: %1 => added since the last commit, otherwise %0
+ * @del: %1 => delete pending, otherwise %0
  */
 struct ubifs_orphan {
 	struct rb_node rb;
@@ -914,6 +915,7 @@ struct ubifs_orphan {
 	struct ubifs_orphan *dnext;
 	ino_t inum;
 	int new;
+	unsigned del:1;
 };
 
 /**
@@ -1039,7 +1041,6 @@ struct ubifs_debug_info;
  *
  * @mst_node: master node
  * @mst_offs: offset of valid master node
- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
  *
  * @max_bu_buf_len: maximum bulk-read buffer length
  * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
@@ -1184,6 +1185,8 @@ struct ubifs_debug_info;
  * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
  * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
  * @freeable_cnt: number of freeable LEBs in @freeable_list
+ * @in_a_category_cnt: count of lprops which are in a certain category, which
+ *                     basically means that they were loaded from the flash
  *
  * @ltab_lnum: LEB number of LPT's own lprops table
  * @ltab_offs: offset of LPT's own lprops table
@@ -1277,7 +1280,6 @@ struct ubifs_info {
 
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
-	struct mutex mst_mutex;
 
 	int max_bu_buf_len;
 	struct mutex bu_mutex;
@@ -1413,6 +1415,9 @@ struct ubifs_info {
 	struct list_head freeable_list;
 	struct list_head frdi_idx_list;
 	int freeable_cnt;
+#if 1 //defined(CONFIG_BCM_KF_ANDROID) && defined(CONFIG_BCM_ANDROID)
+	int in_a_category_cnt;
+#endif
 
 	int ltab_lnum;
 	int ltab_offs;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 85b2722687545b0e3299d7bdc0275ebe9fb2d2e2..7fae53b862dfbdb60709ea4a15eb5a392f068594 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -144,7 +144,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 		goto out_free;
 	}
 	inode->i_size = ui->ui_size = size;
-	ui->data_len = size;
 
 	mutex_lock(&host_ui->ui_mutex);
 	host->i_ctime = ubifs_current_time(host);
@@ -152,6 +151,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
 	host_ui->xattr_size += CALC_XATTR_BYTES(size);
 	host_ui->xattr_names += nm->len;
+	ui->data_len = size;
 
 	err = ubifs_jnl_update(c, host, nm, inode, 0, 1);
 	if (err)
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..128310368e45797a74d3ac11b62ae8eaab490e63
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,189 @@
+if BCM_KF_ANDROID
+#
+# yaffs file system configurations
+#
+
+config YAFFS_FS
+	tristate "yaffs2 file system support"
+	default n
+	depends on MTD_BLOCK
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	select YAFFS_YAFFS1
+	select YAFFS_YAFFS2
+	help
+	  yaffs2, or Yet Another Flash File System, is a file system
+	  optimised for NAND Flash chips.
+
+	  To compile the yaffs2 file system support as a module, choose M
+	  here: the module will be called yaffs2.
+
+	  If unsure, say N.
+
+	  Further information on yaffs2 is available at
+	  <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+	bool "512 byte / page devices"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	  Enable yaffs1 support -- yaffs for 512 byte / page devices
+
+	  Not needed for 2K-page devices.
+
+	  If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+	bool "Use older-style on-NAND data format with pageStatus byte"
+	depends on YAFFS_YAFFS1
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+
+	  Older-style on-NAND data format has a "pageStatus" byte to record
+	  chunk/page state.  This byte is zero when the page is discarded.
+	  Choose this option if you have existing on-NAND data using this
+	  format that you need to continue to support.  New data written
+	  also uses the older-style format.  Note: Use of this option
+	  generally requires that MTD's oob layout be adjusted to use the
+	  older-style format.  See notes on tags formats and MTD versions
+	  in yaffs_mtdif1.c.
+
+	  If unsure, say N.
+
+config YAFFS_DOES_ECC
+	bool "Lets yaffs do its own ECC"
+	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  This enables yaffs to use its own ECC functions instead of using
+	  the ones from the generic MTD-NAND driver.
+
+	  If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  This makes yaffs_ecc.c use the same ecc byte order as Steven
+	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
+	  order as SmartMedia.
+
+	  If unsure, say N.
+
+config YAFFS_YAFFS2
+	bool "2048 byte (or larger) / page devices"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	  Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
+
+	  If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+	bool "Autoselect yaffs2 format"
+	depends on YAFFS_YAFFS2
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	  Without this, you need to explicitly use yaffs2 as the file
+	  system type. With this, you can say "yaffs" and yaffs or yaffs2
+	  will be used depending on the device page size (yaffs on
+	  512-byte page devices, yaffs2 on 2K page devices).
+
+	  If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+	bool "Disable yaffs from doing ECC on tags by default"
+	depends on YAFFS_FS && YAFFS_YAFFS2
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  This defaults yaffs to using its own ECC calculations on tags instead of
+	  just relying on the MTD.
+	  This behavior can also be overridden with tags_ecc_on and
+	  tags_ecc_off mount options.
+
+	  If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	bool "Force chunk erase check"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  Normally yaffs only checks chunks before writing until an erased
+	  chunk is found. This helps to detect any partially written
+	  chunks that might have happened due to power loss.
+
+	  Enabling this forces on the test that chunks are erased in flash
+	  before writing to them. This takes more time but is potentially
+	  a bit more secure.
+
+	  Suggest setting Y during development and ironing out driver
+	  issues etc. Suggest setting to N if you want faster writing.
+
+	  If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+	bool "Empty lost and found on boot"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  If this is enabled then the contents of lost and found is
+	  automatically dumped at mount.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+	bool "Disable yaffs2 block refreshing"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	 If this is set, then block refreshing is disabled.
+	 Block refreshing infrequently refreshes the oldest block in
+	 a yaffs2 file system. This mechanism helps to refresh flash to
+	 mitigate against data loss. This is particularly useful for MLC.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+	bool "Disable yaffs2 background processing"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	 If this is set, then background processing is disabled.
+	 Background processing makes many foreground activities faster.
+
+	 If unsure, say N.
+
+config YAFFS_DISABLE_BAD_BLOCK_MARKING
+	bool "Disable yaffs2 bad block marking"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	 Useful during early flash bring up to prevent problems causing
+	 lots of bad block marking.
+
+	 If unsure, say N.
+
+config YAFFS_XATTR
+	bool "Enable yaffs2 xattr support"
+	depends on YAFFS_FS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	 If this is set then yaffs2 will provide xattr support.
+	 If unsure, say Y.
+
+endif
+
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..64b89e59f2380042344bc3c3741ccb0c8219f223
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,21 @@
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
+yaffs-y += yaffs_mtdif.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_summary.o
+yaffs-y += yaffs_verify.o
+
+
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/include/asm-generic/gcclib.h b/include/asm-generic/gcclib.h
new file mode 100644
index 0000000000000000000000000000000000000000..480cb4c9472716b8d55c49d4f6a0123e0fe315bc
--- /dev/null
+++ b/include/asm-generic/gcclib.h
@@ -0,0 +1,35 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+
+/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */
+/* I Molton     29/07/01 */
+
+#include <generated/autoconf.h>
+
+#ifndef _GCCLIB_H_
+#define _GCCLIB_H_
+
+#define BITS_PER_UNIT  8
+#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+
+typedef unsigned int UQItype    __attribute__ ((mode (QI)));
+typedef          int SItype     __attribute__ ((mode (SI)));
+typedef unsigned int USItype    __attribute__ ((mode (SI)));
+typedef          int DItype     __attribute__ ((mode (DI)));
+typedef          int word_type 	__attribute__ ((mode (__word__)));
+typedef unsigned int UDItype    __attribute__ ((mode (DI)));
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+  struct DIstruct {USItype low; SItype high;};
+#else
+  struct DIstruct {SItype high; USItype low;};
+#endif
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+#endif
+
+#endif
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 5f52690c3c8f3a632350d14b1511908299887efb..bd144325a99472676612d5a2e4213563d7eb64c3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -196,6 +196,10 @@ extern void gpio_unexport(unsigned gpio);
 #endif	/* CONFIG_GPIO_SYSFS */
 
 #else	/* !CONFIG_GPIOLIB */
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+extern int __gpio_get_value(unsigned gpio);
+extern void __gpio_set_value(unsigned gpio, int value);
+#endif
 
 static inline bool gpio_is_valid(int number)
 {
diff --git a/include/asm-generic/longlong.h b/include/asm-generic/longlong.h
new file mode 100644
index 0000000000000000000000000000000000000000..b886f2bdc8d3759ef6c58b55ee29f30a5aa4bdf4
--- /dev/null
+++ b/include/asm-generic/longlong.h
@@ -0,0 +1,1328 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+   Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
+   Free Software Foundation, Inc.
+
+   This definition file is free software; you can redistribute it
+   and/or modify it under the terms of the GNU General Public
+   License as published by the Free Software Foundation; either
+   version 2, or (at your option) any later version.
+
+   This definition file is distributed in the hope that it will be
+   useful, but WITHOUT ANY WARRANTY; without even the implied
+   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+   See the GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* You have to define the following before including this file:
+
+   UWtype -- An unsigned type, default type for operations (typically a "word")
+   UHWtype -- An unsigned type, at least half the size of UWtype.
+   UDWtype -- An unsigned type, at least twice as large as UWtype
+   W_TYPE_SIZE -- size in bits of UWtype
+
+   UQItype -- Unsigned 8 bit type.
+   SItype, USItype -- Signed and unsigned 32 bit types.
+   DItype, UDItype -- Signed and unsigned 64 bit types.
+
+   On a 32 bit machine UWtype should typically be USItype;
+   on a 64 bit machine, UWtype should typically be UDItype.
+*/
+
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+#ifndef W_TYPE_SIZE
+#define W_TYPE_SIZE	32
+#define UWtype		USItype
+#define UHWtype		USItype
+#define UDWtype		UDItype
+#endif
+
+/* Define auxiliary asm macros.
+
+   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+   UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+   word product in HIGH_PROD and LOW_PROD.
+
+   2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+   UDWtype product.  This is just a variant of umul_ppmm.
+
+   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator) divides a UDWtype, composed by the UWtype integers
+   HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+   in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
+   than DENOMINATOR for correct operation.  If, in addition, the most
+   significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+   UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
+   is rounded towards 0.
+
+   5) count_leading_zeros(count, x) counts the number of zero-bits from the
+   msb to the first nonzero bit in the UWtype X.  This is the number of
+   steps X needs to be shifted left to set the msb.  Undefined for X == 0,
+   unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+
+   6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+   from the least significant end.
+
+   7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+   high_addend_2, low_addend_2) adds two UWtype integers, composed by
+   HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+   respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
+   (i.e. carry out) is not stored anywhere, and is lost.
+
+   8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+   high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+   composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+   LOW_SUBTRAHEND_2 respectively.  The result is placed in HIGH_DIFFERENCE
+   and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
+   and is lost.
+
+   If any of these macros are left undefined for a particular CPU,
+   C macros are used.  */
+
+/* The CPUs come in alphabetical order below.
+
+   Please add support for more CPUs here, or improve the current support
+   for the CPUs below!
+   (E.g. WE32100, IBM360.)  */
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+   understood by gcc1.  Use cpp to avoid major code duplication.  */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if defined (__alpha) && W_TYPE_SIZE == 64
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    UDItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("umulh %r1,%2,%0"						\
+	     : "=r" ((UDItype) ph)					\
+	     : "%rJ" (__m0),						\
+	       "rI" (__m1));						\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+#define UMUL_TIME 46
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { UDItype __r;							\
+    (q) = __udiv_qrnnd (&__r, (n1), (n0), (d));				\
+    (r) = __r;								\
+  } while (0)
+extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#ifdef __alpha_cix__
+#define count_leading_zeros(COUNT,X) \
+  __asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
+#define count_trailing_zeros(COUNT,X) \
+  __asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
+#define COUNT_LEADING_ZEROS_0 64
+#else
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(COUNT,X) \
+  do {									\
+    UDItype __xr = (X), __t, __a;					\
+    __asm__("cmpbge $31,%1,%0" : "=r"(__t) : "r"(__xr));		\
+    __a = __clz_tab[__t ^ 0xff] - 1;					\
+    __asm__("extbl %1,%2,%0" : "=r"(__t) : "r"(__xr), "r"(__a));	\
+    (COUNT) = 64 - (__clz_tab[__t] + __a*8);				\
+  } while (0)
+#define count_trailing_zeros(COUNT,X) \
+  do {									\
+    UDItype __xr = (X), __t, __a;					\
+    __asm__("cmpbge $31,%1,%0" : "=r"(__t) : "r"(__xr));		\
+    __t = ~__t & -~__t;							\
+    __a = ((__t & 0xCC) != 0) * 2;					\
+    __a += ((__t & 0xF0) != 0) * 4;					\
+    __a += ((__t & 0xAA) != 0);						\
+    __asm__("extbl %1,%2,%0" : "=r"(__t) : "r"(__xr), "r"(__a));	\
+    __a <<= 3;								\
+    __t &= -__t;							\
+    __a += ((__t & 0xCC) != 0) * 2;					\
+    __a += ((__t & 0xF0) != 0) * 4;					\
+    __a += ((__t & 0xAA) != 0);						\
+    (COUNT) = __a;							\
+  } while (0)
+#endif /* __alpha_cix__ */
+#endif /* __alpha */
+
+#if defined (__arc__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add.f	%1, %4, %5\n\tadc	%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%r" ((USItype) (ah)),					\
+	     "rIJ" ((USItype) (bh)),					\
+	     "%r" ((USItype) (al)),					\
+	     "rIJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub.f	%1, %4, %5\n\tsbc	%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "r" ((USItype) (ah)),					\
+	     "rIJ" ((USItype) (bh)),					\
+	     "r" ((USItype) (al)),					\
+	     "rIJ" ((USItype) (bl)))
+/* Call libgcc routine.  */
+#define umul_ppmm(w1, w0, u, v) \
+do {									\
+  DWunion __w;								\
+  __w.ll = __umulsidi3 (u, v);						\
+  w1 = __w.s.high;							\
+  w0 = __w.s.low;							\
+} while (0)
+#define __umulsidi3 __umulsidi3
+UDItype __umulsidi3 (USItype, USItype);
+#endif
+
+#if defined (__arm__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("adds	%1, %4, %5\n\tadc	%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%r" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "%r" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subs	%1, %4, %5\n\tsbc	%0, %2, %3"		\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "r" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "r" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2;					\
+  __asm__ ("%@ Inlined umul_ppmm\n"					\
+	   "	mov	%2, %5, lsr #16\n"				\
+	   "	mov	%0, %6, lsr #16\n"				\
+	   "	bic	%3, %5, %2, lsl #16\n"				\
+	   "	bic	%4, %6, %0, lsl #16\n"				\
+	   "	mul	%1, %3, %4\n"					\
+	   "	mul	%4, %2, %4\n"					\
+	   "	mul	%3, %0, %3\n"					\
+	   "	mul	%0, %2, %0\n"					\
+	   "	adds	%3, %4, %3\n"					\
+	   "	addcs	%0, %0, #65536\n"				\
+	   "	adds	%1, %1, %3, lsl #16\n"				\
+	   "	adc	%0, %0, %3, lsr #16"				\
+	   : "=&r" ((USItype) (xh)),					\
+	     "=r" ((USItype) (xl)),					\
+	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2)			\
+	   : "r" ((USItype) (a)),					\
+	     "r" ((USItype) (b)));}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#if defined (__hppa) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0"				\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%rM" ((USItype) (ah)),					\
+	     "rM" ((USItype) (bh)),					\
+	     "%rM" ((USItype) (al)),					\
+	     "rM" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0"				\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "rM" ((USItype) (ah)),					\
+	     "rM" ((USItype) (bh)),					\
+	     "rM" ((USItype) (al)),					\
+	     "rM" ((USItype) (bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(w1, w0, u, v) \
+  do {									\
+    union								\
+      {									\
+	UDItype __f;							\
+	struct {USItype __w1, __w0;} __w1w0;				\
+      } __t;								\
+    __asm__ ("xmpyu %1,%2,%0"						\
+	     : "=x" (__t.__f)						\
+	     : "x" ((USItype) (u)),					\
+	       "x" ((USItype) (v)));					\
+    (w1) = __t.__w1w0.__w1;						\
+    (w0) = __t.__w1w0.__w0;						\
+     } while (0)
+#define UMUL_TIME 8
+#else
+#define UMUL_TIME 30
+#endif
+#define UDIV_TIME 40
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __tmp;							\
+    __asm__ (								\
+       "ldi		1,%0\n"						\
+"	extru,=		%1,15,16,%%r0		; Bits 31..16 zero?\n"	\
+"	extru,tr	%1,15,16,%1		; No.  Shift down, skip add.\n"\
+"	ldo		16(%0),%0		; Yes.  Perform add.\n"	\
+"	extru,=		%1,23,8,%%r0		; Bits 15..8 zero?\n"	\
+"	extru,tr	%1,23,8,%1		; No.  Shift down, skip add.\n"\
+"	ldo		8(%0),%0		; Yes.  Perform add.\n"	\
+"	extru,=		%1,27,4,%%r0		; Bits 7..4 zero?\n"	\
+"	extru,tr	%1,27,4,%1		; No.  Shift down, skip add.\n"\
+"	ldo		4(%0),%0		; Yes.  Perform add.\n"	\
+"	extru,=		%1,29,2,%%r0		; Bits 3..2 zero?\n"	\
+"	extru,tr	%1,29,2,%1		; No.  Shift down, skip add.\n"\
+"	ldo		2(%0),%0		; Yes.  Perform add.\n"	\
+"	extru		%1,30,1,%1		; Extract bit 1.\n"	\
+"	sub		%0,%1,%0		; Subtract it.\n"	\
+	: "=r" (count), "=r" (__tmp) : "1" (x));			\
+  } while (0)
+#endif
+
+#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
+#define smul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {DItype __ll;							\
+	   struct {USItype __h, __l;} __i;				\
+	  } __x;							\
+    __asm__ ("lr %N0,%1\n\tmr %0,%2"					\
+	     : "=&r" (__x.__ll)						\
+	     : "r" (m0), "r" (m1));					\
+    (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
+  } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  do {									\
+    union {DItype __ll;							\
+	   struct {USItype __h, __l;} __i;				\
+	  } __x;							\
+    __x.__i.__h = n1; __x.__i.__l = n0;					\
+    __asm__ ("dr %0,%2"							\
+	     : "=r" (__x.__ll)						\
+	     : "0" (__x.__ll), "r" (d));				\
+    (q) = __x.__i.__l; (r) = __x.__i.__h;				\
+  } while (0)
+#endif
+
+#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addl %5,%1\n\tadcl %3,%0"					\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%0" ((USItype) (ah)),					\
+	     "g" ((USItype) (bh)),					\
+	     "%1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subl %5,%1\n\tsbbl %3,%0"					\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "0" ((USItype) (ah)),					\
+	     "g" ((USItype) (bh)),					\
+	     "1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mull %3"							\
+	   : "=a" ((USItype) (w0)),					\
+	     "=d" ((USItype) (w1))					\
+	   : "%0" ((USItype) (u)),					\
+	     "rm" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, dv) \
+  __asm__ ("divl %4"							\
+	   : "=a" ((USItype) (q)),					\
+	     "=d" ((USItype) (r))					\
+	   : "0" ((USItype) (n0)),					\
+	     "1" ((USItype) (n1)),					\
+	     "rm" ((USItype) (dv)))
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("bsrl %1,%0"						\
+	     : "=r" (__cbtmp) : "rm" ((USItype) (x)));			\
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define count_trailing_zeros(count, x) \
+  __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* 80x86 */
+
+#if defined (__i960__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("emul	%2,%1,%0"					\
+	   : "=d" (__xx.__ll)						\
+	   : "%dI" ((USItype) (u)),					\
+	     "dI" ((USItype) (v)));					\
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w;							\
+    __asm__ ("emul	%2,%1,%0"					\
+	     : "=d" (__w)						\
+	     : "%dI" ((USItype) (u)),					\
+	       "dI" ((USItype) (v)));					\
+    __w; })
+#endif /* __i960__ */
+
+#if defined (__M32R__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  /* The cmp clears the condition bit.  */ \
+  __asm__ ("cmp %0,%0\n\taddx %%5,%1\n\taddx %%3,%0"			\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%0" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "%1" ((USItype) (al)),					\
+	     "r" ((USItype) (bl))					\
+	   : "cbit")
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  /* The cmp clears the condition bit.  */ \
+  __asm__ ("cmp %0,%0\n\tsubx %5,%1\n\tsubx %3,%0"			\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "0" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "1" ((USItype) (al)),					\
+	     "r" ((USItype) (bl))					\
+	   : "cbit")
+#endif /* __M32R__ */
+
+#if defined (__mc68000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0"				\
+	   : "=d" ((USItype) (sh)),					\
+	     "=&d" ((USItype) (sl))					\
+	   : "%0" ((USItype) (ah)),					\
+	     "d" ((USItype) (bh)),					\
+	     "%1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0"				\
+	   : "=d" ((USItype) (sh)),					\
+	     "=&d" ((USItype) (sl))					\
+	   : "0" ((USItype) (ah)),					\
+	     "d" ((USItype) (bh)),					\
+	     "1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+
+/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
+#if defined (__mc68020__) || defined(mc68020) \
+	|| defined(__mc68030__) || defined(mc68030) \
+	|| defined(__mc68040__) || defined(mc68040) \
+	|| defined(__mcpu32__) || defined(mcpu32)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulu%.l %3,%1:%0"						\
+	   : "=d" ((USItype) (w0)),					\
+	     "=d" ((USItype) (w1))					\
+	   : "%0" ((USItype) (u)),					\
+	     "dmi" ((USItype) (v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divu%.l %4,%1:%0"						\
+	   : "=d" ((USItype) (q)),					\
+	     "=d" ((USItype) (r))					\
+	   : "0" ((USItype) (n0)),					\
+	     "1" ((USItype) (n1)),					\
+	     "dmi" ((USItype) (d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divs%.l %4,%1:%0"						\
+	   : "=d" ((USItype) (q)),					\
+	     "=d" ((USItype) (r))					\
+	   : "0" ((USItype) (n0)),					\
+	     "1" ((USItype) (n1)),					\
+	     "dmi" ((USItype) (d)))
+
+#else /* not mc68020 */
+#if !defined(__mcf5200__)
+/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX.  */
+#define umul_ppmm(xh, xl, a, b) \
+  __asm__ ("| Inlined umul_ppmm\n"					\
+	   "	move%.l	%2,%/d0\n"					\
+	   "	move%.l	%3,%/d1\n"					\
+	   "	move%.l	%/d0,%/d2\n"					\
+	   "	swap	%/d0\n"						\
+	   "	move%.l	%/d1,%/d3\n"					\
+	   "	swap	%/d1\n"						\
+	   "	move%.w	%/d2,%/d4\n"					\
+	   "	mulu	%/d3,%/d4\n"					\
+	   "	mulu	%/d1,%/d2\n"					\
+	   "	mulu	%/d0,%/d3\n"					\
+	   "	mulu	%/d0,%/d1\n"					\
+	   "	move%.l	%/d4,%/d0\n"					\
+	   "	eor%.w	%/d0,%/d0\n"					\
+	   "	swap	%/d0\n"						\
+	   "	add%.l	%/d0,%/d2\n"					\
+	   "	add%.l	%/d3,%/d2\n"					\
+	   "	jcc	1f\n"						\
+	   "	add%.l	%#65536,%/d1\n"					\
+	   "1:	swap	%/d2\n"						\
+	   "	moveq	%#0,%/d0\n"					\
+	   "	move%.w	%/d2,%/d0\n"					\
+	   "	move%.w	%/d4,%/d2\n"					\
+	   "	move%.l	%/d2,%1\n"					\
+	   "	add%.l	%/d1,%/d0\n"					\
+	   "	move%.l	%/d0,%0"					\
+	   : "=g" ((USItype) (xh)),					\
+	     "=g" ((USItype) (xl))					\
+	   : "g" ((USItype) (a)),					\
+	     "g" ((USItype) (b))					\
+	   : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mcf5200 */
+#endif /* not mc68020 */
+
+/* The '020, '030, '040 and '060 have bitfield insns.  */
+#if defined (__mc68020__) || defined(mc68020) \
+	|| defined(__mc68030__) || defined(mc68030) \
+	|| defined(__mc68040__) || defined(mc68040) \
+	|| defined(__mc68060__) || defined(mc68060)
+#define count_leading_zeros(count, x) \
+  __asm__ ("bfffo %1{%b2:%b2},%0"					\
+	   : "=d" ((USItype) (count))					\
+	   : "od" ((USItype) (x)), "n" (0))
+#endif
+#endif /* mc68000 */
+
+#if defined (__m88000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3"			\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%rJ" ((USItype) (ah)),					\
+	     "rJ" ((USItype) (bh)),					\
+	     "%rJ" ((USItype) (al)),					\
+	     "rJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3"			\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "rJ" ((USItype) (ah)),					\
+	     "rJ" ((USItype) (bh)),					\
+	     "rJ" ((USItype) (al)),					\
+	     "rJ" ((USItype) (bl)))
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("ff1 %0,%1"						\
+	     : "=r" (__cbtmp)						\
+	     : "r" ((USItype) (x)));					\
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
+#if defined (__mc88110__)
+#define umul_ppmm(wh, wl, u, v) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+    __asm__ ("mulu.d	%0,%1,%2"					\
+	     : "=r" (__xx.__ll)						\
+	     : "r" ((USItype) (u)),					\
+	       "r" ((USItype) (v)));					\
+    (wh) = __xx.__i.__h;						\
+    (wl) = __xx.__i.__l;						\
+  } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __h, __l;} __i;				\
+	  } __xx;							\
+  USItype __q;								\
+  __xx.__i.__h = (n1); __xx.__i.__l = (n0);				\
+  __asm__ ("divu.d %0,%1,%2"						\
+	   : "=r" (__q)							\
+	   : "r" (__xx.__ll),						\
+	     "r" ((USItype) (d)));					\
+  (r) = (n0) - __q * (d); (q) = __q; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __mc88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mips__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("multu %2,%3"						\
+	   : "=l" ((USItype) (w0)),					\
+	     "=h" ((USItype) (w1))					\
+	   : "d" ((USItype) (u)),					\
+	     "d" ((USItype) (v)))
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+#if defined (__ns32000__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __asm__ ("meid %2,%0"							\
+	   : "=g" (__xx.__ll)						\
+	   : "%0" ((USItype) (u)),					\
+	     "g" ((USItype) (v)));					\
+  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w;							\
+    __asm__ ("meid %2,%0"						\
+	     : "=g" (__w)						\
+	     : "%0" ((USItype) (u)),					\
+	       "g" ((USItype) (v)));					\
+    __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __xx;							\
+  __xx.__i.__h = (n1); __xx.__i.__l = (n0);				\
+  __asm__ ("deid %2,%0"							\
+	   : "=g" (__xx.__ll)						\
+	   : "0" (__xx.__ll),						\
+	     "g" ((USItype) (d)));					\
+  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#define count_trailing_zeros(count,x) \
+  do {									\
+    __asm__ ("ffsd     %2,%0"						\
+            : "=r" ((USItype) (count))					\
+            : "0" ((USItype) 0),					\
+              "r" ((USItype) (x)));					\
+  } while (0)
+#endif /* __ns32000__ */
+
+/* FIXME: We should test _IBMR2 here when we add assembly support for the
+   system vendor compilers.
+   FIXME: What's needed for gcc PowerPC VxWorks?  __vxworks__ is not good
+   enough, since that hits ARM and m68k too.  */
+#if (defined (_ARCH_PPC)	/* AIX */				\
+     || defined (_ARCH_PWR)	/* AIX */				\
+     || defined (_ARCH_COM)	/* AIX */				\
+     || defined (__powerpc__)	/* gcc */				\
+     || defined (__POWERPC__)	/* BEOS */				\
+     || defined (__ppc__)	/* Darwin */				\
+     || defined (PPC)		/* GNU/Linux, SysV */			\
+     ) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (bh) && (bh) == 0)				\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"		\
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"		\
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else								\
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"		\
+	     : "=r" (sh), "=&r" (sl)					\
+	     : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));		\
+  } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (ah) && (ah) == 0)				\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"	\
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"	\
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == 0)			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"		\
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"		\
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else								\
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"	\
+	       : "=r" (sh), "=&r" (sl)					\
+	       : "r" (ah), "r" (bh), "rI" (al), "r" (bl));		\
+  } while (0)
+#define count_leading_zeros(count, x) \
+  __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 32
+#if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \
+  || defined (__ppc__) || defined (PPC) || defined (__vxworks__)
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    SItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#elif defined (_ARCH_PWR)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+  __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+  __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
+#define UDIV_TIME 100
+#endif
+#endif /* 32-bit POWER architecture variants.  */
+
+/* We should test _IBMR2 here when we add assembly support for the system
+   vendor compilers.  */
+#if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (bh) && (bh) == 0)				\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"		\
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)		\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"		\
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else								\
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"		\
+	     : "=r" (sh), "=&r" (sl)					\
+	     : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));		\
+  } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {									\
+    if (__builtin_constant_p (ah) && (ah) == 0)				\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"	\
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"	\
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == 0)			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"		\
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"		\
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else								\
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"	\
+	       : "=r" (sh), "=&r" (sl)					\
+	       : "r" (ah), "r" (bh), "rI" (al), "r" (bl));		\
+  } while (0)
+#define count_leading_zeros(count, x) \
+  __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 64
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    UDItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    DItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+#define SMUL_TIME 14  /* ??? */
+#define UDIV_TIME 120 /* ??? */
+#endif /* 64-bit PowerPC.  */
+
+#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("a %1,%5\n\tae %0,%3"					\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%0" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "%1" ((USItype) (al)),					\
+	     "r" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("s %1,%5\n\tse %0,%3"					\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "0" ((USItype) (ah)),					\
+	     "r" ((USItype) (bh)),					\
+	     "1" ((USItype) (al)),					\
+	     "r" ((USItype) (bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ (								\
+       "s	r2,r2\n"						\
+"	mts	r10,%2\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	m	r2,%3\n"						\
+"	cas	%0,r2,r0\n"						\
+"	mfs	r10,%1"							\
+	     : "=r" ((USItype) (ph)),					\
+	       "=r" ((USItype) (pl))					\
+	     : "%r" (__m0),						\
+		"r" (__m1)						\
+	     : "r2");							\
+    (ph) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0));				\
+  } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+  do {									\
+    if ((x) >= 0x10000)							\
+      __asm__ ("clz	%0,%1"						\
+	       : "=r" ((USItype) (count))				\
+	       : "r" ((USItype) (x) >> 16));				\
+    else								\
+      {									\
+	__asm__ ("clz	%0,%1"						\
+		 : "=r" ((USItype) (count))				\
+		 : "r" ((USItype) (x)));					\
+	(count) += 16;							\
+      }									\
+  } while (0)
+#endif
+
+#if defined (__sh2__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ (								\
+       "dmulu.l	%2,%3\n\tsts	macl,%1\n\tsts	mach,%0"		\
+	   : "=r" ((USItype)(w1)),					\
+	     "=r" ((USItype)(w0))					\
+	   : "r" ((USItype)(u)),					\
+	     "r" ((USItype)(v))						\
+	   : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+#if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
+#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
+#define count_leading_zeros(count, x) \
+  do									\
+    {									\
+      UDItype x_ = (USItype)(x);					\
+      SItype c_;							\
+									\
+      __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_));			\
+      (count) = c_ - 31;						\
+    }									\
+  while (0)
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+
+#if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
+    && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0"				\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%rJ" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "%rJ" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl))					\
+	   __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0"				\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "rJ" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "rJ" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl))					\
+	   __CLOBBER_CC)
+#if defined (__sparc_v8__)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
+	   : "=r" ((USItype) (w1)),					\
+	     "=r" ((USItype) (w0))					\
+	   : "r" ((USItype) (u)),					\
+	     "r" ((USItype) (v)))
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
+	   : "=&r" ((USItype) (__q)),					\
+	     "=&r" ((USItype) (__r))					\
+	   : "r" ((USItype) (__n1)),					\
+	     "r" ((USItype) (__n0)),					\
+	     "r" ((USItype) (__d)))
+#else
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide.  It also has two additional
+   instructions scan (ffs from high bit) and divscc.  */
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
+	   : "=r" ((USItype) (w1)),					\
+	     "=r" ((USItype) (w0))					\
+	   : "r" ((USItype) (u)),					\
+	     "r" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("! Inlined udiv_qrnnd\n"					\
+"	wr	%%g0,%2,%%y	! Not a delayed write for sparclite\n"	\
+"	tst	%%g0\n"							\
+"	divscc	%3,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%%g1\n"						\
+"	divscc	%%g1,%4,%0\n"						\
+"	rd	%%y,%1\n"						\
+"	bl,a 1f\n"							\
+"	add	%1,%4,%1\n"						\
+"1:	! End of inline udiv_qrnnd"					\
+	   : "=r" ((USItype) (q)),					\
+	     "=r" ((USItype) (r))					\
+	   : "r" ((USItype) (n1)),					\
+	     "r" ((USItype) (n0)),					\
+	     "rI" ((USItype) (d))					\
+	   : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+  do {                                                                  \
+  __asm__ ("scan %1,1,%0"                                               \
+           : "=r" ((USItype) (count))                                   \
+           : "r" ((USItype) (x)));					\
+  } while (0)
+/* Early sparclites return 63 for an argument of 0, but they warn that future
+   implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
+   undefined.  */
+#else
+/* SPARC without integer multiplication and divide instructions.
+   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("! Inlined umul_ppmm\n"					\
+"	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n"\
+"	sra	%3,31,%%o5	! Don't move this insn\n"		\
+"	and	%2,%%o5,%%o5	! Don't move this insn\n"		\
+"	andcc	%%g0,0,%%g1	! Don't move this insn\n"		\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,%3,%%g1\n"						\
+"	mulscc	%%g1,0,%%g1\n"						\
+"	add	%%g1,%%o5,%0\n"						\
+"	rd	%%y,%1"							\
+	   : "=r" ((USItype) (w1)),					\
+	     "=r" ((USItype) (w0))					\
+	   : "%rI" ((USItype) (u)),					\
+	     "r" ((USItype) (v))						\
+	   : "g1", "o5" __AND_CLOBBER_CC)
+#define UMUL_TIME 39		/* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+   The default udiv_qrnnd (in C) is more than 10 times slower!  */
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+  __asm__ ("! Inlined udiv_qrnnd\n"					\
+"	mov	32,%%g1\n"						\
+"	subcc	%1,%2,%%g0\n"						\
+"1:	bcs	5f\n"							\
+"	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
+"	sub	%1,%2,%1	! this kills msb of n\n"		\
+"	addx	%1,%1,%1	! so this can't give carry\n"		\
+"	subcc	%%g1,1,%%g1\n"						\
+"2:	bne	1b\n"							\
+"	 subcc	%1,%2,%%g0\n"						\
+"	bcs	3f\n"							\
+"	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
+"	b	3f\n"							\
+"	 sub	%1,%2,%1	! this kills msb of n\n"		\
+"4:	sub	%1,%2,%1\n"						\
+"5:	addxcc	%1,%1,%1\n"						\
+"	bcc	2b\n"							\
+"	 subcc	%%g1,1,%%g1\n"						\
+"! Got carry from n.  Subtract next step to cancel this carry.\n"	\
+"	bne	4b\n"							\
+"	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n"	\
+"	sub	%1,%2,%1\n"						\
+"3:	xnor	%0,0,%0\n"						\
+"	! End of inline udiv_qrnnd"					\
+	   : "=&r" ((USItype) (__q)),					\
+	     "=&r" ((USItype) (__r))					\
+	   : "r" ((USItype) (__d)),					\
+	     "1" ((USItype) (__n1)),					\
+	     "0" ((USItype) (__n0)) : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations.  */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* sparc32 */
+
+#if ((defined (__sparc__) && defined (__arch64__)) || defined (__sparcv9)) \
+    && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
+  __asm__ ("addcc %r4,%5,%1\n\t"					\
+   	   "add %r2,%3,%0\n\t"						\
+   	   "bcs,a,pn %%xcc, 1f\n\t"					\
+   	   "add %0, 1, %0\n"						\
+	   "1:"								\
+	   : "=r" ((UDItype)(sh)),				      	\
+	     "=&r" ((UDItype)(sl))				      	\
+	   : "%rJ" ((UDItype)(ah)),				     	\
+	     "rI" ((UDItype)(bh)),				      	\
+	     "%rJ" ((UDItype)(al)),				     	\
+	     "rI" ((UDItype)(bl))				       	\
+	   __CLOBBER_CC)
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
+  __asm__ ("subcc %r4,%5,%1\n\t"					\
+   	   "sub %r2,%3,%0\n\t"						\
+   	   "bcs,a,pn %%xcc, 1f\n\t"					\
+   	   "sub %0, 1, %0\n\t"						\
+	   "1:"								\
+	   : "=r" ((UDItype)(sh)),				      	\
+	     "=&r" ((UDItype)(sl))				      	\
+	   : "rJ" ((UDItype)(ah)),				     	\
+	     "rI" ((UDItype)(bh)),				      	\
+	     "rJ" ((UDItype)(al)),				     	\
+	     "rI" ((UDItype)(bl))				       	\
+	   __CLOBBER_CC)
+
+#define umul_ppmm(wh, wl, u, v)						\
+  do {									\
+	  UDItype tmp1, tmp2, tmp3, tmp4;				\
+	  __asm__ __volatile__ (					\
+		   "srl %7,0,%3\n\t"					\
+		   "mulx %3,%6,%1\n\t"					\
+		   "srlx %6,32,%2\n\t"					\
+		   "mulx %2,%3,%4\n\t"					\
+		   "sllx %4,32,%5\n\t"					\
+		   "srl %6,0,%3\n\t"					\
+		   "sub %1,%5,%5\n\t"					\
+		   "srlx %5,32,%5\n\t"					\
+		   "addcc %4,%5,%4\n\t"					\
+		   "srlx %7,32,%5\n\t"					\
+		   "mulx %3,%5,%3\n\t"					\
+		   "mulx %2,%5,%5\n\t"					\
+		   "sethi %%hi(0x80000000),%2\n\t"			\
+		   "addcc %4,%3,%4\n\t"					\
+		   "srlx %4,32,%4\n\t"					\
+		   "add %2,%2,%2\n\t"					\
+		   "movcc %%xcc,%%g0,%2\n\t"				\
+		   "addcc %5,%4,%5\n\t"					\
+		   "sllx %3,32,%3\n\t"					\
+		   "add %1,%3,%1\n\t"					\
+		   "add %5,%2,%0"					\
+	   : "=r" ((UDItype)(wh)),					\
+	     "=&r" ((UDItype)(wl)),					\
+	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4)	\
+	   : "r" ((UDItype)(u)),					\
+	     "r" ((UDItype)(v))						\
+	   __CLOBBER_CC);						\
+  } while (0)
+#define UMUL_TIME 96
+#define UDIV_TIME 230
+#endif /* sparc64 */
+
+#if defined (__vax__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addl2 %5,%1\n\tadwc %3,%0"					\
+	   : "=g" ((USItype) (sh)),					\
+	     "=&g" ((USItype) (sl))					\
+	   : "%0" ((USItype) (ah)),					\
+	     "g" ((USItype) (bh)),					\
+	     "%1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subl2 %5,%1\n\tsbwc %3,%0"					\
+	   : "=g" ((USItype) (sh)),					\
+	     "=&g" ((USItype) (sl))					\
+	   : "0" ((USItype) (ah)),					\
+	     "g" ((USItype) (bh)),					\
+	     "1" ((USItype) (al)),					\
+	     "g" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {								\
+	UDItype __ll;							\
+	struct {USItype __l, __h;} __i;					\
+      } __xx;								\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("emul %1,%2,$0,%0"						\
+	     : "=r" (__xx.__ll)						\
+	     : "g" (__m0),						\
+	       "g" (__m1));						\
+    (xh) = __xx.__i.__h;						\
+    (xl) = __xx.__i.__l;						\
+    (xh) += ((((SItype) __m0 >> 31) & __m1)				\
+	     + (((SItype) __m1 >> 31) & __m0));				\
+  } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  do {									\
+    union {DItype __ll;							\
+	   struct {SItype __l, __h;} __i;				\
+	  } __xx;							\
+    __xx.__i.__h = n1; __xx.__i.__l = n0;				\
+    __asm__ ("ediv %3,%2,%0,%1"						\
+	     : "=g" (q), "=g" (r)					\
+	     : "g" (__xx.__ll), "g" (d));				\
+  } while (0)
+#endif /* __vax__ */
+
+#if defined (__z8000__) && W_TYPE_SIZE == 16
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add	%H1,%H5\n\tadc	%H0,%H3"				\
+	   : "=r" ((unsigned int)(sh)),					\
+	     "=&r" ((unsigned int)(sl))					\
+	   : "%0" ((unsigned int)(ah)),					\
+	     "r" ((unsigned int)(bh)),					\
+	     "%1" ((unsigned int)(al)),					\
+	     "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub	%H1,%H5\n\tsbc	%H0,%H3"				\
+	   : "=r" ((unsigned int)(sh)),					\
+	     "=&r" ((unsigned int)(sl))					\
+	   : "0" ((unsigned int)(ah)),					\
+	     "r" ((unsigned int)(bh)),					\
+	     "1" ((unsigned int)(al)),					\
+	     "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do {									\
+    union {long int __ll;						\
+	   struct {unsigned int __h, __l;} __i;				\
+	  } __xx;							\
+    unsigned int __m0 = (m0), __m1 = (m1);				\
+    __asm__ ("mult	%S0,%H3"					\
+	     : "=r" (__xx.__i.__h),					\
+	       "=r" (__xx.__i.__l)					\
+	     : "%1" (__m0),						\
+	       "rQR" (__m1));						\
+    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
+    (xh) += ((((signed int) __m0 >> 15) & __m1)				\
+	     + (((signed int) __m1 >> 15) & __m0));			\
+  } while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+/* If this machine has no inline assembler, use C macros.  */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do {									\
+    UWtype __x;								\
+    __x = (al) + (bl);							\
+    (sh) = (ah) + (bh) + (__x < (al));					\
+    (sl) = __x;								\
+  } while (0)
+#endif
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do {									\
+    UWtype __x;								\
+    __x = (al) - (bl);							\
+    (sh) = (ah) - (bh) - (__x > (al));					\
+    (sl) = __x;								\
+  } while (0)
+#endif
+
+/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
+   smul_ppmm.  */
+#if !defined (umul_ppmm) && defined (smul_ppmm)
+#define umul_ppmm(w1, w0, u, v)						\
+  do {									\
+    UWtype __w1;							\
+    UWtype __xm0 = (u), __xm1 = (v);					\
+    smul_ppmm (__w1, w0, __xm0, __xm1);					\
+    (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)		\
+		+ (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);		\
+  } while (0)
+#endif
+
+/* If we still don't have umul_ppmm, define it using plain C.  */
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v)						\
+  do {									\
+    UWtype __x0, __x1, __x2, __x3;					\
+    UHWtype __ul, __vl, __uh, __vh;					\
+									\
+    __ul = __ll_lowpart (u);						\
+    __uh = __ll_highpart (u);						\
+    __vl = __ll_lowpart (v);						\
+    __vh = __ll_highpart (v);						\
+									\
+    __x0 = (UWtype) __ul * __vl;					\
+    __x1 = (UWtype) __ul * __vh;					\
+    __x2 = (UWtype) __uh * __vl;					\
+    __x3 = (UWtype) __uh * __vh;					\
+									\
+    __x1 += __ll_highpart (__x0);/* this can't give carry */		\
+    __x1 += __x2;		/* but this indeed can */		\
+    if (__x1 < __x2)		/* did we get it? */			\
+      __x3 += __ll_B;		/* yes, add it in the proper pos.  */	\
+									\
+    (w1) = __x3 + __ll_highpart (__x1);					\
+    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);		\
+  } while (0)
+#endif
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+  ({DWunion __w;							\
+    umul_ppmm (__w.s.high, __w.s.low, u, v);				\
+    __w.ll; })
+#endif
+
+/* Define this unconditionally, so it can be used for debugging.  */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+  do {									\
+    UWtype __d1, __d0, __q1, __q0;					\
+    UWtype __r1, __r0, __m;						\
+    __d1 = __ll_highpart (d);						\
+    __d0 = __ll_lowpart (d);						\
+									\
+    __r1 = (n1) % __d1;							\
+    __q1 = (n1) / __d1;							\
+    __m = (UWtype) __q1 * __d0;						\
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
+    if (__r1 < __m)							\
+      {									\
+	__q1--, __r1 += (d);						\
+	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+	  if (__r1 < __m)						\
+	    __q1--, __r1 += (d);					\
+      }									\
+    __r1 -= __m;							\
+									\
+    __r0 = __r1 % __d1;							\
+    __q0 = __r1 / __d1;							\
+    __m = (UWtype) __q0 * __d0;						\
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
+    if (__r0 < __m)							\
+      {									\
+	__q0--, __r0 += (d);						\
+	if (__r0 >= (d))						\
+	  if (__r0 < __m)						\
+	    __q0--, __r0 += (d);					\
+      }									\
+    __r0 -= __m;							\
+									\
+    (q) = (UWtype) __q1 * __ll_B | __q0;				\
+    (r) = __r0;								\
+  } while (0)
+
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  do {									\
+    USItype __r;							\
+    (q) = __udiv_w_sdiv (&__r, nh, nl, d);				\
+    (r) = __r;								\
+  } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#if !defined (count_leading_zeros)
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(count, x) \
+  do {									\
+    UWtype __xr = (x);							\
+    UWtype __a;								\
+									\
+    if (W_TYPE_SIZE <= 32)						\
+      {									\
+	__a = __xr < ((UWtype)1<<2*__BITS4)				\
+	  ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4)			\
+	  : (__xr < ((UWtype)1<<3*__BITS4) ?  2*__BITS4 : 3*__BITS4);	\
+      }									\
+    else								\
+      {									\
+	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)			\
+	  if (((__xr >> __a) & 0xff) != 0)				\
+	    break;							\
+      }									\
+									\
+    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);		\
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
+#endif
+
+#if !defined (count_trailing_zeros)
+/* Define count_trailing_zeros using count_leading_zeros.  The latter might be
+   defined in asm, but if it is not, the C version above is good enough.  */
+#define count_trailing_zeros(count, x) \
+  do {									\
+    UWtype __ctz_x = (x);						\
+    UWtype __ctz_c;							\
+    count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x);			\
+    (count) = W_TYPE_SIZE - 1 - __ctz_c;				\
+  } while (0)
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8aeadf6b553a11346aa5a5a256df1303416535e9..e05284d0b64bbe031b59e7e92e158aa700f23e83 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -368,6 +368,10 @@
 									\
 	/* Built-in module versions. */					\
 	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
+/* #if defined(CONFIG_BCM_KF_LINKER_WORKAROUND)  */				\
+    /* IGNORE_BCM_KF_EXCEPTION */						\
+		*(__modver_tmp)						\
+/* #endif */                                                                 \
 		VMLINUX_SYMBOL(__start___modver) = .;			\
 		*(__modver)						\
 		VMLINUX_SYMBOL(__stop___modver) = .;			\
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 26cb1eb16f4c0df0be234d081a419596fb081167..aa76f0e8d867f1fe7424eada890db882b644a091 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -33,6 +33,10 @@ struct ahash_request {
 
 	/* This field may only be used by the ahash API code. */
 	void *priv;
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined (CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	int alloc_buff_spu;
+	int headerLen;
+#endif
 
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f2f73f9b986faadfc909a9fe0609468ba51eeca0..4bf4100975a0912091dc6f142c473cf1320417bb 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -183,6 +183,8 @@ header-y += if_plip.h
 header-y += if_ppp.h
 header-y += if_pppol2tp.h
 header-y += if_pppox.h
+header-y += if_pppolac.h
+header-y += if_pppopns.h
 header-y += if_slip.h
 header-y += if_strip.h
 header-y += if_team.h
@@ -375,6 +377,7 @@ header-y += tty.h
 header-y += types.h
 header-y += udf_fs_i.h
 header-y += udp.h
+header-y += uhid.h
 header-y += uinput.h
 header-y += uio.h
 header-y += ultrasound.h
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 06fd4bbc58f68c17c2e93b7b05385159e41dc6ea..655988e2d9f057cf3971fbd17d996138ee35e73d 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -106,6 +106,11 @@ struct atm_dev_stats {
 #endif
 #define ATM_DROPPARTY 	_IOW('a', ATMIOC_SPECIAL+5,int)
 					/* drop party from p2mp call */
+					
+#if defined(CONFIG_BCM_KF_ATM_BACKEND)
+#define ATM_EXTBACKENDIF _IOW('a',ATMIOC_SPECIAL+6,atm_backend_t)
+#define ATM_SETEXTFILT  _IOW('a',ATMIOC_SPECIAL+7,atm_backend_t)
+#endif
 
 /*
  * These are backend handkers that can be set via the ATM_SETBACKEND call
@@ -116,6 +121,14 @@ struct atm_dev_stats {
 #define ATM_BACKEND_PPP		1	/* PPPoATM - RFC2364 */
 #define ATM_BACKEND_BR2684	2	/* Bridged RFC1483/2684 */
 
+#if defined(CONFIG_BCM_KF_ATM_BACKEND)
+#define ATM_BACKEND_RT2684       3  /* Routed RFC1483/2684 */
+#define ATM_BACKEND_BR2684_BCM   4  /* Bridged RFC1483/2684 uses Broadcom ATMAPI*/
+#define ATM_BACKEND_PPP_BCM      5  /* PPPoA uses Broadcom bcmxtmrt driver */
+#define ATM_BACKEND_PPP_BCM_DISCONN    6  /* PPPoA LCP disconnect */
+#define ATM_BACKEND_PPP_BCM_CLOSE_DEV  7  /* PPPoA close device */
+#endif
+
 /* for ATM_GETTYPE */
 #define ATM_ITFTYP_LEN	8	/* maximum length of interface type name */
 
diff --git a/include/linux/bcm_assert.h b/include/linux/bcm_assert.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe9964773abf30714f4abda92d2a78d3f67e125b
--- /dev/null
+++ b/include/linux/bcm_assert.h
@@ -0,0 +1,143 @@
+#if defined(CONFIG_BCM_KF_ASSERT) || !defined(CONFIG_BCM_IN_KERNEL)
+
+/*
+<:copyright-BRCM:2007:GPL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+/*
+ *--------------------------------------------------------------------------
+ *
+ * Asserts are controlled from top level make menuconfig, under
+ * Debug Selection>Enable Asserts and Enable Fatal Asserts
+ *
+ * If Asserts are not enabled, they will be compiled out of the image.
+ * If Fatal Asserts are not enabled, code which fails an assert will be
+ * allowed to continue to execute (see details below.)
+ *
+ *--------------------------------------------------------------------------
+ */
+#ifndef __BCM_ASSERT_H__
+#define __BCM_ASSERT_H__
+
+#include <linux/bcm_colors.h>
+
+
+/************************************************************************
+ * Various helpers and conditional macros for the 3 main assert
+ * statements below.
+ ************************************************************************/
+
+#ifdef CONFIG_BCM_ASSERTS
+#define COND_ASSERT_CODE(code)      code
+#else
+#define COND_ASSERT_CODE(code)
+#endif
+
+
+#ifdef __KERNEL__
+// need to include header file for printk
+#define KUW_PRINT              printk
+#else
+#include <stdio.h>
+#include <string.h>
+#define KUW_PRINT              printf
+#endif
+
+
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define FATAL_ACTION       BUG()
+/* For the kernel, it would be nice to use WARN for the non-fatal assert,
+ * but WARN also kills the process.  What I want is a stack trace and some
+ * info, but the code keeps executing.  For now, just do nothing in the
+ * non-fatal assert case.  */
+#define NON_FATAL_ACTION
+#else
+// could also use abort here
+#define FATAL_ACTION       exit(-1);
+#define NON_FATAL_ACTION
+#endif
+
+
+#ifdef CONFIG_BCM_FATAL_ASSERTS
+#define COND_FATAL         FATAL_ACTION
+#else
+#define COND_FATAL         NON_FATAL_ACTION
+#endif
+
+
+#define BCM_ASSERT_PRINT(cond)                                           \
+               do {                                                      \
+                       KUW_PRINT(CLRerr "ASSERT[%s:%d]:" #cond CLRnl,    \
+                             __FUNCTION__, __LINE__);                    \
+               } while (0)
+
+/************************************************************************
+ * Here are the 3 main ASSERT statements.
+ * They differ in how they behave when an assertion fails when fatal
+ * asserts are not enabled.
+ * BCM_ASSERT_C will continue to execute the rest of the function.
+ * BCM_ASSERT_V will return from the current function.
+ * BCM_ASSERT_R will return a value from the current function.
+ * If fatal asserts are enabled, all 3 asserts behave the same, i.e.
+ * the current process is killed and no more code from this path of
+ * execution is executed.
+ *
+ * BCM_ASSERT_A is a special case assert.  It is always compiled in
+ * and failure will always be fatal.  Use this one sparingly.
+ ************************************************************************/
+
+#define BCM_ASSERT_C(cond)                                               \
+               COND_ASSERT_CODE(                                         \
+                       if (!(cond)) {                                    \
+                          BCM_ASSERT_PRINT(#cond);                       \
+                          COND_FATAL;                                    \
+                       }                                                 \
+               )
+
+#define BCM_ASSERT_V(cond)                                               \
+               COND_ASSERT_CODE(                                         \
+                       if (!(cond)) {                                    \
+                          BCM_ASSERT_PRINT(#cond);                       \
+                          COND_FATAL;                                    \
+                          return;                                        \
+                       }                                                 \
+               )
+
+#define BCM_ASSERT_R(cond, ret)                                          \
+               COND_ASSERT_CODE(                                         \
+                       if (!(cond)) {                                    \
+                          BCM_ASSERT_PRINT(#cond);                       \
+                          COND_FATAL;                                    \
+                          return ret;                                    \
+                       }                                                 \
+               )
+
+#define BCM_ASSERT_A(cond)                                               \
+                       if (!(cond)) {                                    \
+                          BCM_ASSERT_PRINT(#cond);                       \
+                          FATAL_ACTION;                                  \
+                       }
+
+
+#endif /* __BCM_ASSERT_H__ */
+
+#endif // defined(CONFIG_BCM_KF_ASSERT)
diff --git a/include/linux/bcm_assert_locks.h b/include/linux/bcm_assert_locks.h
new file mode 100644
index 0000000000000000000000000000000000000000..55c0614efa9e4bb3270bf16be77d20723dc1760f
--- /dev/null
+++ b/include/linux/bcm_assert_locks.h
@@ -0,0 +1,109 @@
+#if defined(CONFIG_BCM_KF_ASSERT) || !defined(CONFIG_BCM_IN_KERNEL)
+
+/*
+<:copyright-BRCM:2007:GPL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * These asserts allow functions to basically (note 1) verify that the caller
+ * of the function either has the lock or does not have the lock when calling
+ * the function.
+ * These asserts only work when "Enable Asserts" and "Enable Kernel Hacking",
+ * "Enable Debug Spinlock" and "Enable Debug Mutexes" are selected from
+ * the debug selection section of make menuconfig.
+ *
+ * Note 1: For spinlocks, the check is more actually thorough than the name
+ * implies.  HAS_SPINLOCK_COND verifies the caller has the spinlock *AND*
+ * the spinlock was acquired on the current CPU.  It should be impossible
+ * for the caller to acquire the spinlock on a different CPU and then be
+ * migrated to this CPU.
+ * NOT_HAS_SPINLOCK verifies the caller does not have the spinlock *AND*
+ * the spinlock is not currently held by any other process or thread context
+ * on the same CPU.  If it is, the subsequent attempt by this function to
+ * acquire the spinlock will deadlock.
+ *--------------------------------------------------------------------------
+ */
+
+#ifndef __BCM_ASSERT_LOCKS_H__
+#define __BCM_ASSERT_LOCKS_H__
+
+#include <linux/bcm_assert.h>
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#define     HAS_SPINLOCK_COND(s)  (s->rlock.owner == current && s->rlock.owner_cpu == smp_processor_id())
+#define NOT_HAS_SPINLOCK_COND(s)  (s->rlock.owner != current && s->rlock.owner_cpu != smp_processor_id())
+#else
+#define     HAS_SPINLOCK_COND(s)  (1)
+#define NOT_HAS_SPINLOCK_COND(s)  (1)
+#endif
+
+#define BCM_ASSERT_HAS_SPINLOCK_C(s)       BCM_ASSERT_C(HAS_SPINLOCK_COND((s)))
+
+#define BCM_ASSERT_HAS_SPINLOCK_V(s)       BCM_ASSERT_V(HAS_SPINLOCK_COND((s)))
+
+#define BCM_ASSERT_HAS_SPINLOCK_R(s, ret)  BCM_ASSERT_R(HAS_SPINLOCK_COND((s)), ret)
+
+#define BCM_ASSERT_HAS_SPINLOCK_A(s)       BCM_ASSERT_A(HAS_SPINLOCK_COND((s)))
+
+#define BCM_ASSERT_NOT_HAS_SPINLOCK_C(s)   BCM_ASSERT_C(NOT_HAS_SPINLOCK_COND((s)))
+
+#define BCM_ASSERT_NOT_HAS_SPINLOCK_V(s)   BCM_ASSERT_V(NOT_HAS_SPINLOCK_COND((s)))
+
+#define BCM_ASSERT_NOT_HAS_SPINLOCK_R(s, ret)  BCM_ASSERT_R(NOT_HAS_SPINLOCK_COND((s)), ret)
+
+#define BCM_ASSERT_NOT_HAS_SPINLOCK_A(s)   BCM_ASSERT_A(NOT_HAS_SPINLOCK_COND((s)))
+
+
+#ifdef CONFIG_DEBUG_MUTEXES
+#include <linux/mutex.h>
+#define     HAS_MUTEX_COND(m)     (m->owner == current)
+#define NOT_HAS_MUTEX_COND(m)     (m->owner != current)
+#else
+#define     HAS_MUTEX_COND(m)     (1)
+#define NOT_HAS_MUTEX_COND(m)     (1)
+#endif
+
+#define BCM_ASSERT_HAS_MUTEX_C(m)       BCM_ASSERT_C(HAS_MUTEX_COND((m)))
+
+#define BCM_ASSERT_HAS_MUTEX_V(m)       BCM_ASSERT_V(HAS_MUTEX_COND((m)))
+
+#define BCM_ASSERT_HAS_MUTEX_R(m, ret)  BCM_ASSERT_R(HAS_MUTEX_COND((m)), ret)
+
+#define BCM_ASSERT_HAS_MUTEX_A(m)       BCM_ASSERT_A(HAS_MUTEX_COND((m)))
+
+#define BCM_ASSERT_NOT_HAS_MUTEX_C(m)   BCM_ASSERT_C(NOT_HAS_MUTEX_COND((m)))
+
+#define BCM_ASSERT_NOT_HAS_MUTEX_V(m)   BCM_ASSERT_V(NOT_HAS_MUTEX_COND((m)))
+
+#define BCM_ASSERT_NOT_HAS_MUTEX_R(m, ret)  BCM_ASSERT_R(NOT_HAS_MUTEX_COND((m)), ret)
+
+#define BCM_ASSERT_NOT_HAS_MUTEX_A(m)   BCM_ASSERT_A(NOT_HAS_MUTEX_COND((m)))
+
+
+#endif /* __BCM_ASSERT_LOCKS_H__ */
+
+#endif // defined(CONFIG_BCM_KF_ASSERT)
+
diff --git a/include/linux/bcm_colors.h b/include/linux/bcm_colors.h
new file mode 100644
index 0000000000000000000000000000000000000000..176d9155ff542f7934e3ee954254d08be292b7f5
--- /dev/null
+++ b/include/linux/bcm_colors.h
@@ -0,0 +1,75 @@
+/*
+<:copyright-gpl
+ Copyright 2010 Broadcom Corp. All Rights Reserved.
+
+ This program is free software; you can distribute it and/or modify it
+ under the terms of the GNU General Public License (Version 2) as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+:>
+ */
+
+/*
+ *--------------------------------------------------------------------------
+ * Color encodings for console printing:
+ *
+ * This feature is controlled from top level make menuconfig, under
+ * Debug Selection>Enable Colorized Prints
+ *
+ * You may select a color specific to your subsystem by:
+ *  #define CLRsys CLRg
+ *
+ * Usage:  PRINT(CLRr "format" CLRNL);
+ *--------------------------------------------------------------------------
+ */
+
+#ifndef __BCM_COLORS_H__
+#define __BCM_COLORS_H__
+
+#ifdef CONFIG_BCM_COLORIZE_PRINTS
+#define BCMCOLOR(clr_code)     clr_code
+#else
+#define BCMCOLOR(clr_code)
+#endif
+
+/* White background */
+#define CLRr             BCMCOLOR("\e[0;31m")       /* red              */
+#define CLRg             BCMCOLOR("\e[0;32m")       /* green            */
+#define CLRy             BCMCOLOR("\e[0;33m")       /* yellow           */
+#define CLRb             BCMCOLOR("\e[0;34m")       /* blue             */
+#define CLRm             BCMCOLOR("\e[0;35m")       /* magenta          */
+#define CLRc             BCMCOLOR("\e[0;36m")       /* cyan             */
+
+/* blacK "inverted" background */
+#define CLRrk            BCMCOLOR("\e[0;31;40m")    /* red     on blacK */
+#define CLRgk            BCMCOLOR("\e[0;32;40m")    /* green   on blacK */
+#define CLRyk            BCMCOLOR("\e[0;33;40m")    /* yellow  on blacK */
+#define CLRmk            BCMCOLOR("\e[0;35;40m")    /* magenta on blacK */
+#define CLRck            BCMCOLOR("\e[0;36;40m")    /* cyan    on blacK */
+#define CLRwk            BCMCOLOR("\e[0;37;40m")    /* white   on blacK */
+
+/* Colored background */
+#define CLRcb            BCMCOLOR("\e[0;36;44m")    /* cyan    on blue  */
+#define CLRyr            BCMCOLOR("\e[0;33;41m")    /* yellow  on red   */
+#define CLRym            BCMCOLOR("\e[0;33;45m")    /* yellow  on magen */
+
+/* Generic foreground colors */
+#define CLRhigh          CLRm                    /* Highlight color  */
+#define CLRbold          CLRcb                   /* Bold      color  */
+#define CLRbold2         CLRym                   /* Bold2     color  */
+#define CLRerr           CLRyr                   /* Error     color  */
+#define CLRnorm          BCMCOLOR("\e[0m")       /* Normal    color  */
+#define CLRnl            CLRnorm "\n"            /* Normal + newline */
+
+/* Each subsystem may define CLRsys */
+
+#endif /* __BCM_COLORS_H__ */
+
diff --git a/include/linux/bcm_dslcpe_wlan_info.h b/include/linux/bcm_dslcpe_wlan_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..697bbd69726b2bc8e5a94164044840420bbe7cef
--- /dev/null
+++ b/include/linux/bcm_dslcpe_wlan_info.h
@@ -0,0 +1,28 @@
+#ifndef __BCM_DSLCPE_WLAN_INFO_H_
+#define __BCM_DSLCPE_WLAN_INFO_H_
+#include <linux/netdevice.h>
+#include <linux/blog.h>
+#define WLAN_CLIENT_INFO_OK (0)
+#define WLAN_CLIENT_INFO_ERR (-1)
+typedef enum {
+    WLAN_CLIENT_TYPE_CPU,
+    WLAN_CLIENT_TYPE_WFD,
+    WLAN_CLIENT_TYPE_RUNNER,
+    WLAN_CLIENT_TYPE_MAX
+} wlan_client_type_t;
+
+typedef struct  {
+    wlan_client_type_t type;
+    union {
+        uint32_t        wl;
+        BlogWfd_t       wfd;
+        BlogRnr_t       rnr;
+    };
+} wlan_client_info_t;
+
+
+typedef int (* wlan_client_get_info_t)(struct net_device *dev,char *mac_address_p,int priority, wlan_client_info_t *info_p);
+
+/* function called when wireless STA gets disassociated  */
+extern int wlan_client_disconnect_notifier(struct net_device *dev,char *mac);
+#endif
diff --git a/include/linux/bcm_log.h b/include/linux/bcm_log.h
new file mode 100644
index 0000000000000000000000000000000000000000..60069bd14b7a7455e17492ace98521fd588cac81
--- /dev/null
+++ b/include/linux/bcm_log.h
@@ -0,0 +1,319 @@
+#if defined(CONFIG_BCM_KF_LOG)
+/*
+* <:copyright-BRCM:2012:DUAL/GPL:standard
+* 
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+#ifndef _BCM_LOG_SERVICES_
+#define _BCM_LOG_SERVICES_
+
+#if !defined(__KERNEL__)
+#include <stdint.h>             /**< ISO C99 7.18 Integer Types */
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include <linux/bcm_log_mod.h>
+
+#if defined(__KERNEL__)
+#define bcmPrint            printk
+#else
+#define bcmPrint            printf
+#define BUG()               do { } while(0)
+#define EXPORT_SYMBOL(sym)
+#endif
+
+/*********
+ *********
+ * Private:
+ *********
+ *********/
+
+
+#define IN /*Input parameters*/
+#define OUT /*Output parameters*/
+#define INOUT /*Input/Output parameters*/
+
+/*
+ * This block of defines selects supported functionality for everything
+ * that includes bcm_log.h.  Selection of functionality will eventually
+ * be moved to make menuconfig.  CONFIG_BRCM_COLORIZE_PRINTS is already
+ * in make menuconfig, but it is locally disabled here.
+ */
+#ifdef CONFIG_BCM_LOG
+#undef CONFIG_BRCM_COLORIZE_PRINTS
+#define BCM_ASSERT_SUPPORTED
+#define BCM_LOG_SUPPORTED
+#define BCM_DATADUMP_SUPPORTED
+#define BCM_ERROR_SUPPORTED
+#undef BCM_SNAPSHOT_SUPPORTED
+#endif /* CONFIG_BCM_LOG */
+
+#include <linux/bcm_colors.h>
+
+#if defined(BCM_ASSERT_SUPPORTED)
+#define BCM_ASSERTCODE(code)    code
+#else
+#define BCM_ASSERTCODE(code)
+#endif /*defined(BCM_ASSERT_SUPPORTED)*/
+
+#if defined(BCM_LOG_SUPPORTED)
+#define BCM_LOGCODE(code)    code
+#else
+#define BCM_LOGCODE(code)
+#endif /*defined(BCM_LOG_SUPPORTED)*/
+
+#if defined(BCM_ERROR_SUPPORTED)
+#define BCM_ERRORCODE(code)    code
+#else
+#define BCM_ERRORCODE(code)
+#endif /*defined(BCM_ERROR_SUPPORTED)*/
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+#define BCM_DATADUMPCODE(code)    code
+#else
+#define BCM_DATADUMPCODE(code) 0
+#endif /*defined(BCM_DATADUMP_SUPPORTED)*/
+
+#if defined(BCM_SNAPSHOT_SUPPORTED)
+#define BCM_SNAPSHOTCODE(code)    code
+#else
+#define BCM_SNAPSHOTCODE(code) 0
+#endif /*defined(BCM_SNAPSHOT_SUPPORTED)*/
+
+typedef enum {
+    BCM_LOG_DD_IMPORTANT=0,
+    BCM_LOG_DD_INFO,
+    BCM_LOG_DD_DETAIL,
+    BCM_LOG_DD_MAX
+} bcmLogDataDumpLevel_t;
+
+typedef void (*bcmLogLevelChangeCallback_t)(bcmLogId_t logId, bcmLogLevel_t level, void *ctx);
+
+typedef struct {
+    bcmLogId_t logId;
+    char *name;
+    bcmLogLevel_t logLevel;
+    bcmLogDataDumpLevel_t ddLevel;
+    bcmLogLevelChangeCallback_t lcCallback;
+    void * lcCallbackCtx;
+} bcmLogModuleInfo_t;
+
+typedef struct
+{
+    int (*reserveSlave)(int busNum, int slaveId, int maxFreq);
+    int (*syncTrans)(unsigned char *txBuf, unsigned char *rxBuf, int prependcnt, int nbytes, int busNum, int slaveId);
+    int (*kerSysSlaveWrite)(int dev, unsigned long addr, unsigned long data, unsigned long len);
+    int (*kerSysSlaveRead)(int dev, unsigned long addr, unsigned long *data, unsigned long len);
+    int (*bpGet6829PortInfo)( unsigned char *portInfo6829 );
+    
+} bcmLogSpiCallbacks_t;
+
+
+/********
+ ********
+ * Public: service API offered by LOGdriver to other drivers
+ ********
+ ********/
+
+/**
+ * Logging API: Activate by #defining BCM_LOG_SUPPORTED
+ **/
+
+#if defined(BCM_LOG_SUPPORTED)
+bcmLogModuleInfo_t *bcmLog_logIsEnabled(bcmLogId_t logId, bcmLogLevel_t logLevel);
+#else
+#define bcmLog_logIsEnabled(arg1, arg2) 0
+#endif
+
+#define BCM_LOG_FUNC(logId)                     \
+    BCM_LOG_DEBUG((logId), " ")
+
+#define BCM_LOG_DEBUG(logId, fmt, arg...)                               \
+    BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_DEBUG); \
+                      if (_pModInfo)                                              \
+                          bcmPrint(CLRm "[DBG " "%s" "] %-10s: " fmt CLRnl, \
+                                 _pModInfo->name, __FUNCTION__, ##arg); } while(0) )
+
+#define BCM_LOG_INFO(logId, fmt, arg...)                               \
+    BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_INFO); \
+                      if (_pModInfo)                                              \
+                          bcmPrint(CLRg "[INF " "%s" "] %-10s: " fmt CLRnl, \
+                                 _pModInfo->name, __FUNCTION__, ##arg); } while(0) )
+
+#define BCM_LOG_NOTICE(logId, fmt, arg...)                               \
+    BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_NOTICE); \
+                      if (_pModInfo)                                              \
+                          bcmPrint(CLRb "[NTC " "%s" "] %-10s: " fmt CLRnl, \
+                                 _pModInfo->name, __FUNCTION__, ##arg); } while(0) )
+
+
+/**
+ * Error Reporting API: Activate by #defining BCM_ERROR_SUPPORTED
+ **/
+
+#define BCM_LOG_ERROR(logId, fmt, arg...)                                \
+    BCM_ERRORCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_ERROR); \
+                      if (_pModInfo)                                              \
+                          bcmPrint(CLRerr "[ERROR " "%s" "] %-10s,%d: " fmt CLRnl, \
+                                 _pModInfo->name, __FUNCTION__, __LINE__, ##arg); } while(0) )
+
+
+/**
+ * Assert API: Activate by #defining BCM_ASSERT_SUPPORTED
+ **/
+
+#define BCM_ASSERT(cond)                                         \
+    BCM_ASSERTCODE( if ( !(cond) ) {                                    \
+                        bcmPrint(CLRerr "[ASSERT " "%s" "] %-10s,%d: " #cond CLRnl, \
+                               __FILE__, __FUNCTION__, __LINE__); \
+                        BUG();                                          \
+                     } )
+
+
+/**
+ * Datadump API: Activate by #defining BCM_DATADUMP_SUPPORTED
+ **/
+
+/*
+ * Prototype of datadump print functions.
+ * Note: parse functions must be exported (EXPORT_SYMBOL)
+ */
+typedef int (Bcm_DataDumpPrintFunc)(uint32_t dataDumpId, IN void* dataPtr, uint32_t numDataBytes,
+                                    OUT char* buf, uint32_t bufSize);
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+bcmLogModuleInfo_t *bcmLog_ddIsEnabled(bcmLogId_t logId, bcmLogDataDumpLevel_t ddLevel);
+void bcm_dataDumpRegPrinter(uint32_t qId, uint32_t dataDumpId, Bcm_DataDumpPrintFunc *printFun);
+void bcm_dataDump(uint32_t qID, uint32_t dataDumpID, const char* dataDumpName, void *ptr, uint32_t numBytes);
+uint32_t bcm_dataDumpCreateQ(const char* qName);
+void bcm_dataDumpDeleteQ(uint32_t qid);
+#endif
+
+/*
+ * Create a DataDump queue. Different modules can share a queue.
+ * Returns a queue ID (uint32_t).
+ */
+#define BCM_DATADUMP_CREATE_Q(qName) BCM_DATADUMPCODE(bcm_dataDumpCreateQ(qName))
+
+/*
+ * Delete a DataDump queue.
+ */
+#define BCM_DATADUMP_DELETE_Q(qID) BCM_DATADUMPCODE(bcm_dataDumpDeleteQ(qID))
+
+/*
+ * Dump data
+ */
+#define BCM_DATADUMP_IMPORTANT(logId, qID, dataDumpID, ptr, numBytes) \
+    BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_IMPORTANT); \
+                      if (_pModInfo)                                              \
+                          bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) )
+#define BCM_DATADUMP_INFO(logId, qID, dataDumpID, ptr, numBytes) \
+    BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_INFO); \
+                      if (_pModInfo)                                              \
+                          bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) )
+#define BCM_DATADUMP_DETAIL(logId, qID, dataDumpID, ptr, numBytes) \
+    BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_DETAIL); \
+                      if (_pModInfo)                                              \
+                          bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) )
+#define BCM_DATADUMP_MAX(logId, qID, dataDumpID, ptr, numBytes) \
+    BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_MAX); \
+                      if (_pModInfo)                                              \
+                          bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) )
+
+/*
+ * Register a printer for a certain DataDump ID.
+ * Datadumps for which no printer is registered will use a default printer.
+ * The default printer will print the data as an array of bytes.
+ */
+#define BCM_DATADUMP_REG_PRINTER(qId, dataDumpId, printFun)             \
+    BCM_DATADUMPCODE(bcm_dataDumpRegPrinter(qId, dataDumpId, printFun))
+
+/* A helper macro for datadump printers */
+#define DDPRINTF(buf, len, bufSize, arg...)                             \
+    ({len += snprintf((buf)+(len), max_t(uint32_t, 0, (bufSize)-80-(len)), ##arg); \
+        if ((len) >= (bufSize)-80) snprintf((buf)+(len), 80, "---BUFFER FULL---\n");})
+
+
+/**
+ * Snapshot API: Commit all logs to the Snapshot queue
+ **/
+
+#define BCM_LOG_SNAPSHOT() BCM_SNAPSHOTCODE() /*TBD*/
+
+
+/**
+ * API Function Prototypes
+ **/
+
+#ifdef CONFIG_BCM_LOG
+
+void __init bcmLog_init( void );
+
+void bcmLog_setGlobalLogLevel(bcmLogLevel_t logLevel);
+bcmLogLevel_t bcmLog_getGlobalLogLevel(void);
+
+void bcmLog_setLogLevel(bcmLogId_t logId, bcmLogLevel_t logLevel);
+bcmLogLevel_t bcmLog_getLogLevel(bcmLogId_t logId);
+
+char *bcmLog_getModName(bcmLogId_t logId);
+
+void bcmLog_registerSpiCallbacks(bcmLogSpiCallbacks_t callbacks);
+
+typedef int (bcmFun_t)(void *);
+
+/*Register a function with the bcmLog driver*/
+void bcmFun_reg(bcmFunId_t funId, bcmFun_t *f);
+
+/*De-Register a function with the bcmLog driver*/
+void bcmFun_dereg(bcmFunId_t funId);
+
+/*Look up a function by FunId. Returns NULL if the function is not
+ *registered.*/
+bcmFun_t* bcmFun_get(bcmFunId_t funId);
+
+void bcmLog_registerLevelChangeCallback(bcmLogId_t logId, bcmLogLevelChangeCallback_t callback, void *ctx);
+
+#else
+
+/* BCM LOG not configured: create empty stubs for all functions */
+static inline void bcmLog_init( void )                                           {}
+static inline void bcmLog_setGlobalLogLevel(bcmLogLevel_t loglevel)              {}
+static inline bcmLogLevel_t bcmLog_getGlobalLogLevel(void)                       { return 0; }
+static inline char *bcmLog_getModName(bcmLogId_t logId)                          { return NULL; }
+static inline void bcmLog_registerSpiCallbacks(bcmLogSpiCallbacks_t callbacks)   {}
+static inline void bcmLog_setLogLevel(bcmLogId_t logId, bcmLogLevel_t logLevel)  {}
+static inline bcmLogLevel_t bcmLog_getLogLevel(bcmLogId_t logId)                 { return 0; }
+typedef int (bcmFun_t)(void *);
+static inline void bcmFun_reg(bcmFunId_t funId, bcmFun_t f)                      {}
+static inline void bcmFun_dereg(bcmFunId_t funId)                                {}
+static inline bcmFun_t* bcmFun_get(bcmFunId_t funId)                             { return NULL; }
+static inline void bcmLog_registerLevelChangeCallback(bcmLogId_t logId, bcmLogLevelChangeCallback_t callback, void *ctx) {}
+
+
+#endif /* CONFIG_BCM_LOG */
+#endif /*_BCM_LOG_SERVICES_*/
+#endif /* CONFIG_BCM_KF_LOG */
diff --git a/include/linux/bcm_log_mod.h b/include/linux/bcm_log_mod.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5511cbe916e0495d912eb6160c089d5135f10a0
--- /dev/null
+++ b/include/linux/bcm_log_mod.h
@@ -0,0 +1,209 @@
+/*
+* <:copyright-BRCM:2010:DUAL/GPL:standard
+* 
+*    Copyright (c) 2010 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+* :>
+
+*/
+
+#ifndef _BCM_LOG_MODULES_
+#define _BCM_LOG_MODULES_
+
+typedef enum {
+    BCM_LOG_LEVEL_ERROR=0,
+    BCM_LOG_LEVEL_NOTICE,
+    BCM_LOG_LEVEL_INFO,
+    BCM_LOG_LEVEL_DEBUG,
+    BCM_LOG_LEVEL_MAX
+} bcmLogLevel_t;
+
+/* To support a new module, create a new log ID in bcmLogId_t,
+   and a new entry in BCM_LOG_MODULE_INFO */
+
+
+typedef enum {
+    BCM_LOG_ID_LOG=0,
+    BCM_LOG_ID_VLAN,
+    BCM_LOG_ID_GPON,
+    BCM_LOG_ID_PLOAM,
+    BCM_LOG_ID_PLOAM_FSM,
+    BCM_LOG_ID_PLOAM_HAL,
+    BCM_LOG_ID_PLOAM_PORT,
+    BCM_LOG_ID_PLOAM_ALARM,
+    BCM_LOG_ID_OMCI,
+    BCM_LOG_ID_I2C,
+    BCM_LOG_ID_ENET,
+    BCM_LOG_ID_CMF,
+    BCM_LOG_ID_CMFAPI,
+    BCM_LOG_ID_CMFNAT,
+    BCM_LOG_ID_CMFHAL,
+    BCM_LOG_ID_CMFHW,
+    BCM_LOG_ID_CMFHWIF,
+    BCM_LOG_ID_CMFFFE,
+    BCM_LOG_ID_GPON_SERDES,
+    BCM_LOG_ID_FAP,
+    BCM_LOG_ID_FAPPROTO,
+    BCM_LOG_ID_FAP4KE,
+    BCM_LOG_ID_AE,
+    BCM_LOG_ID_XTM,
+    BCM_LOG_ID_VOICE_EPT,
+    BCM_LOG_ID_VOICE_XDRV,
+    BCM_LOG_ID_VOICE_BOS,
+    BCM_LOG_ID_VOICE_XDRV_SLIC,
+    BCM_LOG_ID_IQ,
+    BCM_LOG_ID_BPM,
+    BCM_LOG_ID_ARL,
+    BCM_LOG_ID_EPON,
+    BCM_LOG_ID_GMAC,   
+    BCM_LOG_ID_RDPA,
+    BCM_LOG_ID_RDPA_CMD_DRV,
+    BCM_LOG_ID_PKTRUNNER,
+    BCM_LOG_ID_SIM_CARD,
+    BCM_LOG_ID_PMD,
+    BCM_LOG_ID_MAX
+} bcmLogId_t;
+
+#define BCM_LOG_MODULE_INFO                             \
+    {                                                   \
+        {.logId = BCM_LOG_ID_LOG, .name = "bcmlog", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_VLAN, .name = "vlan", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_GPON, .name = "gpon", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PLOAM, .name = "ploam", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PLOAM_FSM, .name = "ploamFsm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PLOAM_HAL, .name = "ploamHal", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PLOAM_PORT, .name = "ploamPort", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PLOAM_ALARM, .name = "ploamAlarm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_OMCI, .name = "omci", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_I2C, .name = "i2c", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_ENET, .name = "enet", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMF, .name = "pktcmf", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFAPI, .name = "cmfapi", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFNAT, .name = "cmfnat", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFHAL, .name = "cmfhal", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFHW, .name = "cmfhw", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFHWIF, .name = "cmfhwif", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_CMFFFE, .name = "cmfffe", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_GPON_SERDES, .name = "gponSerdes", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_FAP, .name = "fap", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_FAPPROTO, .name = "fapProto", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_FAP4KE, .name = "fap4ke", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_AE, .name = "ae", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_XTM, .name = "xtm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_VOICE_EPT, .name = "ept", .logLevel = BCM_LOG_LEVEL_ERROR}, \
+        {.logId = BCM_LOG_ID_VOICE_XDRV, .name = "xdrv", .logLevel = BCM_LOG_LEVEL_ERROR}, \
+        {.logId = BCM_LOG_ID_VOICE_BOS, .name = "bos", .logLevel = BCM_LOG_LEVEL_ERROR}, \
+        {.logId = BCM_LOG_ID_VOICE_XDRV_SLIC, .name = "xdrv_slic", .logLevel = BCM_LOG_LEVEL_DEBUG}, \
+        {.logId = BCM_LOG_ID_IQ, .name = "iq", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_BPM, .name = "bpm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_ARL, .name = "arl", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_EPON, .name = "eponlue", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_GMAC, .name = "gmac", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_RDPA, .name = "rdpa", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_RDPA_CMD_DRV, .name = "rdpadrv", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PKTRUNNER, .name = "pktrunner", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+		{.logId = BCM_LOG_ID_SIM_CARD, .name = "sim_card", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+        {.logId = BCM_LOG_ID_PMD, .name = "pmd", .logLevel = BCM_LOG_LEVEL_NOTICE}, \
+    }
+
+/* To support a new registered function,
+ * create a new BCM_FUN_ID */
+
+typedef enum {
+    BCM_FUN_ID_RESET_SWITCH=0,
+    BCM_FUN_ID_ENET_LINK_CHG,
+    BCM_FUN_ID_ENET_CHECK_SWITCH_LOCKUP,
+    BCM_FUN_ID_ENET_GET_PORT_BUF_USAGE,
+    BCM_FUN_ID_GPON_GET_GEM_PID_QUEUE,
+    BCM_FUN_ID_ENET_HANDLE,
+    BCM_FUN_ID_EPON_HANDLE,
+    BCM_FUN_ID_CMF_FFE_CLK,
+    BCM_FUN_IN_ENET_CLEAR_ARL_ENTRY,
+#if defined(CONFIG_BCM_GMAC)
+    BCM_FUN_ID_ENET_GMAC_ACTIVE,
+    BCM_FUN_ID_ENET_GMAC_PORT,
+#endif
+    BCM_FUN_ID_CMF_ETH_RESET_STATS,
+    BCM_FUN_ID_CMF_ETH_GET_STATS,
+    BCM_FUN_ID_CMF_XTM_RESET_STATS,
+    BCM_FUN_ID_CMF_XTM_GET_STATS,
+    BCM_FUN_ID_ENET_IS_WAN_PORT, /* Take Logical port number as argument */
+    BCM_FUN_ID_ENET_IS_SWSWITCH_PORT,
+    BCM_FUN_ID_MAX
+
+} bcmFunId_t;
+
+/* Structures passed in above function calls */
+typedef struct {
+    uint16_t gemPortIndex; /* Input */
+    uint16_t gemPortId;    /* Output */
+    uint8_t  usQueueIdx;   /* Output */
+}BCM_GponGemPidQueueInfo;
+
+typedef enum {
+    BCM_ENET_FUN_TYPE_LEARN_CTRL = 0,
+    BCM_ENET_FUN_TYPE_ARL_WRITE,
+    BCM_ENET_FUN_TYPE_AGE_PORT,
+    BCM_ENET_FUN_TYPE_UNI_UNI_CTRL,
+    BCM_ENET_FUN_TYPE_PORT_RX_CTRL,
+    BCM_ENET_FUN_TYPE_GET_VPORT_CNT,
+    BCM_ENET_FUN_TYPE_GET_IF_NAME_OF_VPORT,
+    BCM_ENET_FUN_TYPE_GET_UNIPORT_MASK,
+    BCM_ENET_FUN_TYPE_MAX
+} bcmFun_Type_t;
+
+typedef struct {
+    uint16_t vid;
+    uint16_t val;
+    uint8_t mac[6];
+} arlEntry_t;
+
+typedef struct {
+    bcmFun_Type_t type; /* Action Needed in Enet Driver */
+    union {
+        uint8_t port;
+        uint8_t uniport_cnt;
+        uint16_t portMask;
+        arlEntry_t arl_entry;
+    };
+    char name[16];
+    uint8_t enable;
+}BCM_EnetHandle_t;
+
+typedef enum {
+    BCM_EPON_FUN_TYPE_UNI_UNI_CTRL = 0,
+    BCM_EPON_FUN_TYPE_MAX
+} bcmEponFun_Type_t;
+
+typedef struct {
+    bcmEponFun_Type_t type; /* Action Needed in Epon Driver */
+    uint8_t enable;
+}BCM_EponHandle_t;
+
+typedef struct {
+    uint8_t port; /* switch port */
+    uint8_t enable; /* enable/disable the clock */
+}BCM_CmfFfeClk_t;
+
+#endif /* _BCM_LOG_MODULES_ */
diff --git a/include/linux/bcm_m2mdma.h b/include/linux/bcm_m2mdma.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d339edb625a9966d1febd9e95e04eea7bb5f2b2
--- /dev/null
+++ b/include/linux/bcm_m2mdma.h
@@ -0,0 +1,41 @@
+/*
+<:copyright-BRCM:2015:DUAL/GPL:standard
+
+   Copyright (c) 2015 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#if !defined(__BCM_M2M_DMA_H__) && defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)
+#define __BCM_M2M_DMA_H__
+
+
+extern uint32_t bcm_m2m_dma_memcpy_async_uncached(uint32_t phys_dest, uint32_t phys_src, uint16_t len);
+
+extern uint32_t bcm_m2m_dma_memcpy_async(void *dest, void *src, uint16_t len);
+extern uint32_t bcm_m2m_dma_memcpy_async_no_flush(void *dest, void *src, uint16_t len);
+
+extern uint32_t bcm_m2m_dma_memcpy_async_no_flush_inv(void *dest, void *src, uint16_t len);
+extern int bcm_m2m_wait_for_complete(uint32_t desc_id);
+
+#endif /* __BCM_M2M_DMA_H__ */
diff --git a/include/linux/bcm_realtime.h b/include/linux/bcm_realtime.h
new file mode 100644
index 0000000000000000000000000000000000000000..15b0f108ae36cbec89f7f3da51256ab43b7222b5
--- /dev/null
+++ b/include/linux/bcm_realtime.h
@@ -0,0 +1,71 @@
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef _BCM_REALTIME_H_
+#define _BCM_REALTIME_H_
+
+/*
+ * This file defines the real time priority levels used by the various
+ * threads in the system.  It is important that all threads coordinate
+ * their priority levels so that the desired effect is achieved.
+ * These priorities are also related to cgroups, so check the cgroups
+ * groupings and cpu allocations (if cgroups is enabled).
+ */
+
+/** highest priority threads in the system.
+ *
+ * Threads at this priority require the absolute minimum latency.  However,
+ * they should only run very briefly (<2ms per run).
+ * These threads should also run at sched policy FIFO.
+ */
+#define BCM_RTPRIO_HIGH               75
+
+
+/** priority for the voip DSP.
+ *
+ * Note this is not for all voip threads, just the DSP thread.
+ * The other voice threads should be run at the other priorities that are
+ * defined.
+ */
+#define BCM_RTPRIO_VOIPDSP            35
+
+
+/** priority for all data forwarding.
+ *
+ * This is for data and video streaming.  Not clear if we need to split out
+ * sub-categories here such as video, versus web data, versus voice.
+ * Probably need to use cgroups if a system needs to handle many types of
+ * streams.
+ * Threads running at this priority should use sched policy Round-Robin.
+ */
+#define BCM_RTPRIO_DATA                 5
+
+
+#endif /* _BCM_REALTIME_H_ */
+
diff --git a/include/linux/bcm_skb_defines.h b/include/linux/bcm_skb_defines.h
new file mode 100644
index 0000000000000000000000000000000000000000..622ff68faa3e98b19fc9be578f2afde8c50d10c8
--- /dev/null
+++ b/include/linux/bcm_skb_defines.h
@@ -0,0 +1,133 @@
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+/*
+* <:copyright-BRCM:2014:DUAL/GPL:standard
+* 
+*    Copyright (c) 2014 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+* :>
+*/
+
+#ifndef _BCM_SKB_DEFINES_
+#define _BCM_SKB_DEFINES_
+
+/* queue = mark[4:0] */
+#define SKBMARK_Q_S             0
+#define SKBMARK_Q_M             (0x1F << SKBMARK_Q_S)
+#define SKBMARK_GET_Q(MARK)     ((MARK & SKBMARK_Q_M) >> SKBMARK_Q_S)
+#define SKBMARK_SET_Q(MARK, Q)  ((MARK & ~SKBMARK_Q_M) | (Q << SKBMARK_Q_S))
+/* traffic_class_id = mark[10:5] */
+#define SKBMARK_TC_ID_S         5
+#define SKBMARK_TC_ID_M         (0x3F << SKBMARK_TC_ID_S)
+#define SKBMARK_GET_TC_ID(MARK) ((MARK & SKBMARK_TC_ID_M) >> SKBMARK_TC_ID_S)
+#define SKBMARK_SET_TC_ID(MARK, TC) \
+    ((MARK & ~SKBMARK_TC_ID_M) | (TC << SKBMARK_TC_ID_S))
+/* flow_id = mark[18:11] */
+#define SKBMARK_FLOW_ID_S       11
+#define SKBMARK_FLOW_ID_M       (0xFF << SKBMARK_FLOW_ID_S)
+#define SKBMARK_GET_FLOW_ID(MARK) \
+    ((MARK & SKBMARK_FLOW_ID_M) >> SKBMARK_FLOW_ID_S)
+#define SKBMARK_SET_FLOW_ID(MARK, FLOW) \
+    ((MARK & ~SKBMARK_FLOW_ID_M) | (FLOW << SKBMARK_FLOW_ID_S))
+/* iq_prio = mark[19]; for Ingress QoS used when TX is WLAN */
+#define SKBMARK_IQPRIO_MARK_S    19
+#define SKBMARK_IQPRIO_MARK_M    (0x01 << SKBMARK_IQPRIO_MARK_S)
+#define SKBMARK_GET_IQPRIO_MARK(MARK) \
+    ((MARK & SKBMARK_IQPRIO_MARK_M) >> SKBMARK_IQPRIO_MARK_S)
+#define SKBMARK_SET_IQPRIO_MARK(MARK, IQPRIO_MARK) \
+    ((MARK & ~SKBMARK_IQPRIO_MARK_M) | (IQPRIO_MARK << SKBMARK_IQPRIO_MARK_S))
+/* port = mark[26:20]; for enet driver of gpon port, this is gem_id */
+#define SKBMARK_PORT_S          20
+#define SKBMARK_PORT_M          (0x7F << SKBMARK_PORT_S)
+#define SKBMARK_GET_PORT(MARK) \
+    ((MARK & SKBMARK_PORT_M) >> SKBMARK_PORT_S)
+#define SKBMARK_SET_PORT(MARK, PORT) \
+    ((MARK & ~SKBMARK_PORT_M) | (PORT << SKBMARK_PORT_S))
+#if defined(CONFIG_BCM_KF_ENET_SWITCH)
+/* iffwan_mark = mark[27] --  BRCM defined-- */
+#define SKBMARK_IFFWAN_MARK_S    27
+#define SKBMARK_IFFWAN_MARK_M    (0x01 << SKBMARK_IFFWAN_MARK_S)
+#define SKBMARK_GET_IFFWAN_MARK(MARK) \
+    ((MARK & SKBMARK_IFFWAN_MARK_M) >> SKBMARK_IFFWAN_MARK_S)
+#define SKBMARK_SET_IFFWAN_MARK(MARK, IFFWAN_MARK) \
+    ((MARK & ~SKBMARK_IFFWAN_MARK_M) | (IFFWAN_MARK << SKBMARK_IFFWAN_MARK_S))
+#endif
+/* ipsec_mark = mark[28] */
+#define SKBMARK_IPSEC_MARK_S    28
+#define SKBMARK_IPSEC_MARK_M    (0x01 << SKBMARK_IPSEC_MARK_S)
+#define SKBMARK_GET_IPSEC_MARK(MARK) \
+    ((MARK & SKBMARK_IPSEC_MARK_M) >> SKBMARK_IPSEC_MARK_S)
+#define SKBMARK_SET_IPSEC_MARK(MARK, IPSEC_MARK) \
+    ((MARK & ~SKBMARK_IPSEC_MARK_M) | (IPSEC_MARK << SKBMARK_IPSEC_MARK_S))
+/* policy_routing = mark[31:29] */
+#define SKBMARK_POLICY_RTNG_S   29
+#define SKBMARK_POLICY_RTNG_M   (0x07 << SKBMARK_POLICY_RTNG_S)
+#define SKBMARK_GET_POLICY_RTNG(MARK)  \
+    ((MARK & SKBMARK_POLICY_RTNG_M) >> SKBMARK_POLICY_RTNG_S)
+#define SKBMARK_SET_POLICY_RTNG(MARK, POLICY) \
+    ((MARK & ~SKBMARK_POLICY_RTNG_M) | (POLICY << SKBMARK_POLICY_RTNG_S))
+
+/* The enet driver subdivides queue field (mark[4:0]) in the skb->mark into
+   priority and channel */
+/* priority = queue[2:0] (=>mark[2:0]) */
+#define SKBMARK_Q_PRIO_S        (SKBMARK_Q_S)
+#define SKBMARK_Q_PRIO_M        (0x07 << SKBMARK_Q_PRIO_S)
+#define SKBMARK_GET_Q_PRIO(MARK) \
+    ((MARK & SKBMARK_Q_PRIO_M) >> SKBMARK_Q_PRIO_S)
+#define SKBMARK_SET_Q_PRIO(MARK, Q) \
+    ((MARK & ~SKBMARK_Q_PRIO_M) | (Q << SKBMARK_Q_PRIO_S))
+/* channel = queue[4:3] (=>mark[4:3]) */
+#define SKBMARK_Q_CH_S          (SKBMARK_Q_S + 3)
+#define SKBMARK_Q_CH_M          (0x03 << SKBMARK_Q_CH_S)
+#define SKBMARK_GET_Q_CHANNEL(MARK) ((MARK & SKBMARK_Q_CH_M) >> SKBMARK_Q_CH_S)
+#define SKBMARK_SET_Q_CHANNEL(MARK, CH) \
+    ((MARK & ~SKBMARK_Q_CH_M) | (CH << SKBMARK_Q_CH_S))
+
+#define SKBMARK_ALL_GEM_PORT  (0xFF) 
+
+#define WLAN_PRIORITY_BIT_POS  (1)
+#define WLAN_PRIORITY_MASK     (0x7 << WLAN_PRIORITY_BIT_POS)
+#define GET_WLAN_PRIORITY(VAL) ((VAL & WLAN_PRIORITY_MASK) >> WLAN_PRIORITY_BIT_POS)
+#define SET_WLAN_PRIORITY(ENCODEVAL, PRIO) ((ENCODEVAL & (~WLAN_PRIORITY_MASK)) | (PRIO << WLAN_PRIORITY_BIT_POS))
+
+#define WLAN_IQPRIO_BIT_POS    (0)
+#define WLAN_IQPRIO_MASK       (0x1 << WLAN_IQPRIO_BIT_POS)
+#define GET_WLAN_IQPRIO(VAL)   ((VAL & WLAN_IQPRIO_MASK) >> WLAN_IQPRIO_BIT_POS)
+#define SET_WLAN_IQPRIO(ENCODEVAL, IQPRIO) ((ENCODEVAL & (~WLAN_IQPRIO_MASK)) | (IQPRIO << WLAN_IQPRIO_BIT_POS))
+
+// LINUX_PRIORITY_BIT_POS_IN_MARK macro must be in sync with PRIO_LOC_NFMARK
+// defined in linux_osl_dslcpe.h
+#define LINUX_PRIORITY_BIT_POS_IN_MARK    16
+#define LINUX_PRIORITY_BIT_MASK          (0x7 << LINUX_PRIORITY_BIT_POS_IN_MARK)
+#define LINUX_GET_PRIO_MARK(MARK)        ((MARK & LINUX_PRIORITY_BIT_MASK) >> LINUX_PRIORITY_BIT_POS_IN_MARK)
+#define LINUX_SET_PRIO_MARK(MARK, PRIO)  ((MARK & (~LINUX_PRIORITY_BIT_MASK)) | (PRIO << LINUX_PRIORITY_BIT_POS_IN_MARK)) 
+
+//Encode 3 bits of priority and 1 bit of IQPRIO into 4 bits as follows (3bitPrio:1bitIQPrio)
+#define ENCODE_WLAN_PRIORITY_MARK(u8EncodeVal, u32Mark) \
+    (u8EncodeVal = SET_WLAN_PRIORITY(u8EncodeVal, LINUX_GET_PRIO_MARK(u32Mark)) | SET_WLAN_IQPRIO(u8EncodeVal, SKBMARK_GET_IQPRIO_MARK(u32Mark)))
+#define DECODE_WLAN_PRIORITY_MARK(encodedVal, u32Mark) \
+    (u32Mark = (LINUX_SET_PRIO_MARK(u32Mark, GET_WLAN_PRIORITY(encodedVal)) | SKBMARK_SET_IQPRIO_MARK(u32Mark, GET_WLAN_IQPRIO(encodedVal))))
+
+
+#endif /* _BCM_SKB_DEFINES_ */
+#endif /* CONFIG_BCM_KF_SKB_DEFINES */
diff --git a/include/linux/bcm_swversion.h b/include/linux/bcm_swversion.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b22e67927e96c17b3b5f22bfb044482bad9aabe
--- /dev/null
+++ b/include/linux/bcm_swversion.h
@@ -0,0 +1,9 @@
+/* this file is automatically generated from top level Makefile */
+#ifndef __BCM_SWVERSION_H__
+#define __BCM_SWVERSION_H__
+#define BCM_REL_VERSION 4
+#define BCM_REL_RELEASE 16
+#define BCM_REL_PATCH 4
+#define BCM_SW_VERSIONCODE (4*65536+16*256+4)
+#define BCM_SW_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
diff --git a/include/linux/bcm_tstamp.h b/include/linux/bcm_tstamp.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1745eb2aaa9037cf8179f131bc87c8ebff54468
--- /dev/null
+++ b/include/linux/bcm_tstamp.h
@@ -0,0 +1,69 @@
+#if defined(CONFIG_BCM_KF_TSTAMP)
+/*
+<:copyright-BRCM:2011:GPL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+#ifndef _BCM_TSTAMP_H_
+#define _BCM_TSTAMP_H_
+
+#include <linux/types.h>
+
+/*
+ * This is a just a simple set of utility functions for measuring
+ * small amounts of time (<20 seconds) using the MIPS c0 counter.
+ * The main limitation with this implementation is that the c0 counter
+ * will roll over about every 21 seconds, so measurements of time that
+ * are longer than 20 seconds will be unreliable.
+ * These functions maintain no state (other than the initial multipliers
+ * and divisors based on clock speed), so no SMP locking is needed to
+ * use these functions.
+ * It is OK to read a starting timestamp on one CPU, and then read the
+ * ending timestamp on the other.  The c0 counters of both CPU's are
+ * within about 20 cycles of each other, and bcm_tstamp_delta() tries
+ * to detect if a migration plus read of a slightly behind end timestamp
+ * has happened (seems extremely unlikely though).  In this case, it
+ * returns 1 cycle (instead of 4 billion cycles, which is unlikely unless
+ * you are measuring something that is close to 20 seconds long.)
+ */
+
+/** Get current timestamp
+ */
+u32 bcm_tstamp_read(void);
+
+/** Return the number of cycles elapsed between start and end.
+ */
+u32 bcm_tstamp_delta(u32 start, u32 end);
+
+/** Return the number of cycles elapsed between start and now.
+ */
+u32 bcm_tstamp_elapsed(u32 start);
+
+/** Convert a timestamp to microseconds.
+ */
+u32 bcm_tstamp2us(u32 i);
+
+/** Convert a timestamp to nanoseconds.  Note 64 bit return val.
+ */
+u64 bcm_tstamp2ns(u32 i);
+
+#endif /* _BCM_TSTAMP_H_ */
+#endif  /* defined(CONFIG_BCM_KF_TSTAMP) */
diff --git a/include/linux/blog.h b/include/linux/blog.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f751f9d01595c6c9956198e132e75ca80fa0867
--- /dev/null
+++ b/include/linux/blog.h
@@ -0,0 +1,2002 @@
+#if defined(CONFIG_BCM_KF_BLOG)
+
+#ifndef __BLOG_H_INCLUDED__
+#define __BLOG_H_INCLUDED__
+
+/*--------------------------------*/
+/* Blog.h and Blog.c for Linux OS */
+/*--------------------------------*/
+
+/* 
+* <:copyright-BRCM:2003:DUAL/GPL:standard
+* 
+*    Copyright (c) 2003 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : blog.h
+ *
+ * Description:
+ *
+ * A Blog is an extension of the native OS network stack's packet context.
+ * In Linux a Blog would be an extension of the Linux socket buffer (aka skbuff)
+ * or a network device driver level packet context FkBuff. The nbuff layer
+ * provides a transparent access SHIM to the underlying packet context, may it
+ * be a skbuff or a fkbuff. In a BSD network stack, a packet context is the
+ * BSD memory buffer (aka mbuff).
+ *
+ * Blog layer provides Blog clients a SHIM to the native OS network stack:
+ * Blog clients may be implemented to:
+ *  - debug trace a packet as it passes through the network stack,
+ *  - develop traffic generators (loop) at the network device driver level.
+ *  - develop network driver level promiscuous mode bound applications and use
+ *    the Blog SHIM to isolate themselves from the native OS network constructs
+ *    or proprietary network constructs such as Ethernet bridges, VLAN network
+ *    interfaces, IGMP, firewall and connection tracking systems.
+ *
+ * As such, Blog provides an extension of the packet context and contains the
+ * received and transmitted packets data and parsed information. Parsing results
+ * are saved to describe, the type of layer 1, 2, 3 and 4 headers seen, whether
+ * the packet was a unicast, broadcast or multicast, a tunnel 4in6 or 6in4 etc.
+ *
+ * Blog views a receive or transmit end-point to be any construct that can
+ * be described by an end-point context and a handler op. An end-point could hence
+ * be a:
+ *  - a network device (Linux net_device with hard start transmit handler),
+ *  - a link or queue in the network stack (e.g. a Linux Traffic Control queue
+ *    or a netlink or raw socket queue),
+ *  - a file system logging interface and its logging handler,
+ *  - a virtual interface to some hardware block that provides some hardware
+ *    assisted functionality (e.g. IPSEC acceleration or checksum offloading
+ *    or GSO block),
+ *  - a raw interface to an external hardware test traffic generator using say
+ *    a DMA mapped packet reception or transmission.
+ *
+ * Blog clients are hence applications that provide value added capability by
+ * binding at such end-points.
+ *
+ * A simple Blog client application is a loop traffic generator that simply
+ * acts as a sink of packets belonging to a specific "l3 flow" and mirrors
+ * them to another interface or loops them back into the stack by serving as a
+ * source to a receive network device, while measuring the packet processing
+ * datapath performance in the native OS network stack/proprietary constructs.
+ * Such a loop traffic generator could be used to inject N cells/packets
+ * that cycle through the system endlessly, serving as background traffic while
+ * a few flows are studied from say a QOS perspective.
+ *
+ * Another example of a Blog client is a proxy accelerator (hardware / software)
+ * that is capable of snooping on specific flows and accelerating them while
+ * bypassing the native OS network stack and/or proprietary constructs. It is
+ * however required that the native OS constructs can co-exist. E.g. it may be
+ * necessary to refresh a network bridge's ARL table, or a connection/session
+ * tracker, or update statistics, when individual packets bypass such network
+ * constructs. A proxy accelerator may also reside between a Rx network device
+ * a hardware IPSEC accelerator block and a Tx network device.
+ *
+ * Blog layer provides a logical composite SHIM to the network constructs,
+ * Linux or proprietary, allowing 3rd party network constructs to be seamlessly
+ * supported in the native OS.  E.g. a network stack that uses a proprietary
+ * session tracker with firewalling capability would need to be transparently
+ * accessed, so that a Blog client may refresh the session tracking object when
+ * packets bypass the network stack.
+ *
+ * For each OS (eCOS, Linux, BSD) a blog.c implementation file is provided that
+ * implements the OS specific SHIM. Support for 3rd-party network constructs
+ * would need to be defined in blog.c. E.g. for Linux, if a proprietary
+ * session tracker replaces the Linux netfilter connection tracking framework,
+ * then the void * ct_p and the corresponding query/set operations would need to
+ * be implemented. The Blog clients SHOULD NOT rely on any function other than
+ * those specifically defined allowing a coexistence of the Blog client and the
+ * native construct. In the example of a ct_p, for all practical purposes,
+ * the void *, could have been a key or a handle to a connection tracking object
+ *
+ * Likewise, the Blog client may need to save a client key with the
+ * network construct. Again a client key may be a pointer to a client object or
+ * simply a hash key or some handle semantics.
+ *
+ * The logical SHIM is defined as follows:
+ *
+ * __doc_include_if_linux__
+ *
+ * 1. Extension of a packet context with a logging context:
+ * ========================================================
+ *   Explicit APIS to allocate/Free a Blog structure, and bind to the packet
+ *   context, may it be a skbuff or a fkbuff. Support for transferring a
+ *   Blog_t structure from one packet context to another during the course of
+ *   a packet in the network stack involving a packet context clone/copy is
+ *   also included. The release and recycling of Blog_t structures when a 
+ *   packet context is freed are also provided.
+ *   Binding is bi-directional: packet context <-- --> Blog_t
+ * 
+ *
+ * 2. Associating native OS or 3rd-party network constructs: blog_link()
+ * ==========================================================================
+ *   Examples of network constructs
+ *      "dev"   - Network device 
+ *      "ct"    - Connection or session tracker
+ *      "fdb"   - Network bridge forwarding database entity
+ *
+ *   Association is pseudo bi-directional, using "void *" binding in a Blog_t to
+ *   a network construct. In the reverse, a network construct will link to a
+ *   Blog client entity using a Key concept. Two types of keys are currently
+ *   employed, a BlogFlowKey and a BlogGroupKey. 
+ *
+ *   A BlogFlowKey would typically refer to a single unidirectional packet
+ *   stream defined by say all packets belonging to a unidirectional IPv4 flow,
+ *   whereas a BlogGroupKey could be used to represent a single downstream
+ *   multicast stream (IP multicast group) that results in replicated streams
+ *   pertaining to multiple clients joining the IPv4 multicast group.
+ *
+ *   Likewise, one may represent a single unidirectional IPv4 UDP flow using
+ *   BlogFlowKey, and the reverse direction IPv4 UDP reply flow
+ *   using another BlogFlowKey, and represent the mated pair using a
+ *   BlogGroupKey.
+ *
+ *   In a Blog traffic generator client, where in several IPv4 UDP flows, each
+ *   represented independently using a BlogFlowKey, allows for a set of them
+ *   (background downstream stress traffic) to be managed as a group using a
+ *   BlogGroupKey.
+ *
+ *   Designer Note:
+ *   A network construct may be required to save a BlogFlowKey and/or
+ *   BlogGroupKey to complete the reverse binding between a network construct
+ *   and the Blog client application. An alternate approach would be to save
+ *   a pointer to the Blog_t in the network construct with an additional
+ *   dereference through the keys saved within the Blog_t object.
+ *
+ *   A BlogFlowKey and a BlogGroupKey is a 32-bit sized unit and can serve either
+ *   as a pointer (32bit processor) or a index or a hash key or ...
+ *
+ *
+ * 3. Network construct and Blog client co-existence call backs:
+ * =============================================================
+ *
+ * blog_notify():
+ * ==============
+ * A network construct may notify a Blog client of a change of status and may
+ * be viewed as a "downcall" from specialized network construct to a Blog client
+ * E.g. if a connection/session tracking system deems that a flow needs to be
+ * deleted or say it itself is being destroyed, then it needs to notify the Blog
+ * client. This would allow the Blog client to cleanup any association with the
+ * network construct.
+ * Ability for a Blog client to receive general system wide notifications of
+ * changes, to include, network interfaces or link state changes, protocol stack
+ * service access point changes, etc.
+ * Designer Note: Linux notification list?
+ *
+ * blog_request():
+ * ===============
+ * A Blog client may request a change in state in the network construct and may
+ * be viewed as a "upcall" from the Blog client into the network construct. A
+ * timer refresh of the bridge fdb or connection tracking object, or a query
+ * whether the session tracker has successfully established (e.g. a TCP 3-way
+ * handshake has completed, or a IGMP client was permitted to join a group, or a
+ * RTSP session was successful) a uni-directional or bi-directional flow.
+ *
+ *
+ * 4. Network end-point binding of Blog client
+ * ===========================================
+ *
+ * blog_init(), blog_sinit(), blog_finit():
+ * ========================================
+ * __comment_if_linux__ : This function is invoked by a Linux network device on
+ * packet reception to pass the packet to a Blog client application.
+ *
+ * Pass a packet context to a Blog client at a "RX" network device either using
+ * a skbuff or a fkbuff packet context. Blog client MAY ONLY ACCESS fkbuff
+ * fields. As per the nbuff specification, a FkBuff may be considered as a
+ * base class and a skbuff is a derived class, inheriting the base class members
+ * of the base class, fkbuff. The basic fields of a packet context are a pointer
+ * to the received packet's data, data length, a set of reserved fields to carry
+ * layer 1 information, queue priority, etc, and packet context and or packet
+ * recycling. The layer 1 information is described in terms of channels and
+ * and link layer phy preambles. A channel could be an ATM VCI, a DSL queue, a
+ * PON Gem Port. A Phy could describe the LINK layer type and or a preamble for
+ * instance a RFC2684 header in the DSL world.
+ *
+ * blog_[s|f]init() will setup the L1 coarse key<channel,phy> and invokes a Blog
+ * client's receive hook. A Blog client may consume the packet bypassing the
+ * native OS network stack, may suggest that the packet context be extended by
+ * a Blog_t structure or may deem that the packet is of not interest. As such
+ * the Blog client will return PKT_DONE, PKT_BLOG or PKT_NORM, respectively. In
+ * case no Blog client has been registered for receiving packets (promiscuous)
+ * directly from RX network devices, then the packet will follow a normal data
+ * path within the network stack (PKT_NORM).
+ *
+ * Designer Note: Blog clients MAY NOT use fields not defined in FkBuff.
+ * 
+ *
+ * blog_emit():
+ * ============
+ * __comment_if_linux__ : This function is invoked by a Linux network device
+ * prior to packet transmission to pass the packet to a Blog client application.
+ *
+ * Pass a packet context to a Blog client at a "TX" network device either using
+ * a skbuff or a fkbuff packet context. The same restrictions on a Blog client
+ * pertaining to packet field context access as defined in the blog_init()
+ * variant of APIs is applicable to blog_emit(). A Blog client may also return
+ * PKT_NORM or PKT_DONE, to indicate normal processing, or packet consumption.
+ *
+ * Designer Note: blog_emit() will ONLY pass those packets to Blog clients that
+ * have a packet context extended with a Blog_t structure. Hence skbuffs or
+ * fkbuffs that do not have a Blog_t extension will not be handed to the Blog
+ * client. Do we need blog_semit/blog_femit variants.
+ *
+ *
+ * 5. Binding Blog client applications: blog_bind()
+ * ================================================
+ * blog_bind() enables a "single" client to bind into the network stack by
+ * specifying a network device packet reception handler, a network device packet
+ * transmission handler, network stack to blog client notify hook.
+ *
+ *
+ * 6. Miscellanous
+ * ===============
+ * - Blog_t management.
+ * - Data-filling a Blog_t.
+ * - Protocol Header specifications independent of OS.
+ * - Debug printing.
+ *
+ *
+ * __end_include_if_linux__
+ *
+ *  Version 1.0 SKB based blogging
+ *  Version 2.0 NBuff/FKB based blogging (mbuf)
+ *  Version 2.1 IPv6 Support
+ *  Version 3.0 Restructuring Blog SHIM to support eCOS, Linux and proprietary
+ *              network constructs
+ *
+ *******************************************************************************
+ */
+
+#define BLOG_VERSION            "v3.0"
+
+#if defined(__KERNEL__)                 /* Kernel space compilation           */
+#include <linux/types.h>                /* LINUX ISO C99 7.18 Integer types   */
+#else                                   /* User space compilation             */
+#include <stdint.h>                     /* C-Lib ISO C99 7.18 Integer types   */
+#endif
+#include <linux/blog_net.h>             /* IEEE and RFC standard definitions  */
+#include <linux/nbuff_types.h>          /* for IS_SKBUFF_PTR                  */
+
+#ifndef NULL_STMT
+#define NULL_STMT                   do { /* NULL BODY */ } while (0)
+#endif
+
+#undef  BLOG_DECL
+#define BLOG_DECL(x)                x,
+
+#ifndef BLOG_OFFSETOF
+#define BLOG_OFFSETOF(stype, member)     ((size_t) &((struct stype *)0)->member)
+#endif
+
+/* Forward declarations */
+struct blog_t;
+typedef struct blog_t Blog_t;
+#define BLOG_NULL                   ((Blog_t*)NULL)
+#define BLOG_KEY_NONE               0
+
+/* __bgn_include_if_linux__ */
+
+struct sk_buff;                         /* linux/skbuff.h                     */
+struct fkbuff;                          /* linux/nbuff.h                      */
+
+/* See RFC 4008 */
+#define BLOG_NAT_TCP_DEFAULT_IDLE_TIMEOUT (86400 *HZ)
+#define BLOG_NAT_UDP_DEFAULT_IDLE_TIMEOUT (300 *HZ)
+
+extern uint32_t blog_nat_tcp_def_idle_timeout;
+extern uint32_t blog_nat_udp_def_idle_timeout;
+
+/* used to pass timer info between the stack and blog layer */
+typedef struct blogCtTime {
+    uint8_t         unknown;        /* unknown proto */
+    uint8_t         proto;          /* known proto TCP, UDP */
+    uint8_t         intv;           /* intv in sec */
+    uint8_t         idle;           /* idle time in sec */
+    unsigned long   idle_jiffies;   /* idle time in jiffies */
+    unsigned long   extra_jiffies;  /* max time for the flow type */
+} BlogCtTime_t;
+
+/*
+ * Linux Netfilter Conntrack registers it's conntrack refresh function which
+ * will be invoked to refresh a conntrack when packets belonging to a flow
+ * managed by Linux conntrack are bypassed by a Blog client.
+ */
+typedef void (*blog_cttime_upd_t)(void * ct_p, BlogCtTime_t *ct_time_p);
+extern blog_cttime_upd_t blog_cttime_update_fn;
+
+typedef int (*blog_xtm_get_tx_chan_t)(void *dev_p, int channel, unsigned mark);
+extern blog_xtm_get_tx_chan_t blog_xtm_get_tx_chan_fn;
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+typedef int (*blog_gre_rcv_check_t)(void *dev, BlogIpv4Hdr_t *iph, uint16_t len, 
+              void **tunl_pp, uint32_t *pkt_seqno_p);
+extern blog_gre_rcv_check_t blog_gre_rcv_check_fn;
+
+typedef int (*blog_gre_xmit_upd_t)(void * tunl_p, BlogIpv4Hdr_t *iph, uint16_t len);
+extern blog_gre_xmit_upd_t blog_gre_xmit_update_fn;
+#endif
+
+
+#define PPTP_NOT_ACK 0
+#define PPTP_WITH_ACK 1
+ 
+typedef int (*blog_pptp_xmit_upd_t)(uint16_t call_id, uint32_t seqNum, 
+                                    uint32_t ackNum, uint32_t daddr);
+extern blog_pptp_xmit_upd_t blog_pptp_xmit_update_fn;
+
+typedef int (*blog_pptp_xmit_get_t)(uint16_t call_id, uint32_t* seqNum, 
+                                    uint32_t* ackNum, uint32_t daddr);
+extern blog_pptp_xmit_get_t blog_pptp_xmit_get_fn;
+
+typedef int (*blog_pptp_rcv_check_t)(uint16_t call_id, uint32_t *rcv_pktSeq, 
+                                     uint32_t rcv_pktAck, uint32_t saddr);
+extern blog_pptp_rcv_check_t blog_pptp_rcv_check_fn;
+ 
+typedef int (*blog_l2tp_rcv_check_t)(void *dev, uint16_t tunnel_id, 
+                                     uint16_t session_id);
+extern blog_l2tp_rcv_check_t blog_l2tp_rcv_check_fn;
+
+/* __end_include_if_linux__ */
+
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes a Blog client,
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+        BLOG_DECL(BlogClient_fcache)
+#if defined(CONFIG_BCM_KF_FAP)
+        BLOG_DECL(BlogClient_fap)
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+        BLOG_DECL(BlogClient_runner)
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+        BLOG_DECL(BlogClient_MAX)
+} BlogClient_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes whether a packet is consumed and freed by a Blog client application,
+ * whether a packet needs to be processed normally within the network stack or
+ * whether a packet context is extended with a Blog_t object.
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+        BLOG_DECL(PKT_DONE)             /* Packet consumed and freed          */
+        BLOG_DECL(PKT_NORM)             /* Continue normal stack processing   */
+        BLOG_DECL(PKT_BLOG)             /* Continue stack with blogging       */
+        BLOG_DECL(PKT_DROP)             /* Drop Packet                        */
+        BLOG_DECL(BLOG_ACTION_MAX)
+} BlogAction_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes the direction in the network stack when a packet is processed by a
+ * virtual network interface/network device.
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+        BLOG_DECL(DIR_RX)               /* Receive path in network stack      */
+        BLOG_DECL(DIR_TX)               /* Transmit path in network stack     */
+        BLOG_DECL(BLOG_DIR_MAX)
+} BlogDir_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes the type of Network entity associated with a Blog_t.
+ *
+ * BlogNetEntity_t may be linked to a blog using blog_link to make the Blog_t
+ * point to the BlogNetEntity_t. A reverse linking from the BlogNetEntity_t to
+ * Blog_t is only possible via a key (if necessary when a one to one association
+ * between the BlogNetEntity_t and a Blog exists. For instance, there is a
+ * one to one association between a Flow Connection Tracker and a Blog. In fact
+ * a Linux Netfilter Connection Tracking object manages a bi-directional flow
+ * and thus may have 2 keys to reference the corresponding Blog_t. However, a
+ * network device (physical end device or a virtual device) may have multiple
+ * Flows passing through it and hence no one-to-one association exists. In this
+ * can a Blog may have a link to a network device, but the reverse link (via a
+ * key) is not saved in the network device.
+ *
+ * Linking a BlogNetEntity_t to a blog is done via blog_link() whereas saving
+ * a reference key into a BlogNetEntity_t is done via blog_request() by the
+ * Blog client application, if needed.
+ *
+ *------------------------------------------------------------------------------
+ */
+
+#define BLOG_CT_PLD             0U
+#define BLOG_CT_DEL             1U
+#define BLOG_CT_MAX             2U
+
+/* FLOWTRACK: param1 is ORIG=0 or REPLY=1 direction */
+#define BLOG_PARAM1_DIR_ORIG    0U
+#define BLOG_PARAM1_DIR_REPLY   1U
+
+/* FLOWTRACK: param2 is IPv4=0, IPv6=1, GRE=2 */
+#define BLOG_PARAM2_IPV4        0U
+#define BLOG_PARAM2_IPV6        1U
+#define BLOG_PARAM2_GRE_IPV4    2U
+#define BLOG_PARAM2_MAX         3U
+#define BLOG_CT_VER_MAX         2U
+
+/* BRIDGEFDB: param1 is src|dst */
+#define BLOG_PARAM1_SRCFDB      0U
+#define BLOG_PARAM1_DSTFDB      1U
+
+/* IF_DEVICE: param1 is direction RX or TX, param 2 is minMtu */
+
+typedef enum {
+        BLOG_DECL(FLOWTRACK)            /* Flow (connection|session) tracker  */
+        BLOG_DECL(BRIDGEFDB)            /* Bridge Forwarding Database entity  */
+        BLOG_DECL(MCAST_FDB)            /* Multicast Client FDB entity        */
+        BLOG_DECL(IF_DEVICE)            /* Virtual Interface (network device) */
+        BLOG_DECL(IF_DEVICE_MCAST)      /* Virtual Interface (network device) */
+        BLOG_DECL(GRE_TUNL)             /* GRE Tunnel                         */
+        BLOG_DECL(TOS_MODE)             /* TOS_MODE                           */
+        BLOG_DECL(BLOG_NET_ENTITY_MAX)
+} BlogNetEntity_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes a type of notification sent from the network stack to the Blog client
+ * See blog_notify(BlogNotify_t, void *, uint32_t param1, uint32_t param2);
+ *------------------------------------------------------------------------------
+ */
+
+/* MCAST_CONTROL_EVT: param1 is add|del, and param2 is IPv4|IPv6 */
+#define BLOG_PARAM1_MCAST_ADD       0U
+#define BLOG_PARAM1_MCAST_DEL       1U
+#define BLOG_PARAM2_MCAST_IPV4      0U
+#define BLOG_PARAM2_MCAST_IPV6      1U
+
+/* LINK_STATE_CHANGE: param1 */
+#define BLOG_PARAM1_LINK_STATE_UP   0U
+#define BLOG_PARAM1_LINK_STATE_DOWN 1U
+
+/* FETCH_NETIF_STATS: param1 is address of BlogStats_t, param2 */
+#define BLOG_PARAM2_NO_CLEAR        0U
+#define BLOG_PARAM2_DO_CLEAR        1U
+
+typedef enum {
+        BLOG_DECL(DESTROY_FLOWTRACK)    /* Session/connection is deleted      */
+        BLOG_DECL(DESTROY_BRIDGEFDB)    /* Bridge FDB has aged                */
+        BLOG_DECL(MCAST_CONTROL_EVT)    /* Mcast client joins a group event   */
+        BLOG_DECL(MCAST_SYNC_EVT)       /* Topology change for mcast event    */
+        BLOG_DECL(DESTROY_NETDEVICE)    /* Network device going down          */
+        BLOG_DECL(LINK_STATE_CHANGE)    /* Physical network link event        */
+        BLOG_DECL(FETCH_NETIF_STATS)    /* Fetch accumulated stats            */
+        BLOG_DECL(DYNAMIC_DSCP_EVENT)   /* Dynamic DSCP change event          */
+        BLOG_DECL(UPDATE_NETDEVICE)     /* Netdevice has been modified (MTU, etc) */
+        BLOG_DECL(ARP_BIND_CHG)         /* ARP IP/MAC binding change event    */
+        BLOG_DECL(CONFIG_CHANGE)        /* Certain configuration change event */
+        BLOG_DECL(BLOG_NOTIFY_MAX)
+} BlogNotify_t;
+
+typedef enum {
+        BLOG_DECL(QUERY_FLOWTRACK)      /* Session/connection time is queried */
+        BLOG_DECL(QUERY_BRIDGEFDB)      /* Bridge FDB time is queried         */
+        BLOG_DECL(BLOG_QUERY_MAX)
+} BlogQuery_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes a type of request from a Blog client to a network stack entity.
+ *------------------------------------------------------------------------------
+ */
+
+typedef enum {
+        BLOG_DECL(FLOWTRACK_KEY_SET)    /* Set Client key into Flowtracker    */
+        BLOG_DECL(FLOWTRACK_KEY_GET)    /* Get Client key into Flowtracker    */
+        BLOG_DECL(FLOWTRACK_DSCP_GET)   /* Get DSCP from Flow tracker:DYNDSCP */
+        BLOG_DECL(FLOWTRACK_CONFIRMED)  /* Test whether session is confirmed  */
+        BLOG_DECL(FLOWTRACK_ASSURED)    /* Test whether session is assured    */
+        BLOG_DECL(FLOWTRACK_ALG_HELPER) /* Test whether flow has an ALG       */
+        BLOG_DECL(FLOWTRACK_EXCLUDE)    /* Clear flow candidacy by Client     */
+        BLOG_DECL(FLOWTRACK_REFRESH)    /* Refresh a flow tracker             */
+        BLOG_DECL(FLOWTRACK_TIME_SET)   /* Set time in a flow tracker         */
+        BLOG_DECL(NETIF_PUT_STATS)      /* Push accumulated stats to devices  */
+        BLOG_DECL(LINK_XMIT_FN)         /* Fetch device link transmit function*/
+        BLOG_DECL(LINK_NOCARRIER)       /* Fetch device link carrier          */
+        BLOG_DECL(NETDEV_NAME)          /* Network device name                */
+        BLOG_DECL(MCAST_KEY_SET)        /* Set Client key into IGMP/MLD       */
+        BLOG_DECL(MCAST_KEY_GET)        /* Get Client key from IGMP/MLD       */
+        BLOG_DECL(MCAST_DFLT_MIPS)      /* Delete action in blogRule chain    */
+        BLOG_DECL(IQPRIO_SKBMARK_SET)   /* Set IQOS Prio in skb->mark         */
+        BLOG_DECL(TCPACK_PRIO)          /* TCP ACK prioritization             */
+        BLOG_DECL(BRIDGEFDB_KEY_SET)    /* Set Client key into bridge FDB     */
+        BLOG_DECL(BRIDGEFDB_KEY_GET)    /* Get Client key into bridge FDB     */
+        BLOG_DECL(BRIDGEFDB_TIME_SET)   /* Refresh bridge FDB time            */
+        BLOG_DECL(SYS_TIME_GET)         /* Get the system time in jiffies     */
+        BLOG_DECL(GRE_TUNL_XMIT)        /* GRE Tunnel tx                      */
+        BLOG_DECL(SKB_DST_ENTRY_SET)    /* get dst_entry from skb             */
+        BLOG_DECL(SKB_DST_ENTRY_RELEASE)/* release dst_entry from blog        */
+        BLOG_DECL(BLOG_REQUEST_MAX)
+} BlogRequest_t;
+
+
+/*----- LinkType: First header type ------------------------------------------*/
+/* Used by network drivers to determine the Layer 1 encapsulation or LinkType */
+typedef enum {
+        BLOG_DECL(TYPE_ETH)             /* LAN: ETH, WAN: EoA, MER, PPPoE     */
+        BLOG_DECL(TYPE_PPP)             /*           WAN: PPPoA               */
+        BLOG_DECL(TYPE_IP)              /*           WAN: IPoA                */
+} BlogLinkType_t;
+
+
+typedef enum {
+        BLOG_DECL(DPI_PARENTAL)        /* Parental control */
+        BLOG_DECL(DPI_QOS)             /* QoS */
+        BLOG_DECL(DPI_MAX)
+} BlogDpiType_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Clean this up.
+ *------------------------------------------------------------------------------
+ */
+
+#define BLOG_ENCAP_MAX          6       /* Maximum number of L2 encaps        */
+#define BLOG_HDRSZ_MAX          32      /* Maximum size of L2 encaps          */
+
+typedef enum {
+        BLOG_DECL(GRE_ETH)             /* NOTE(review): comment duplicated from BCM_XPHY; verify */
+        BLOG_DECL(BCM_XPHY)             /* e.g. BLOG_XTMPHY, BLOG_GPONPHY     */
+        BLOG_DECL(BCM_SWC)              /* BRCM LAN Switch Tag/Header         */
+        BLOG_DECL(ETH_802x)             /* Ethernet                           */
+        BLOG_DECL(VLAN_8021Q)           /* Vlan 8021Q (incld stacked)         */
+        BLOG_DECL(PPPoE_2516)           /* PPPoE RFC 2516                     */
+        BLOG_DECL(PPP_1661)             /* PPP RFC 1661                       */
+        BLOG_DECL(PLD_IPv4)             /* Delivery IPv4                      */
+        BLOG_DECL(PLD_IPv6)             /* Delivery IPv6                      */
+        BLOG_DECL(PPTP)                 /* PPTP Header                        */
+        BLOG_DECL(L2TP)                 /* L2TP Header                        */
+        BLOG_DECL(GRE)                  /* GRE Header                         */
+        BLOG_DECL(DEL_IPv4)             /* Outer IPv4                         */
+        BLOG_DECL(DEL_IPv6)             /* Outer IPv6                         */
+        BLOG_DECL(PROTO_MAX)
+} BlogEncap_t;
+
+
+
+/*
+ *------------------------------------------------------------------------------
+ * RFC 2684 header logging.
+ * CAUTION: 0'th enum corresponds to either header was stripped or zero length
+ *          header. VC_MUX_PPPOA and VC_MUX_IPOA have 0 length RFC2684 header.
+ *          PTM does not have an rfc2684 header.
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+        BLOG_DECL(RFC2684_NONE)         /*                               */
+        BLOG_DECL(LLC_SNAP_ETHERNET)    /* AA AA 03 00 80 C2 00 07 00 00 */
+        BLOG_DECL(LLC_SNAP_ROUTE_IP)    /* AA AA 03 00 00 00 08 00       */
+        BLOG_DECL(LLC_ENCAPS_PPP)       /* FE FE 03 CF                   */
+        BLOG_DECL(VC_MUX_ETHERNET)      /* 00 00                         */
+        BLOG_DECL(VC_MUX_IPOA)          /*                               */
+        BLOG_DECL(VC_MUX_PPPOA)         /*                               */
+        BLOG_DECL(PTM)                  /*                               */
+        BLOG_DECL(RFC2684_MAX)
+} Rfc2684_t;
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Denotes the type of physical interface and the presence of a preamble.
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+    BLOG_DECL(BLOG_XTMPHY)
+    BLOG_DECL(BLOG_ENETPHY)
+    BLOG_DECL(BLOG_GPONPHY)
+    BLOG_DECL(BLOG_EPONPHY)
+    BLOG_DECL(BLOG_USBPHY)
+    BLOG_DECL(BLOG_WLANPHY)
+    BLOG_DECL(BLOG_MOCAPHY)
+    BLOG_DECL(BLOG_EXTRA1PHY)
+    BLOG_DECL(BLOG_EXTRA2PHY)
+    BLOG_DECL(BLOG_EXTRA3PHY)
+    BLOG_DECL(BLOG_SIDPHY)
+    BLOG_DECL(BLOG_TCP4_LOCALPHY)
+    BLOG_DECL(BLOG_MAXPHY)              /* number of phy types (sentinel) */
+} BlogPhy_t;
+
+/* CAUTION: Following macros have binary dependencies. Please do not change these
+   macros without consulting with Broadcom or the subsystem owners
+   Macro definition START (arguments parenthesized for safe expansion) */
+#define BLOG_IS_HWACC_DISABLED_WLAN_EXTRAPHY(rxphy,txphy) (((rxphy) == BLOG_EXTRA1PHY) || \
+                                                           ((rxphy) == BLOG_EXTRA2PHY) || \
+                                                           ((rxphy) == BLOG_EXTRA3PHY) || \
+                                                           ((txphy) == BLOG_EXTRA1PHY) || \
+                                                           ((txphy) == BLOG_EXTRA2PHY) || \
+                                                           ((txphy) == BLOG_EXTRA3PHY))
+#define BLOG_IS_TX_HWACC_ENABLED_WLAN_PHY(txphy) ((txphy) == BLOG_WLANPHY)
+/* Macro definition END */
+
+/*
+ *------------------------------------------------------------------------------
+ * Logging of a maximum 4 "virtual" network devices that a flow can traverse.
+ * Virtual devices are interfaces that do not perform the actual DMA transfer.
+ * E.g. an ATM interface would be referred to as a physical interface whereas
+ * a ppp interface would be referred to as a Virtual interface.
+ *------------------------------------------------------------------------------
+ */
+#define MAX_VIRT_DEV           7
+
+#define DEV_DIR_MASK           0x3u
+#define DEV_PTR_MASK           (~DEV_DIR_MASK)
+#define DEV_DIR(ptr)           ((uint32_t)(ptr) & DEV_DIR_MASK)  /* low 2 bits carry direction */
+
+#define IS_RX_DIR(ptr)         ( DEV_DIR(ptr) == DIR_RX )
+#define IS_TX_DIR(ptr)         ( DEV_DIR(ptr) == DIR_TX )
+
+/*
+ *------------------------------------------------------------------------------
+ * Device pointer conversion between with and without embedded direction info.
+ * NOTE(review): casts assume 32-bit pointers — valid only on 32-bit targets.
+ *------------------------------------------------------------------------------
+ */
+#define DEVP_APPEND_DIR(ptr,dir) ((void *)((uint32_t)(ptr) | (uint32_t)(dir)))
+#define DEVP_DETACH_DIR(ptr)     ((void *)((uint32_t)(ptr) & (uint32_t) \
+                                                              DEV_PTR_MASK))
+/*
+ *------------------------------------------------------------------------------
+ * Denotes the tos mode.
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+    BLOG_DECL(BLOG_TOS_FIXED)
+    BLOG_DECL(BLOG_TOS_INHERIT)
+    BLOG_DECL(BLOG_TOS_MAX)             /* number of tos modes (sentinel) */
+} BlogTos_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Blog statistics structure
+ *------------------------------------------------------------------------------
+ */
+typedef struct{
+    unsigned long   rx_packets;             /* total blog packets received    */
+    unsigned long   tx_packets;             /* total blog packets transmitted */
+    unsigned long   rx_bytes;               /* total blog bytes received      */
+    unsigned long   tx_bytes;               /* total blog bytes transmitted   */
+    unsigned long   multicast;              /* total blog multicast packets   */
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+    unsigned long   tx_multicast_packets;   /* multicast packets transmitted */
+    unsigned long   rx_multicast_bytes;     /* multicast bytes received */
+    unsigned long   tx_multicast_bytes;     /* multicast bytes transmitted */
+    unsigned long   rx_unicast_packets;     /* unicast packets received */
+    unsigned long   tx_unicast_packets;     /* unicast packets transmitted */
+#endif
+} BlogStats_t;
+
+
+/*
+ * -----------------------------------------------------------------------------
+ * Support blogging of multicast packets.
+ *
+ * When Multicast support is enabled system wide, the default to be used may
+ * be set in CC_BLOG_SUPPORT_MCAST which gets saved in blog_support_mcast_g.
+ * One may change the default (at runtime) by invoking blog_support_mcast().
+ * -----------------------------------------------------------------------------
+ */
+
+/* Multicast Support for IPv4 and IPv6 Control */
+#define BLOG_MCAST_DISABLE          0
+#define BLOG_MCAST_IPV4             1
+#define BLOG_MCAST_IPV6             2
+
+#ifdef CONFIG_BLOG_MCAST
+/* Parenthesized: an unparenthesized sum mis-expands inside larger expressions */
+#define CC_BLOG_SUPPORT_MCAST        (BLOG_MCAST_IPV4 + BLOG_MCAST_IPV6)
+#else
+#define CC_BLOG_SUPPORT_MCAST        BLOG_MCAST_DISABLE
+#endif
+
+extern int blog_support_mcast_g;
+
+/*
+ * -----------------------------------------------------------------------------
+ * Support learning of multicast packets.
+ *
+ * When Multicast learn support is enabled system wide, the default to be used
+ * may be set in CC_BLOG_SUPPORT_MCAST_LEARN which gets saved in
+ * blog_support_mcast_learn_g. One may change the default (at runtime) by
+ * invoking blog_support_mcast_learn().
+ * -----------------------------------------------------------------------------
+ */
+
+/* Multicast Learning Support Enable/Disable Control */
+#define BLOG_MCAST_LEARN_DISABLE          0
+#define BLOG_MCAST_LEARN_ENABLE           1
+
+#ifdef CONFIG_BLOG_MCAST_LEARN
+#define CC_BLOG_SUPPORT_MCAST_LEARN        BLOG_MCAST_LEARN_ENABLE
+#else
+#define CC_BLOG_SUPPORT_MCAST_LEARN        BLOG_MCAST_LEARN_DISABLE
+#endif
+
+extern int blog_support_mcast_learn_g;              /* runtime default (settable) */
+extern void blog_support_mcast_learn(int enable);
+
+/*
+ * -----------------------------------------------------------------------------
+ * Support blogging of IPv6 traffic
+ *
+ * When IPv6 support is enabled system wide, the default to be used may
+ * be set in CC_BLOG_SUPPORT_IPV6 which gets saved in blog_support_ipv6_g.
+ * One may change the default (at runtime) by invoking blog_support_ipv6().
+ * -----------------------------------------------------------------------------
+ */
+
+/* IPv6 Support Control: see blog_support_ipv6_g and blog_support_ipv6() */
+#define BLOG_IPV6_DISABLE           0
+#define BLOG_IPV6_ENABLE            1
+
+#ifdef CONFIG_BLOG_IPV6
+#define CC_BLOG_SUPPORT_IPV6        BLOG_IPV6_ENABLE
+#else
+#define CC_BLOG_SUPPORT_IPV6        BLOG_IPV6_DISABLE
+#endif
+
+extern int blog_support_ipv6_g;                     /* runtime default (settable) */
+extern void blog_support_ipv6(int enable);
+
+/*
+ * -----------------------------------------------------------------------------
+ * Support blogging of GRE traffic.
+ *
+ * When GRE support is enabled system wide, the default to be used may be set
+ * in CC_BLOG_SUPPORT_GRE which gets saved in blog_support_gre_g.
+ * One may change the default (at runtime) by invoking blog_support_gre().
+ * -----------------------------------------------------------------------------
+ */
+
+/* GRE Support: tunnel and pass-thru modes */
+#define BLOG_GRE_DISABLE          0
+#define BLOG_GRE_TUNNEL           1
+#define BLOG_GRE_PASS_THRU        2
+
+#ifdef CONFIG_BLOG_GRE
+#define CC_BLOG_SUPPORT_GRE        BLOG_GRE_TUNNEL
+#else
+#define CC_BLOG_SUPPORT_GRE        BLOG_GRE_DISABLE
+#endif
+
+extern int blog_gre_tunnel_accelerated_g;
+extern int blog_support_gre_g;
+extern void blog_support_gre(int enable);
+
+/* L2TP Support */
+#define BLOG_L2TP_DISABLE             0
+#define BLOG_L2TP_TUNNEL              1
+#define BLOG_L2TP_TUNNEL_WITHCHKSUM   2
+
+#ifdef CONFIG_BLOG_L2TP
+#define CC_BLOG_SUPPORT_L2TP       BLOG_L2TP_TUNNEL
+#else
+#define CC_BLOG_SUPPORT_L2TP       BLOG_L2TP_DISABLE
+#endif
+
+extern int blog_l2tp_tunnel_accelerated_g;
+extern int blog_support_l2tp_g;
+extern void blog_support_l2tp(int enable);
+
+/* Traffic type */
+typedef enum {
+    BLOG_DECL(BlogTraffic_IPV4_UCAST)
+    BLOG_DECL(BlogTraffic_IPV6_UCAST)
+    BLOG_DECL(BlogTraffic_IPV4_MCAST)
+    BLOG_DECL(BlogTraffic_IPV6_MCAST)
+    BLOG_DECL(BlogTraffic_Layer2_Flow)
+    BLOG_DECL(BlogTraffic_MAX)          /* number of traffic types (sentinel) */
+} BlogTraffic_t;
+
+/* Flow activation key: one u32 overlaying {flow,blog} key halves, endian mirrored */
+#define BLOG_KEY_INVALID             0xFFFFFFFF    /* "no key" sentinel */
+typedef union {
+    uint32_t    u32;
+    struct {
+        BE_DECL(
+            union {
+                uint16_t flowkey;
+                struct {
+                    uint16_t incarn :  2;
+                    uint16_t self   : 14;
+                };
+            };
+            union {
+                struct {
+                    uint16_t intf   : 8;
+                    uint16_t client : 8;
+                } mcast_blogkey;
+                uint16_t blogkey;
+            };
+        )
+        LE_DECL(
+            union {
+                uint16_t blogkey;
+                struct {
+                    uint16_t client : 8;
+                    uint16_t intf   : 8;
+                } mcast_blogkey;
+            };
+            union {
+                uint16_t flowkey;
+                struct {
+                    uint16_t self   : 14;
+                    uint16_t incarn :  2;
+                };
+            };
+        )
+    };
+} BlogActivateKey_t;
+
+#define BLOG_SET_PHYHDR(a, b)   ( (((a) & 0xf) << 4) | ((b) & 0xf) )  /* a: high nibble, b: low nibble */
+#define BLOG_GET_PHYTYPE(a)     ( (a) & 0xf )       /* low nibble */
+#define BLOG_GET_PHYLEN(a)      ( (a) >> 4 )        /* high nibble */
+
+#define BLOG_PHYHDR_MASK        0xff
+#define BLOG_SET_HW_ACT(a)      ( ((a) & 0xf) << 8 )
+#define BLOG_GET_HW_ACT(a)      ( (a) >> 8 )
+
+/*
+ * =============================================================================
+ * CAUTION: OS and network stack may be built without CONFIG_BLOG defined.
+ * =============================================================================
+ */
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+
+/*
+ *------------------------------------------------------------------------------
+ *
+ *              Section: Blog Conditional Compiles CC_BLOG_SUPPORT_...
+ *
+ * These conditional compiles are not controlled by a system wide build process.
+ * E.g. CONFIG_BLOG_MCAST is a system wide build configuration
+ *      CC_BLOG_SUPPORT_MCAST is a blog defined build configuration
+ *
+ * Do not use any CONFIG_ or CC_BLOG_SUPPORT_ in Blog_t structure definitions.
+ *
+ *------------------------------------------------------------------------------
+ */
+
+/* LAB ONLY: Design development, uncomment to enable */
+/* #define CC_BLOG_SUPPORT_COLOR */
+/* #define CC_BLOG_SUPPORT_DEBUG */
+
+
+
+
+/* To enable user filtering, see blog_filter(), invoked in blog_finit() */
+/* #define CC_BLOG_SUPPORT_USER_FILTER */
+
+
+
+/*
+ * -----------------------------------------------------------------------------
+ *                      Section: Definition of a Blog_t
+ * -----------------------------------------------------------------------------
+ */
+
+#define BLOG_CHAN_INVALID   0xFF        /* "no channel" sentinel */
+
+typedef struct {
+    uint8_t             channel;        /* e.g. port number, txchannel, ... */
+
+    union {
+        struct {
+            uint8_t         phyHdrLen   : 4;
+            uint8_t         phyHdrType  : 4;    /* presumably a BlogPhy_t value — confirm */
+        };
+        uint8_t         phyHdr;
+    };
+
+    union {
+        struct {
+            BE_DECL(
+                uint16_t         reserved    : 2;
+                uint16_t         DEL_IPv6    : 1;
+                uint16_t         DEL_IPv4    : 1;
+                uint16_t         GRE         : 1;
+                uint16_t         L2TP        : 1;
+                uint16_t         PPTP        : 1;
+
+                uint16_t         PLD_IPv6    : 1;
+                uint16_t         PLD_IPv4    : 1;
+                uint16_t         PPP_1661    : 1;
+                uint16_t         PPPoE_2516  : 1;
+                uint16_t         VLAN_8021Q  : 1;
+                uint16_t         ETH_802x    : 1;
+                uint16_t         BCM_SWC     : 1;
+                uint16_t         BCM_XPHY    : 1;    /* e.g. BCM_XTM */
+                uint16_t         GRE_ETH      :1;    /* Ethernet over GRE */
+            )
+            LE_DECL(
+                uint16_t         GRE_ETH      :1;    /* Ethernet over GRE */
+                uint16_t         BCM_XPHY    : 1;    /* e.g. BCM_XTM */
+                uint16_t         BCM_SWC     : 1;
+                uint16_t         ETH_802x    : 1;
+                uint16_t         VLAN_8021Q  : 1;
+                uint16_t         PPPoE_2516  : 1;
+                uint16_t         PPP_1661    : 1;
+                uint16_t         PLD_IPv4    : 1;
+                uint16_t         PLD_IPv6    : 1;
+
+                uint16_t         PPTP        : 1;
+                uint16_t         L2TP        : 1;
+                uint16_t         GRE         : 1;
+                uint16_t         DEL_IPv4    : 1;
+                uint16_t         DEL_IPv6    : 1;
+                uint16_t         reserved    : 2;
+            )
+        }               bmap;/* as per order of BlogEncap_t enums declaration */
+        uint16_t        hdrs;
+    };
+} BlogInfo_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Buffer to log IP Tuple.
+ * Packed: 1 16byte cacheline.
+ *------------------------------------------------------------------------------
+ */
+struct blogTuple_t {
+    uint32_t        saddr;          /* IP header saddr */
+    uint32_t        daddr;          /* IP header daddr */
+
+    union {
+        struct {
+            uint16_t    source;     /* L4 source port */
+            uint16_t    dest;       /* L4 dest port */
+        }           port;
+        struct {
+            uint16_t    unused;
+            uint16_t    gre_callid; /* GREv1/PPTP call id (overlays dest) */
+        };
+        uint32_t    ports;          /* both ports as one word */
+        uint32_t    esp_spi;        /* IPsec ESP SPI (overlays ports) */
+    };
+
+    uint8_t         ttl;            /* IP header ttl */
+    uint8_t         tos;            /* IP header tos */
+    uint16_t        check;          /* checksum: rx tuple=l3, tx tuple=l4 */
+
+} ____cacheline_aligned;
+typedef struct blogTuple_t BlogTuple_t;
+
+#define NEXTHDR_IPV4 IPPROTO_IPIP       /* next-header value for 4in6 payloads */
+/* Tunnel header-combination masks/tests over the rx/tx info.hdrs bitmaps */
+#define HDRS_IPinIP     ((1<<GRE) | (1<<PLD_IPv4) | (1<<PLD_IPv6) |    \
+                         (1<<DEL_IPv4) | (1<<DEL_IPv6))
+#define HDRS_IP4in4     ((1<<PLD_IPv4) | (1<<DEL_IPv4))
+#define HDRS_IP6in4     ((1<<PLD_IPv6) | (1<<DEL_IPv4))
+#define HDRS_IP4in6     ((1<<PLD_IPv4) | (1<<DEL_IPv6))
+#define HDRS_GIP4       ((1<<PLD_IPv4) | (1<<GRE))
+
+#define RX_IP4in6(b)    (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_IP4in6)
+#define RX_IP6in4(b)    (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_IP6in4)
+#define TX_IP4in6(b)    (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_IP4in6)
+#define TX_IP6in4(b)    (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_IP6in4)
+
+#define RX_IPV4(b)      ((b)->rx.info.bmap.PLD_IPv4)
+#define TX_IPV4(b)      ((b)->tx.info.bmap.PLD_IPv4)
+#define RX_IPV6(b)      ((b)->rx.info.bmap.PLD_IPv6)
+#define TX_IPV6(b)      ((b)->tx.info.bmap.PLD_IPv6)
+#define RX_IPV4_DEL(b)  ((b)->rx.info.bmap.DEL_IPv4)
+#define TX_IPV4_DEL(b)  ((b)->tx.info.bmap.DEL_IPv4)
+#define RX_IPV6_DEL(b)  ((b)->rx.info.bmap.DEL_IPv6)
+#define TX_IPV6_DEL(b)  ((b)->tx.info.bmap.DEL_IPv6)
+
+#define RX_GRE(b)       ((b)->rx.info.bmap.GRE)
+#define TX_GRE(b)       ((b)->tx.info.bmap.GRE)
+
+#define RX_IPV4ONLY(b)  (((b)->rx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv4))
+#define TX_IPV4ONLY(b)  (((b)->tx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv4))
+#define RX_IPV6ONLY(b)  (((b)->rx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv6))
+#define TX_IPV6ONLY(b)  (((b)->tx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv6))
+
+#define RX_IPV4_OUTER(b) (RX_IPV4ONLY(b) || RX_IPV4_DEL(b))
+#define TX_IPV4_OUTER(b) (TX_IPV4ONLY(b) || TX_IPV4_DEL(b))
+#define RX_IPV6_OUTER(b) (RX_IPV6ONLY(b) || RX_IPV6_DEL(b))
+#define TX_IPV6_OUTER(b) (TX_IPV6ONLY(b) || TX_IPV6_DEL(b))
+
+#define HDRS_IPV4       ((1 << PLD_IPv4) | (1 << DEL_IPv4))
+#define HDRS_IPV6       ((1 << PLD_IPv6) | (1 << DEL_IPv6))
+
+#define T4in6UP(b)      (RX_IPV4ONLY(b) && TX_IP4in6(b))
+#define T4in6DN(b)      (RX_IP4in6(b) && TX_IPV4ONLY(b))
+
+#define T6in4UP(b)      (RX_IPV6ONLY(b) && TX_IP6in4(b))
+#define T6in4DN(b)      (RX_IP6in4(b) && TX_IPV6ONLY(b))
+
+#define CHK4in6(b)      (T4in6UP(b) || T4in6DN(b))
+#define CHK6in4(b)      (T6in4UP(b) || T6in4DN(b))
+#define CHK4to4(b)      (RX_IPV4ONLY(b) && TX_IPV4ONLY(b))
+#define CHK6to6(b)      (RX_IPV6ONLY(b) && TX_IPV6ONLY(b))
+
+/* GRE-tunnelled (GIP) header combinations */
+#define HDRS_GIP4in4    ((1<<GRE) | HDRS_IP4in4)
+#define HDRS_GIP6in4    ((1<<GRE) | HDRS_IP6in4)
+
+#define RX_GIPV4ONLY(b)  (((b)->rx.info.hdrs & HDRS_IPinIP)== HDRS_GIP4)
+#define TX_GIPV4ONLY(b)  (((b)->tx.info.hdrs & HDRS_IPinIP)== HDRS_GIP4)
+
+#define RX_GIP4in4(b)   (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in4)
+#define TX_GIP4in4(b)   (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in4)
+#define RX_GIP6in4(b)   (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in4)
+#define TX_GIP6in4(b)   (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in4)
+#define RX_GIP46in4(b)  (RX_GIP4in4(b) || RX_GIP6in4(b))
+#define TX_GIP46in4(b)  (TX_GIP4in4(b) || TX_GIP6in4(b))
+
+#define TG4in4UP(b)     (RX_IPV4ONLY(b) && TX_GIP4in4(b))
+#define TG4in4DN(b)     (RX_GIP4in4(b) && TX_IPV4ONLY(b))
+#define TG6in4UP(b)     (RX_IPV6ONLY(b) && TX_GIP6in4(b))
+#define TG6in4DN(b)     (RX_GIP6in4(b) && TX_IPV6ONLY(b))
+
+#define CHKG4in4(b)     (TG4in4UP(b) || TG4in4DN(b))
+#define CHKG6in4(b)     (TG6in4UP(b) || TG6in4DN(b))
+#define CHKG46in4UP(b)  (TG4in4UP(b) || TG6in4UP(b))
+#define CHKG46in4DN(b)  (TG4in4DN(b) || TG6in4DN(b))
+#define CHKG46in4(b)    (CHKG4in4(b) || CHKG6in4(b))
+
+#define PTG4(b)         (RX_GIPV4ONLY(b) && TX_GIPV4ONLY(b))
+
+#define RX_PPTP(b)       ((b)->rx.info.bmap.PPTP)
+#define TX_PPTP(b)       ((b)->tx.info.bmap.PPTP)
+
+#define RX_L2TP(b)       ((b)->rx.info.bmap.L2TP)
+#define TX_L2TP(b)       ((b)->tx.info.bmap.L2TP)
+
+/* Extract/insert the 8-bit traffic class within an IPv6 ver/tc/flow word */
+#define PKT_IPV6_GET_TOS_WORD(word)       \
+   ((ntohl(word) & 0x0FF00000) >> 20)
+
+#define PKT_IPV6_SET_TOS_WORD(word, tos)  \
+   (word = htonl((ntohl(word) & 0xF00FFFFF) | ((tos << 20) & 0x0FF00000)))
+
+/* BLOG_LOCK Definitions: protects blog state; _bh variants also disable softirqs */
+extern spinlock_t blog_lock_g;
+#define BLOG_LOCK_BH()      spin_lock_bh( &blog_lock_g )
+#define BLOG_UNLOCK_BH()    spin_unlock_bh( &blog_lock_g )
+
+/* IPv6 address as overlapping byte/halfword/word views */
+typedef struct ip6_addr {
+    union {
+        uint8_t     p8[16];
+        uint16_t    p16[8];
+        uint32_t    p32[4];
+    };
+} ip6_addr_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Buffer to log IPv6 Tuple.
+ * Packed: 3 16byte cachelines
+ *------------------------------------------------------------------------------
+ */
+struct blogTupleV6_t {
+    union {
+        uint32_t    word0;      /* presumably IPv6 ver/tclass/flow word — confirm */
+    };
+
+    union {
+        uint32_t    word1;
+        struct {
+            uint16_t length;    /* presumably IPv6 payload length — confirm */
+            uint8_t next_hdr;
+            uint8_t rx_hop_limit;
+        };
+    };
+
+    ip6_addr_t      saddr;
+    ip6_addr_t      daddr;
+
+    union {
+        struct {
+            uint16_t    source;     /* L4 source port */
+            uint16_t    dest;       /* L4 dest port */
+        }           port;
+        uint32_t    ports;
+    };
+
+    union {
+        struct {
+            uint8_t     exthdrs:6;  /* Bit field of IPv6 extension headers */
+            uint8_t     fragflag:1; /* 6in4 Upstream IPv4 fragmentation flag */
+            uint8_t     tunnel:1;   /* Indication of IPv6 tunnel */
+            uint8_t     tx_hop_limit;
+            uint16_t    ipid;       /* 6in4 Upstream IPv4 identification */
+        };
+        uint32_t   word2;
+    };
+
+} ____cacheline_aligned;
+typedef struct blogTupleV6_t BlogTupleV6_t;
+
+typedef union blogGreFlags {
+    uint16_t    u16;
+    struct {
+        BE_DECL(
+            uint16_t csumIe : 1;
+            uint16_t rtgIe  : 1;
+            uint16_t keyIe  : 1;
+            uint16_t seqIe  : 1;
+            uint16_t srcRtIe: 1;
+            uint16_t recurIe: 3;
+            uint16_t ackIe  : 1;
+
+            uint16_t flags  : 4;
+            uint16_t ver    : 3;
+        )
+        LE_DECL(
+            uint16_t ver    : 3;
+            uint16_t flags  : 4;
+
+            uint16_t ackIe  : 1;
+            uint16_t recurIe: 3;
+            uint16_t srcRtIe: 1;
+            uint16_t seqIe  : 1;
+            uint16_t keyIe  : 1;
+            uint16_t rtgIe  : 1;
+            uint16_t csumIe : 1;
+        )
+    };
+} BlogGreFlags_t;
+
+struct blogGre_t {
+    BlogGreFlags_t  gre_flags;
+    union {
+        uint16_t    u16;
+        struct {
+            BE_DECL(
+                uint16_t reserved   : 10;
+                uint16_t fragflag   :  1;
+                uint16_t hlen       :  5;
+            )
+            LE_DECL(
+                uint16_t hlen       :  5;
+                uint16_t fragflag   :  1;
+                uint16_t reserved   : 10;
+            )
+        };
+    };
+    uint16_t    ipid;
+    uint16_t    l2_hlen;
+
+    union { /* PPTP (GREv1) specific fields */
+        struct {
+            uint16_t    keyLen;     /* key high half: presumably payload len — confirm */
+            uint16_t    keyId;      /* key low half: presumably call id — confirm */
+        };
+        uint32_t    key;
+    };
+    uint32_t            seqNum;
+    uint32_t            ackNum;
+    uint16_t            pppInfo;
+    uint16_t            pppProto;
+};
+typedef struct blogGre_t BlogGre_t;
+
+/* L2TP control/flags halfword; BE/LE declarations must be exact mirrors */
+typedef union blogL2tpFlags {
+    uint16_t    u16;
+    struct {
+        BE_DECL(
+            uint16_t type       : 1;
+            uint16_t lenBit     : 3;
+            uint16_t seqBit     : 2;
+            uint16_t offsetBit  : 1;
+            uint16_t priority   : 1;
+            uint16_t reserved   : 4;
+            uint16_t version    : 4;
+        )
+        LE_DECL(
+            uint16_t version    : 4;
+            uint16_t reserved   : 4;
+            uint16_t priority   : 1;
+            uint16_t offsetBit  : 1;    /* was int16_t: signed 1-bit holds only 0/-1 */
+            uint16_t seqBit     : 2;
+            uint16_t lenBit     : 3;
+            uint16_t type       : 1;
+        )
+    };
+} BlogL2tpFlags_t;
+
+struct blogL2tp_t {
+    BlogL2tpFlags_t  l2tp_flags;
+    uint16_t    length;
+    uint16_t    tunnelId;
+    uint16_t    sessionId;
+    uint16_t    seqNum;
+    uint16_t    expSeqNum;
+    uint16_t    offsetSize;
+    uint16_t    offsetPad;
+    union {
+        uint16_t    u16;
+        struct {
+            BE_DECL(
+                uint16_t reserved   : 10;
+                uint16_t fragflag   :  1;
+                uint16_t hlen       :  5;
+            )
+            LE_DECL(
+                uint16_t hlen       :  5;
+                uint16_t fragflag   :  1;
+                uint16_t reserved   : 10;
+            )
+        };
+    };
+    uint16_t    ipid;
+    uint16_t    unused;
+    uint16_t    udpLen;
+    uint16_t    udpCheck;
+    uint16_t    pppInfo;
+    uint16_t    pppProto;
+
+};
+typedef struct blogL2tp_t BlogL2tp_t;
+
+#define BLOG_L2TP_PPP_LEN  4            /* bytes of PPP framing inside L2TP */
+#define BLOG_L2TP_PORT     1701         /* well-known L2TP UDP port */
+
+#define BLOG_PPTP_PPP_LEN  4
+#define BLOG_PPTP_DS_PPPINFO  0X2145    /* PPTP downstream PPP info word */
+
+/*
+ *------------------------------------------------------------------------------
+ * Buffer to log Layer 2 and IP Tuple headers.
+ * Packed: 4 16byte cachelines
+ *------------------------------------------------------------------------------
+ */
+struct blogHeader_t {
+
+    BlogTuple_t         tuple;          /* L3+L4 IP Tuple log */
+
+    union {
+        void            * dev_p;        /* physical network device */
+        void            * reserved2;
+    };
+
+    union {
+        BlogInfo_t      info;
+        uint32_t        word;           /* channel, count, rfc2684, bmap */
+        uint32_t        pktlen;         /* stats info */
+    };
+
+    struct {                            /* flags describing the logged headers */
+        uint8_t             vlan_8021ad :1;     /* 8021AD stacked */
+        uint8_t             wan_qdisc   :1;     /* device type */
+        uint8_t             multicast   :1;     /* multicast flag */
+        uint8_t             fkbInSkb    :1;     /* fkb from skb */
+        uint8_t             count       :4;     /* # of L2 encapsulations */
+    };
+    uint8_t             length;         /* L2 header total length */
+    uint8_t /*BlogEncap_t*/ encap[ BLOG_ENCAP_MAX ];/* All L2 header types */
+
+    uint8_t             l2hdr[ BLOG_HDRSZ_MAX ];    /* Data of all L2 headers */
+
+} ____cacheline_aligned;
+
+typedef struct blogHeader_t BlogHeader_t;           /* L2 and L3+4 tuple */
+
+/* Coarse hash key: L1, L3, L4 hash */
+union blogHash_t {
+    uint32_t        match;              /* entire key viewed as one word */
+    struct {
+        uint8_t     unused;
+        uint8_t     protocol;           /* IP protocol */
+
+        struct {
+            uint8_t channel;
+
+            union {
+                struct {
+                    uint8_t         phyLen   : 4;
+                    uint8_t         phyType  : 4;
+                };
+                uint8_t         phy;    /* packed len/type, see BLOG_SET_PHYHDR */
+            };
+        } l1_tuple;
+    };
+};
+
+typedef union blogHash_t BlogHash_t;
+
+/* TBD : Rearrange following bit positions for optimization. */
+union blogWfd_t {
+    uint32_t    u32;
+    struct {
+        BE_DECL(
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_chain             : 1;/* is_chain=1 */
+           uint32_t            reserved1            : 12;/* unused */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            reserved0            : 1;/* unused */
+           uint32_t            priority             : 4;/* Tx Priority */
+           uint32_t            chain_idx            : 8;/* Tx chain index */
+        )
+        LE_DECL(
+           uint32_t            chain_idx            : 8;/* Tx chain index */
+           uint32_t            priority             : 4;/* Tx Priority */
+           uint32_t            reserved0            : 1;/* unused */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            reserved1            : 12;/* unused */
+           uint32_t            is_chain             : 1;/* is_chain=1 */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+        )
+    } nic_ucast;
+
+    struct {
+        BE_DECL(
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_chain             : 1;/* is_chain=0 */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            ssid                 : 4;/* SSID for WLAN */
+           uint32_t            reserved1            : 8;/* unused */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            priority             : 3;/* Tx Priority */
+           uint32_t            flowring_idx         :10;/* Tx flowring index */
+        )
+        LE_DECL(
+           uint32_t            flowring_idx         :10;/* Tx flowring index */
+           uint32_t            priority             : 3;/* Tx Priority */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            reserved1            : 8;/* unused */
+           uint32_t            ssid                 : 4;/* SSID for WLAN */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            is_chain             : 1;/* is_chain=0 */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+        )
+    } dhd_ucast;
+
+    struct {
+        BE_DECL(
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_chain             : 1;/* is_chain=0 */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            reserved1            : 2;/* unused */
+           uint32_t            ssid                 : 4;/* SSID */
+           uint32_t            reserved0            :19;/* unused */
+        )
+        LE_DECL(
+           uint32_t            reserved0            :19;/* unused */
+           uint32_t            ssid                 : 4;/* SSID */
+           uint32_t            reserved1            : 2;/* unused */
+           uint32_t            wfd_prio             : 1;/* 0=high, 1=low */
+           uint32_t            wfd_idx              : 2;/* WFD idx */
+           uint32_t            is_chain             : 1;/* is_chain=0 */
+           uint32_t            is_wfd               : 1;/* is_wfd=1 */
+           uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+           uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+        )
+    } mcast;
+};
+typedef union blogWfd_t BlogWfd_t;
+
+struct blogRnr_t {
+    BE_DECL(
+       uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+       uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */
+       uint32_t            is_wfd               : 1;/* rnr (is_wfd=0) */
+       uint32_t            radio_idx            : 2;/* Radio index */
+       uint32_t            reserved0            : 1;/* unused */
+       uint32_t            priority             : 3;/* Tx Priority */
+       uint32_t            ssid                 : 4;/* SSID */
+       uint32_t            reserved1            : 9;/* unused */
+       uint32_t            flowring_idx         :10;/* Tx flowring index */
+       )
+    LE_DECL(
+       uint32_t            flowring_idx         :10;/* Tx flowring index */
+       uint32_t            reserved1            : 9;/* unused */
+       uint32_t            ssid                 : 4;/* SSID */
+       uint32_t            priority             : 3;/* Tx Priority */
+       uint32_t            reserved0            : 1;/* unused */
+       uint32_t            radio_idx            : 2;/* Radio index */
+       uint32_t            is_wfd               : 1;/* rnr (is_wfd=0) */
+       uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+       uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+       )
+};
+
+typedef struct blogRnr_t BlogRnr_t;
+
+
+
+/*
+ *------------------------------------------------------------------------------
+ * TCP ACK flow prioritization.
+ * Any of the parameters given below can be changed based on the requirements. 
+ * The len parameter is IPv4/6 total/payload length and does not include any 
+ * L2 fields (like MAC DA, SA, EthType, VLAN, etc.)
+ *------------------------------------------------------------------------------
+ */
+#define BLOG_TCPACK_IPV4_LEN   64   /* IPv4 total len value for pure TCP ACK  */
+#define BLOG_TCPACK_IPV6_LEN   32   /* IPv6 len value for pure TCP ACK        */
+#define BLOG_TCPACK_XTM_TX_QID  2   /* Qid in which TCP ACK will be queued.
+                                       Change this Qid based on your reqts.
+                                       Qid shown in WebGUI in QoS Queue page  */
+#if defined(CONFIG_BCM963138) || defined(_BCM963138_) || defined(CONFIG_BCM963148) || defined(_BCM963148_)
+#define BLOG_TCPACK_MAX_COUNT   0   /* TCP ACK flow prioritization is not yet
+                                       supported by Runner. Set MAX_COUNT to 0
+                                       to disable it for now. */
+#else
+#define BLOG_TCPACK_MAX_COUNT   4   /* max # of back-to-back TCP ACKs received*/
+                                    /* after which the ACK flow is prioritized*/
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Buffer log structure.
+ * Packed: 17 16 byte cachelines, 272bytes per blog.
+ *------------------------------------------------------------------------------
+ */
+struct blog_t {
+
+    union {
+        void            * void_p;
+        struct blog_t   * blog_p;       /* Free list of Blog_t */
+        struct sk_buff  * skb_p;        /* Associated sk_buff */
+    };
+    BlogHash_t          key;            /* Coarse hash search key */
+    uint16_t            hash;           /* hash */
+    union {
+        uint32_t        wl;
+        struct {
+            BE_DECL(
+               uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+               uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+               uint32_t            reserved             : 30;
+            )
+            LE_DECL(
+               uint32_t            reserved             : 30;
+               uint32_t            is_tx_hw_acc_en      : 1;/* =1 if WLAN Transmit is capable of HW Acceleration */
+               uint32_t            is_rx_hw_acc_en      : 1;/* =1 if WLAN Receive is capable of HW Acceleration */
+            )
+        } wl_hw_support;
+        BlogWfd_t       wfd;
+        BlogRnr_t       rnr;
+    };
+    struct blog_t       * vblog_p;      /* vertical list of Blog_t */
+    void                * mc_fdb;       /* physical rx network device */
+
+    void                * fdb[2];       /* fdb_src and fdb_dst */
+    int8_t              delta[MAX_VIRT_DEV];  /* octet delta info */
+    uint8_t             vtag_num;
+
+    uint16_t            minMtu;
+    union {
+        uint16_t        flags;
+        struct {
+            uint16_t    ptm_us_bond: 1; /* PTM US Bonding Mode */
+            uint16_t    tos_mode_us: 1; /* ToS mode for US: fixed, inherit */
+            uint16_t    tos_mode_ds: 1; /* ToS mode for DS: fixed, inherit */
+            uint16_t    has_pppoe:   1;
+            uint16_t    ack_cnt:     4; /* back to back TCP ACKs for prio */
+            uint16_t    ack_done:    1; /* TCP ACK prio decision made */
+            uint16_t    nf_dir:      1;
+            uint16_t    pop_pppoa:   1;
+            uint16_t    insert_eth:  1;
+            uint16_t    iq_prio:     1;
+            uint16_t    mc_sync:     1;
+            uint16_t    rtp_seq_chk: 1; /* RTP sequence check enable       */
+            uint16_t    incomplete:  1;
+        };
+    };
+    union {
+        uint32_t            mark;           /* NF mark value on tx */
+        void                *dst_entry;     /*skb dst_entry for local_in*/
+    };
+
+    union { 
+        uint32_t            priority;       /* Tx  priority */
+        uint32_t            flowid;         /* used only for local in */
+    };
+
+    void                * blogRule_p;   /* List of Blog Rules */
+
+    union {
+        struct {
+            uint32_t dosAttack : 16;
+            uint32_t lenPrior  :  1;
+            uint32_t vlanPrior :  1;
+            uint32_t dscpMangl :  1;
+            uint32_t tosMangl  :  1;
+            uint32_t preMod    :  1;
+            uint32_t postMod   :  1;
+            uint32_t reserved  : 10;
+        };
+        uint32_t feature;       /* Feature set for per-packet modification */
+    };
+    union {
+        struct {
+            uint8_t vlanout_offset; /* Outer VLAN header offset */
+            uint8_t vlanin_offset;  /* Inner VLAN header offset */
+            uint8_t pppoe_offset;   /* PPPoE header offset */
+            uint8_t ip_offset;      /* IPv4 header offset */
+            uint8_t ip6_offset;     /* IPv6 header offset */
+            uint8_t l4_offset;      /* Layer 4 header offset */
+            uint8_t isWan;          /* Receiving by WAN interface */
+            uint8_t reserved8_1[1];
+        };
+        uint32_t offsets[2];
+    };
+    int (*preHook)(Blog_t *blog_p, void *nbuff_p);  /* Pre-modify hook */
+    int (*postHook)(Blog_t *blog_p, void *nbuff_p); /* Post-modify hook */
+
+    /* pointers to the devices which the flow goes thru */
+    void                * virt_dev_p[MAX_VIRT_DEV];
+    uint32_t            vid; /* vid stored in network order 
+                                to improve fcache performance */
+
+    BlogTupleV6_t       tupleV6;        /* L3+L4 IP Tuple log */
+
+    BlogHeader_t        tx;             /* Transmit path headers */
+    BlogHeader_t        rx;             /* Receive path headers */
+
+    uint32_t            dev_xmit;
+    /* Flow connection/session tracker */
+    void                * ct_p[BLOG_CT_MAX];
+    uint32_t            ct_ver[BLOG_CT_VER_MAX];
+    void                * tunl_p;
+    BlogActivateKey_t   activate_key;
+    union {
+        uint16_t        flags2;
+        struct {
+            uint16_t    unused     : 15;
+            uint16_t    mcast_learn:  1;
+        };
+    };
+    uint16_t            mcast_port_map;
+
+    union{
+        struct {
+            BlogGre_t grerx;
+            BlogGre_t gretx;
+        };
+        struct {
+            BlogL2tp_t l2tptx;
+        };
+    };
+    BlogTuple_t         delrx_tuple;    /* Del proto RX L3+L4 IP Tuple log */
+    BlogTuple_t         deltx_tuple;    /* Del proto TX L3+L4 IP Tuple log */
+} ____cacheline_aligned;
+
+/*
+ * -----------------------------------------------------------------------------
+ * Engineering constants: Pre-allocated pool size 400 blogs Ucast+Mcast
+ *
+ * Extensions done in #blogs carved from a 2x4K page (external fragmentation)
+ * Blog size = 240, 8192/240 = 34 extension 32bytes internal fragmentation
+ *
+ * Number of extensions engineered to permit approximately max # of flows
+ * (assuming one blog per flow).
+ * -----------------------------------------------------------------------------
+ */
+#define CC_BLOG_SUPPORT_EXTEND              /* Conditional compile            */
+#define BLOG_POOL_SIZE_ENGG         400     /* Pre-allocated pool size        */
+/* Number of Blog_t per extension */
+#define BLOG_EXTEND_SIZE_ENGG      (8192/sizeof(Blog_t))
+/* Maximum extensions allowed including 4K flows             */
+#define BLOG_EXTEND_MAX_ENGG       (16384/BLOG_EXTEND_SIZE_ENGG)
+
+
+
+extern const char       * strBlogAction[];
+extern const char       * strBlogEncap[];
+extern const char       * strRfc2684[];
+extern const uint8_t    rfc2684HdrLength[];
+extern const uint8_t    rfc2684HdrData[][16];
+
+
+#else
+struct blog_t {void * blogRule_p;};
+#define BLOG_LOCK_BH()
+#define BLOG_UNLOCK_BH()
+#endif /* defined(CONFIG_BLOG) */
+
+/*
+ * -----------------------------------------------------------------------------
+ * Blog functional interface
+ * -----------------------------------------------------------------------------
+ */
+
+
+/*
+ * -----------------------------------------------------------------------------
+ * Section 1. Extension of a packet context with a logging context
+ * -----------------------------------------------------------------------------
+ */
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#define blog_ptr(skb_p)         skb_p->blog_p
+#else
+#define blog_ptr(skb_p)         BLOG_NULL
+#endif
+
+/* Allocate or deallocate a Blog_t */
+Blog_t * blog_get(void);
+void     blog_put(Blog_t * blog_p);
+
+/* Allocate a Blog_t and associate with sk_buff or fkbuff */
+extern Blog_t * blog_skb(struct sk_buff  * skb_p);
+extern Blog_t * blog_fkb(struct fkbuff  * fkb_p);
+
+/* Clear association of Blog_t with sk_buff */
+extern Blog_t * blog_snull(struct sk_buff * skb_p);
+extern Blog_t * blog_fnull(struct fkbuff  * fkb_p);
+
+/* Clear association of Blog_t with sk_buff and free Blog_t object */
+extern void blog_free(struct sk_buff * skb_p);
+
+/* Disable further logging. Dis-associate with skb and free Blog object */
+extern void blog_skip(struct sk_buff * skb_p);
+
+/* Transfer association of a Blog_t object between two sk_buffs. */
+extern void blog_xfer(struct sk_buff * skb_p, const struct sk_buff * prev_p);
+
+/* Duplicate a Blog_t object for another skb. */
+extern void blog_clone(struct sk_buff * skb_p, const struct blog_t * prev_p);
+
+/* Copy a Blog_t object another blog object. */
+extern void blog_copy(struct blog_t * new_p, const struct blog_t * prev_p);
+
+/* get the Ingress QoS Prio from the blog */
+extern int blog_iq(const struct sk_buff * skb_p);
+
+/* get the flow cache status */
+extern int blog_fc_enabled(void);
+
+/* get the GRE tunnel accelerated status */
+extern int blog_gre_tunnel_accelerated(void);
+
+#define BLOG_PTM_US_BONDING_NO_HW_ACCELERATION            1
+#define BLOG_PTM_US_BONDING_HW_ACCELERATION               0
+
+extern void blog_ptm_us_bonding( struct sk_buff *skb_p, int mode );
+
+/* update DPI configuration to blog */
+extern int blog_dm(BlogDpiType_t type, uint32_t param1, uint32_t param2);
+
+typedef int (*blog_dpi_ctk_update_t)(uint32_t appid);
+extern blog_dpi_ctk_update_t blog_dpi_ctk_update_fn;
+
+/*
+ *------------------------------------------------------------------------------
+ *  Section 2. Associating native OS or 3rd-party network constructs
+ *------------------------------------------------------------------------------
+ */
+
+extern void blog_link(BlogNetEntity_t entity_type, Blog_t * blog_p,
+                      void * net_p, uint32_t param1, uint32_t param2);
+
+/*
+ *------------------------------------------------------------------------------
+ * Section 3. Network construct and Blog client co-existence call backs
+ *------------------------------------------------------------------------------
+ */
+
+extern void blog_notify(BlogNotify_t event, void * net_p,
+                        uint32_t param1, uint32_t param2);
+
+extern uint32_t blog_request(BlogRequest_t event, void * net_p,
+                        uint32_t param1, uint32_t param2);
+
+extern void blog_query(BlogQuery_t query, void * net_p,
+                        uint32_t param1, uint32_t param2, uint32_t param3);
+
+/*
+ *------------------------------------------------------------------------------
+ * Section 4. Network end-point binding of Blog client
+ *
+ * If rx hook is defined,
+ *  blog_sinit(): initialize a fkb from skb, and pass to hook
+ *          if packet is consumed, skb is released.
+ *          if packet is blogged, the blog is associated with skb.
+ *  blog_sinit_locked(): same as blog_sinit, but caller must have already
+ *          locked the blog layer, see blog_lock/blog_unlock in section 6.
+ *  blog_finit(): pass to hook
+ *          if packet is to be blogged, the blog is associated with fkb.
+ *  blog_finit_locked(): same as blog_finit, but caller must have already
+ *          locked the blog layer, see blog_lock/blog_unlock in section 6.
+ *
+ * If tx hook is defined, invoke tx hook, dis-associate and free Blog_t
+ *------------------------------------------------------------------------------
+ */
+extern BlogAction_t blog_sinit_locked(struct sk_buff *skb_p, void * dev_p,
+                             uint32_t encap, uint32_t channel, uint32_t phyHdr);
+
+extern BlogAction_t blog_sinit(struct sk_buff *skb_p, void * dev_p,
+                             uint32_t encap, uint32_t channel, uint32_t phyHdr);
+
+extern BlogAction_t blog_finit_locked(struct fkbuff *fkb_p, void * dev_p,
+                             uint32_t encap, uint32_t channel, uint32_t phyHdr);
+
+extern BlogAction_t blog_finit(struct fkbuff *fkb_p, void * dev_p,
+                             uint32_t encap, uint32_t channel, uint32_t phyHdr);
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+extern BlogAction_t _blog_emit(void * nbuff_p, void * dev_p,
+                             uint32_t encap, uint32_t channel, uint32_t phyHdr);
+
+static inline BlogAction_t blog_emit(void * nbuff_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr)
+{
+    if ( nbuff_p == NULL ) return PKT_NORM;
+    if ( !IS_SKBUFF_PTR(nbuff_p) ) return PKT_NORM;
+    // OK, this is something worth looking at, call real function
+    return ( _blog_emit(nbuff_p, dev_p, encap, channel, phyHdr) );
+}
+#else
+BlogAction_t blog_emit( void * nbuff_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr );
+#endif
+
+/*
+ * blog_iq_prio determines the Ingress QoS priority of the packet
+ */
+extern int blog_iq_prio(struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr);
+/*
+ *------------------------------------------------------------------------------
+ *  blog_activate(): static configuration function of blog application
+ *             pass a filled blog to the hook for configuration
+ *------------------------------------------------------------------------------
+ */
+extern uint32_t blog_activate( Blog_t * blog_p, BlogTraffic_t traffic,
+                               BlogClient_t client );
+
+/*
+ *------------------------------------------------------------------------------
+ *  blog_deactivate(): static deconfiguration function of blog application
+ *------------------------------------------------------------------------------
+ */
+extern Blog_t * blog_deactivate( uint32_t key, BlogTraffic_t traffic,
+                                 BlogClient_t client );
+
+/*
+ * -----------------------------------------------------------------------------
+ * User-defined filter invoked in the rx hook. A user may override the
+ * Blog action defined by the client. To enable the invocation of this API
+ * in blog_finit, ensure that CC_BLOG_SUPPORT_USER_FILTER is enabled. Also, a
+ * network device driver may directly invoke blog_filter() to override PKT_BLOG
+ * and return PKT_NORM (by releasing the associated Blog_t).
+ * -----------------------------------------------------------------------------
+ */
+extern BlogAction_t blog_filter(Blog_t * blog_p);
+
+/*
+ * -----------------------------------------------------------------------------
+ * Section 5. Binding Blog client applications:
+ *
+ * Blog defines three hooks:
+ *
+ *  RX Hook: If this hook is defined then blog_init() will pass the packet to
+ *           the Rx Hook using the FkBuff_t context. L1 and encap information
+ *           are passed to the receive hook. The private network device context 
+ *           may be extracted using the passed net_device object, if needed.
+ *
+ *  TX Hook: If this hook is defined then blog_emit() will check to see whether
+ *           the NBuff has a Blog_t, and if so pass the NBuff and Blog to the
+ *           bound Tx hook.
+ *
+ *  NotifHook: When blog_notify is invoked, the bound hook is invoked. Based on
+ *           event type the bound Blog client may perform a custom action.
+ *
+ *  SC Hook: If this hook is defined, blog_activate() will pass a blog with
+ *           necessary information for static configuration.
+ *
+ *  SD Hook: If this hook is defined, blog_deactivate() will pass a pointer
+ *           to a network object with BlogActivateKey information. The
+ *           respective flow entry will be deleted.
+ *
+ *  QueryHook: When blog_query is invoked, the bound hook is invoked. Based on
+ *           query type the bound Blog client will return result of query.
+ * -----------------------------------------------------------------------------
+ */
+typedef union {
+    struct {
+        uint8_t         unused      : 2;
+        uint8_t         QR_HOOK     : 1;
+        uint8_t         RX_HOOK     : 1;
+        uint8_t         TX_HOOK     : 1;
+        uint8_t         XX_HOOK     : 1;
+        uint8_t         SC_HOOK     : 1;
+        uint8_t         SD_HOOK     : 1;
+    } bmap;
+    uint8_t             hook_info;
+} BlogBind_t;
+
+typedef BlogAction_t (* BlogDevHook_t)(void * fkb_skb_p, void * dev_p,
+                                       uint32_t encap, uint32_t blogHash);
+
+typedef void (* BlogNotifyHook_t)(BlogNotify_t notification, void * net_p,
+                                  uint32_t param1, uint32_t param2);
+
+typedef void (* BlogQueryHook_t)(BlogQuery_t query, void * net_p,
+                            uint32_t param1, uint32_t param2, uint32_t param3);
+
+typedef uint32_t (* BlogScHook_t)(Blog_t * blog_p, BlogTraffic_t traffic);
+
+typedef Blog_t * (* BlogSdHook_t)(uint32_t key, BlogTraffic_t traffic);
+
+extern void blog_bind(BlogDevHook_t rx_hook,    /* Client Rx netdevice handler*/
+                      BlogDevHook_t tx_hook,    /* Client Tx netdevice handler*/
+                      BlogNotifyHook_t xx_hook, /* Client notification handler*/
+                      BlogQueryHook_t qr_hook,  /* Client query handler       */
+                      BlogBind_t   bind
+                     );
+                     
+extern int hw_accelerator_client_get(void);
+extern int sw_accelerator_client_get(void);
+                     
+extern void blog_bind_config(BlogScHook_t sc_hook,    /* Client static config handler*/
+                             BlogSdHook_t sd_hook,    /* Client static deconf handler*/
+                             BlogClient_t client,     /* Static configuration Client */
+                             BlogBind_t   bind
+                            );
+
+/*
+ * -----------------------------------------------------------------------------
+ * Section 6. Miscellaneous
+ * -----------------------------------------------------------------------------
+ */
+
+/* Logging of L2|L3 headers */
+extern void blog(struct sk_buff * skb_p, BlogDir_t dir, BlogEncap_t encap,  
+                 size_t len, void * data_p);
+
+/* Dump a Blog_t object */
+extern void blog_dump(Blog_t * blog_p);
+
+/* Get the minimum Tx MTU for a blog */
+uint16_t blog_getTxMtu(Blog_t * blog_p);
+
+/*
+ * Lock and unlock the blog layer.  This is used to reduce the number of
+ * times the blog lock must be acquired and released during bulk rx processing.
+ * See also blog_finit_locked.
+ */
+extern void blog_lock(void);
+extern void blog_unlock(void);
+
+/*
+ * Per-packet modification feature
+ */
+#define BLOG_MAX_FEATURES               8
+
+#define BLOG_LEN_PARAM_INDEX            0
+#define BLOG_DSCP_PARAM_INDEX           1
+#define BLOG_TOS_PARAM_INDEX            2
+
+#define BLOG_MAX_LEN_TBLSZ              8
+#define BLOG_MAX_DSCP_TBLSZ            64
+#define BLOG_MAX_TOS_TBLSZ            256
+
+#define BLOG_LEN_PARAM_NUM              4
+#define BLOG_MAX_PARAM_NUM              4
+
+#define BLOG_MIN_LEN_INDEX              0
+#define BLOG_MAX_LEN_INDEX              1
+#define BLOG_ORIGINAL_MARK_INDEX        2
+#define BLOG_TARGET_MARK_INDEX          3
+
+#define BLOG_MATCH_DSCP_INDEX           0
+#define BLOG_TARGET_DSCP_INDEX          1
+
+#define BLOG_MATCH_TOS_INDEX            0
+#define BLOG_TARGET_TOS_INDEX           1
+
+#define BLOG_INVALID_UINT8   ((uint8_t)(-1))
+#define BLOG_INVALID_UINT16 ((uint16_t)(-1))
+#define BLOG_INVALID_UINT32 ((uint32_t)(-1))
+
+extern int blog_set_ack_tbl(uint32_t val[]);
+extern int blog_clr_ack_tbl(void);
+extern int blog_set_len_tbl(uint32_t val[]);
+extern int blog_clr_len_tbl(void);
+extern int blog_set_dscp_tbl(uint8_t idx, uint8_t val);
+extern int blog_clr_dscp_tbl(void);
+extern int blog_set_tos_tbl(uint8_t idx, uint8_t val);
+extern int blog_clr_tos_tbl(void);
+extern int blog_pre_mod_hook(Blog_t *blog_p, void *nbuff_p);
+extern int blog_post_mod_hook(Blog_t *blog_p, void *nbuff_p);
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+#define BLOG_GRE_RCV_NOT_GRE             2
+#define BLOG_GRE_RCV_NO_SEQNO            1
+#define BLOG_GRE_RCV_IN_SEQ              0
+#define BLOG_GRE_RCV_NO_TUNNEL          -1
+#define BLOG_GRE_RCV_FLAGS_MISSMATCH    -2
+#define BLOG_GRE_RCV_CHKSUM_ERR         -3
+#define BLOG_GRE_RCV_OOS_LT             -4
+#define BLOG_GRE_RCV_OOS_GT             -5
+
+extern int blog_gre_rcv( struct fkbuff *fkb_p, void * dev_p, uint32_t h_proto, 
+                         void **tunl_pp, uint32_t *pkt_seqno_p);
+extern void blog_gre_xmit( struct sk_buff *skb_p, uint32_t h_proto );
+#endif
+
+#if defined(CONFIG_ACCEL_PPTP)
+#define BLOG_PPTP_RCV_NOT_PPTP             2
+#define BLOG_PPTP_RCV_NO_SEQNO            1
+#define BLOG_PPTP_RCV_IN_SEQ              0
+#define BLOG_PPTP_RCV_NO_TUNNEL          -1
+#define BLOG_PPTP_RCV_FLAGS_MISSMATCH    -2
+#define BLOG_PPTP_RCV_CHKSUM_ERR         -3
+#define BLOG_PPTP_RCV_OOS_LT             -4
+#define BLOG_PPTP_RCV_OOS_GT             -5
+extern int blog_pptp_rcv( struct fkbuff *fkb_p, uint32_t h_proto, 
+                          uint32_t *rcv_pktSeq);
+extern void blog_pptp_xmit( struct sk_buff *skb_p, uint32_t h_proto );
+#endif
+
+#define BLOG_L2TP_RCV_TUNNEL_FOUND       1
+#define BLOG_L2TP_RCV_NO_TUNNEL          0
+
+#if defined(CONFIG_BCM_PKTFLOW_MODULE) || defined(CONFIG_BCM_PKTFLOW)
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+#define VLANID_VID_MASK      0x0FFF
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM)
+#define VLANID_VID_MASK      0xFF0F
+#endif
+
+typedef struct {
+    union {
+        struct {
+            BE_DECL(
+                uint32_t  vid1 : 16;
+                uint32_t  vid0 : 16;
+            )
+            LE_DECL(
+                uint32_t  vid0 : 16;
+                uint32_t  vid1 : 16;
+            )
+        } tag;
+        uint32_t word;
+    };
+} VlanID_t;
+#endif
+
+#endif /* defined(__BLOG_H_INCLUDED__) */
+
+#endif /* CONFIG_BCM_KF_BLOG */
diff --git a/include/linux/blog_net.h b/include/linux/blog_net.h
new file mode 100644
index 0000000000000000000000000000000000000000..d286f2e3e039547b109bce3676225348b083e887
--- /dev/null
+++ b/include/linux/blog_net.h
@@ -0,0 +1,744 @@
+#ifndef __BLOG_NET_H_INCLUDED__
+#define __BLOG_NET_H_INCLUDED__
+
+/*
+<:copyright-BRCM:2003:DUAL/GPL:standard
+
+   Copyright (c) 2003 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : blog_net.h
+ *
+ * Description:
+ *
+ * Global definitions and declaration of Protocol Headers independent of OS as
+ * per IEEE and RFC standards.  Inlined utilities for header access.
+ *
+ * CAUTION: All protocol header structures are declared for Big Endian access
+ * and are not compatible for a Little Endian machine.
+ *
+ * CAUTION: It is also assumed that the Headers are AT LEAST 16bit aligned.
+ *
+ *******************************************************************************
+ */
+
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+#define BE_DECL(declarations)   declarations
+#define BE_CODE(statements)     do { statements } while (0)
+#define LE_DECL(declarations)
+#define LE_CODE(statements)     NULL_STMT
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM)
+#define BE_DECL(declarations)
+#define BE_CODE(statements)     NULL_STMT
+#define LE_DECL(declarations)   declarations
+#define LE_CODE(statements)     do { statements } while (0)
+#else
+#error "Compile: fix endianess in platform.h"
+#endif
+
+
+/*----- ETH_TYPE: Standard well-defined Ethernet Encapsulations --------------*/
+#define BLOG_ETH_P_ETH_BRIDGING 0x6558  /* Transparent Ethernet bridging      */
+#define BLOG_ETH_P_IPV4         0x0800  /* IPv4 in Ethernet                   */
+#define BLOG_ETH_P_ARP          0x0806  /* Address Resolution packet          */
+#define BLOG_ETH_P_RARP         0x8035  /* Reverse ARP                        */
+#define BLOG_ETH_P_8021Q        0x8100  /* 802.1Q VLAN Extended Header        */
+#define BLOG_ETH_P_8021AD       0x88A8  /* VLAN Stacking 802.1ad              */
+#define BLOG_ETH_P_IPV6         0x86DD  /* Internet Protocol Version 6        */
+#define BLOG_ETH_P_MPLS_UC      0x8847  /* MPLS - Unicast                     */
+#define BLOG_ETH_P_MPLS_MC      0x8848  /* MPLS - Multicast                   */
+#define BLOG_ETH_P_ATMMPOA      0x884c  /* MultiProtocol Over ATM             */
+#define BLOG_ETH_P_PPP_DIS      0x8863  /* PPPoE Discovery                    */
+#define BLOG_ETH_P_PPP_SES      0x8864  /* PPPoE Session                      */
+#define BLOG_ETH_JUMBO_FRAME    0x8870  /* Jumbo frame indicator              */
+#define BLOG_ETH_P_BRCM6TAG     0x8874  /* BRCM Switch Hdr : 6 byte           */
+#define BLOG_ETH_P_BRCM4TAG     0x888A  /* BRCM Switch Hdr : 4 byte           */
+#define BLOG_ETH_P_PAUSE        0x8808  /* IEEE Pause frames. 802.3 31B       */
+#define BLOG_ETH_P_SLOW         0x8809  /* Slow Protocol. See 802.3ad 43B     */
+#define BLOG_ETH_P_8021AG       0x8902  /* 802.1ag Connectivity FaultMgmt     */
+                                            /* ITU-T recomm Y.1731 (OAM)      */
+#define BLOG_ETH_FCOE           0x8906  /* Fibre Channel over Ethernet        */
+#define BLOG_ETH_FCOE_INIT      0x8914  /* FCoE Initialization Protocol       */
+#define BLOG_ETH_QINQ1          0x9100  /* 802.1Q in Q, alternate 1           */
+#define BLOG_ETH_QINQ2          0x9200  /* 802.1Q in Q, alternate 2           */
+
+/*----- PPP_TYPE: Standard well-defined PPP Encapsulations -------------------*/
+#define BLOG_PPP_IPV4           0x0021  /* IPv4 in PPP                        */
+#define BLOG_PPP_IPCP           0x8021  /* IP Control Protocol                */
+#define BLOG_PPP_LCP            0xC021  /* Link Control Protocol              */
+#define BLOG_PPP_MP             0x003D  /* Multilink protocol                 */
+#define BLOG_PPP_IPV6           0x0057  /* IPv6 in PPP                        */
+#define BLOG_PPP_IPV6CP         0x8057  /* IPv6 Control Protocol              */
+#define BLOG_PPP_MPLSCP         0x80FD  /* MPLS Control Protocol???           */
+#define BLOG_PPP_MPLS_UC        0x0281  /* MPLS - Unicast                     */
+#define BLOG_PPP_MPLS_MC        0x0283  /* MPLS - Multicast                   */
+
+#define BLOG_GRE_PPP            0x880B  /* PPTP: PPP in GRE Tunnel            */
+
+/*----- IPPROTO: Standard well-defined IP Encapsulations ---------------------*/
+#define BLOG_IPPROTO_HOPOPTV6   0       /* IPv6 ext: Hop-by-Hop Option Header */
+#define BLOG_IPPROTO_ICMP       1       /* Internet Control Message Protocol  */
+#define BLOG_IPPROTO_IGMP       2       /* Internet Group Management Protocol */
+#define BLOG_IPPROTO_IPIP       4       /* IPIP tunnels e.g. 4in6             */
+#define BLOG_IPPROTO_TCP        6       /* Transmission Control Protocol      */
+#define BLOG_IPPROTO_EGP        8       /* Exterior Gateway Protocol          */
+#define BLOG_IPPROTO_UDP        17      /* User Datagram Protocol             */
+#define BLOG_IPPROTO_IPV6       41      /* IPv6-in-IPv4 tunnelling            */
+#define BLOG_IPPROTO_ROUTING    43      /* IPv6 ext: Routing Header           */
+#define BLOG_IPPROTO_FRAGMENT   44      /* IPv6 ext: Fragmentation Header     */
+#define BLOG_IPPROTO_RSVP       46      /* RSVP Protocol                      */
+#define BLOG_IPPROTO_GRE        47      /* Cisco GRE tunnels (rfc 1701,1702)  */
+#define BLOG_IPPROTO_ESP        50      /* Encapsulation Security Payload     */
+#define BLOG_IPPROTO_AH         51      /* Authentication Header Protocol     */
+#define BLOG_IPPROTO_ICMPV6     58      /* IPv6 ext: ICMPv6 Header            */
+#define BLOG_IPPROTO_NONE       59      /* IPv6 ext: NONE                     */
+#define BLOG_IPPROTO_DSTOPTS    60      /* IPv6 ext: Destination Options Hdr  */
+#define BLOG_IPPROTO_ANY_HOST_INTERNAL_PROTO   61  /* Any host internal proto */
+#define BLOG_IPPROTO_MTP        92      /* IPv6 ext: Mcast Transport Protocol */
+#define BLOG_IPPROTO_ENCAP      98      /* IPv6 ext: Encapsulation Header     */
+#define BLOG_IPPROTO_PIM        103     /* Protocol Independent Multicast     */
+#define BLOG_IPPROTO_COMP       108     /* Compression Header Protocol        */
+#define BLOG_IPPROTO_ANY_0HOP   114     /* Any Zero HOP                       */
+#define BLOG_IPPROTO_SCTP       132     /* Stream Control Transport Protocol  */
+#define BLOG_IPPROTO_UDPLITE    136     /* UDP-Lite (RFC 3828)                */
+
+#define BLOG_IPPROTO_UNASSIGN_B 141     /* Begin of unassigned range          */
+#define BLOG_IPPROTO_UNASSIGN_E 252     /* End of unassigned range            */
+#define BLOG_IPPROTO_RSVD_EXPT1 253     /* Reserved for experimentation       */
+#define BLOG_IPPROTO_RSVD_EXPT2 254     /* Reserved for experimentation       */
+#define BLOG_IPPROTO_RAW        255     /* Raw IP Packets                     */
+
+
+/* IGRS/UPnP using Simple Service Discovery Protocol SSDP over HTTPMU         */
+#define BLOG_HTTP_MCAST_UDP_DSTPORT 1900
+
+
+/*----- Ethernet IEEE 802.3 definitions ------------------------------------- */
+#define BLOG_LLC_SAP_SNAP       (0xAA)
+#define BLOG_LLC_SNAP_8023_DSAP (BLOG_LLC_SAP_SNAP)
+#define BLOG_LLC_SNAP_8023_SSAP (BLOG_LLC_SAP_SNAP)
+#define BLOG_LLC_SNAP_8023_Ctrl (0x3)
+#define BLOG_LLC_SNAP_8023_LEN  6
+
+#define BLOG_ETH_ADDR_LEN       6
+#define BLOG_ETH_TYPE_LEN       sizeof(uint16_t)
+#define BLOG_ETH_HDR_LEN        ((BLOG_ETH_ADDR_LEN * 2) + BLOG_ETH_TYPE_LEN)
+
+#define BLOG_ETH_MIN_LEN        60
+#define BLOG_ETH_FCS_LEN        4
+#define BLOG_ETH_MTU_LEN        0xFFFF    /* Initial minMtu value               */
+
+#define BLOG_ETH_ADDR_FMT       "[%02X:%02X:%02X:%02X:%02X:%02X]"
+#define BLOG_ETH_ADDR(e)        e.u8[0],e.u8[1],e.u8[2],e.u8[3],e.u8[4],e.u8[5]
+
+typedef union BlogEthAddr {
+    uint8_t      u8[BLOG_ETH_ADDR_LEN];
+    uint16_t    u16[BLOG_ETH_ADDR_LEN/sizeof(uint16_t)];
+} BlogEthAddr_t;
+
+typedef struct BlogEthHdr {
+    union {
+        uint8_t     u8[BLOG_ETH_HDR_LEN];
+        uint16_t   u16[BLOG_ETH_HDR_LEN/sizeof(uint16_t)];
+        struct {
+            BlogEthAddr_t macDa;
+            BlogEthAddr_t macSa;
+    /*
+     * CAUTION: Position of ethType field of an Ethernet header depends on
+     * the presence and the number of VLAN Tags
+     * E.g. A single tagged Ethernet frame will have the ethType at offset 16.
+     */
+            uint16_t    ethType;    /* or length */
+        };
+    };
+} BlogEthHdr_t;
+
+/* 16bit aligned access MAC Address functions */
+static inline int blog_is_zero_eth_addr(uint8_t * addr_p)
+{
+    uint16_t * u16_p = (uint16_t *)addr_p;  /* assert u16_p is 16bit aligned */
+    return ( (u16_p[0] & u16_p[1] & u16_p[2]) == 0x0000 );
+}
+
+static inline int blog_is_bcast_eth_addr(uint8_t * addr_p)
+{
+    uint16_t * u16_p = (uint16_t *)addr_p;  /* assert u16_p is 16bit aligned */
+    return ( (u16_p[0] & u16_p[1] & u16_p[2]) == 0xFFFF );
+}
+
+/* Caution an IP mcast over PPPoE need not have a mcast MacDA */
+static inline int blog_is_mcast_eth_addr(uint8_t * addr_p)
+{
+#if 1
+    return *(addr_p+0) & 0x01;
+#else   /* Multicast (e.g. over PPPoE) may use unicast MacDA */
+    uint16_t * u16_p = (uint16_t *)addr_p;  /* assert u16_p is 16bit aligned */
+    if ( ((u16_p[0] == 0x0100)              /* IPv4: 01:00:5E:`1b0 */
+           && (*(addr_p+2) == 0x5e) && ((*(addr_p+3) & 0x80) == 0) )
+       || ( u16_p[0] == 0x3333)             /* IPv6: 33:33 */
+       )
+        return 1;
+    else
+        return 0;
+#endif
+}
+
+static inline int blog_cmp_eth_addr(uint8_t * addr1_p, uint8_t * addr2_p)
+{
+    uint16_t *a1 = (uint16_t *)addr1_p;
+    uint16_t *a2 = (uint16_t *)addr2_p;
+    return ( ((a1[0] ^ a2[0]) | (a1[1] ^ a2[1]) | (a1[2] ^ a2[2])) != 0 );
+}
+
+
+/*----- 6Byte Brcm6Hdr layout for 5397/98 Switch Management Port Tag ---------*/
+#define BLOG_BRCM6_HDR_LEN      6
+
+typedef struct BlogBrcm6Hdr {
+    union {
+        uint8_t     u8[BLOG_BRCM6_HDR_LEN];
+        uint16_t   u16[BLOG_BRCM6_HDR_LEN/sizeof(uint16_t)];
+            /*
+             * egress:          opcode:3, fbcount:14, rsvd:11, srcPortId:4
+             * ingress_port     opcode:3, rsvd:25, dstPortId:4
+             * ingress_map      opcode:3, rsvd:20, fwdMap:9
+             */
+    };
+} BlogBrcm6Hdr_t;
+
+
+/*----- 4Byte Brcm4Hdr layout for 53115 Switch Management Port Tag -----------*/
+#define BLOG_BRCM4_HDR_LEN      4
+
+typedef struct BlogBrcm4Hdr {
+    union {
+        uint8_t      u8[BLOG_BRCM4_HDR_LEN];
+        uint16_t    u16[BLOG_BRCM4_HDR_LEN/sizeof(uint16_t)];
+        /*
+         * egress       opcode:3, rsvd:13, rsvd2:2,
+         *              flooding:1, snooping:1, protocol:1, switching:1
+         *              learning:1, mirroring:1, tclass:3, srcpid:5
+         * ingress      opcode:3, tclass:3,
+         *              tagenforce:2, rsvd:1, dstmap:23
+         */
+    };
+} BlogBrcm4Hdr_t;
+
+/*----- Composite Ethernet with BRCM Tag -------------------------------------*/
+
+#define BLOG_ETHBRCM6_HDR_LEN   (BLOG_ETH_HDR_LEN + BLOG_BRCM6_HDR_LEN)
+#define BLOG_ETHBRCM4_HDR_LEN   (BLOG_ETH_HDR_LEN + BLOG_BRCM4_HDR_LEN)
+
+typedef struct BlogEthBrcm6Hdr {
+    union {
+        uint8_t      u8[BLOG_ETHBRCM6_HDR_LEN];
+        uint16_t    u16[BLOG_ETHBRCM6_HDR_LEN/sizeof(uint16_t)];
+        struct {
+            BlogEthAddr_t   macDa;
+            BlogEthAddr_t   macSa;
+            BlogBrcm6Hdr_t  brcm6;
+            uint16_t        ethType;
+        };
+    };
+} BlogEthBrcm6Hdr_t;
+
+typedef struct BlogEthBrcm4Hdr {
+    union {
+        uint8_t      u8[BLOG_ETHBRCM4_HDR_LEN];
+        uint16_t    u16[BLOG_ETHBRCM4_HDR_LEN/sizeof(uint16_t)];
+        struct {
+            BlogEthAddr_t   macDa;
+            BlogEthAddr_t   macSa;
+            BlogBrcm4Hdr_t  brcm4;
+            uint16_t        ethType;
+        };
+    };
+} BlogEthBrcm4Hdr_t;
+
+
+/*----- Vlan IEEE 802.1Q definitions -----------------------------------------*/
+#define BLOG_VLAN_HDR_LEN       4
+#define BLOG_VLAN_HDR_FMT       "[0x%08X] tpid<0x%04X> tci<0x%04X> "\
+                                "pbit<%u> dei<%u> vid<0x%03X>"
+#define BLOG_VLAN_HDR(v)        v.u32[0], v.tpid, v.tci.u16[0], \
+                                v.tci.pbits, v.tci.dei, v.tci.vid
+
+typedef struct BlogVlanTci {
+    union {
+        uint8_t     u8[sizeof(uint16_t)];
+        uint16_t    u16[1];
+        struct {
+            BE_DECL( uint16_t pbits:3; uint16_t dei:1; uint16_t vid:12; )
+            LE_DECL( uint16_t vid:12; uint16_t dei:1; uint16_t pbits:3; )
+        };
+    };
+} BlogVlanTci_t;
+
+typedef struct BlogVlanHdr {
+    union {
+        uint8_t      u8[BLOG_VLAN_HDR_LEN];
+        uint16_t    u16[BLOG_VLAN_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_VLAN_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            uint16_t tpid; BlogVlanTci_t tci; /* u8[ 88, A8, EA, AA ] */
+        };
+    };
+} BlogVlanHdr_t;
+
+
+/*----- PPPoE + PPP Header layout. PPPoE RFC 2516, PPP RFC 1661 --------------*/
+#define BLOG_PPPOE_HDR_LEN      8   /* Including PPP Header "PPP Type" */
+#define BLOG_PPP_HDR_LEN        sizeof(uint16_t)
+#define BLOG_PPPOE_HDR_FMT      "[0x%08X 0x%08X] ver<%u> type<%u> code<0x%02X>"\
+                                " sId<0x%04X> len<%u> pppType<0x%04X>"
+#define BLOG_PPPOE_HDR(p)       p.u32[0], p.u32[1], p.ver, p.type, p.code,\
+                                p.sId, p.len, p.pppType
+
+typedef uint16_t BlogPppHdr_t;
+
+typedef struct BlogPppoeHdr {   /* includes 2 byte PPP Type */
+    union {
+        uint8_t      u8[BLOG_PPPOE_HDR_LEN];
+        uint16_t    u16[BLOG_PPPOE_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_PPPOE_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            BE_DECL( uint16_t ver:4; uint16_t type:4; uint16_t code:8; )
+            LE_DECL( uint16_t code:8; uint16_t type:4; uint16_t ver:4; )
+            uint16_t sId; uint16_t len; BlogPppHdr_t pppType;
+        };
+    };
+} BlogPppoeHdr_t;
+
+
+/*----- Multi Protocol Label Switching Architecture: RFC 3031 ------------------
+ *
+ * 20b-label, 3b-tos, 1b-Stack, 8b-TTL
+ * StackBit==1? if label==0 then next is IPV4, if label==1 then next is IPV6
+ *------------------------------------------------------------------------------
+ */
+#define BLOG_MPLS_HDR_LEN       4
+
+typedef struct BlogMplsHdr {
+    union {
+        uint8_t   u8[BLOG_MPLS_HDR_LEN];
+        uint16_t u16[BLOG_MPLS_HDR_LEN/sizeof(uint16_t)];
+        uint32_t u32[BLOG_MPLS_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            BE_DECL( uint32_t label:20; uint32_t cos:3; uint32_t sbit:1; uint32_t ttl:8; )
+            LE_DECL( uint32_t ttl:8; uint32_t sbit:1; uint32_t cos:3; uint32_t label:20; )
+        };
+    };
+} BlogMplsHdr_t;
+
+
+/*----- IPv4: RFC 791 definitions --------------------------------------------*/
+#define BLOG_IPV4_HDR_LEN       20  /* Not including IP Options */
+#define BLOG_IPV4_ADDR_LEN      4
+#define BLOG_IPV4_HDR_FMT       "[0x%08X] ver<%u> ihl<%u> tos<0x%02X> len<%u> "\
+                                "[0x%08X] id<%u> df<%u> mf<%u> "\
+                                "fragOffset<0x%04X> [0x%08X] "\
+                                "ttl<%u> proto<%u> chkSum<0x%04X> "
+#define BLOG_IPV4_HDR(i)        i.u32[0], i.ver, i.ihl, i.tos, i.len, \
+                                i.u32[1], i.id, i.df, i.mf, i.fragOffset,\
+                                i.u32[2], i.ttl, i.proto, i.chkSum
+#define BLOG_IPTOS2DSCP(tos)    ((tos) >> 2)
+#define BLOG_IPDSCP2TOS(dscp)   ((dscp) << 2)
+
+#define BLOG_IPV4_ADDR_FMT      "<%03u.%03u.%03u.%03u>"
+#define BLOG_IPV4_ADDR_PORT_FMT "<%03u.%03u.%03u.%03u:%u>"
+#define BLOG_IPV4_ADDR(ip)      ((uint8_t*)&ip)[0], ((uint8_t*)&ip)[1],     \
+                                ((uint8_t*)&ip)[2], ((uint8_t*)&ip)[3]
+
+typedef union BlogIpv4Addr {
+    uint8_t   u8[BLOG_IPV4_ADDR_LEN];
+    uint16_t u16[BLOG_IPV4_ADDR_LEN/sizeof(uint16_t)];
+    uint32_t u32[BLOG_IPV4_ADDR_LEN/sizeof(uint32_t)];
+} BlogIpv4Addr_t;
+
+#define BLOG_IP_FLAG_CE         0x8000      /* Congestion */
+#define BLOG_IP_FLAG_DF         0x4000      /* Do Not Fragment */
+#define BLOG_IP_FLAG_MF         0x2000      /* More Fragment */
+#define BLOG_IP_FRAG_OFFSET     0x1FFF 
+
+typedef struct BlogIpv4Hdr {
+    union {
+        uint8_t      u8[BLOG_IPV4_HDR_LEN];
+        uint16_t    u16[BLOG_IPV4_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_IPV4_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            BE_DECL( uint8_t ver:4; uint8_t ihl:4; ) 
+            LE_DECL( uint8_t ihl:4; uint8_t ver:4; ) 
+            uint8_t   tos; uint16_t len;
+            uint16_t  id;
+            union {
+                uint16_t flagsFrag;
+                struct {
+                    BE_DECL( uint16_t cong:1; uint16_t df:1; 
+                             uint16_t moreFrag:1; uint16_t fragOffset:13; )
+                    LE_DECL( uint16_t fragOffset:13; uint16_t moreFrag:1; 
+                             uint16_t df:1; uint16_t cong:1; )
+                };
+            };
+            uint8_t ttl; uint8_t proto; uint16_t chkSum;
+            BlogIpv4Addr_t  sAddr;
+            BlogIpv4Addr_t  dAddr;
+        };
+    };
+} BlogIpv4Hdr_t;
+
+
+/*----- IPv6: RFC 2460 RFC 3513 definitions ----------------------------------*/
+/*
+ * Well-known IPv6 Address prefixes
+ *      Multicast:   FFXX::
+ *      Site local:  FEC0::
+ *      Link Local:  FE80::
+ *      Ucast 6to4:  2002::
+ */
+#define BLOG_IPV6_HDR_LEN       40
+#define BLOG_IPV6_ADDR_LEN      16
+
+#define BLOG_IPV6_ADDR_FMT      "<%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x>"
+#define BLOG_IPV6_ADDR_PORT_FMT "<%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u>"
+#define BLOG_IPV6_ADDR(ip)      \
+    ntohs(((uint16_t*)&ip)[0]), ntohs(((uint16_t*)&ip)[1]),   \
+    ntohs(((uint16_t*)&ip)[2]), ntohs(((uint16_t*)&ip)[3]),   \
+    ntohs(((uint16_t*)&ip)[4]), ntohs(((uint16_t*)&ip)[5]),   \
+    ntohs(((uint16_t*)&ip)[6]), ntohs(((uint16_t*)&ip)[7])
+
+
+typedef union BlogIpv6Addr {
+    uint8_t   u8[BLOG_IPV6_ADDR_LEN];
+    uint16_t  u16[BLOG_IPV6_ADDR_LEN/sizeof(uint16_t)];
+    uint32_t  u32[BLOG_IPV6_ADDR_LEN/sizeof(uint32_t)];
+} BlogIpv6Addr_t;
+
+typedef struct BlogIpv6Hdr {
+    union {
+        uint8_t      u8[BLOG_IPV6_HDR_LEN];
+        uint16_t    u16[BLOG_IPV6_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_IPV6_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            /* ver_tos bits -> ver 4: tos 8: flowlblHi 4 
+               using bit field results in unaligned access */
+            uint16_t ver_tos; uint16_t flowLblLo;
+            uint16_t len; uint8_t nextHdr; uint8_t hopLmt;
+            BlogIpv6Addr_t  sAddr;
+            BlogIpv6Addr_t  dAddr;
+        };
+    };
+} BlogIpv6Hdr_t;
+
+#define BLOG_IPV6EXT_HDR_LEN    8   /* multiple of 8 octets */
+typedef struct BlogIpv6ExtHdr {
+    union {
+        uint8_t      u8[BLOG_IPV6EXT_HDR_LEN];
+        uint16_t    u16[BLOG_IPV6EXT_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_IPV6EXT_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            uint8_t nextHdr; uint8_t hdrLen; uint16_t data16;
+            uint32_t data32;
+        };
+    };
+} BlogIpv6ExtHdr_t;
+
+
+/*----- Transmission Control Protocol: RFC 793 definitions -------------------*/
+
+#define BLOG_TCP_HDR_LEN        20
+
+#define TCPH_DOFF(t)            (((htons(t->offFlags.u16)) >> 12) & 0xF)
+#define TCPH_CWR(t)             (((htons(t->offFlags.u16)) >>  7) & 0x1)
+#define TCPH_ECE(t)             (((htons(t->offFlags.u16)) >>  6) & 0x1)
+#define TCPH_URG(t)             (((htons(t->offFlags.u16)) >>  5) & 0x1)
+#define TCPH_ACK(t)             (((htons(t->offFlags.u16)) >>  4) & 0x1)
+#define TCPH_PSH(t)             (((htons(t->offFlags.u16)) >>  3) & 0x1)
+#define TCPH_RST(t)             (((htons(t->offFlags.u16)) >>  2) & 0x1)
+#define TCPH_SYN(t)             (((htons(t->offFlags.u16)) >>  1) & 0x1)
+#define TCPH_FIN(t)             (((htons(t->offFlags.u16)) >>  0) & 0x1)
+
+typedef struct BlogTcpOffFlags {
+    union {
+        uint16_t u16;
+        struct { uint8_t off; uint8_t flags; };
+        struct {
+            BE_DECL(
+                uint16_t   dOff:   4;
+                uint16_t   res1:   4;
+                uint16_t   cwr :   1;
+                uint16_t   ece :   1;
+                uint16_t   urg :   1;
+                uint16_t   ack :   1;
+                uint16_t   psh :   1;
+                uint16_t   rst :   1;
+                uint16_t   syn :   1;
+                uint16_t   fin :   1;
+            )
+            LE_DECL(
+                uint16_t   fin :   1;
+                uint16_t   syn :   1;
+                uint16_t   rst :   1;
+                uint16_t   psh :   1;
+                uint16_t   ack :   1;
+                uint16_t   urg :   1;
+                uint16_t   ece :   1;
+                uint16_t   cwr :   1;
+                uint16_t   res1:   4;
+                uint16_t   dOff:   4;
+            )
+        };
+    };
+} BlogTcpOffFlags_t;
+
+typedef struct BlogTcpHdr {
+    union {
+        uint8_t      u8[BLOG_TCP_HDR_LEN];
+        uint16_t    u16[BLOG_TCP_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_TCP_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            uint16_t sPort; uint16_t dPort;
+            uint32_t seq;
+            uint32_t ackSeq;
+            BlogTcpOffFlags_t offFlags; uint16_t window;
+            uint16_t chkSum; uint16_t urgPtr;
+        };
+    };
+} BlogTcpHdr_t;
+
+
+/*----- User Datagram Protocol: RFC 768 definitions --------------------------*/
+#define BLOG_UDP_HDR_LEN        8
+
+typedef struct BlogUdpHdr {
+    union {
+        uint8_t      u8[BLOG_UDP_HDR_LEN];
+        uint16_t    u16[BLOG_UDP_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_UDP_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            uint16_t sPort; uint16_t dPort;
+            uint16_t len; uint16_t chkSum;
+        };
+    };
+} BlogUdpHdr_t;
+
+
+/*----- L2TP: RFC 2661 definitions -------------------------------------------*/
+#define BLOG_L2TP_HDR_LEN       8
+
+typedef struct BlogL2tpIeFlagsVer {
+    union {
+        uint16_t u16;
+        struct {
+            BE_DECL(
+                uint16_t   type   : 1;
+                uint16_t   lenIe  : 1;
+                uint16_t   rsvd2  : 2;
+                uint16_t   seqIe  : 1;
+                uint16_t   rsvd1  : 1;
+                uint16_t   offIe  : 1;
+                uint16_t   prio   : 1;
+                uint16_t   rsvd4  : 4;
+                uint16_t   ver    : 4;
+            )
+            LE_DECL(
+                uint16_t   ver    : 4;
+                uint16_t   rsvd4  : 4;
+                uint16_t   prio   : 1;
+                uint16_t   offIe  : 1;
+                uint16_t   rsvd1  : 1;
+                uint16_t   seqIe  : 1;
+                uint16_t   rsvd2  : 2;
+                uint16_t   lenIe  : 1;
+                uint16_t   type   : 1;
+            )
+        };
+    };
+} BlogL2tpIeFlagsVer_t;
+
+typedef struct BlogL2tpHdr {
+    union {
+        uint8_t      u8[BLOG_L2TP_HDR_LEN];
+        uint16_t    u16[BLOG_L2TP_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_L2TP_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            BlogL2tpIeFlagsVer_t ieFlagsVer; uint16_t len;
+            uint16_t tId; uint16_t sId;
+            /* uint16_t ns; uint16_t nr;
+               uint16_t offSz; uint16_t offPad; */
+        };
+    };
+} BlogL2tpHdr_t;
+
+
+/*----- Generic Routing Encapsulation: RFC 2637, PPTP session, RFC 2784 ------*/
+#define BLOG_GRE_HDR_LEN        8
+
+typedef struct BlogGreIeFlagsVer {
+    union {
+        uint16_t    u16;
+        struct {
+            BE_DECL(
+                uint16_t   csumIe : 1;
+                uint16_t   rtgIe  : 1;
+                uint16_t   keyIe  : 1;
+                uint16_t   seqIe  : 1;
+                uint16_t   srcRtIe: 1;
+                uint16_t   recurIe: 3;
+                uint16_t   ackIe  : 1;
+                uint16_t   flags  : 4;
+                uint16_t   ver    : 3;
+            )
+            LE_DECL(
+                uint16_t   ver    : 3;
+                uint16_t   flags  : 4;
+                uint16_t   ackIe  : 1;
+                uint16_t   recurIe: 3;
+                uint16_t   srcRtIe: 1;
+                uint16_t   seqIe  : 1;
+                uint16_t   keyIe  : 1;
+                uint16_t   rtgIe  : 1;
+                uint16_t   csumIe : 1;
+            )
+        };
+    };
+} BlogGreIeFlagsVer_t;
+
+typedef struct BlogGreHdr {
+    union {
+        uint8_t      u8[BLOG_GRE_HDR_LEN];
+        uint16_t    u16[BLOG_GRE_HDR_LEN/sizeof(uint16_t)];
+        uint32_t    u32[BLOG_GRE_HDR_LEN/sizeof(uint32_t)];
+        struct {
+            BlogGreIeFlagsVer_t ieFlagsVer; uint16_t proto;
+            /* RFC2784 specifies csum instead of len, for GRE ver = 0 */
+            /* RFC2637 specifies len, for GRE ver=1 used with PPTP    */
+            uint16_t len; uint16_t callId;
+            /* uint32_t seqNum; present if seqIe = 1 */
+            /* uint32_t ackNum; present if ackIe = 1 */
+        };
+    };
+} BlogGreHdr_t;
+
+/*
+ *------------------------------------------------------------------------------
+ *  Assert that headers are properly packed (without using attribute packed) 
+ *
+ *  #include <stdio.h>
+ *  #include <stdint.h>
+ *  #include "blog_net.h"
+ *  int main() {
+ *      printf("blog_net_audit_hdrs %d\n", blog_net_audit_hdrs() );
+ *      return blog_net_audit_hdrs();
+ *  }
+ *------------------------------------------------------------------------------
+ */
+static inline int blog_net_audit_hdrs(void)
+{
+#define BLOG_NET_AUDIT(hdrlen,hdrtype)  \
+    if (hdrlen != sizeof(hdrtype))      \
+        return (-1)
+
+    BLOG_NET_AUDIT( BLOG_ETH_ADDR_LEN, BlogEthAddr_t );
+    BLOG_NET_AUDIT( BLOG_ETH_HDR_LEN, BlogEthHdr_t );
+    BLOG_NET_AUDIT( BLOG_BRCM6_HDR_LEN, BlogBrcm6Hdr_t );
+    BLOG_NET_AUDIT( BLOG_BRCM4_HDR_LEN, BlogBrcm4Hdr_t );
+    BLOG_NET_AUDIT( BLOG_ETHBRCM6_HDR_LEN, BlogEthBrcm6Hdr_t );
+    BLOG_NET_AUDIT( BLOG_ETHBRCM4_HDR_LEN, BlogEthBrcm4Hdr_t );
+    BLOG_NET_AUDIT( BLOG_VLAN_HDR_LEN, BlogVlanHdr_t );
+    BLOG_NET_AUDIT( BLOG_PPPOE_HDR_LEN, BlogPppoeHdr_t );
+    BLOG_NET_AUDIT( BLOG_MPLS_HDR_LEN, BlogMplsHdr_t );
+    BLOG_NET_AUDIT( BLOG_IPV4_ADDR_LEN, BlogIpv4Addr_t );
+    BLOG_NET_AUDIT( BLOG_IPV4_HDR_LEN, BlogIpv4Hdr_t );
+    BLOG_NET_AUDIT( BLOG_IPV6_ADDR_LEN, BlogIpv6Addr_t );
+    BLOG_NET_AUDIT( BLOG_IPV6_HDR_LEN, BlogIpv6Hdr_t );
+    BLOG_NET_AUDIT( BLOG_TCP_HDR_LEN, BlogTcpHdr_t );
+    BLOG_NET_AUDIT( BLOG_UDP_HDR_LEN, BlogUdpHdr_t );
+    BLOG_NET_AUDIT( BLOG_L2TP_HDR_LEN, BlogL2tpHdr_t );
+    BLOG_NET_AUDIT( BLOG_GRE_HDR_LEN, BlogGreHdr_t );
+
+    return 0;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Network Utilities  : 16bit aligned
+ *------------------------------------------------------------------------------
+ */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM)
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_read32_align16
+ * Description  : Read a 32bit value from a 16bit aligned data stream
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t blog_read32_align16( uint16_t * from )
+{
+    return (uint32_t)( (from[1] << 16) | from[0] );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_write32_align16
+ * Description  : Write a 32bit value to a 16bit aligned data stream
+ *------------------------------------------------------------------------------
+ */
+static inline void blog_write32_align16( uint16_t * to, uint32_t from )
+{
+    to[1] = (uint16_t)htons(from >> 16);
+    to[0] = (uint16_t)htons(from >> 0);
+}
+
+#elif defined(CONFIG_CPU_BIG_ENDIAN)
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_read32_align16
+ * Description  : Read a 32bit value from a 16bit aligned data stream
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t blog_read32_align16( uint16_t * from )
+{
+    return (uint32_t)( (from[0] << 16) | (from[1]) );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_write32_align16
+ * Description  : Write a 32bit value to a 16bit aligned data stream
+ *------------------------------------------------------------------------------
+ */
+static inline void blog_write32_align16( uint16_t * to, uint32_t from )
+{
+    to[0] = (uint16_t)(from >> 16);
+    to[1] = (uint16_t)(from >>  0);
+}
+#endif /* defined(CONFIG_CPU_BIG_ENDIAN) */
+
+#endif /* defined(__BLOG_NET_H_INCLUDED__) */
diff --git a/include/linux/blog_rule.h b/include/linux/blog_rule.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f50d7c704e6a81c3268917679996f8254ad0c7d
--- /dev/null
+++ b/include/linux/blog_rule.h
@@ -0,0 +1,256 @@
+#if defined(CONFIG_BCM_KF_BLOG)
+#ifndef __BLOG_RULE_H_INCLUDED__
+#define __BLOG_RULE_H_INCLUDED__
+
+/* 
+* <:copyright-BRCM:2010:DUAL/GPL:standard
+* 
+*    Copyright (c) 2010 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : blog_rule.h
+ *
+ * Description: Blog rules are extensions to a Blog structure that can be used
+ *              to specify additional fiters and modifications.
+ *
+ *******************************************************************************
+ */
+
+#define CC_CONFIG_BLOG_RULE_DEBUG
+
+#define BLOG_RULE_VERSION      "v1.0"
+
+#define BLOG_RULE_VLAN_TAG_MAX  2
+
+#define BLOG_RULE_ACTION_MAX    16
+
+#define BLOG_RULE_PBITS_MASK    0xE000
+#define BLOG_RULE_PBITS_SHIFT   13
+#define BLOG_RULE_DEI_MASK      0x1000
+#define BLOG_RULE_DEI_SHIFT     12
+#define BLOG_RULE_VID_MASK      0x0FFF
+#define BLOG_RULE_VID_SHIFT     0
+
+#define BLOG_RULE_GET_TCI_PBITS(_tci) \
+    ( ((_tci) & BLOG_RULE_PBITS_MASK) >> BLOG_RULE_PBITS_SHIFT )
+
+#define BLOG_RULE_GET_TCI_DEI(_tci) \
+    ( ((_tci) & BLOG_RULE_DEI_MASK) >> BLOG_RULE_DEI_SHIFT )
+
+#define BLOG_RULE_GET_TCI_VID(_tci) \
+    ( (_tci) & BLOG_RULE_VID_MASK )
+
+#define BLOG_RULE_DSCP_IN_TOS_MASK    0xFC
+#define BLOG_RULE_DSCP_IN_TOS_SHIFT   2
+
+#define BLOG_RULE_IP_PROTO_MASK    0xFF
+#define BLOG_RULE_IP_PROTO_SHIFT   0
+#define BLOG_RULE_IP6_NXT_HDR_MASK    0xFF
+#define BLOG_RULE_IP6_NXT_HDR_SHIFT   0
+
+#define blog_rule_filterInUse(_filter)                          \
+    ({                                                          \
+        char *_filter_p = (char *)(&_filter);                   \
+        int _i, _val;                                           \
+        for(_i=0; _i<sizeof(_filter); ++_i) {                   \
+            if((_val = _filter_p[_i]) != 0) break;              \
+        }                                                       \
+        _val;                                                   \
+    })
+
+typedef struct {
+    struct ethhdr mask;
+    struct ethhdr value;
+} blogRuleFilterEth_t;
+
+typedef struct {
+    union {
+        struct vlan_hdr mask;
+        uint32_t mask32;
+    };
+    union {
+        struct vlan_hdr value;
+        uint32_t value32;
+    };
+} blogRuleFilterVlan_t;
+
+typedef struct {
+    /* only contains the fields we are interested in */
+    uint8_t tos;
+    uint8_t ip_proto;
+} blogRuleIpv4Header_t;
+
+typedef struct {
+    blogRuleIpv4Header_t mask;
+    blogRuleIpv4Header_t value;
+} blogRuleFilterIpv4_t;
+
+typedef struct {
+    /* only contains the fields we are interested in */
+    uint8_t tclass;
+    uint8_t nxtHdr;
+} blogRuleIpv6Header_t;
+
+typedef struct {
+    blogRuleIpv6Header_t mask;
+    blogRuleIpv6Header_t value;
+} blogRuleFilterIpv6_t;
+
+typedef struct {
+    uint32_t priority;     /* skb priority filter value is offset by 1 because
+                            * 0 is reserved to indicate filter not in use.
+                            * Therefore the supported skb priority range is
+                            * [0 to 0xfffffffe].
+                            */
+    uint16_t markFlowId;
+    uint16_t markPort;     /* port mark filter value is offset by 1 because
+                            * 0 is reserved to indicate filter not in use.
+                            * Therefore use 16-bit to cover the supported
+                            * port range [0 to 255].
+                            */ 
+} blogRuleFilterSkb_t;
+
+typedef struct {
+    blogRuleFilterEth_t eth;
+    uint32_t nbrOfVlanTags;
+    blogRuleFilterVlan_t vlan[BLOG_RULE_VLAN_TAG_MAX];
+    uint32_t hasPppoeHeader;
+    blogRuleFilterIpv4_t ipv4;
+    blogRuleFilterIpv6_t ipv6;
+    blogRuleFilterSkb_t  skb;
+    uint32_t flags;
+#define BLOG_RULE_FILTER_FLAGS_IS_UNICAST   0x0001
+#define BLOG_RULE_FILTER_FLAGS_IS_MULTICAST 0x0002
+#define BLOG_RULE_FILTER_FLAGS_IS_BROADCAST 0x0004
+} blogRuleFilter_t;
+
+#define BLOG_RULE_FILTER_FLAGS_ALL               \
+    ( BLOG_RULE_FILTER_FLAGS_IS_UNICAST   |      \
+      BLOG_RULE_FILTER_FLAGS_IS_MULTICAST |      \
+      BLOG_RULE_FILTER_FLAGS_IS_BROADCAST )
+
+#undef  BLOG_RULE_DECL
+#define BLOG_RULE_DECL(x) x
+
+typedef enum {
+    BLOG_RULE_DECL(BLOG_RULE_CMD_NOP=0),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_DA),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_SA),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_ETHERTYPE),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_PUSH_VLAN_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_POP_VLAN_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DEI),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VLAN_PROTO),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_DEI),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VLAN_PROTO),
+//    BLOG_RULE_DECL(BLOG_RULE_CMD_XLATE_DSCP_TO_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_POP_PPPOE_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DSCP),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_TTL),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_HOP_LIMIT),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DROP),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_PORT),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_QUEUE),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_OVRD_LEARNING_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_STA_MAC_ADDRESS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_MAX)
+} blogRuleCommand_t;
+
+typedef struct {
+    uint8_t cmd; // blogRuleCommand_t
+    uint8_t toTag;
+    union {
+        uint16_t etherType;
+        uint16_t tpid;
+        uint16_t pbits;
+        uint16_t dei;
+        uint16_t vid;
+        uint16_t vlanProto;
+        uint16_t dscp;
+        uint16_t fromTag;
+        uint16_t skbMarkQueue;
+        uint16_t skbMarkPort;
+        uint16_t arg;
+        uint8_t macAddr[ETH_ALEN];
+    };
+} blogRuleAction_t;
+
+typedef struct blogRule {
+    blogRuleFilter_t filter;
+    uint32_t actionCount;
+    blogRuleAction_t action[BLOG_RULE_ACTION_MAX];
+    struct blogRule *next_p;
+} blogRule_t;
+
+typedef enum {
+    BLOG_RULE_VLAN_NOTIFY_DIR_RX,
+    BLOG_RULE_VLAN_NOTIFY_DIR_TX,
+    BLOG_RULE_VLAN_NOTIFY_DIR_MAX
+} blogRuleVlanNotifyDirection_t;
+
+/*
+ * blogRuleVlanHook_t: The Linux VLAN manager must use this hook to register
+ * the handler that creates Blog Rules based on the configured VLAN Rules.
+ */
+typedef int (* blogRuleVlanHook_t)(Blog_t *blog_p,
+                                   struct net_device *rxVlanDev,
+                                   struct net_device *txVlanDev);
+
+/*
+ * blogRuleVlanNotifyHook_t: The Linux VLAN manager uses this hook to notify
+ * the registered handler whenever VLAN Rules are added or removed.
+ * The device (dev) can be either a VLAN interface or a Real interface.
+ */
+typedef void (* blogRuleVlanNotifyHook_t)(struct net_device *dev,
+                                          blogRuleVlanNotifyDirection_t direction,
+                                          uint32_t nbrOfTags);
+
+extern blogRuleVlanHook_t blogRuleVlanHook;
+extern blogRuleVlanNotifyHook_t blogRuleVlanNotifyHook;
+
+typedef int (* blogArlHook_t)(void *e);
+
+extern blogArlHook_t bcm_arl_process_hook_g;
+
+/* -------------- User API -------------- */
+
+blogRule_t *blog_rule_alloc(void);
+void blog_rule_free(blogRule_t *blogRule_p);
+int blog_rule_free_list(Blog_t *blog_p);
+void blog_rule_init(blogRule_t *blogRule_p);
+void blog_rule_dump(blogRule_t *blogRule_p);
+int blog_rule_add_action(blogRule_t *blogRule_p, blogRuleAction_t *action_p);
+int blog_rule_delete_action(void *rule_p);
+
+#endif /* defined(__BLOG_RULE_H_INCLUDED__) */
+#endif /* defined(CONFIG_BCM_KF_BLOG) */
diff --git a/include/linux/brcm_dll.h b/include/linux/brcm_dll.h
new file mode 100644
index 0000000000000000000000000000000000000000..b21858bcf1e65097d6ad53b839349fc1b6703c77
--- /dev/null
+++ b/include/linux/brcm_dll.h
@@ -0,0 +1,69 @@
+#ifndef _dll_t_
+#define _dll_t_
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard 
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+typedef struct dll_t {
+    struct dll_t * next_p;
+    struct dll_t * prev_p;
+} Dll_t, * PDll_t;
+
+#define dll_init(node_p)        ((node_p)->next_p = (node_p)->prev_p = (node_p))
+
+/* dll macros returning a PDll_t */
+#define dll_head_p(list_p)      ((list_p)->next_p)
+#define dll_tail_p(list_p)      ((list_p)->prev_p)
+
+#define dll_next_p(node_p)      ((node_p)->next_p)
+#define dll_prev_p(node_p)      ((node_p)->prev_p)
+
+#define dll_empty(list_p)       ((list_p)->next_p == (list_p))
+#define dll_end(list_p, node_p) ((list_p) == (node_p))
+
+/* inserts the node new_p "after" the node at_p */
+#define dll_insert(new_p, at_p) ((new_p)->next_p = (at_p)->next_p,      \
+                                 (new_p)->prev_p = (at_p),              \
+                                 (at_p)->next_p = (new_p),              \
+                                 (new_p)->next_p->prev_p = (new_p))
+
+#define dll_append(list_p, node_p)      dll_insert((node_p), dll_tail_p(list_p))
+#define dll_prepend(list_p, node_p)     dll_insert((node_p), (list_p))
+
+/* deletes a node from any list that it "may" be in, if at all. */
+#define dll_delete(node_p)      ((node_p)->prev_p->next_p = (node_p)->next_p, \
+                                 (node_p)->next_p->prev_p = (node_p)->prev_p)
+/**
+ * dll_for_each -   iterate over a list
+ * @pos:    the &struct dll_t pointer to use as a loop cursor.
+ * @head:   the head for your list.
+ */
+#define dll_for_each(pos, head) \
+    for (pos = (head)->next_p; pos != (head); pos = pos->next_p)
+
+#endif  /* ! defined(_dll_t_) */
diff --git a/include/linux/buzzz_kevt.h b/include/linux/buzzz_kevt.h
new file mode 100644
index 0000000000000000000000000000000000000000..ead31803db7e3fcdfde3d994f955b7330b51ce3c
--- /dev/null
+++ b/include/linux/buzzz_kevt.h
@@ -0,0 +1,154 @@
+#ifndef __buzzz_kevt_h_included__
+#define __buzzz_kevt_h_included__
+
+#if defined(CONFIG_BUZZZ_KEVT)
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * BCM BUZZZ ARM Cortex A9 Router Kernel events
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id$
+ *
+ * vim: set ts=4 noet sw=4 tw=80:
+ * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+#include <asm/buzzz.h>
+
+#undef BUZZZ_KEVT
+#define BUZZZ_KEVT(event)       BUZZZ_KEVT__ ## event,
+
+#undef  _B_
+#undef  _H_
+#undef  _N_
+#undef  _FAIL_
+#define _B_                     "\e[0;34m"
+#define _H_                     "\e[0;31m;40m"
+#define _N_                     "\e[0m"
+#define _FAIL_                  _H_ " === FAILURE ===" _N_
+
+#if defined(CONFIG_BUZZZ_KEVT)
+/* Expected events : Font = Normal */
+#define BUZZZ_KEVTN(event, format) \
+    buzzz_klog_reg(BUZZZ_KEVT__## event, "\t\t" format);
+
+/* Unexpected events: Font = bold2 highlighted */
+#define BUZZZ_KEVTH(event, format) \
+    buzzz_klog_reg(BUZZZ_KEVT__## event, _H_ "\t\t" format _N_);
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
+
+typedef
+enum buzzz_rtr_dpid
+{
+    BUZZZ_KEVT__DATAPATH_START = 100,
+
+	BUZZZ_KEVT(SAMPLE)
+
+	/* Enet */
+	BUZZZ_KEVT(ENET_RX_THREAD)
+	BUZZZ_KEVT(ENET_RX_BUDGET)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_BREAK)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_CONT)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_SKIP)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_CHAIN)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_FCACHE)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_SKB)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_NETIF_RX)
+	BUZZZ_KEVT(BCMEAPI_RX_PKT_NEXTRX)
+
+	/* Flow Cache */
+	BUZZZ_KEVT(FC_RECEIVE)
+	BUZZZ_KEVT(FC_STACK)
+	BUZZZ_KEVT(FC_PKT_DONE)
+
+	/* DHD */
+	BUZZZ_KEVT(DHD_START_XMIT)
+	BUZZZ_KEVT(DHD_PROT_TXDATA_BGN)
+	BUZZZ_KEVT(DHD_PROT_TXDATA_END)
+	BUZZZ_KEVT(DHD_PROT_CREDIT_DROP)
+	BUZZZ_KEVT(DHD_PROT_TXDESC_DROP)
+	BUZZZ_KEVT(DHD_PROT_PROCESS_BGN)
+	BUZZZ_KEVT(DHD_PROT_PROCESS_END)
+	BUZZZ_KEVT(DHD_PROCESS_TXSTATUS)
+	
+	BUZZZ_KEVT(CIRCULARBUF_WRITE_COMPLETE_BGN)
+	BUZZZ_KEVT(CIRCULARBUF_WRITE_COMPLETE_END)
+
+    /* NBUFF */
+	BUZZZ_KEVT(FKB_FLUSH)
+
+    /* WFD */
+	BUZZZ_KEVT(WFD_PKT_GET_BGN)
+    BUZZZ_KEVT(WFD_PKT_GET_PROG)
+    BUZZZ_KEVT(WFD_PKT_GET_END)
+	BUZZZ_KEVT(WFD_TX_HOOK_BGN)
+    BUZZZ_KEVT(WFD_TX_HOOK_END)
+
+} buzzz_rtr_dpid_t;
+
+
+/* Invoke this once in a datapath module's init */
+static inline int
+buzzz_dp_init(void)
+{
+#if defined(CONFIG_BUZZZ_KEVT)
+
+	BUZZZ_KEVTN(SAMPLE,                "sample pkt<%p>")
+
+	/* Enet */
+	BUZZZ_KEVTN(ENET_RX_THREAD,        "bcm63xx_enet_rx_thread loop")
+	BUZZZ_KEVTN(ENET_RX_BUDGET,        "bcm63xx_rx budget<%d>")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_BREAK,  "bcmeapi_rx_pkt break")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_CONT,   "bcmeapi_rx_pkt cont")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_SKIP,   "bcmeapi_rx_pkt skip")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_CHAIN,  "bcm63xx_rx tx chain")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_FCACHE, "bcm63xx_rx finit")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_SKB,    "bcm63xx_rx alloc skb")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_NETIF_RX, "bcm63xx_rx netif_receive_skb")
+	BUZZZ_KEVTN(BCMEAPI_RX_PKT_NEXTRX, "bcm63xx_rx next_rx")
+
+	/* Flow Cache */
+	BUZZZ_KEVTN(FC_RECEIVE,            "fc_receive")
+	BUZZZ_KEVTN(FC_STACK,              "fc_stack")
+	BUZZZ_KEVTN(FC_PKT_DONE,           "fc_stack PKT_DONE")
+
+	/* DHD */
+	BUZZZ_KEVTN(DHD_START_XMIT,        "dhd_start_xmit")
+	BUZZZ_KEVTN(DHD_PROT_TXDATA_BGN,   "dhd_prot_txdata bgn credit<%d>")
+	BUZZZ_KEVTN(DHD_PROT_TXDATA_END,   "dhd_prot_txdata end pktlen<%d>")
+	BUZZZ_KEVTH(DHD_PROT_CREDIT_DROP,  "dhd_prot_txdata credit DROP")
+	BUZZZ_KEVTH(DHD_PROT_TXDESC_DROP,  "dhd_prot_txdata txdesc DROP")
+	BUZZZ_KEVTN(DHD_PROT_PROCESS_BGN,  ">>> dhd_prot_process_msgbuf")
+	BUZZZ_KEVTN(DHD_PROT_PROCESS_END,  "<<< dhd_prot_process_msgbuf")
+	BUZZZ_KEVTN(DHD_PROCESS_TXSTATUS,  "dhd_prot_txstatus_process")
+	
+	BUZZZ_KEVTN(CIRCULARBUF_WRITE_COMPLETE_BGN, ">> circularbuf_write_complete")
+	BUZZZ_KEVTN(CIRCULARBUF_WRITE_COMPLETE_END, "<< circularbuf_write_complete")
+
+    /* NBUFF */
+	BUZZZ_KEVTN(FKB_FLUSH,  "_fkb_flush cache_op<%d> data<%p> dirty<%p> flush_len<%d>")
+
+    /* WFD */
+	BUZZZ_KEVTN(WFD_PKT_GET_BGN,  "rdpa_cpu_wfd_packet_get BGN")	
+    BUZZZ_KEVTN(WFD_PKT_GET_PROG, "rdpa_cpu_wfd_packet_get cnt<%u> PROG")
+	BUZZZ_KEVTN(WFD_PKT_GET_END,  "rdpa_cpu_wfd_packet_get cnt<%u> END")
+    BUZZZ_KEVTN(WFD_TX_HOOK_BGN,  "WFD Tx Hook BGN")
+    BUZZZ_KEVTN(WFD_TX_HOOK_END,  "WFD Tx Hook END")
+
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
+	return 0;
+}
+#else  /* ! CONFIG_BUZZZ */
+#define BUZZZ_DPL1(N, ID, ARG...)   do {} while (0)
+#define BUZZZ_DPL2(N, ID, ARG...)   do {} while (0)
+#define BUZZZ_DPL3(N, ID, ARG...)   do {} while (0)
+#define BUZZZ_DPL4(N, ID, ARG...)   do {} while (0)
+#define BUZZZ_DPL5(N, ID, ARG...)   do {} while (0)
+#endif /* ! CONFIG_BUZZZ */
+
+#endif /* __buzzz_kevt_h_included__ */
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 3c80fd7e8b567c8f6d8cd768c120926a424ec577..50e10f7a6fc8c5f146c1221bb6de73b191ad4ebd 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -40,6 +40,12 @@
 #define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
 #define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
 
+#if defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE)
+/* Force always inlining.
+   If inline were already defined we would get a redefinition warning,
+   as we do not #undef it a priori. */
+#define inline      inline      __attribute__((always_inline))
+#endif
+
 static inline __le64 __cpu_to_le64p(const __u64 *p)
 {
 	return (__force __le64)__swab64p(p);
@@ -88,6 +94,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p)
 {
 	return (__force __u16)*p;
 }
+
+#if defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE)
+#undef inline
+/* inline now means inline with no attributes */
+#endif
+
 #define __cpu_to_le64s(x) __swab64s((x))
 #define __le64_to_cpus(x) __swab64s((x))
 #define __cpu_to_le32s(x) __swab32s((x))
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e5834aa24b9ec2a287026d58caf097bf438df055..f613134e8972ac098dd9671dc1eb693412ac37b2 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -45,6 +45,23 @@
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old:
  */
+#if defined(CONFIG_BCM_KF_BOUNCE)
+
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+    !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) || \
+    !defined(CONFIG_BRCM_BOUNCE)
+# define inline		inline		__attribute__((always_inline))
+# define __inline__	__inline__	__attribute__((always_inline))
+# define __inline	__inline	__attribute__((always_inline))
+#else
+/* A lot of inline functions can cause havoc with function tracing */
+# define inline		inline		notrace
+# define __inline__	__inline__	notrace
+# define __inline	__inline	notrace
+#endif
+
+#else 
+
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
 # define inline		inline		__attribute__((always_inline))
@@ -57,6 +74,8 @@
 # define __inline	__inline	notrace
 #endif
 
+#endif /* CONFIG_BRCM_BOUNCE */
+
 #define __deprecated			__attribute__((deprecated))
 #define __packed			__attribute__((packed))
 #define __weak				__attribute__((weak))
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b60f6ba01d0c787225b626c5f4b4eef2cdca1e37..7a669c92be2d3a39d5cf9864c9fa4bb72987809e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -236,6 +236,9 @@ struct cpufreq_driver {
 	int	(*exit)		(struct cpufreq_policy *policy);
 	int	(*suspend)	(struct cpufreq_policy *policy);
 	int	(*resume)	(struct cpufreq_policy *policy);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	int	(*init_sysfs)	(struct cpufreq_policy *policy);
+#endif
 	struct freq_attr	**attr;
 };
 
@@ -310,6 +313,14 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
  *                        CPUFREQ 2.6. INTERFACE                     *
  *********************************************************************/
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+int cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
+int cpufreq_set_speed(const char *govstr, int fraction);
+unsigned cpufreq_get_freq_max(unsigned *max_out);
+
+int cpufreq_minimum_reserve(int freq);
+int cpufreq_minimum_unreserve(int freq);
+#endif
 int cpufreq_update_policy(unsigned int cpu);
 
 #ifdef CONFIG_CPU_FREQ
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index a2c819d3c96e3d367b4de11b852efea54a79e80d..e460a86f347b8538aad8e2f3458c35b19034aa07 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -146,8 +146,13 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#if defined(CONFIG_BCM_KF_CPP_SUPPORT)
+#define for_each_cpu_and(cpu, mask, ttt)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)ttt)
+#else
 #define for_each_cpu_and(cpu, mask, and)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#endif
 #else
 /**
  * cpumask_first - get the first cpu in a cpumask
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf92d72a06bcfebc64b345d3590977e5a40..2a387f0fd1986817a7d496b2bc121abdb3550f18 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -171,7 +171,15 @@ struct aead_request {
 	struct scatterlist *assoc;
 	struct scatterlist *src;
 	struct scatterlist *dst;
-
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	unsigned int data_offset;
+	u8           next_hdr;
+#else
+	int alloc_buff_spu;
+	int headerLen;
+#endif
+#endif
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
diff --git a/include/linux/devinfo.h b/include/linux/devinfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..160ffef2fddb51257d894573954a426011eac58d
--- /dev/null
+++ b/include/linux/devinfo.h
@@ -0,0 +1,45 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#ifndef __DEVINFO_H__
+#define __DEVINFO_H__
+
+#include <linux/brcm_dll.h>
+
+//#define CC_DEVINFO_SUPPORT_DEBUG
+#ifndef DEVINFO_NULL_STMT
+#define DEVINFO_NULL_STMT                   do { /* NULL BODY */ } while (0)
+#endif
+
+#define DEVINFO_HTABLE_SIZE 64
+#define DEVINFO_MAX_ENTRIES 256
+#define DEVINFO_IX_INVALID 0
+#define DEVINFO_NULL ((DevInfo_t*)NULL)
+#define DEVINFO_DONE 1
+
+#include <linux/if_ether.h>
+
+typedef struct devinfo_entry_t {
+    uint16_t idx;
+    uint16_t flags;
+    uint16_t vendor_id; //!< Vendor (e.g. "Microsoft")
+    uint16_t os_id; //!< OS/Device name (e.g. "Windows 8", or "iPhone 4")
+    uint16_t class_id; //!< OS Class (e.g. "Windows")
+    uint16_t type_id; //!< Device Type (e.g. "Phone")
+    uint32_t dev_id; //!< Device Name (e.g. "iPhone 4")
+} DevInfoEntry_t;
+
+typedef struct devinfo_t {
+    struct dll_t node;
+    struct devinfo_t *chain_p;
+
+    DevInfoEntry_t entry;
+    uint8_t mac[ETH_ALEN];
+} __attribute__((packed)) DevInfo_t;
+
+
+extern uint16_t devinfo_lookup( const uint8_t *mac );
+extern void devinfo_get( uint16_t idx, DevInfoEntry_t *entry );
+extern void devinfo_set( const DevInfoEntry_t *entry );
+extern void devinfo_getmac( uint16_t idx, uint8_t *mac );
+extern int devinfo_init( void );
+#endif
+#endif
diff --git a/include/linux/dpi_ctk.h b/include/linux/dpi_ctk.h
new file mode 100644
index 0000000000000000000000000000000000000000..625c59b32cf946609dc5eb65d87cfdada6f5f5e1
--- /dev/null
+++ b/include/linux/dpi_ctk.h
@@ -0,0 +1,24 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#ifndef __DPI_CTK_H__
+#define __DPI_CTK_H__
+
+#include<linux/skbuff.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+
+#define APPID_STATUS_ONGOING    ((uint16_t) (1 << 0))
+#define APPID_STATUS_IDENTIFIED ((uint16_t) (1 << 1))
+#define APPID_STATUS_FINAL      ((uint16_t) (1 << 2))
+#define APPID_STATUS_NOMORE     ((uint16_t) (1 << 3))
+//#define APPID_STATUS_RESYNC     ((uint16_t) (1 << 4))
+#define DEVID_STATUS_ONGOING    ((uint16_t) (1 << 5))
+#define DEVID_STATUS_IDENTIFIED ((uint16_t) (1 << 6))
+#define DEVID_STATUS_FINAL      ((uint16_t) (1 << 7))
+#define DEVID_STATUS_NOMORE     ((uint16_t) (1 << 8))
+
+#define CTK_INIT_FROM_WAN      ((uint16_t) (1 << 15))
+
+#define IS_CTK_INIT_FROM_WAN(ct)  \
+        ( ((ct)->dpi.flags & CTK_INIT_FROM_WAN) == CTK_INIT_FROM_WAN )
+
+#endif /* __DPI_CTK_H__ */
+#endif
diff --git a/include/linux/dpistats.h b/include/linux/dpistats.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d8247ceed225543912c0bb26c37d0b0dbee801b
--- /dev/null
+++ b/include/linux/dpistats.h
@@ -0,0 +1,46 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#ifndef __DPISTATS_H__
+#define __DPISTATS_H__
+
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/brcm_dll.h>
+
+//#define CC_DPISTATS_SUPPORT_DEBUG
+#ifndef DPISTATS_NULL_STMT
+#define DPISTATS_NULL_STMT                   do { /* NULL BODY */ } while (0)
+#endif
+
+#define DPISTATS_HTABLE_SIZE 512
+#define DPISTATS_MAX_ENTRIES 4096
+#define DPISTATS_IX_INVALID 0
+#define DPISTATS_NULL ((DpiStats_t*)NULL)
+
+typedef struct ctk_stats_t {
+    unsigned long pkts;
+    unsigned long long bytes;
+    unsigned long ts;
+} CtkStats_t;
+
+typedef struct dpistats_entry_t {
+    uint32_t idx;
+    dpi_info_t result;
+    CtkStats_t upstream;
+    CtkStats_t dnstream;
+} DpiStatsEntry_t;
+
+typedef struct dpistats_t {
+    struct dll_t node;
+    struct dpistats_t *chain_p;
+
+    DpiStatsEntry_t entry;
+    CtkStats_t evict_up;
+    CtkStats_t evict_dn;
+} __attribute__((packed)) DpiStats_t;
+
+extern uint32_t dpistats_lookup( const dpi_info_t *res_p );
+extern void dpistats_info( uint32_t idx, const DpiStatsEntry_t *stats_p );
+extern void dpistats_update( uint32_t idx, const DpiStatsEntry_t *stats_p );
+extern void dpistats_show( struct seq_file *s );
+extern int dpistats_init( void );
+#endif
+#endif
diff --git a/include/linux/flwstif.h b/include/linux/flwstif.h
new file mode 100644
index 0000000000000000000000000000000000000000..61a0e3da137e36333e030c2badc97c572c69c2c8
--- /dev/null
+++ b/include/linux/flwstif.h
@@ -0,0 +1,65 @@
+#ifndef __FLWSTIF_H_INCLUDED__
+#define __FLWSTIF_H_INCLUDED__
+
+                /*--------------------------------------*/
+                /* flwstif.h and flwstif.c for Linux OS */
+                /*--------------------------------------*/
+
+/* 
+* <:copyright-BRCM:2014:DUAL/GPL:standard
+* 
+*    Copyright (c) 2014 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+#if defined(__KERNEL__)                 /* Kernel space compilation         */
+#include <linux/types.h>                /* LINUX ISO C99 7.18 Integer types */
+#else                                   /* User space compilation           */
+#include <stdint.h>                     /* C-Lib ISO C99 7.18 Integer types */
+#endif
+
+typedef struct {
+    unsigned long rx_packets;
+    unsigned long rx_bytes;
+    unsigned long pollTS_ms; // Poll timestamp in ms
+}FlwStIf_t;
+
+typedef enum {
+    FLWSTIF_REQ_GET,
+    FLWSTIF_REQ_PUSH,
+    FLWSTIF_REQ_MAX
+}FlwStIfReq_t;
+
+extern uint32_t flwStIf_request( FlwStIfReq_t req, void *ptr, uint32_t param1,
+                                 uint32_t param2, uint32_t param3 );
+
+typedef int (* flwStIfGetHook_t)( uint32_t flwIdx, FlwStIf_t *flwSt_p );
+
+typedef int (* flwStIfPushHook_t)( void *ctk1, void *ctk2, uint32_t direction,
+                                   FlwStIf_t *flwSt_p );
+
+extern void flwStIf_bind(flwStIfGetHook_t flwStIfGetHook, flwStIfPushHook_t flwStIfPushHook);
+
+#endif /* __FLWSTIF_H_INCLUDED__ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 25c40b9f848afeac9313be994c6e8ad03683a0c5..421eb42152513fc7403300fe622eb2ed939673f1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -379,6 +379,10 @@ struct inodes_stat_t {
 #define SYNC_FILE_RANGE_WRITE		2
 #define SYNC_FILE_RANGE_WAIT_AFTER	4
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+#define MAX_PAGES_PER_RECVFILE		32
+#endif
+
 #ifdef __KERNEL__
 
 #include <linux/linkage.h>
@@ -489,6 +493,12 @@ struct iattr {
  */
 #include <linux/quota.h>
 
+/*
+ * Maximum number of layers of fs stack.  Needs to be limited to
+ * prevent kernel stack overflow
+ */
+#define FILESYSTEM_MAX_STACK_DEPTH 2
+
 /** 
  * enum positive_aop_returns - aop return codes with specific semantics
  *
@@ -1507,6 +1517,11 @@ struct super_block {
 
 	/* Being remounted read-only */
 	int s_readonly_remount;
+
+	/*
+	 * Indicates how deep in a filesystem stack this SB is
+	 */
+	int s_stack_depth;
 };
 
 /* superblock cache pruning functions */
@@ -1664,6 +1679,8 @@ struct inode_operations {
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
+	struct file *(*open) (struct dentry *, struct file *,
+			      const struct cred *);
 } ____cacheline_aligned;
 
 struct seq_file;
@@ -2021,6 +2038,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
 extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
 				   const char *, int);
+extern struct file *vfs_open(struct path *, struct file *, const struct cred *);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
 				 const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/gbpm.h b/include/linux/gbpm.h
new file mode 100644
index 0000000000000000000000000000000000000000..77d0a1a33b7d7154c50f4251e13a9443d8d80b38
--- /dev/null
+++ b/include/linux/gbpm.h
@@ -0,0 +1,115 @@
+#ifndef __GBPM_H_INCLUDED__
+#define __GBPM_H_INCLUDED__
+
+/*
+ *
+<:copyright-BRCM:2007:DUAL/GPL:standard
+
+   Copyright (c) 2007 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license 
+agreement governing use of this software, this software is licensed 
+to you under the terms of the GNU General Public License version 2 
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give 
+   you permission to link this software with independent modules, and 
+   to copy and distribute the resulting executable under terms of your 
+   choice, provided that you also meet, for each linked independent 
+   module, the terms and conditions of the license of that module. 
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications 
+   of the software.  
+
+Not withstanding the above, under no circumstances may you combine 
+this software in any way with any other Broadcom software provided 
+under a license other than the GPL, without Broadcom's express prior 
+written consent. 
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ * File Name : gbpm.h
+ *
+ *******************************************************************************
+ */
+#define GBPM_VERSION             "v0.1"
+#define GBPM_VER_STR             GBPM_VERSION " " __DATE__ " " __TIME__
+#define GBPM_MODNAME             "Broadcom GBPM "
+
+#define GBPM_ERROR               (-1)
+#define GBPM_SUCCESS             0
+
+#define GBPM_RXCHNL_MAX              4
+#define GBPM_RXCHNL_DISABLED         0
+#define GBPM_RXCHNL_ENABLED          1
+
+
+typedef enum {
+    GBPM_PORT_ETH,
+    GBPM_PORT_XTM,
+    GBPM_PORT_FWD,
+    GBPM_PORT_WLAN,
+    GBPM_PORT_USB,
+    GBPM_PORT_MAX
+} gbpm_port_t;
+
+typedef void (* gbpm_evt_hook_t) (void);
+
+typedef void (* gbpm_thresh_hook_t)(void);
+typedef void (* gbpm_upd_buf_lvl_hook_t)(int);
+typedef void (* gbpm_status_hook_t)(void);
+
+
+typedef int (* gbpm_dyn_buf_lvl_hook_t) (void);
+typedef int (* gbpm_alloc_mult_hook_t)( uint32_t, uint32_t *);
+typedef void (* gbpm_free_mult_hook_t)( uint32_t, uint32_t *);
+typedef uint32_t * (* gbpm_alloc_hook_t)(void);
+typedef void (* gbpm_free_hook_t)( uint32_t *);
+typedef int (* gbpm_resv_rx_hook_t)(gbpm_port_t, uint32_t, uint32_t, uint32_t );
+typedef int (* gbpm_unresv_rx_hook_t)( gbpm_port_t, uint32_t );
+typedef uint32_t (* gbpm_get_total_bufs_hook_t)(void);
+typedef uint32_t (* gbpm_get_avail_bufs_hook_t)(void);
+typedef uint32_t (* gbpm_get_max_dyn_bufs_hook_t)(void);
+
+
+int gbpm_get_dyn_buf_level(void);
+int gbpm_resv_rx_buf( gbpm_port_t port, uint32_t chnl,
+        uint32_t num_rx_buf, uint32_t bulk_alloc_count );
+int gbpm_unresv_rx_buf( gbpm_port_t port, uint32_t chnl );
+
+int gbpm_alloc_mult_buf( uint32_t num, uint32_t *buf_p );
+void gbpm_free_mult_buf( uint32_t num, uint32_t *buf_p );
+
+uint32_t *gbpm_alloc_buf( void );
+void gbpm_free_buf( uint32_t *buf_p );
+
+uint32_t gbpm_get_total_bufs( void );
+#define CONFIG_GBPM_API_HAS_GET_TOTAL_BUFS 1
+
+uint32_t gbpm_get_avail_bufs( void );
+#define CONFIG_GBPM_API_HAS_GET_AVAIL_BUFS 1
+
+uint32_t gbpm_get_max_dyn_bufs( void );
+
+
+void gbpm_queue_work(void);
+void gbpm_bind( gbpm_dyn_buf_lvl_hook_t gbpm_dyn_buf_lvl, 
+                gbpm_alloc_mult_hook_t gbpm_alloc_mult,
+                gbpm_free_mult_hook_t gbpm_free_mult,
+                gbpm_alloc_hook_t gbpm_alloc,
+                gbpm_free_hook_t gbpm_free,
+                gbpm_resv_rx_hook_t gbpm_resv_rx, 
+                gbpm_unresv_rx_hook_t gbpm_unresv_rx ,
+                gbpm_get_total_bufs_hook_t gbpm_get_total_bufs ,
+                gbpm_get_avail_bufs_hook_t gbpm_get_avail_bufs,
+                gbpm_get_max_dyn_bufs_hook_t gbpm_get_max_dyn_bufs );
+
+void gbpm_unbind( void );
+
+#endif  /* defined(__GBPM_H_INCLUDED__) */
+
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b7df95e3d063bbbcaece35aafe60fad774..d126af16e819881f9e0625aae8ecbd88b6d170f5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -11,8 +11,30 @@ struct vm_area_struct;
 
 /* Plain integer GFP bitmasks. Do not use this directly. */
 #define ___GFP_DMA		0x01u
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+/* due to the way kernel only supports up to 4 ZONEs and some tricks
+ * defined in this header file, in order to introduce a new GFP_ACP
+ * flag in to GFP, there are some if-defs implemented to adjust
+ * GFP values. GFP_ACP will use one of the GFP_HIGHMEM or GFP_DMA32
+ * value if they are not used.  If all 3 are enabled, then compilation
+ * will fail. */
+#ifndef CONFIG_ZONE_DMA32
+/* if DMA32 is not defined, then GFP_ACP will share value with GFP_DMA32 */
 #define ___GFP_HIGHMEM		0x02u
 #define ___GFP_DMA32		0x04u
+#define ___GFP_ACP		0x04u
+#elif !defined(CONFIG_HIGHMEM)
+/* if HIGHMEM is not defined, then GFP_ACP will share value with GFP_HIGHMEM */
+#define ___GFP_HIGHMEM		0x02u
+#define ___GFP_ACP		0x02u
+#define ___GFP_DMA32		0x04u
+#else
+#error gfp.h -- cannot have all ACP, DMA32, highmem enabled
+#endif
+#else
+#define ___GFP_HIGHMEM		0x02u
+#define ___GFP_DMA32		0x04u
+#endif
 #define ___GFP_MOVABLE		0x08u
 #define ___GFP_WAIT		0x10u
 #define ___GFP_HIGH		0x20u
@@ -51,7 +73,12 @@ struct vm_area_struct;
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define __GFP_ACP	((__force gfp_t)___GFP_ACP)
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_ACP|__GFP_DMA32|__GFP_MOVABLE)
+#else
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+#endif
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -138,7 +165,17 @@ struct vm_area_struct;
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
 /* Do not use these with a slab allocator */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#ifndef CONFIG_ZONE_DMA32
+#define GFP_SLAB_BUG_MASK (__GFP_HIGHMEM|~__GFP_BITS_MASK)
+#elif !defined(CONFIG_HIGHMEM)
+#define GFP_SLAB_BUG_MASK (__GFP_DMA32|~__GFP_BITS_MASK)
+#else
+#error gfp.h -- cannot have all ACP, DMA32, highmem enabled
+#endif
+#else
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
+#endif
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
@@ -148,6 +185,9 @@ struct vm_area_struct;
 /* 4GB DMA on some platforms */
 #define GFP_DMA32	__GFP_DMA32
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define GFP_ACP		__GFP_ACP
+#endif
 /* Convert GFP flags to their corresponding migrate type */
 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 {
@@ -179,6 +219,17 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 #define OPT_ZONE_DMA32 ZONE_NORMAL
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#ifndef CONFIG_ZONE_DMA32
+#undef OPT_ZONE_DMA32
+#define OPT_ZONE_DMA32 ZONE_ACP
+#elif !defined(CONFIG_HIGHMEM)
+#undef OPT_ZONE_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_ACP
+#else
+#error gfp.h -- cannot have all ACP, DMA32, highmem enabled
+#endif
+#endif
 /*
  * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
  * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 474f2a51cf0af66474be229b626403975a36683c..23506be3f39404639159748ca6e0c2d35459ae31 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -80,7 +80,11 @@ struct icmphdr {
 		__be16	mtu;
 	} frag;
   } un;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
diff --git a/include/linux/if.h b/include/linux/if.h
index f995c663c493baf4d369bcf05d3bed20d38dcd1f..02612b04413f778bb4b98e895946f5534e072b7f 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -21,7 +21,12 @@
 
 #include <linux/types.h>		/* for "__kernel_caddr_t" et al	*/
 #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
+#if !defined(CONFIG_BCM_IN_KERNEL)
+//userspace already has __user defined....
+//#include <bcm_local_kernel_include/linux/compiler.h>		/* for "__user" et al           */
+#else
 #include <linux/compiler.h>		/* for "__user" et al           */
+#endif
 
 #define	IFNAMSIZ	16
 #define	IFALIASZ	256
@@ -82,6 +87,25 @@
 #define IFF_TEAM_PORT	0x40000		/* device used as team port */
 #define IFF_SUPP_NOFCS	0x80000		/* device supports sending custom FCS */
 
+#if defined(CONFIG_BCM_KF_ENET_SWITCH)
+#define IFF_HW_SWITCH  0x40000
+#define IFF_EXT_SWITCH 0x80000             /* Indicates the interface is an external switch interface */
+
+#endif /* CONFIG_BCM_KF_ENET_SWITCH */
+
+#if defined(CONFIG_BCM_KF_IP)
+#define IFF_EPON_IF    0x100000            /* Indicates SFU hardware switching.  */
+#endif
+#if defined(CONFIG_BCM_KF_WANDEV)
+#define IFF_WANDEV     0x200000            /* avoid WAN bridge traffic leaking */
+#endif
+#if defined(CONFIG_BCM_KF_VLAN)
+#define IFF_BCM_VLAN   0x400000            /* Broadcom VLAN Interface */
+#endif
+/* #if defined(CONFIG_BCM_KF_PPP) */
+#define IFF_PPP        0x800000            /* PPP Interface */
+/* #endif */
+
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
 #define IF_GET_PROTO	0x0002
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6d722f41ee7c760010ae26f3deea4fea1c9ca4aa..dbe3218513967320968af0645888ec6e29b757fe 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -40,6 +40,10 @@
 #define ARPHRD_METRICOM	23		/* Metricom STRIP (new IANA id)	*/
 #define	ARPHRD_IEEE1394	24		/* IEEE 1394 IPv4 - RFC 2734	*/
 #define ARPHRD_EUI64	27		/* EUI-64                       */
+#if defined(CONFIG_BCM_KF_IP)
+#define ARPHRD_CPCS     28              /* CPCS                         */
+#define ARPHRD_DSL      29              /* ADSL                         */
+#endif
 #define ARPHRD_INFINIBAND 32		/* InfiniBand			*/
 
 /* Dummy types for non ARP hardware */
@@ -91,6 +95,9 @@
 #define ARPHRD_PHONET	820		/* PhoNet media type		*/
 #define ARPHRD_PHONET_PIPE 821		/* PhoNet pipe header		*/
 #define ARPHRD_CAIF	822		/* CAIF media type		*/
+#ifdef CONFIG_BCM_KF_MHI
+#define ARPHRD_MHI	1823		/* Modem-Host IF		*/
+#endif
 
 #define ARPHRD_VOID	  0xFFFF	/* Void type, nothing is known */
 #define ARPHRD_NONE	  0xFFFE	/* zero header length */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dd3f201396407dc7f4cac0c15452278c850ba261..fb9d3c67745df7c53621692b36d0fe3e25f9e86e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -43,6 +43,55 @@
 #define BRCTL_SET_PATH_COST 17
 #define BRCTL_GET_FDB_ENTRIES 18
 
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_IGMP)
+#define BRCTL_ENABLE_SNOOPING                   21
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+#define BRCTL_ENABLE_IGMP_RATE_LIMIT            23
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_MLD)
+#define BRCTL_MLD_ENABLE_SNOOPING               24
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+#define BRCTL_ADD_FDB_ENTRIES                   26
+#define BRCTL_DEL_FDB_ENTRIES                   27
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+#define BRCTL_DEL_DYN_FDB_ENTRIES               28
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_NETFILTER)
+#define BRCTL_SET_FLOWS                         29
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_UNI_UNI)
+#define BRCTL_SET_UNI_UNI_CTRL                  30
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_IGMP)
+#define BRCTL_ENABLE_IGMP_LAN2LAN_MC            31
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_MLD)
+#define BRCTL_ENABLE_MLD_LAN2LAN_MC             32
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_IGMP)
+#define BRCTL_GET_IGMP_LAN_TO_LAN_MCAST_ENABLED 33
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_MLD)
+#define BRCTL_GET_MLD_LAN_TO_LAN_MCAST_ENABLED  34
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+#define BRCTL_DEL_STATIC_FDB_ENTRIES            35
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+#define BRCTL_ADD_FDB_DYNAMIC_ENTRIES           36
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || (defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT))
+#define BRCTL_GET_BR_FDB_LIMIT                  37
+#define BRCTL_SET_BR_FDB_LIMIT                  38
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_STP_LOOP)
+#define BRCTL_MARK_DEDICATED_STP                39
+#define BRCTL_BLOCK_STP                         40
+#endif
+
 #define BR_STATE_DISABLED 0
 #define BR_STATE_LISTENING 1
 #define BR_STATE_LEARNING 2
@@ -94,18 +143,50 @@ struct __fdb_entry {
 	__u32 ageing_timer_value;
 	__u8 port_hi;
 	__u8 pad0;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	__u16 vid;
+#else
 	__u16 unused;
+#endif
 };
 
 #ifdef __KERNEL__
 
 #include <linux/netdevice.h>
 
+#if defined(CONFIG_BCM_KF_BRIDGE_PORT_ISOLATION) || defined(CONFIG_BCM_KF_BRIDGE_STP)
+enum {
+	BREVT_IF_CHANGED,
+	BREVT_STP_STATE_CHANGED
+};
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_PORT_ISOLATION)
+extern struct net_device *bridge_get_next_port(char *brName, unsigned int *portNum);
+extern int register_bridge_notifier(struct notifier_block *nb);
+extern int unregister_bridge_notifier(struct notifier_block *nb);
+extern void bridge_get_br_list(char *brList, const unsigned int listSize);
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+struct stpPortInfo {
+	char portName[IFNAMSIZ];
+	unsigned char stpState;
+};
+extern int register_bridge_stp_notifier(struct notifier_block *nb);
+extern int unregister_bridge_stp_notifier(struct notifier_block *nb);
+#endif
+
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+typedef struct net_device *(* br_fb_process_hook_t)(struct sk_buff *skb_p, uint16_t h_proto, struct net_device *txDev );
+extern void br_fb_bind(br_fb_process_hook_t brFbProcessHook);
+#endif
+
 #endif
 
 #endif
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 56d907a2c80478d4c2932a3856662b59af2fdc1f..6cb75db5159f9d6c4b8aca654598077963bc3c0b 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -92,6 +92,11 @@
 #define ETH_P_EDSA	0xDADA		/* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_AF_IUCV   0xFBFB		/* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
 
+#if defined(CONFIG_BCM_KF_VLAN)
+#define ETH_P_8021AG	0x8902		/* 802.1ag Connectivity Fault Mgmt */
+#define	ETH_P_8023AH	0x8809      /* 802.3ah Ethernet OAM */
+#endif
+
 /*
  *	Non DIX types. Won't clash for 1500 types.
  */
@@ -119,6 +124,11 @@
 #define ETH_P_PHONET	0x00F5		/* Nokia Phonet frames          */
 #define ETH_P_IEEE802154 0x00F6		/* IEEE802.15.4 frame		*/
 #define ETH_P_CAIF	0x00F7		/* ST-Ericsson CAIF protocol	*/
+#ifdef CONFIG_BCM_KF_MHI
+#define ETH_P_MHI	0x00F8		/* Renesas MHI protocol		*/
+#define ETH_P_RAW	0x00F9		/* RAW access to frames		*/
+#define ETH_P_MHDP	0x00FA		/* MHDP data frames		*/
+#endif
 
 /*
  *	This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4b24ff453aee56029a2b2e0b5ee233b9afab6977..bafeed192ec2a959a81c57659e4bf759ced66038 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -15,6 +15,7 @@ struct rtnl_link_stats {
 	__u32	rx_dropped;		/* no space in linux buffers	*/
 	__u32	tx_dropped;		/* no space available in linux	*/
 	__u32	multicast;		/* multicast packets received	*/
+
 	__u32	collisions;
 
 	/* detailed rx_errors: */
@@ -35,6 +36,16 @@ struct rtnl_link_stats {
 	/* for cslip etc */
 	__u32	rx_compressed;
 	__u32	tx_compressed;
+#if defined(CONFIG_BCM_KF_BLOG)
+	__u32   tx_multicast_packets;  /* multicast packets transmitted */
+	__u32   rx_multicast_bytes;  /* multicast bytes recieved */ 
+	__u32   tx_multicast_bytes;  /* multicast bytes transmitted */
+	__u32   rx_broadcast_packets;  /* broadcast packets recieved */
+	__u32   tx_broadcast_packets;  /* broadcast packets transmitted */
+	/* NOTE: Unicast packets are not counted but are instead calculated as needed
+	using total - (broadcast + multicast) */
+	__u32   rx_unknown_packets;  /* unknown protocol packets recieved */
+#endif
 };
 
 /* The main device statistics structure */
@@ -48,6 +59,8 @@ struct rtnl_link_stats64 {
 	__u64	rx_dropped;		/* no space in linux buffers	*/
 	__u64	tx_dropped;		/* no space available in linux	*/
 	__u64	multicast;		/* multicast packets received	*/
+
+
 	__u64	collisions;
 
 	/* detailed rx_errors: */
@@ -68,6 +81,16 @@ struct rtnl_link_stats64 {
 	/* for cslip etc */
 	__u64	rx_compressed;
 	__u64	tx_compressed;
+#if defined(CONFIG_BCM_KF_BLOG)
+	__u64   tx_multicast_packets;  /* multicast packets transmitted */
+	__u64   rx_multicast_bytes;  /* multicast bytes recieved */ 
+	__u64   tx_multicast_bytes;  /* multicast bytes transmitted */
+	__u64   rx_broadcast_packets;  /* broadcast packets recieved */
+	__u64   tx_broadcast_packets;  /* broadcast packets transmitted */
+	/* NOTE: Unicast packets are not counted but are instead calculated as needed
+	using total - (broadcast + multicast) */
+	__u64   rx_unknown_packets;  /* unknown protocol packets recieved */
+#endif
 };
 
 /* The struct should be in sync with struct ifmap */
diff --git a/include/linux/if_mhi.h b/include/linux/if_mhi.h
new file mode 100644
index 0000000000000000000000000000000000000000..84c300dd9ab6c660575495286685f0c3b1c6dfcf
--- /dev/null
+++ b/include/linux/if_mhi.h
@@ -0,0 +1,53 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: if_mhi.h
+ *
+ * 'Modem-Host Interface' kernel definitions
+ */
+
+#ifndef LINUX_IF_MHI_H
+#define LINUX_IF_MHI_H
+
+/* Packet sizes */
+
+#define MHI_MIN_MTU		260
+#define MHI_MAX_MTU		65540
+
+#define MHI_MTU			MHI_MAX_MTU
+
+/* Packet socket options */
+#define MHI_DROP_COUNT		1
+
+/* Ioctl definitions */
+
+
+#endif /* LINUX_IF_MHI_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 16b92d008bed842e141f77664e94446cbe652dcf..13c979c9846494356282c1aca5bb5aee3206ba02 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -61,6 +61,9 @@ struct ip_tunnel_6rd {
 	__be32			relay_prefix;
 	__u16			prefixlen;
 	__u16			relay_prefixlen;
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+	__be32			br_addr;
+#endif
 };
 
 enum {
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a810987cb80e47cd2a344dd1e3d289a6d3b46371..80b25498f483ca9886a40429697a699124ef0baf 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -372,6 +372,10 @@ enum vlan_ioctl_cmds {
 	SET_VLAN_FLAG_CMD,
 	GET_VLAN_REALDEV_NAME_CMD, /* If this works, you know it's a VLAN device, btw */
 	GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	,
+	SET_VLAN_NFMARK_TO_PRIORITY_CMD
+#endif	
 };
 
 enum vlan_flags {
@@ -399,6 +403,9 @@ struct vlan_ioctl_args {
 		unsigned int name_type;
 		unsigned int bind_type;
 		unsigned int flag; /* Matches vlan_dev_priv flags */
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+		int nfmark_to_priority;
+#endif
         } u;
 
 	short vlan_qos;   
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 82de336b81550af6a64fff632037acec7e145f2a..69559f286f42ae08867342945ab9b28e24fe46af 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -19,6 +19,10 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 
+#if defined(CONFIG_BCM_KF_IGMP)
+#define CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION
+#endif
+
 /*
  *	IGMP protocol structures
  */
@@ -32,7 +36,11 @@ struct igmphdr {
 	__u8 code;		/* For newer IGMP */
 	__sum16 csum;
 	__be32 group;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 /* V3 group record types [grec_type] */
 #define IGMPV3_MODE_IS_INCLUDE		1
@@ -48,7 +56,11 @@ struct igmpv3_grec {
 	__be16	grec_nsrcs;
 	__be32	grec_mca;
 	__be32	grec_src[0];
-};
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+    } LINUX_NET_PACKED;
+#else
+    };
+#endif
 
 struct igmpv3_report {
 	__u8 type;
@@ -57,7 +69,11 @@ struct igmpv3_report {
 	__be16 resv2;
 	__be16 ngrec;
 	struct igmpv3_grec grec[0];
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 struct igmpv3_query {
 	__u8 type;
@@ -78,7 +94,11 @@ struct igmpv3_query {
 	__u8 qqic;
 	__be16 nsrcs;
 	__be32 srcs[0];
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 #define IGMP_HOST_MEMBERSHIP_QUERY	0x11	/* From RFC1112 */
 #define IGMP_HOST_MEMBERSHIP_REPORT	0x12	/* Ditto */
@@ -189,6 +209,9 @@ struct ip_mc_list {
 	unsigned int		sfmode;
 	struct ip_sf_list	*sources;
 	struct ip_sf_list	*tomb;
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+	unsigned int		osfmode;
+#endif
 	unsigned long		sfcount[2];
 	union {
 		struct ip_mc_list *next;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 773dbc9e1c5f623085eaf03fba5aa0bff775e450..adb6e976207081c370a47749cf4aae6419b3fa0b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -685,7 +685,7 @@ static inline void softirq_early_init(void) { }
  * if more than one irq occurred.
  */
 
-#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) 
+#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
 static inline unsigned long probe_irq_on(void)
 {
 	return 0;
diff --git a/include/linux/ip.h b/include/linux/ip.h
index bd0a2a8631c60e6620578acc68c363540aea1755..32732bc812f35cf661e00f540a0ca376fa6be2ae 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -102,7 +102,11 @@ struct iphdr {
 	__be32	saddr;
 	__be32	daddr;
 	/*The options start here. */
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
diff --git a/include/linux/iqos.h b/include/linux/iqos.h
new file mode 100644
index 0000000000000000000000000000000000000000..5cfcf1de9fa89b6dad2c691a97c9e86391ffbd2a
--- /dev/null
+++ b/include/linux/iqos.h
@@ -0,0 +1,163 @@
+#ifndef __IQOS_H_INCLUDED__
+#define __IQOS_H_INCLUDED__
+
+/*
+<:copyright-BRCM:2009:DUAL/GPL:standard
+
+   Copyright (c) 2009 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+
+/*
+ *******************************************************************************
+ * File Name : ingqos.h
+ *
+ *******************************************************************************
+ */
+#define IQOS_VERSION             "v0.1"
+#define IQOS_VER_STR             IQOS_VERSION " " __DATE__ " " __TIME__
+#define IQOS_MODNAME             "Broadcom IQoS "
+
+#define IQOS_ERROR               (-1)
+#define IQOS_SUCCESS             0
+
+typedef enum {
+    IQOS_IPPROTO_TCP = 6,
+    IQOS_IPPROTO_UDP = 17,
+    IQOS_IPPROTO_MAX
+} iqos_ipproto_t;
+
+typedef enum {
+    IQOS_ENT_DYN,
+    IQOS_ENT_STAT,
+    IQOS_ENT_MAX
+} iqos_ent_t;
+
+typedef enum {
+    IQOS_PRIO_LOW,
+    IQOS_PRIO_HIGH,
+    IQOS_PRIO_MAX
+} iqos_prio_t;
+
+typedef enum {
+    IQOS_CONG_STATUS_LO,
+    IQOS_CONG_STATUS_HI,
+    IQOS_CONG_STATUS_MAX
+} iqos_cong_status_t;
+
+typedef enum {
+    IQOS_STATUS_DISABLE,
+    IQOS_STATUS_ENABLE,
+    IQOS_STATUS_MAX
+} iqos_status_t;
+
+
+
+#define IQOS_INVALID_NEXT_IX      0
+#define IQOS_INVALID_PORT         0
+
+typedef uint8_t (* iqos_add_L4port_hook_t)( iqos_ipproto_t ipProto, 
+        uint16_t destPort, iqos_ent_t ent, iqos_prio_t prio );
+
+typedef uint8_t (* iqos_rem_L4port_hook_t)( iqos_ipproto_t ipProto, 
+        uint16_t destPort, iqos_ent_t ent );
+
+typedef int (* iqos_prio_L4port_hook_t)( iqos_ipproto_t ipProto, 
+        uint16_t destPort );
+
+
+uint8_t iqos_add_L4port( iqos_ipproto_t ipProto, uint16_t destPort, 
+        iqos_ent_t ent, iqos_prio_t prio );
+
+uint8_t iqos_rem_L4port( iqos_ipproto_t ipProto, uint16_t destPort, 
+        iqos_ent_t ent );
+
+int iqos_prio_L4port( iqos_ipproto_t ipProto, uint16_t destPort );
+
+void iqos_bind( iqos_add_L4port_hook_t  iqos_add, 
+    iqos_rem_L4port_hook_t  iqos_rem, iqos_prio_L4port_hook_t iqos_prio );
+
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#define IQOS_LOCK_IRQSAVE()         spin_lock_irqsave( &iqos_lock_g, flags )
+#define IQOS_UNLOCK_IRQRESTORE()   spin_unlock_irqrestore( &iqos_lock_g, flags )
+#define IQOS_LOCK_BH()              spin_lock_bh( &iqos_lock_g )
+#define IQOS_UNLOCK_BH()            spin_unlock_bh( &iqos_lock_g )
+#else
+#define IQOS_LOCK_IRQSAVE()         local_irq_save( flags )
+#define IQOS_UNLOCK_IRQRESTORE()    local_irq_restore( flags )
+#define IQOS_LOCK_BH()              NULL_STMT
+#define IQOS_UNLOCK_BH()            NULL_STMT
+#endif
+
+#if (defined(CONFIG_BCM_INGQOS) || defined(CONFIG_BCM_INGQOS_MODULE))
+#define IQOS_RXCHNL_MAX              4
+#define IQOS_RXCHNL_DISABLED         0
+#define IQOS_RXCHNL_ENABLED          1
+#define IQOS_MAX_RX_RING_SIZE        4096
+
+typedef enum {
+    IQOS_IF_ENET,
+    IQOS_IF_ENET_RXCHNL0 = IQOS_IF_ENET,
+    IQOS_IF_ENET_RXCHNL1,
+    IQOS_IF_ENET_RXCHNL2,
+    IQOS_IF_ENET_RXCHNL3,
+    IQOS_IF_XTM,
+    IQOS_IF_XTM_RXCHNL0 = IQOS_IF_XTM,
+    IQOS_IF_XTM_RXCHNL1,
+    IQOS_IF_XTM_RXCHNL2,
+    IQOS_IF_XTM_RXCHNL3,
+    IQOS_IF_FWD,
+    IQOS_IF_FWD_RXCHNL0 = IQOS_IF_FWD,
+    IQOS_IF_FWD_RXCHNL1,
+    IQOS_IF_WL,
+    IQOS_IF_USB,
+    IQOS_IF_MAX,
+} iqos_if_t;
+
+typedef void (* iqos_status_hook_t)(void);
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+typedef uint32_t (* iqos_fap_ethRxDqmQueue_hook_t)(uint32_t chnl);
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+typedef uint32_t (* iqos_fap_xtmRxDqmQueue_hook_t)(uint32_t chnl);
+#endif
+typedef void (* iqos_fap_set_status_hook_t)(int);
+
+typedef void (* iqos_fap_add_L4port_hook_t)( uint8_t ipProto, uint16_t dport, 
+            uint8_t ent, uint8_t prio );
+typedef void (* iqos_fap_rem_L4port_hook_t)( uint8_t ipProto, uint16_t dport,
+            uint8_t ent );
+typedef void (* iqos_fap_dump_porttbl_hook_t)( uint8_t ipProto );
+#endif
+
+iqos_cong_status_t iqos_get_sys_cong_status( void );
+iqos_cong_status_t iqos_get_cong_status( iqos_if_t iface, uint32_t chnl );
+uint32_t iqos_set_cong_status( iqos_if_t iface, uint32_t chnl, 
+                iqos_cong_status_t status );
+#endif
+
+#endif  /* defined(__IQOS_H_INCLUDED__) */
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index a18b719f49d4dfbbb978270f791fe992cfc8b2a9..dddc265bd65f8f0ecf2793f1c498df7d7689410f 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -26,6 +26,11 @@
 #define KSAMTIB_CIGAM_2SFFJ 0x8519 /* For detecting wrong-endian fs */
 #define JFFS2_EMPTY_BITMASK 0xffff
 #define JFFS2_DIRTY_BITMASK 0x0000
+#if defined(CONFIG_BCM_KF_JFFS) || !defined(CONFIG_BCM_IN_KERNEL)
+#define JFFS2_EBH_COMPAT_FSET 0x00
+#define JFFS2_EBH_INCOMPAT_FSET 0x00
+#define JFFS2_EBH_ROCOMPAT_FSET 0x00
+#endif
 
 /* Summary node MAGIC marker */
 #define JFFS2_SUM_MAGIC	0x02851885
@@ -46,6 +51,7 @@
 #define JFFS2_COMPR_DYNRUBIN	0x05
 #define JFFS2_COMPR_ZLIB	0x06
 #define JFFS2_COMPR_LZO		0x07
+#define JFFS2_COMPR_LZMA	0x08
 /* Compatibility flags. */
 #define JFFS2_COMPAT_MASK 0xc000      /* What do to if an unknown nodetype is found */
 #define JFFS2_NODE_ACCURATE 0x2000
@@ -68,6 +74,10 @@
 #define JFFS2_NODETYPE_XATTR (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8)
 #define JFFS2_NODETYPE_XREF (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9)
 
+#if defined(CONFIG_BCM_KF_JFFS) || !defined(CONFIG_BCM_IN_KERNEL)
+#define JFFS2_NODETYPE_ERASEBLOCK_HEADER (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 5)
+#endif
+
 /* XATTR Related */
 #define JFFS2_XPREFIX_USER		1	/* for "user." */
 #define JFFS2_XPREFIX_SECURITY		2	/* for "security." */
@@ -203,6 +213,22 @@ struct jffs2_raw_summary
 	jint32_t node_crc; 	/* node crc */
 	jint32_t sum[0]; 	/* inode summary info */
 };
+#if defined(CONFIG_BCM_KF_JFFS) || !defined(CONFIG_BCM_IN_KERNEL)
+struct jffs2_raw_ebh
+{
+	jint16_t magic;
+	jint16_t nodetype; /* == JFFS2_NODETYPE_ERASEBLOCK_HEADER */
+	jint32_t totlen;
+	jint32_t hdr_crc;
+	jint32_t node_crc;
+	uint8_t  reserved; /* reserved for future use and alignment */
+	uint8_t  compat_fset;
+	uint8_t  incompat_fset;
+	uint8_t  rocompat_fset;
+	jint32_t erase_count; /* the erase count of this erase block */
+	jint32_t data[0];
+} __attribute__((packed));
+#endif
 
 union jffs2_node_union
 {
@@ -211,6 +237,9 @@ union jffs2_node_union
 	struct jffs2_raw_xattr x;
 	struct jffs2_raw_xref r;
 	struct jffs2_raw_summary s;
+#if defined(CONFIG_BCM_KF_JFFS) || !defined(CONFIG_BCM_IN_KERNEL)
+	struct jffs2_raw_ebh eh;
+#endif
 	struct jffs2_unknown_node u;
 };
 
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 387571959dd9a1219c90dc952c29f8382698f397..7f04cc95279500af746d5a8a2d75b6b7543e797f 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -115,7 +115,14 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
 
 static inline void print_ip_sym(unsigned long ip)
 {
+#if defined(CONFIG_BCM_KF_EXTRA_DEBUG)
+    if (((ip & 0xF0000000) == 0x80000000) || ((ip & 0xF0000000) == 0xc0000000))
 	printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+    else
+    	printk("[<%p>] (suspected corrupt symbol)\n", (void *) ip);
+#else
+	printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+#endif
 }
 
 #endif /*_LINUX_KALLSYMS_H*/
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e43a4a2a5b0eb132dc5dedae6745d872d8cb1abf..eb0e441479370d656db57113a46ff1aa1d77e5dc 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -132,6 +132,10 @@
  */
 #define lower_32_bits(n) ((u32)(n))
 
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && defined(CONFIG_BCM_PRINTK_INT_ENABLED)
+extern int printk_with_interrupt_enabled;
+#endif
+
 struct completion;
 struct pt_regs;
 struct user;
diff --git a/include/linux/lzma.h b/include/linux/lzma.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f31334dfc3177c2dd050a87c7a26cf67bdf6ad3
--- /dev/null
+++ b/include/linux/lzma.h
@@ -0,0 +1,62 @@
+#ifndef __LZMA_H__
+#define __LZMA_H__
+
+#ifdef __KERNEL__
+	#include <linux/kernel.h>
+	#include <linux/sched.h>
+	#include <linux/slab.h>
+	#include <linux/vmalloc.h>
+	#include <linux/init.h>
+	#define LZMA_MALLOC vmalloc
+	#define LZMA_FREE vfree
+	#define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
+	#define INIT __init
+	#define STATIC static
+#else
+	#include <stdint.h>
+	#include <stdlib.h>
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <string.h>
+	#include <asm/types.h>
+	#include <errno.h>
+	#include <linux/jffs2.h>
+	#ifndef PAGE_SIZE
+		extern int page_size;
+		#define PAGE_SIZE page_size
+	#endif
+	#define LZMA_MALLOC malloc
+	#define LZMA_FREE free
+	#define PRINT_ERROR(msg) fprintf(stderr, msg)
+	#define INIT
+	#define STATIC
+#endif
+
+#include "lzma/LzmaDec.h"
+#include "lzma/LzmaEnc.h"
+
+#define LZMA_BEST_LEVEL (9)
+#define LZMA_BEST_LC    (0)
+#define LZMA_BEST_LP    (0)
+#define LZMA_BEST_PB    (0)
+#define LZMA_BEST_FB  (273)
+
+#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
+
+static void *p_lzma_malloc(void *p, size_t size)
+{
+        if (size == 0)
+                return NULL;
+
+        return LZMA_MALLOC(size);
+}
+
+static void p_lzma_free(void *p, void *address)
+{
+        if (address != NULL)
+                LZMA_FREE(address);
+}
+
+static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
+
+#endif
diff --git a/include/linux/lzma/LzFind.h b/include/linux/lzma/LzFind.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d4f8e23902809dcffc92d11ed20170f89e272e6
--- /dev/null
+++ b/include/linux/lzma/LzFind.h
@@ -0,0 +1,98 @@
+/* LzFind.h -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_FIND_H
+#define __LZ_FIND_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef UInt32 CLzRef;
+
+typedef struct _CMatchFinder
+{
+  Byte *buffer;
+  UInt32 pos;
+  UInt32 posLimit;
+  UInt32 streamPos;
+  UInt32 lenLimit;
+
+  UInt32 cyclicBufferPos;
+  UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
+
+  UInt32 matchMaxLen;
+  CLzRef *hash;
+  CLzRef *son;
+  UInt32 hashMask;
+  UInt32 cutValue;
+
+  Byte *bufferBase;
+  ISeqInStream *stream;
+  int streamEndWasReached;
+
+  UInt32 blockSize;
+  UInt32 keepSizeBefore;
+  UInt32 keepSizeAfter;
+
+  UInt32 numHashBytes;
+  int directInput;
+  size_t directInputRem;
+  int btMode;
+  int bigHash;
+  UInt32 historySize;
+  UInt32 fixedHashSize;
+  UInt32 hashSizeSum;
+  UInt32 numSons;
+  SRes result;
+  UInt32 crc[256];
+} CMatchFinder;
+
+#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
+#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
+
+#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+void MatchFinder_Construct(CMatchFinder *p);
+
+/* Conditions:
+     historySize <= 3 GB
+     keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
+*/
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+    UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+    ISzAlloc *alloc);
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
+
+/*
+Conditions:
+  Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
+  Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
+*/
+
+typedef void (*Mf_Init_Func)(void *object);
+typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
+typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
+typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
+typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
+typedef void (*Mf_Skip_Func)(void *object, UInt32);
+
+typedef struct _IMatchFinder
+{
+  Mf_Init_Func Init;
+  Mf_GetIndexByte_Func GetIndexByte;
+  Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
+  Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
+  Mf_GetMatches_Func GetMatches;
+  Mf_Skip_Func Skip;
+} IMatchFinder;
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/linux/lzma/LzHash.h b/include/linux/lzma/LzHash.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3e89966cc70d466e343961f9770ea9126cad7ef
--- /dev/null
+++ b/include/linux/lzma/LzHash.h
@@ -0,0 +1,54 @@
+/* LzHash.h -- HASH functions for LZ algorithms
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_HASH_H
+#define __LZ_HASH_H
+
+#define kHash2Size (1 << 10)
+#define kHash3Size (1 << 16)
+#define kHash4Size (1 << 20)
+
+#define kFix3HashSize (kHash2Size)
+#define kFix4HashSize (kHash2Size + kHash3Size)
+#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
+
+#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
+
+#define HASH3_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
+
+#define HASH4_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
+
+#define HASH5_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
+  hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
+  hash4Value &= (kHash4Size - 1); }
+
+/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
+#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
+
+
+#define MT_HASH2_CALC \
+  hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
+
+#define MT_HASH3_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
+
+#define MT_HASH4_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
+
+#endif
diff --git a/include/linux/lzma/LzmaDec.h b/include/linux/lzma/LzmaDec.h
new file mode 100644
index 0000000000000000000000000000000000000000..c90f95e9fbaf3bba87477fdebc4589d84ed8e8db
--- /dev/null
+++ b/include/linux/lzma/LzmaDec.h
@@ -0,0 +1,130 @@
+/* LzmaDec.h -- LZMA Decoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_DEC_H
+#define __LZMA_DEC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* #define _LZMA_PROB32 */
+/* _LZMA_PROB32 can increase the speed on some CPUs,
+   but memory usage for CLzmaDec::probs will be doubled in that case */
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+
+/* ---------- LZMA Properties ---------- */
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaProps
+{
+  unsigned lc, lp, pb;
+  UInt32 dicSize;
+} CLzmaProps;
+
+
+/* ---------- LZMA Decoder state ---------- */
+
+/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
+   Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
+
+#define LZMA_REQUIRED_INPUT_MAX 20
+
+typedef struct
+{
+  CLzmaProps prop;
+  CLzmaProb *probs;
+  Byte *dic;
+  const Byte *buf;
+  UInt32 range, code;
+  SizeT dicPos;
+  SizeT dicBufSize;
+  UInt32 processedPos;
+  UInt32 checkDicSize;
+  unsigned state;
+  UInt32 reps[4];
+  unsigned remainLen;
+  int needFlush;
+  int needInitState;
+  UInt32 numProbs;
+  unsigned tempBufSize;
+  Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
+} CLzmaDec;
+
+#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
+
+/* There are two types of LZMA streams:
+     0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
+     1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
+
+typedef enum
+{
+  LZMA_FINISH_ANY,   /* finish at any point */
+  LZMA_FINISH_END    /* block must be finished at the end */
+} ELzmaFinishMode;
+
+/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
+
+   You must use LZMA_FINISH_END, when you know that current output buffer
+   covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
+
+   If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
+   and output value of destLen will be less than output buffer size limit.
+   You can check status result also.
+
+   You can use multiple checks to test data integrity after full decompression:
+     1) Check Result and "status" variable.
+     2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
+     3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
+        You must use correct finish mode in that case. */
+
+typedef enum
+{
+  LZMA_STATUS_NOT_SPECIFIED,               /* use main error code instead */
+  LZMA_STATUS_FINISHED_WITH_MARK,          /* stream was finished with end mark. */
+  LZMA_STATUS_NOT_FINISHED,                /* stream was not finished */
+  LZMA_STATUS_NEEDS_MORE_INPUT,            /* you must provide more input bytes */
+  LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK  /* there is probability that stream was finished without end mark */
+} ELzmaStatus;
+
+/* ELzmaStatus is used only as output value for function call */
+
+/* ---------- One Call Interface ---------- */
+
+/* LzmaDecode
+
+finishMode:
+  It has meaning only if the decoding reaches output limit (*destLen).
+  LZMA_FINISH_ANY - Decode just destLen bytes.
+  LZMA_FINISH_END - Stream must be finished after (*destLen).
+
+Returns:
+  SZ_OK
+    status:
+      LZMA_STATUS_FINISHED_WITH_MARK
+      LZMA_STATUS_NOT_FINISHED
+      LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+  SZ_ERROR_DATA - Data error
+  SZ_ERROR_MEM  - Memory allocation error
+  SZ_ERROR_UNSUPPORTED - Unsupported properties
+  SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+    const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+    ELzmaStatus *status, ISzAlloc *alloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/linux/lzma/LzmaEnc.h b/include/linux/lzma/LzmaEnc.h
new file mode 100644
index 0000000000000000000000000000000000000000..2986c0460be44ca5aba94a3556df88623b597e86
--- /dev/null
+++ b/include/linux/lzma/LzmaEnc.h
@@ -0,0 +1,60 @@
+/*  LzmaEnc.h -- LZMA Encoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_ENC_H
+#define __LZMA_ENC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaEncProps
+{
+  int level;       /*  0 <= level <= 9 */
+  UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
+                      (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
+                       default = (1 << 24) */
+  int lc;          /* 0 <= lc <= 8, default = 3 */
+  int lp;          /* 0 <= lp <= 4, default = 0 */
+  int pb;          /* 0 <= pb <= 4, default = 2 */
+  int algo;        /* 0 - fast, 1 - normal, default = 1 */
+  int fb;          /* 5 <= fb <= 273, default = 32 */
+  int btMode;      /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
+  int numHashBytes; /* 2, 3 or 4, default = 4 */
+  UInt32 mc;        /* 1 <= mc <= (1 << 30), default = 32 */
+  unsigned writeEndMark;  /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
+  int numThreads;  /* 1 or 2, default = 2 */
+} CLzmaEncProps;
+
+void LzmaEncProps_Init(CLzmaEncProps *p);
+
+/* ---------- CLzmaEncHandle Interface ---------- */
+
+/* LzmaEnc_* functions can return the following exit codes:
+Returns:
+  SZ_OK           - OK
+  SZ_ERROR_MEM    - Memory allocation error
+  SZ_ERROR_PARAM  - Incorrect parameter in props
+  SZ_ERROR_WRITE  - Write callback error.
+  SZ_ERROR_PROGRESS - some break from progress callback
+  SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzmaEncHandle;
+
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+    int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/linux/lzma/Types.h b/include/linux/lzma/Types.h
new file mode 100644
index 0000000000000000000000000000000000000000..4751acde07222dc01a0403969b46da556fa8086e
--- /dev/null
+++ b/include/linux/lzma/Types.h
@@ -0,0 +1,226 @@
+/* Types.h -- Basic types
+2009-11-23 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_TYPES_H
+#define __7Z_TYPES_H
+
+#include <stddef.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#ifndef EXTERN_C_BEGIN
+#ifdef __cplusplus
+#define EXTERN_C_BEGIN extern "C" {
+#define EXTERN_C_END }
+#else
+#define EXTERN_C_BEGIN
+#define EXTERN_C_END
+#endif
+#endif
+
+EXTERN_C_BEGIN
+
+#define SZ_OK 0
+
+#define SZ_ERROR_DATA 1
+#define SZ_ERROR_MEM 2
+#define SZ_ERROR_CRC 3
+#define SZ_ERROR_UNSUPPORTED 4
+#define SZ_ERROR_PARAM 5
+#define SZ_ERROR_INPUT_EOF 6
+#define SZ_ERROR_OUTPUT_EOF 7
+#define SZ_ERROR_READ 8
+#define SZ_ERROR_WRITE 9
+#define SZ_ERROR_PROGRESS 10
+#define SZ_ERROR_FAIL 11
+#define SZ_ERROR_THREAD 12
+
+#define SZ_ERROR_ARCHIVE 16
+#define SZ_ERROR_NO_ARCHIVE 17
+
+typedef int SRes;
+
+#ifdef _WIN32
+typedef DWORD WRes;
+#else
+typedef int WRes;
+#endif
+
+#ifndef RINOK
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
+#endif
+
+typedef unsigned char Byte;
+typedef short Int16;
+typedef unsigned short UInt16;
+
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef long Int32;
+typedef unsigned long UInt32;
+#else
+typedef int Int32;
+typedef unsigned int UInt32;
+#endif
+
+#ifdef _SZ_NO_INT_64
+
+/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
+   NOTES: Some code will work incorrectly in that case! */
+
+typedef long Int64;
+typedef unsigned long UInt64;
+
+#else
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#else
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#endif
+
+#endif
+
+#ifdef _LZMA_NO_SYSTEM_SIZE_T
+typedef UInt32 SizeT;
+#else
+typedef size_t SizeT;
+#endif
+
+typedef int Bool;
+#define True 1
+#define False 0
+
+
+#ifdef _WIN32
+#define MY_STD_CALL __stdcall
+#else
+#define MY_STD_CALL
+#endif
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1300
+#define MY_NO_INLINE __declspec(noinline)
+#else
+#define MY_NO_INLINE
+#endif
+
+#define MY_CDECL __cdecl
+#define MY_FAST_CALL __fastcall
+
+#else
+
+#define MY_CDECL
+#define MY_FAST_CALL
+
+#endif
+
+
+/* The following interfaces use first parameter as pointer to structure */
+
+typedef struct
+{
+  SRes (*Read)(void *p, void *buf, size_t *size);
+    /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+       (output(*size) < input(*size)) is allowed */
+} ISeqInStream;
+
+/* it can return SZ_ERROR_INPUT_EOF */
+SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
+SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
+SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
+
+typedef struct
+{
+  size_t (*Write)(void *p, const void *buf, size_t size);
+    /* Returns: result - the number of actually written bytes.
+       (result < size) means error */
+} ISeqOutStream;
+
+typedef enum
+{
+  SZ_SEEK_SET = 0,
+  SZ_SEEK_CUR = 1,
+  SZ_SEEK_END = 2
+} ESzSeek;
+
+typedef struct
+{
+  SRes (*Read)(void *p, void *buf, size_t *size);  /* same as ISeqInStream::Read */
+  SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ISeekInStream;
+
+typedef struct
+{
+  SRes (*Look)(void *p, void **buf, size_t *size);
+    /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+       (output(*size) > input(*size)) is not allowed
+       (output(*size) < input(*size)) is allowed */
+  SRes (*Skip)(void *p, size_t offset);
+    /* offset must be <= output(*size) of Look */
+
+  SRes (*Read)(void *p, void *buf, size_t *size);
+    /* reads directly (without buffer). It's same as ISeqInStream::Read */
+  SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ILookInStream;
+
+SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
+SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
+
+/* reads via ILookInStream::Read */
+SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
+SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
+
+#define LookToRead_BUF_SIZE (1 << 14)
+
+typedef struct
+{
+  ILookInStream s;
+  ISeekInStream *realStream;
+  size_t pos;
+  size_t size;
+  Byte buf[LookToRead_BUF_SIZE];
+} CLookToRead;
+
+void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
+void LookToRead_Init(CLookToRead *p);
+
+typedef struct
+{
+  ISeqInStream s;
+  ILookInStream *realStream;
+} CSecToLook;
+
+void SecToLook_CreateVTable(CSecToLook *p);
+
+typedef struct
+{
+  ISeqInStream s;
+  ILookInStream *realStream;
+} CSecToRead;
+
+void SecToRead_CreateVTable(CSecToRead *p);
+
+typedef struct
+{
+  SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
+    /* Returns: result. (result != SZ_OK) means break.
+       Value (UInt64)(Int64)-1 for size means unknown value. */
+} ICompressProgress;
+
+typedef struct
+{
+  void *(*Alloc)(void *p, size_t size);
+  void (*Free)(void *p, void *address); /* address can be 0 */
+} ISzAlloc;
+
+#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
+#define IAlloc_Free(p, a) (p)->Free((p), a)
+
+EXTERN_C_END
+
+#endif
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d44ced9dd0da45c5da819050e6d711fa7cd7d78
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,72 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * file mhi.h
+ *
+ * Modem-Host Interface (MHI) kernel interface
+ */
+
+#ifndef LINUX_MHI_H
+#define LINUX_MHI_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <asm/byteorder.h>
+
+
+struct mhi_sock {
+	struct sock	sk;
+	int		sk_l3proto;
+	int		sk_ifindex;
+};
+
+struct sockaddr_mhi {
+	sa_family_t	sa_family;
+	int		sa_ifindex;
+	__u8	sa_zero[
+		sizeof(struct sockaddr)
+		- sizeof(sa_family_t)
+		- sizeof(int)];
+};
+
+
+static inline struct mhi_sock *mhi_sk(struct sock *sk)
+{
+	return (struct mhi_sock *)sk;
+}
+
+static inline struct sockaddr_mhi *sa_mhi(struct sockaddr *sa)
+{
+	return (struct sockaddr_mhi *)sa;
+}
+
+#endif
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/linux/mhi_l2mux.h b/include/linux/mhi_l2mux.h
new file mode 100644
index 0000000000000000000000000000000000000000..f28fe67776db9e119def7e75269c0fed6bde9082
--- /dev/null
+++ b/include/linux/mhi_l2mux.h
@@ -0,0 +1,215 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi_l2mux.h
+ *
+ * MHI L2MUX kernel definitions
+ */
+
+#ifndef LINUX_L2MUX_H
+#define LINUX_L2MUX_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+#ifdef __KERNEL__
+#include <net/sock.h>
+#define ACTIVATE_L2MUX_STAT
+
+#ifdef ACTIVATE_L2MUX_STAT
+#include <linux/list.h>
+#include <linux/time.h>
+#endif /* ACTIVATE_L2MUX_STAT */
+#endif /*__KERNEL__*/
+
+/* Official L3 protocol IDs */
+#define MHI_L3_PHONET		0x00
+#define MHI_L3_FILE		0x01
+#define MHI_L3_AUDIO		0x02
+#define MHI_L3_SECURITY		0x03
+#define MHI_L3_TEST		0x04
+#define MHI_L3_TEST_PRIO	0x05
+#define MHI_L3_XFILE		0x06
+#define MHI_L3_MHDP_DL		0x07
+#define MHI_L3_MHDP_UL		0x08
+#define MHI_L3_AUX_HOST		0x09
+#define MHI_L3_LOG		0x0A
+#define MHI_L3_CELLULAR_AUDIO	0x0B
+#define MHI_L3_IMS              0x0D
+#define MHI_L3_OEM_CP		0x0E
+#define MHI_L3_MHDP_DL_PS2	0x17
+#define MHI_L3_MHDP_UL_PS2	0x18
+#define MHI_L3_CTRL_TMODEM	0xC0
+#define MHI_L3_THERMAL		0xC1
+#define MHI_L3_MHDP_UDP_FILTER	0xFC
+#define MHI_L3_HIGH_PRIO_TEST	0xFD
+#define MHI_L3_MED_PRIO_TEST	0xFE
+#define MHI_L3_LOW_PRIO_TEST	0xFF
+
+/* 256 possible protocols */
+#define MHI_L3_NPROTO		256
+
+/* Special value for ANY */
+#define MHI_L3_ANY		0xFFFF
+
+#ifdef __KERNEL__
+typedef int (l2mux_skb_fn)(struct sk_buff *skb, struct net_device *dev);
+typedef int (l2mux_audio_fn)(unsigned char *buffer, size_t size, uint8_t phonet_dev_id);
+
+struct l2muxhdr {
+	__u8	l3_len[3];
+	__u8	l3_prot;
+} __packed;
+
+#ifdef ACTIVATE_L2MUX_STAT
+
+enum l2mux_direction {
+	UPLINK_DIR = 0,
+	DOWNLINK_DIR,
+};
+
+enum l2mux_trace_state {
+	ON = 0,
+	OFF,
+	KERNEL,
+};
+
+
+struct l2muxstat {
+	unsigned l3pid;
+	unsigned l3len;
+	enum l2mux_direction dir;
+	struct timeval time_val;
+	struct list_head list;
+	unsigned int stat_counter;
+};
+
+struct l2mux_stat_info {
+	struct proc_dir_entry *proc_entry;
+	struct l2muxstat l2muxstat_tab;
+	int l2mux_stat_id;
+	int previous_stat_counter;
+	unsigned int l2mux_total_stat_counter;
+	enum l2mux_trace_state l2mux_traces_state;
+	int l2mux_traces_activation_done;
+	struct net_device *dev;
+	struct work_struct l2mux_stat_work;
+};
+
+#endif /* ACTIVATE_L2MUX_STAT */
+
+
+#define L2MUX_HDR_SIZE  (sizeof(struct l2muxhdr))
+
+
+static inline struct l2muxhdr *l2mux_hdr(struct sk_buff *skb)
+{
+	return (struct l2muxhdr *)skb_mac_header(skb);
+}
+
+static inline void l2mux_set_proto(struct l2muxhdr *hdr, int proto)
+{
+	hdr->l3_prot = proto;
+}
+
+static inline int l2mux_get_proto(struct l2muxhdr *hdr)
+{
+	return hdr->l3_prot;
+}
+
+static inline void l2mux_set_length(struct l2muxhdr *hdr, unsigned len)
+{
+	hdr->l3_len[0] = (len) & 0xFF;
+	hdr->l3_len[1] = (len >>  8) & 0xFF;
+	hdr->l3_len[2] = (len >> 16) & 0xFF;
+}
+
+static inline unsigned l2mux_get_length(struct l2muxhdr *hdr)
+{
+	return (((unsigned)hdr->l3_len[2]) << 16) |
+		(((unsigned)hdr->l3_len[1]) << 8) |
+		((unsigned)hdr->l3_len[0]);
+}
+
+extern int l2mux_netif_rx_register(int l3, l2mux_skb_fn *rx_fn);
+extern int l2mux_netif_rx_unregister(int l3);
+
+extern int l2mux_netif_tx_register(int pt, l2mux_skb_fn *rx_fn);
+extern int l2mux_netif_tx_unregister(int pt);
+
+extern int l2mux_skb_rx(struct sk_buff *skb, struct net_device *dev);
+extern int l2mux_skb_tx(struct sk_buff *skb, struct net_device *dev);
+
+enum l2mux_audio_dev_id {
+	L2MUX_AUDIO_DEV_ID0,
+	L2MUX_AUDIO_DEV_ID1,
+	L2MUX_AUDIO_DEV_ID2,
+	L2MUX_AUDIO_DEV_ID3,
+	L2MUX_AUDIO_DEV_MAX
+};
+#define L2MUX_AUDIO_DEV_TYPE_RX		(0x1 << 24)
+#define L2MUX_AUDIO_DEV_TYPE_TX		(0x2 << 24)
+
+/* both register functions will return a positive integer value when
+ * registration complete successfully, please use the handle for
+ * unregistration.
+ * We assume that there should be only 1 voice code on the host side, but
+ * there might be more than 1 modem */
+extern int l2mux_audio_rx_register(l2mux_audio_fn *fn);
+extern int l2mux_audio_rx_unregister(int handle);
+
+extern int l2mux_audio_tx_register(uint8_t phonet_dev_id, l2mux_audio_fn *fn);
+extern int l2mux_audio_tx_unregister(int handle);
+
+/* 
+ * Input: buffer: pointer to L2muxhdr + audio payload, since payload length
+ *		  info is in l2muxhdr, so we don't need another argument
+ *	  pn_dev_id: the phonet ID this device is bound to.
+ * Note: the buffer should never be freed by anyone, since this is a static
+ * 	 buffer allocated by the modem driver. */
+extern int l2mux_audio_rx(unsigned char *buffer, uint8_t pn_dev_id);
+
+/*
+ * Input: buffer: pointer to audio payload WITHOUT l2muxhdr.  The buffer should
+ * 		  reserve at least 4 bytes in the headroom, such that L2mux
+ * 		  does not need to allocate a new memory for inserting l2muxhdr.
+ * 	  size: the size of the payload
+ * 	  pn_dev_id: The phonet ID that indicates the device which this audio
+ * 		     data is destined to
+ * Note: The buffer should never be freed by anyone.  It should be a static
+ * 	 buffer allocated by the voice code.
+ */
+extern int l2mux_audio_tx(unsigned char *buffer, size_t size,
+		uint8_t pn_dev_id);
+
+#endif /*__KERNEL__*/
+
+#endif /* LINUX_L2MUX_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 0549d2115507124405b06f5ea96f072880a3e475..c01379b0e391c046579217dc755878c00851e1d1 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -19,6 +19,9 @@
 #define APOLLO_MOUSE_MINOR	7
 #define PC110PAD_MINOR		9
 /*#define ADB_MOUSE_MINOR	10	FIXME OBSOLETE */
+#if defined(CONFIG_BCM_KF_OCF)
+#define CRYPTODEV_MINOR		70	/* /dev/crypto */
+#endif // CONFIG_BCM_KF_OCF
 #define WATCHDOG_MINOR		130	/* Watchdog timer     */
 #define TEMP_MINOR		131	/* Temperature Sensor */
 #define RTC_MINOR		135
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abc98ef50a9d028b0edd269e5c09546677ba7e97..854a401f2df3d3627598ca8bf8b5f219547d9f5c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1453,7 +1453,11 @@ int write_one_page(struct page *page, int wait);
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
+#if defined(CONFIG_BCM_KF_USB_STORAGE)
+#define VM_MAX_READAHEAD	512	/* kbytes */
+#else
 #define VM_MAX_READAHEAD	128	/* kbytes */
+#endif
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f6806bd6ac34575122a197fb8a2e4b11bd73d20..d1d4f3e1981122039f344798293a23aa0854f8bb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -245,6 +245,12 @@ enum zone_type {
 	 * can only do DMA areas below 4G.
 	 */
 	ZONE_DMA32,
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	/*
+	 * a specific memory zone allocated for ACP purpose in BCM63xx platform
+	 */
+	ZONE_ACP,
 #endif
 	/*
 	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
@@ -793,6 +799,12 @@ static inline int is_dma(struct zone *zone)
 	return 0;
 #endif
 }
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+static inline int is_acp(struct zone *zone)
+{
+	return zone == zone->zone_pgdat->node_zones + ZONE_ACP;
+}
+#endif
 
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index d7029f4a191a093ef9294104c75756b6d3b7faee..344a2623eb2a34c990133d5f0c62390ea35a004d 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -66,6 +66,9 @@ extern void mnt_pin(struct vfsmount *mnt);
 extern void mnt_unpin(struct vfsmount *mnt);
 extern int __mnt_is_readonly(struct vfsmount *mnt);
 
+struct path;
+extern struct vfsmount *clone_private_mount(struct path *path);
+
 struct file_system_type;
 extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
 				      int flags, const char *name,
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 46caaf44339d6e706bd822218f4d56616eb51671..404274c90da73518037f2d673f0741a2c7857cf1 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -35,6 +35,7 @@
 #define SIOCGETRPF	(SIOCPROTOPRIVATE+2)
 
 #define MAXVIFS		32	
+
 typedef unsigned long vifbitmap_t;	/* User mode code depends on this lot */
 typedef unsigned short vifi_t;
 #define ALL_VIFS	((vifi_t)(-1))
@@ -243,9 +244,15 @@ struct mfc_cache {
 
 #ifdef __KERNEL__
 struct rtmsg;
+#if defined(CONFIG_BCM_KF_IGMP)
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+		   __be32 saddr, __be32 daddr,
+		   struct rtmsg *rtm, int nowait, unsigned short ifIndex);
+#else
 extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
 			  __be32 saddr, __be32 daddr,
 			  struct rtmsg *rtm, int nowait);
 #endif
+#endif
 
 #endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 6d8c7251eb8d4147968c6a146473868caa0eebbc..366621d889adf8d2f053f3afe42d4b81b35d17a4 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -32,6 +32,7 @@
 #define SIOCGETRPF	(SIOCPROTOPRIVATE+2)
 
 #define MAXMIFS		32
+
 typedef unsigned long mifbitmap_t;	/* User mode code depends on this lot */
 typedef unsigned short mifi_t;
 #define ALL_MIFS	((mifi_t)(-1))
@@ -229,8 +230,13 @@ struct mfc6_cache {
 
 #ifdef __KERNEL__
 struct rtmsg;
+#if defined(CONFIG_BCM_KF_IGMP)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, 
+		    struct rtmsg *rtm, int nowait, unsigned short ifIndex);
+#else
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
 			   struct rtmsg *rtm, int nowait);
+#endif
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 650ef352f045f48ec9874b611faf18fc2a2b94cb..20ce27e2bd93b355bc8aead2146d52293faeb90d 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -54,6 +54,11 @@
  * that the pattern and the version count are always located in the oob area
  * of the first block.
  */
+
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+#define BBT_NULL_PAGE (-1LL)
+#endif
+ 
 struct nand_bbt_descr {
 	int options;
 	int pages[NAND_MAX_CHIPS];
diff --git a/include/linux/mtd/bchp_nand_21_22.h b/include/linux/mtd/bchp_nand_21_22.h
new file mode 100644
index 0000000000000000000000000000000000000000..a0813089f3fc085029d24dffb94fb555299d3ce5
--- /dev/null
+++ b/include/linux/mtd/bchp_nand_21_22.h
@@ -0,0 +1,872 @@
+/***************************************************************************
+ *     Copyright (c) 1999-2008, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Module Description: NAND controller version 2.1 and 2.2 definitions.
+ *
+ ***************************************************************************/
+
+#ifndef BCHP_NAND_21_22_H__
+#define BCHP_NAND_21_22_H__
+
+#define BRCMNAND_CTL_BASE                        (NAND_REG_BASE & 0x0fffffff)
+#define BRCMNAND_CACHE_BASE                      (NAND_CACHE_BASE & 0x0fffffff)
+
+#define BCHP_NAND_REG_START                     BCHP_NAND_REVISION
+#define BCHP_NAND_REG_END                       BCHP_NAND_BLK_WR_PROTECT
+
+/***************************************************************************
+ *NAND - Nand Flash Control Registers
+ ***************************************************************************/
+#define BCHP_NAND_REVISION                       (BRCMNAND_CTL_BASE + 0x00) /* NAND Revision */
+#define BCHP_NAND_CMD_START                      (BRCMNAND_CTL_BASE + 0x04) /* Nand Flash Command Start */
+#define BCHP_NAND_CMD_EXT_ADDRESS                (BRCMNAND_CTL_BASE + 0x08) /* Nand Flash Command Extended Address */
+#define BCHP_NAND_CMD_ADDRESS                    (BRCMNAND_CTL_BASE + 0x0c) /* Nand Flash Command Address */
+#define BCHP_NAND_CMD_END_ADDRESS                (BRCMNAND_CTL_BASE + 0x10) /* Nand Flash Command End Address */
+#define BCHP_NAND_CS_NAND_SELECT                 (BRCMNAND_CTL_BASE + 0x14) /* Nand Flash EBI CS Select */
+#define BCHP_NAND_CS_NAND_XOR                    (BRCMNAND_CTL_BASE + 0x18) /* Nand Flash EBI CS Address XOR with 1FC0 Control */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0          (BRCMNAND_CTL_BASE + 0x20) /* Nand Flash Spare Area Read Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4          (BRCMNAND_CTL_BASE + 0x24) /* Nand Flash Spare Area Read Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8          (BRCMNAND_CTL_BASE + 0x28) /* Nand Flash Spare Area Read Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C          (BRCMNAND_CTL_BASE + 0x2c) /* Nand Flash Spare Area Read Bytes 12-15 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0         (BRCMNAND_CTL_BASE + 0x30) /* Nand Flash Spare Area Write Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4         (BRCMNAND_CTL_BASE + 0x34) /* Nand Flash Spare Area Write Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8         (BRCMNAND_CTL_BASE + 0x38) /* Nand Flash Spare Area Write Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C         (BRCMNAND_CTL_BASE + 0x3c) /* Nand Flash Spare Area Write Bytes 12-15 */
+#define BCHP_NAND_ACC_CONTROL                    (BRCMNAND_CTL_BASE + 0x40) /* Nand Flash Access Control */
+#define BCHP_NAND_CONFIG                         (BRCMNAND_CTL_BASE + 0x44) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1                       (BRCMNAND_CTL_BASE + 0x48) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2                       (BRCMNAND_CTL_BASE + 0x4c) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_SEMAPHORE                      (BRCMNAND_CTL_BASE + 0x50) /* Semaphore */
+#define BCHP_NAND_FLASH_DEVICE_ID                (BRCMNAND_CTL_BASE + 0x54) /* Nand Flash Device ID */
+#define BCHP_NAND_BLOCK_LOCK_STATUS              (BRCMNAND_CTL_BASE + 0x58) /* Nand Flash Block Lock Status */
+#define BCHP_NAND_INTFC_STATUS                   (BRCMNAND_CTL_BASE + 0x5c) /* Nand Flash Interface Status */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR              (BRCMNAND_CTL_BASE + 0x60) /* ECC Correctable Error Extended Address */
+#define BCHP_NAND_ECC_CORR_ADDR                  (BRCMNAND_CTL_BASE + 0x64) /* ECC Correctable Error Address */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR               (BRCMNAND_CTL_BASE + 0x68) /* ECC Uncorrectable Error Extended Address */
+#define BCHP_NAND_ECC_UNC_ADDR                   (BRCMNAND_CTL_BASE + 0x6c) /* ECC Uncorrectable Error Address */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR            (BRCMNAND_CTL_BASE + 0x70) /* Flash Read Data Extended Address */
+#define BCHP_NAND_FLASH_READ_ADDR                (BRCMNAND_CTL_BASE + 0x74) /* Flash Read Data Address */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR          (BRCMNAND_CTL_BASE + 0x78) /* Page Program Extended Address */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR              (BRCMNAND_CTL_BASE + 0x7c) /* Page Program Address */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR             (BRCMNAND_CTL_BASE + 0x80) /* Copy Back Extended Address */
+#define BCHP_NAND_COPY_BACK_ADDR                 (BRCMNAND_CTL_BASE + 0x84) /* Copy Back Address */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR           (BRCMNAND_CTL_BASE + 0x88) /* Block Erase Extended Address */
+#define BCHP_NAND_BLOCK_ERASE_ADDR               (BRCMNAND_CTL_BASE + 0x8c) /* Block Erase Address */
+#define BCHP_NAND_INV_READ_EXT_ADDR              (BRCMNAND_CTL_BASE + 0x90) /* Flash Invalid Data Extended Address */
+#define BCHP_NAND_INV_READ_ADDR                  (BRCMNAND_CTL_BASE + 0x94) /* Flash Invalid Data Address */
+#define BCHP_NAND_BLK_WR_PROTECT                 (BRCMNAND_CTL_BASE + 0x98) /* Block Write Protect Enable and Size for EBI_CS0b */
+
+/***************************************************************************
+ *REVISION - NAND Revision
+ ***************************************************************************/
+/* NAND :: REVISION :: reserved0 [31:16] */
+#define BCHP_NAND_REVISION_reserved0_MASK                          0xffff0000
+#define BCHP_NAND_REVISION_reserved0_SHIFT                         16
+
+/* NAND :: REVISION :: MAJOR [15:08] */
+#define BCHP_NAND_REVISION_MAJOR_MASK                              0x0000ff00
+#define BCHP_NAND_REVISION_MAJOR_SHIFT                             8
+
+/* NAND :: REVISION :: MINOR [07:00] */
+#define BCHP_NAND_REVISION_MINOR_MASK                              0x000000ff
+#define BCHP_NAND_REVISION_MINOR_SHIFT                             0
+
+/***************************************************************************
+ *CMD_START - Nand Flash Command Start
+ ***************************************************************************/
+/* NAND :: CMD_START :: reserved0 [31:28] */
+#define BCHP_NAND_CMD_START_reserved0_MASK                         0xf0000000
+#define BCHP_NAND_CMD_START_reserved0_SHIFT                        28
+
+/* NAND :: CMD_START :: OPCODE [27:24] */
+#define BCHP_NAND_CMD_START_OPCODE_MASK                            0x0f000000
+#define BCHP_NAND_CMD_START_OPCODE_SHIFT                           24
+#define BCHP_NAND_CMD_START_OPCODE_NULL                            0
+#define BCHP_NAND_CMD_START_OPCODE_PAGE_READ                       1
+#define BCHP_NAND_CMD_START_OPCODE_SPARE_AREA_READ                 2
+#define BCHP_NAND_CMD_START_OPCODE_STATUS_READ                     3
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_PAGE                    4
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_SPARE_AREA              5
+#define BCHP_NAND_CMD_START_OPCODE_COPY_BACK                       6
+#define BCHP_NAND_CMD_START_OPCODE_DEVICE_ID_READ                  7
+#define BCHP_NAND_CMD_START_OPCODE_BLOCK_ERASE                     8
+#define BCHP_NAND_CMD_START_OPCODE_FLASH_RESET                     9
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK                     10
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK_DOWN                11
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_UNLOCK                   12
+#define BCHP_NAND_CMD_START_OPCODE_READ_BLOCKS_LOCK_STATUS         13
+
+/* NAND :: CMD_START :: reserved1 [23:00] */
+#define BCHP_NAND_CMD_START_reserved1_MASK                         0x00ffffff
+#define BCHP_NAND_CMD_START_reserved1_SHIFT                        0
+
+/***************************************************************************
+ *CMD_EXT_ADDRESS - Nand Flash Command Extended Address
+ ***************************************************************************/
+/* NAND :: CMD_EXT_ADDRESS :: reserved0 [31:19] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_MASK                   0xfff80000
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_SHIFT                  19
+
+/* NAND :: CMD_EXT_ADDRESS :: CS_SEL [18:16] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_MASK                      0x00070000
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT                     16
+
+/* NAND :: CMD_EXT_ADDRESS :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_MASK                 0x0000ffff
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_SHIFT                0
+
+/***************************************************************************
+ *CMD_ADDRESS - Nand Flash Command Address
+ ***************************************************************************/
+/* NAND :: CMD_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_MASK                         0xffffffff
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_SHIFT                        0
+
+/***************************************************************************
+ *CMD_END_ADDRESS - Nand Flash Command End Address
+ ***************************************************************************/
+/* NAND :: CMD_END_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *CS_NAND_SELECT - Nand Flash EBI CS Select
+ ***************************************************************************/
+/* NAND :: CS_NAND_SELECT :: CS_LOCK [31:31] */
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_MASK                      0x80000000
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_SHIFT                     31
+
+/* NAND :: CS_NAND_SELECT :: AUTO_DEVICE_ID_CONFIG [30:30] */
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK        0x40000000
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_SHIFT       30
+
+/* NAND :: CS_NAND_SELECT :: reserved0 [29:29] */
+#define BCHP_NAND_CS_NAND_SELECT_reserved0_MASK                    0x20000000
+#define BCHP_NAND_CS_NAND_SELECT_reserved0_SHIFT                   29
+
+/* NAND :: CS_NAND_SELECT :: WR_PROTECT_BLK0 [28:28] */
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_MASK              0x10000000
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_SHIFT             28
+
+/* NAND :: CS_NAND_SELECT :: reserved1 [27:16] */
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_MASK                    0x0fff0000
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT                   16
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_USES_NAND [15:15] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_MASK           0x00008000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_SHIFT          15
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_USES_NAND [14:14] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_MASK           0x00004000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_SHIFT          14
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_USES_NAND [13:13] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_MASK           0x00002000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_SHIFT          13
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_USES_NAND [12:12] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_MASK           0x00001000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_SHIFT          12
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_USES_NAND [11:11] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_MASK           0x00000800
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_SHIFT          11
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_USES_NAND [10:10] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_MASK           0x00000400
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_SHIFT          10
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_USES_NAND [09:09] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_MASK           0x00000200
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_SHIFT          9
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_USES_NAND [08:08] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_MASK           0x00000100
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT          8
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_SEL [07:07] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_MASK                 0x00000080
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_SHIFT                7
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_SEL [06:06] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_MASK                 0x00000040
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_SHIFT                6
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_SEL [05:05] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_MASK                 0x00000020
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_SHIFT                5
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_SEL [04:04] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_MASK                 0x00000010
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_SHIFT                4
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_SEL [03:03] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK                 0x00000008
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_SHIFT                3
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_SEL [02:02] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_MASK                 0x00000004
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_SHIFT                2
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_SEL [01:01] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_MASK                 0x00000002
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_SHIFT                1
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_SEL [00:00] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK                 0x00000001
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT                0
+
+/***************************************************************************
+ *CS_NAND_XOR - Nand Flash EBI CS Address XOR with 1FC0 Control
+ ***************************************************************************/
+/* NAND :: CS_NAND_XOR :: reserved0 [31:08] */
+#define BCHP_NAND_CS_NAND_XOR_reserved0_MASK                       0xffffff00
+#define BCHP_NAND_CS_NAND_XOR_reserved0_SHIFT                      8
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_7_ADDR_1FC0_XOR [07:07] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_MASK          0x00000080
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_SHIFT         7
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_6_ADDR_1FC0_XOR [06:06] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_MASK          0x00000040
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_SHIFT         6
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_5_ADDR_1FC0_XOR [05:05] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_MASK          0x00000020
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_SHIFT         5
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_4_ADDR_1FC0_XOR [04:04] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_MASK          0x00000010
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_SHIFT         4
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_3_ADDR_1FC0_XOR [03:03] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_MASK          0x00000008
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_SHIFT         3
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_2_ADDR_1FC0_XOR [02:02] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_MASK          0x00000004
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_SHIFT         2
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_1_ADDR_1FC0_XOR [01:01] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_MASK          0x00000002
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_SHIFT         1
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_0_ADDR_1FC0_XOR [00:00] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK          0x00000001
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_0 - Nand Flash Spare Area Read Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_4 - Nand Flash Spare Area Read Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_8 - Nand Flash Spare Area Read Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_C - Nand Flash Spare Area Read Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_SHIFT          24
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_SHIFT          16
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_0 - Nand Flash Spare Area Write Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_4 - Nand Flash Spare Area Write Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_8 - Nand Flash Spare Area Write Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_C - Nand Flash Spare Area Write Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_SHIFT         24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_SHIFT         16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_SHIFT         0
+
+/***************************************************************************
+ *ACC_CONTROL - Nand Flash Access Control
+ ***************************************************************************/
+/* NAND :: ACC_CONTROL :: RD_ECC_EN [31:31] */
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK                       0x80000000
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_SHIFT                      31
+
+/* NAND :: ACC_CONTROL :: WR_ECC_EN [30:30] */
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_MASK                       0x40000000
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_SHIFT                      30
+
+/* NAND :: ACC_CONTROL :: RD_ECC_BLK0_EN [29:29] */
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK                  0x20000000
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_SHIFT                 29
+
+/* NAND :: ACC_CONTROL :: FAST_PGM_RDIN [28:28] */
+#define BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_MASK                   0x10000000
+#define BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_SHIFT                  28
+
+/* NAND :: ACC_CONTROL :: RD_ERASED_ECC_EN [27:27] */
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_MASK                0x08000000
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_SHIFT               27
+
+/* NAND :: ACC_CONTROL :: reserved0 [26:26] */
+#define BCHP_NAND_ACC_CONTROL_reserved0_MASK                       0x04000000
+#define BCHP_NAND_ACC_CONTROL_reserved0_SHIFT                      26
+
+/* NAND :: ACC_CONTROL :: WR_PREEMPT_EN [25:25] */
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK                   0x02000000
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_SHIFT                  25
+
+/* NAND :: ACC_CONTROL :: PAGE_HIT_EN [24:24] */
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_MASK                     0x01000000
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_SHIFT                    24
+
+/* NAND :: ACC_CONTROL :: reserved1 [23:00] */
+#define BCHP_NAND_ACC_CONTROL_reserved1_MASK                       0x00ffffff
+#define BCHP_NAND_ACC_CONTROL_reserved1_SHIFT                      0
+
+/***************************************************************************
+ *CONFIG - Nand Flash Config
+ ***************************************************************************/
+/* NAND :: CONFIG :: CONFIG_LOCK [31:31] */
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_MASK                          0x80000000
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_SHIFT                         31
+
+/* NAND :: CONFIG :: BLOCK_SIZE [30:28] */
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_2_2
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_MASK                           0x70000000
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT                          28
+#elif CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_2_1
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_MASK                           0x30000000
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT                          28
+#endif
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB                  3
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB                  1
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB                   0
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB                    2
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB                  4
+
+/* NAND :: CONFIG :: DEVICE_SIZE [27:24] */
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_MASK                          0x0f000000
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT                         24
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4MB                  0
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8MB                  1
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16MB                 2
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32MB                 3
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64MB                 4
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128MB                5
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_256MB                6
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_512MB                7
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_1GB                  8
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_2GB                  9
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4GB                  10
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8GB                  11
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16GB                 12
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32GB                 13
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64GB                 14
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128GB                15
+
+/* NAND :: CONFIG :: DEVICE_WIDTH [23:23] */
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_MASK                         0x00800000
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_SHIFT                        23
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_8                  0
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_16                 1
+
+/* NAND :: CONFIG :: reserved0 [22:22] */
+#define BCHP_NAND_CONFIG_reserved0_MASK                            0x00400000
+#define BCHP_NAND_CONFIG_reserved0_SHIFT                           22
+
+/* NAND :: CONFIG :: PAGE_SIZE [21:20] */
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_2_2
+#define BCHP_NAND_CONFIG_PAGE_SIZE_MASK                            0x00300000
+#define BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT                           20
+#elif CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_2_1
+#define BCHP_NAND_CONFIG_PAGE_SIZE_MASK                            0x40000000
+#define BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT                           30
+#endif
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512                     0
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB                     1
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB                     2
+
+/* NAND :: CONFIG :: reserved1 [19:19] */
+#define BCHP_NAND_CONFIG_reserved1_MASK                            0x00080000
+#define BCHP_NAND_CONFIG_reserved1_SHIFT                           19
+
+/* NAND :: CONFIG :: FUL_ADR_BYTES [18:16] */
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_MASK                        0x00070000
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_SHIFT                       16
+
+/* NAND :: CONFIG :: reserved2 [15:15] */
+#define BCHP_NAND_CONFIG_reserved2_MASK                            0x00008000
+#define BCHP_NAND_CONFIG_reserved2_SHIFT                           15
+
+/* NAND :: CONFIG :: COL_ADR_BYTES [14:12] */
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_MASK                        0x00007000
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_SHIFT                       12
+
+/* NAND :: CONFIG :: reserved3 [11:11] */
+#define BCHP_NAND_CONFIG_reserved3_MASK                            0x00000800
+#define BCHP_NAND_CONFIG_reserved3_SHIFT                           11
+
+/* NAND :: CONFIG :: BLK_ADR_BYTES [10:08] */
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_MASK                        0x00000700
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_SHIFT                       8
+
+/* NAND :: CONFIG :: reserved4 [07:00] */
+#define BCHP_NAND_CONFIG_reserved4_MASK                            0x000000ff
+#define BCHP_NAND_CONFIG_reserved4_SHIFT                           0
+
+/***************************************************************************
+ *TIMING_1 - Nand Flash Timing Parameters 1
+ ***************************************************************************/
+/* NAND :: TIMING_1 :: tWP [31:28] */
+#define BCHP_NAND_TIMING_1_tWP_MASK                                0xf0000000
+#define BCHP_NAND_TIMING_1_tWP_SHIFT                               28
+
+/* NAND :: TIMING_1 :: tWH [27:24] */
+#define BCHP_NAND_TIMING_1_tWH_MASK                                0x0f000000
+#define BCHP_NAND_TIMING_1_tWH_SHIFT                               24
+
+/* NAND :: TIMING_1 :: tRP [23:20] */
+#define BCHP_NAND_TIMING_1_tRP_MASK                                0x00f00000
+#define BCHP_NAND_TIMING_1_tRP_SHIFT                               20
+
+/* NAND :: TIMING_1 :: tREH [19:16] */
+#define BCHP_NAND_TIMING_1_tREH_MASK                               0x000f0000
+#define BCHP_NAND_TIMING_1_tREH_SHIFT                              16
+
+/* NAND :: TIMING_1 :: tCS [15:12] */
+#define BCHP_NAND_TIMING_1_tCS_MASK                                0x0000f000
+#define BCHP_NAND_TIMING_1_tCS_SHIFT                               12
+
+/* NAND :: TIMING_1 :: tCLH [11:08] */
+#define BCHP_NAND_TIMING_1_tCLH_MASK                               0x00000f00
+#define BCHP_NAND_TIMING_1_tCLH_SHIFT                              8
+
+/* NAND :: TIMING_1 :: tALH [07:04] */
+#define BCHP_NAND_TIMING_1_tALH_MASK                               0x000000f0
+#define BCHP_NAND_TIMING_1_tALH_SHIFT                              4
+
+/* NAND :: TIMING_1 :: tADL [03:00] */
+#define BCHP_NAND_TIMING_1_tADL_MASK                               0x0000000f
+#define BCHP_NAND_TIMING_1_tADL_SHIFT                              0
+
+/***************************************************************************
+ *TIMING_2 - Nand Flash Timing Parameters 2
+ ***************************************************************************/
+/* NAND :: TIMING_2 :: reserved0 [31:12] */
+#define BCHP_NAND_TIMING_2_reserved0_MASK                          0xfffff000
+#define BCHP_NAND_TIMING_2_reserved0_SHIFT                         12
+
+/* NAND :: TIMING_2 :: tWB [11:08] */
+#define BCHP_NAND_TIMING_2_tWB_MASK                                0x00000f00
+#define BCHP_NAND_TIMING_2_tWB_SHIFT                               8
+
+/* NAND :: TIMING_2 :: tWHR [07:04] */
+#define BCHP_NAND_TIMING_2_tWHR_MASK                               0x000000f0
+#define BCHP_NAND_TIMING_2_tWHR_SHIFT                              4
+
+/* NAND :: TIMING_2 :: tREAD [03:00] */
+#define BCHP_NAND_TIMING_2_tREAD_MASK                              0x0000000f
+#define BCHP_NAND_TIMING_2_tREAD_SHIFT                             0
+
+/***************************************************************************
+ *SEMAPHORE - Semaphore
+ ***************************************************************************/
+/* NAND :: SEMAPHORE :: reserved0 [31:08] */
+#define BCHP_NAND_SEMAPHORE_reserved0_MASK                         0xffffff00
+#define BCHP_NAND_SEMAPHORE_reserved0_SHIFT                        8
+
+/* NAND :: SEMAPHORE :: semaphore_ctrl [07:00] */
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_MASK                    0x000000ff
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_SHIFT                   0
+
+/***************************************************************************
+ *FLASH_DEVICE_ID - Nand Flash Device ID
+ ***************************************************************************/
+/* NAND :: FLASH_DEVICE_ID :: BYTE_0 [31:24] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_MASK                      0xff000000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_SHIFT                     24
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_1 [23:16] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_MASK                      0x00ff0000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_SHIFT                     16
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_2 [15:08] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_MASK                      0x0000ff00
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_SHIFT                     8
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_3 [07:00] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_MASK                      0x000000ff
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_SHIFT                     0
+
+/***************************************************************************
+ *BLOCK_LOCK_STATUS - Nand Flash Block Lock Status
+ ***************************************************************************/
+/* NAND :: BLOCK_LOCK_STATUS :: reserved0 [31:08] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_MASK                 0xffffff00
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_SHIFT                8
+
+/* NAND :: BLOCK_LOCK_STATUS :: STATUS [07:00] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_MASK                    0x000000ff
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_SHIFT                   0
+
+/***************************************************************************
+ *INTFC_STATUS - Nand Flash Interface Status
+ ***************************************************************************/
+/* NAND :: INTFC_STATUS :: CTLR_READY [31:31] */
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK                     0x80000000
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_SHIFT                    31
+
+/* NAND :: INTFC_STATUS :: FLASH_READY [30:30] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_MASK                    0x40000000
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_SHIFT                   30
+
+/* NAND :: INTFC_STATUS :: CACHE_VALID [29:29] */
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK                    0x20000000
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_SHIFT                   29
+
+/* NAND :: INTFC_STATUS :: SPARE_AREA_VALID [28:28] */
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK               0x10000000
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_SHIFT              28
+
+/* NAND :: INTFC_STATUS :: reserved0 [27:08] */
+#define BCHP_NAND_INTFC_STATUS_reserved0_MASK                      0x0fffff00
+#define BCHP_NAND_INTFC_STATUS_reserved0_SHIFT                     8
+
+/* NAND :: INTFC_STATUS :: FLASH_STATUS [07:00] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_MASK                   0x000000ff
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_SHIFT                  0
+
+/***************************************************************************
+ *ECC_CORR_EXT_ADDR - ECC Correctable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: ECC_CORR_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: ECC_CORR_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *ECC_CORR_ADDR - ECC Correctable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *ECC_UNC_EXT_ADDR - ECC Uncorrectable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_MASK                  0xfff80000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_SHIFT                 19
+
+/* NAND :: ECC_UNC_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_MASK                     0x00070000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_SHIFT                    16
+
+/* NAND :: ECC_UNC_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_MASK                0x0000ffff
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_SHIFT               0
+
+/***************************************************************************
+ *ECC_UNC_ADDR - ECC Uncorrectable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_MASK                        0xffffffff
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_SHIFT                       0
+
+/***************************************************************************
+ *FLASH_READ_EXT_ADDR - Flash Read Data Extended Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_MASK               0xfff80000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_SHIFT              19
+
+/* NAND :: FLASH_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_MASK                  0x00070000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_SHIFT                 16
+
+/* NAND :: FLASH_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_MASK             0x0000ffff
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_SHIFT            0
+
+/***************************************************************************
+ *FLASH_READ_ADDR - Flash Read Data Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *PROGRAM_PAGE_EXT_ADDR - Page Program Extended Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_MASK             0xfff80000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_SHIFT            19
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_MASK                0x00070000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_SHIFT               16
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_MASK           0x0000ffff
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_SHIFT          0
+
+/***************************************************************************
+ *PROGRAM_PAGE_ADDR - Page Program Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_MASK                   0xffffffff
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_SHIFT                  0
+
+/***************************************************************************
+ *COPY_BACK_EXT_ADDR - Copy Back Extended Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_MASK                0xfff80000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_SHIFT               19
+
+/* NAND :: COPY_BACK_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_MASK                   0x00070000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_SHIFT                  16
+
+/* NAND :: COPY_BACK_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_MASK              0x0000ffff
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_SHIFT             0
+
+/***************************************************************************
+ *COPY_BACK_ADDR - Copy Back Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_MASK                      0xffffffff
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_SHIFT                     0
+
+/***************************************************************************
+ *BLOCK_ERASE_EXT_ADDR - Block Erase Extended Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_MASK              0xfff80000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_SHIFT             19
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_MASK                 0x00070000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_SHIFT                16
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_MASK            0x0000ffff
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_SHIFT           0
+
+/***************************************************************************
+ *BLOCK_ERASE_ADDR - Block Erase Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_MASK                    0xffffffff
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_SHIFT                   0
+
+/***************************************************************************
+ *INV_READ_EXT_ADDR - Flash Invalid Data Extended Address
+ ***************************************************************************/
+/* NAND :: INV_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: INV_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: INV_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *INV_READ_ADDR - Flash Invalid Data Address
+ ***************************************************************************/
+/* NAND :: INV_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *BLK_WR_PROTECT - Block Write Protect Enable and Size for EBI_CS0b
+ ***************************************************************************/
+/* NAND :: BLK_WR_PROTECT :: BLK_END_ADDR [31:00] */
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_MASK                 0xffffffff
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_SHIFT                0
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_BASE                          BRCMNAND_CACHE_BASE
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_START                         0
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_END                           127
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_ELEMENT_SIZE                  32
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+/* NAND :: FLASH_CACHEi :: WORD [31:00] */
+#define BCHP_NAND_FLASH_CACHEi_WORD_MASK                           0xffffffff
+#define BCHP_NAND_FLASH_CACHEi_WORD_SHIFT                          0
+
+/***************************************************************************
+ *Definitions not supported by this version of the NAND controller, but
+ *defined so that shared driver code still compiles
+ ***************************************************************************/
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT           (BCHP_NAND_REG_END + 4)
+
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_MASK             0
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_SHIFT            0
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK                 0
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_SHIFT                0
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK                     0
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT                    0
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK                       0
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT                      0
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_MASK               0
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_SHIFT              0
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK                 0
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT                0
+
+#endif /* #ifndef BCHP_NAND_21_22_H__ */
+
+/* End of File */
diff --git a/include/linux/mtd/bchp_nand_40.h b/include/linux/mtd/bchp_nand_40.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaf78087a710c22d931d06be4493dbbc6ff5c700
--- /dev/null
+++ b/include/linux/mtd/bchp_nand_40.h
@@ -0,0 +1,1304 @@
+/***************************************************************************
+ *     Copyright (c) 1999-2008, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Module Description: NAND controller version 4.0 definitions.
+ *
+ ***************************************************************************/
+
+#ifndef BCHP_NAND_40_H__
+#define BCHP_NAND_40_H__
+
+#include <bcm_map_part.h>
+
+#define BRCMNAND_CTL_BASE                       (NAND_REG_BASE & 0x0fffffff)
+#define BRCMNAND_CACHE_BASE                     (NAND_CACHE_BASE & 0x0fffffff)
+#define BRCMNAND_FLD_ADDR(FLD)                  \
+    (BRCMNAND_CTL_BASE + (offsetof(NandCtrlRegs,FLD)))
+
+#define BCHP_NAND_REG_START                     BRCMNAND_CTL_BASE
+#define BCHP_NAND_REG_END                       (BCHP_NAND_REG_START + \
+                                                 sizeof(NandCtrlRegs))
+
+/***************************************************************************
+ *NAND - Nand Flash Control Registers
+ ***************************************************************************/
+#define BCHP_NAND_REVISION                      BRCMNAND_FLD_ADDR(NandRevision) /* NAND Revision */
+#define BCHP_NAND_CMD_START                     BRCMNAND_FLD_ADDR(NandCmdStart) /* Nand Flash Command Start */
+#define BCHP_NAND_CMD_EXT_ADDRESS               BRCMNAND_FLD_ADDR(NandCmdExtAddr) /* Nand Flash Command Extended Address */
+#define BCHP_NAND_CMD_ADDRESS                   BRCMNAND_FLD_ADDR(NandCmdAddr) /* Nand Flash Command Address */
+#define BCHP_NAND_CMD_END_ADDRESS               BRCMNAND_FLD_ADDR(NandCmdEndAddr) /* Nand Flash Command End Address */
+#define BCHP_NAND_CS_NAND_SELECT                BRCMNAND_FLD_ADDR(NandNandBootConfig) /* Nand Flash EBI CS Select */
+#define BCHP_NAND_CS_NAND_XOR                   BRCMNAND_FLD_ADDR(NandCsNandXor) /* Nand Flash EBI CS Address XOR with 1FC0 Control */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs0) /* Nand Flash Spare Area Read Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs4) /* Nand Flash Spare Area Read Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs8) /* Nand Flash Spare Area Read Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfsC) /* Nand Flash Spare Area Read Bytes 12-15 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs0) /* Nand Flash Spare Area Write Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs4) /* Nand Flash Spare Area Write Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs8) /* Nand Flash Spare Area Write Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfsC) /* Nand Flash Spare Area Write Bytes 12-15 */
+#define BCHP_NAND_ACC_CONTROL                   BRCMNAND_FLD_ADDR(NandAccControl) /* Nand Flash Access Control */
+#define BCHP_NAND_CONFIG                        BRCMNAND_FLD_ADDR(NandConfig) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1                      BRCMNAND_FLD_ADDR(NandTiming1) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2                      BRCMNAND_FLD_ADDR(NandTiming2) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_SEMAPHORE                     BRCMNAND_FLD_ADDR(NandSemaphore) /* Semaphore */
+#define BCHP_NAND_FLASH_DEVICE_ID               BRCMNAND_FLD_ADDR(NandFlashDeviceId) /* Nand Flash Device ID */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT           BRCMNAND_FLD_ADDR(NandFlashDeviceIdExt) /* Nand Flash Extended Device ID */
+#define BCHP_NAND_BLOCK_LOCK_STATUS             BRCMNAND_FLD_ADDR(NandBlockLockStatus) /* Nand Flash Block Lock Status */
+#define BCHP_NAND_INTFC_STATUS                  BRCMNAND_FLD_ADDR(NandIntfcStatus) /* Nand Flash Interface Status */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR             BRCMNAND_FLD_ADDR(NandEccCorrExtAddr) /* ECC Correctable Error Extended Address */
+#define BCHP_NAND_ECC_CORR_ADDR                 BRCMNAND_FLD_ADDR(NandEccCorrAddr) /* ECC Correctable Error Address */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR              BRCMNAND_FLD_ADDR(NandEccUncExtAddr) /* ECC Uncorrectable Error Extended Address */
+#define BCHP_NAND_ECC_UNC_ADDR                  BRCMNAND_FLD_ADDR(NandEccUncAddr) /* ECC Uncorrectable Error Address */
+#define BCHP_NAND_READ_ERROR_COUNT              BRCMNAND_FLD_ADDR(NandReadErrorCount) /* Read Error Count */
+#define BCHP_NAND_CORR_STAT_THRESHOLD           BRCMNAND_FLD_ADDR(NandCorrStatThreshold) /* Correctable Error Reporting Threshold */
+#define BCHP_NAND_ONFI_STATUS                   BRCMNAND_FLD_ADDR(NandOnfiStatus) /* ONFI Status */
+#define BCHP_NAND_ONFI_DEBUG_DATA               BRCMNAND_FLD_ADDR(NandOnfiDebugData) /* ONFI Debug Data */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR           BRCMNAND_FLD_ADDR(NandFlashReadExtAddr) /* Flash Read Data Extended Address */
+#define BCHP_NAND_FLASH_READ_ADDR               BRCMNAND_FLD_ADDR(NandFlashReadAddr) /* Flash Read Data Address */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR         BRCMNAND_FLD_ADDR(NandProgramPageExtAddr) /* Page Program Extended Address */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR             BRCMNAND_FLD_ADDR(NandProgramPageAddr) /* Page Program Address */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR            BRCMNAND_FLD_ADDR(NandCopyBackExtAddr) /* Copy Back Extended Address */
+#define BCHP_NAND_COPY_BACK_ADDR                BRCMNAND_FLD_ADDR(NandCopyBackAddr) /* Copy Back Address */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR          BRCMNAND_FLD_ADDR(NandBlockEraseExtAddr) /* Block Erase Extended Address */
+#define BCHP_NAND_BLOCK_ERASE_ADDR              BRCMNAND_FLD_ADDR(NandBlockEraseAddr) /* Block Erase Address */
+#define BCHP_NAND_INV_READ_EXT_ADDR             BRCMNAND_FLD_ADDR(NandInvReadExtAddr) /* Flash Invalid Data Extended Address */
+#define BCHP_NAND_INV_READ_ADDR                 BRCMNAND_FLD_ADDR(NandInvReadAddr) /* Flash Invalid Data Address */
+#define BCHP_NAND_BLK_WR_PROTECT                BRCMNAND_FLD_ADDR(NandBlkWrProtect) /* Block Write Protect Enable and Size for EBI_CS0b */
+#define BCHP_NAND_ACC_CONTROL_CS1               BRCMNAND_FLD_ADDR(NandAccControlCs1) /* Nand Flash Access Control */
+#define BCHP_NAND_CONFIG_CS1                    BRCMNAND_FLD_ADDR(NandConfigCs1) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1_CS1                  BRCMNAND_FLD_ADDR(NandTiming1Cs1) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2_CS1                  BRCMNAND_FLD_ADDR(NandTiming2Cs1) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_ACC_CONTROL_CS2               BRCMNAND_FLD_ADDR(NandAccControlCs2) /* Nand Flash Access Control */
+#define BCHP_NAND_CONFIG_CS2                    BRCMNAND_FLD_ADDR(NandConfigCs2) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1_CS2                  BRCMNAND_FLD_ADDR(NandTiming1Cs2) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2_CS2                  BRCMNAND_FLD_ADDR(NandTiming2Cs2) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs10) /* Nand Flash Spare Area Read Bytes 16-19 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs14) /* Nand Flash Spare Area Read Bytes 20-23 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs18) /* Nand Flash Spare Area Read Bytes 24-27 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs1C) /* Nand Flash Spare Area Read Bytes 28-31 */
+#define BCHP_NAND_LL_OP                         BRCMNAND_FLD_ADDR(NandLlOpNand) /* Nand Flash Low Level Operation */
+#define BCHP_NAND_LL_RDDATA                     BRCMNAND_FLD_ADDR(NandLlRdData) /* Nand Flash Low Level Read Data */
+
+/***************************************************************************
+ *REVISION - NAND Revision
+ ***************************************************************************/
+/* NAND :: REVISION :: 8KB_PAGE_SUPPORT [31:31] */
+#define BCHP_NAND_REVISION_8KB_PAGE_SUPPORT_MASK                   0x80000000
+#define BCHP_NAND_REVISION_8KB_PAGE_SUPPORT_SHIFT                  31
+
+/* NAND :: REVISION :: reserved0 [30:16] */
+#define BCHP_NAND_REVISION_reserved0_MASK                          0x7fff0000
+#define BCHP_NAND_REVISION_reserved0_SHIFT                         16
+
+/* NAND :: REVISION :: MAJOR [15:08] */
+#define BCHP_NAND_REVISION_MAJOR_MASK                              0x0000ff00
+#define BCHP_NAND_REVISION_MAJOR_SHIFT                             8
+
+/* NAND :: REVISION :: MINOR [07:00] */
+#define BCHP_NAND_REVISION_MINOR_MASK                              0x000000ff
+#define BCHP_NAND_REVISION_MINOR_SHIFT                             0
+
+/***************************************************************************
+ *CMD_START - Nand Flash Command Start
+ ***************************************************************************/
+/* NAND :: CMD_START :: reserved0 [31:29] */
+#define BCHP_NAND_CMD_START_reserved0_MASK                         0xe0000000
+#define BCHP_NAND_CMD_START_reserved0_SHIFT                        28
+
+/* NAND :: CMD_START :: OPCODE [28:24] */
+#define BCHP_NAND_CMD_START_OPCODE_MASK                            0x1f000000
+#define BCHP_NAND_CMD_START_OPCODE_SHIFT                           24
+#define BCHP_NAND_CMD_START_OPCODE_NULL                            0
+#define BCHP_NAND_CMD_START_OPCODE_PAGE_READ                       1
+#define BCHP_NAND_CMD_START_OPCODE_SPARE_AREA_READ                 2
+#define BCHP_NAND_CMD_START_OPCODE_STATUS_READ                     3
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_PAGE                    4
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_SPARE_AREA              5
+#define BCHP_NAND_CMD_START_OPCODE_COPY_BACK                       6
+#define BCHP_NAND_CMD_START_OPCODE_DEVICE_ID_READ                  7
+#define BCHP_NAND_CMD_START_OPCODE_BLOCK_ERASE                     8
+#define BCHP_NAND_CMD_START_OPCODE_FLASH_RESET                     9
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK                     10
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK_DOWN                11
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_UNLOCK                   12
+#define BCHP_NAND_CMD_START_OPCODE_READ_BLOCKS_LOCK_STATUS         13
+#define BCHP_NAND_CMD_START_OPCODE_PARAMETER_READ                  14
+#define BCHP_NAND_CMD_START_OPCODE_PARAMETER_CHANGE_COL            15
+#define BCHP_NAND_CMD_START_OPCODE_LOW_LEVEL_OP                    16
+
+/* NAND :: CMD_START :: reserved1 [23:00] */
+#define BCHP_NAND_CMD_START_reserved1_MASK                         0x00ffffff
+#define BCHP_NAND_CMD_START_reserved1_SHIFT                        0
+
+/***************************************************************************
+ *CMD_EXT_ADDRESS - Nand Flash Command Extended Address
+ ***************************************************************************/
+/* NAND :: CMD_EXT_ADDRESS :: reserved0 [31:19] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_MASK                   0xfff80000
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_SHIFT                  19
+
+/* NAND :: CMD_EXT_ADDRESS :: CS_SEL [18:16] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_MASK                      0x00070000
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT                     16
+
+/* NAND :: CMD_EXT_ADDRESS :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_MASK                 0x0000ffff
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_SHIFT                0
+
+/***************************************************************************
+ *CMD_ADDRESS - Nand Flash Command Address
+ ***************************************************************************/
+/* NAND :: CMD_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_MASK                         0xffffffff
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_SHIFT                        0
+
+/***************************************************************************
+ *CMD_END_ADDRESS - Nand Flash Command End Address
+ ***************************************************************************/
+/* NAND :: CMD_END_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *CS_NAND_SELECT - Nand Flash EBI CS Select
+ ***************************************************************************/
+/* NAND :: CS_NAND_SELECT :: CS_LOCK [31:31] */
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_MASK                      0x80000000
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_SHIFT                     31
+
+/* NAND :: CS_NAND_SELECT :: AUTO_DEVICE_ID_CONFIG [30:30] */
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK        0x40000000
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_SHIFT       30
+
+/* NAND :: CS_NAND_SELECT :: reserved0 [29:29] */
+#define BCHP_NAND_CS_NAND_SELECT_reserved0_MASK                    0x20000000
+#define BCHP_NAND_CS_NAND_SELECT_reserved0_SHIFT                   29
+
+/* NAND :: CS_NAND_SELECT :: WR_PROTECT_BLK0 [28:28] */
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_MASK              0x10000000
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_SHIFT             28
+
+/* NAND :: CS_NAND_SELECT :: reserved1 [27:16] */
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_MASK                    0x0fff0000
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT                   16
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_USES_NAND [15:15] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_MASK           0x00008000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_SHIFT          15
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_USES_NAND [14:14] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_MASK           0x00004000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_SHIFT          14
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_USES_NAND [13:13] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_MASK           0x00002000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_SHIFT          13
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_USES_NAND [12:12] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_MASK           0x00001000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_SHIFT          12
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_USES_NAND [11:11] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_MASK           0x00000800
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_SHIFT          11
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_USES_NAND [10:10] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_MASK           0x00000400
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_SHIFT          10
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_USES_NAND [09:09] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_MASK           0x00000200
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_SHIFT          9
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_USES_NAND [08:08] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_MASK           0x00000100
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT          8
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_SEL [07:07] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_MASK                 0x00000080
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_SHIFT                7
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_SEL [06:06] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_MASK                 0x00000040
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_SHIFT                6
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_SEL [05:05] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_MASK                 0x00000020
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_SHIFT                5
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_SEL [04:04] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_MASK                 0x00000010
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_SHIFT                4
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_SEL [03:03] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK                 0x00000008
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_SHIFT                3
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_SEL [02:02] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_MASK                 0x00000004
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_SHIFT                2
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_SEL [01:01] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_MASK                 0x00000002
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_SHIFT                1
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_SEL [00:00] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK                 0x00000001
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT                0
+
+/***************************************************************************
+ *CS_NAND_XOR - Nand Flash EBI CS Address XOR with 1FC0 Control
+ ***************************************************************************/
+/* NAND :: CS_NAND_XOR :: ONLY_BLOCK_0_1FC0_XOR [31:31] */
+#define BCHP_NAND_CS_NAND_XOR_ONLY_BLOCK_0_1FC0_XOR_MASK           0x80000000
+#define BCHP_NAND_CS_NAND_XOR_ONLY_BLOCK_0_1FC0_XOR_SHIFT          31
+
+/* NAND :: CS_NAND_XOR :: reserved0 [30:08] */
+#define BCHP_NAND_CS_NAND_XOR_reserved0_MASK                       0x7fffff00
+#define BCHP_NAND_CS_NAND_XOR_reserved0_SHIFT                      8
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_7_ADDR_1FC0_XOR [07:07] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_MASK          0x00000080
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_SHIFT         7
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_6_ADDR_1FC0_XOR [06:06] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_MASK          0x00000040
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_SHIFT         6
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_5_ADDR_1FC0_XOR [05:05] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_MASK          0x00000020
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_SHIFT         5
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_4_ADDR_1FC0_XOR [04:04] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_MASK          0x00000010
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_SHIFT         4
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_3_ADDR_1FC0_XOR [03:03] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_MASK          0x00000008
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_SHIFT         3
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_2_ADDR_1FC0_XOR [02:02] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_MASK          0x00000004
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_SHIFT         2
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_1_ADDR_1FC0_XOR [01:01] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_MASK          0x00000002
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_SHIFT         1
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_0_ADDR_1FC0_XOR [00:00] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK          0x00000001
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_0 - Nand Flash Spare Area Read Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_4 - Nand Flash Spare Area Read Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_8 - Nand Flash Spare Area Read Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_C - Nand Flash Spare Area Read Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_SHIFT          24
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_SHIFT          16
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_0 - Nand Flash Spare Area Write Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_4 - Nand Flash Spare Area Write Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_8 - Nand Flash Spare Area Write Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_C - Nand Flash Spare Area Write Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_SHIFT         24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_SHIFT         16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_SHIFT         0
+
+/***************************************************************************
+ *ACC_CONTROL - Nand Flash Access Control
+ ***************************************************************************/
+/* NAND :: ACC_CONTROL :: RD_ECC_EN [31:31] */
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK                       0x80000000
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_SHIFT                      31
+
+/* NAND :: ACC_CONTROL :: WR_ECC_EN [30:30] */
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_MASK                       0x40000000
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_SHIFT                      30
+
+/* NAND :: ACC_CONTROL :: RD_ECC_BLK0_EN [29:29] */
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK                  0x20000000
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_SHIFT                 29
+
+/* NAND :: ACC_CONTROL :: FAST_PGM_RDIN [28:28] */
+#define BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_MASK                   0x10000000
+#define BCHP_NAND_ACC_CONTROL_FAST_PGM_RDIN_SHIFT                  28
+
+/* NAND :: ACC_CONTROL :: RD_ERASED_ECC_EN [27:27] */
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_MASK                0x08000000
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_SHIFT               27
+
+/* NAND :: ACC_CONTROL :: PARTIAL_PAGE_EN [26:26] */
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK                 0x04000000
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_SHIFT                26
+
+/* NAND :: ACC_CONTROL :: WR_PREEMPT_EN [25:25] */
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK                   0x02000000
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_SHIFT                  25
+
+/* NAND :: ACC_CONTROL :: PAGE_HIT_EN [24:24] */
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_MASK                     0x01000000
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_SHIFT                    24
+
+/* NAND :: ACC_CONTROL :: ECC_LEVEL_0 [23:20] */
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_MASK                     0x00f00000
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_0_SHIFT                    20
+
+/* NAND :: ACC_CONTROL :: ECC_LEVEL [19:16] */
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK                       0x000f0000
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT                      16
+
+/* NAND :: ACC_CONTROL :: reserved0 [15:14] */
+#define BCHP_NAND_ACC_CONTROL_reserved0_MASK                       0x0000c000
+#define BCHP_NAND_ACC_CONTROL_reserved0_SHIFT                      14
+
+/* NAND :: ACC_CONTROL :: SPARE_AREA_SIZE_0 [13:08] */
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_MASK               0x00003f00
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_0_SHIFT              8
+
+/* NAND :: ACC_CONTROL :: reserved1 [07:06] */
+#define BCHP_NAND_ACC_CONTROL_reserved1_MASK                       0x000000c0
+#define BCHP_NAND_ACC_CONTROL_reserved1_SHIFT                      6
+
+/* NAND :: ACC_CONTROL :: SPARE_AREA_SIZE [05:00] */
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK                 0x0000003f
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT                0
+
+/***************************************************************************
+ *CONFIG - Nand Flash Config
+ ***************************************************************************/
+/* NAND :: CONFIG :: CONFIG_LOCK [31:31] */
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_MASK                          0x80000000
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_SHIFT                         31
+
+/* NAND :: CONFIG :: BLOCK_SIZE [30:28] */
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_MASK                           0x70000000
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT                          28
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_2048KB                 6
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB                 5
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB                  3
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB                  1
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB                   0
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB                    2
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB                  4
+
+/* NAND :: CONFIG :: DEVICE_SIZE [27:24] */
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_MASK                          0x0f000000
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT                         24
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4MB                  0
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8MB                  1
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16MB                 2
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32MB                 3
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64MB                 4
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128MB                5
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_256MB                6
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_512MB                7
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_1GB                  8
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_2GB                  9
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4GB                  10
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8GB                  11
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16GB                 12
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32GB                 13
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64GB                 14
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128GB                15
+
+/* NAND :: CONFIG :: DEVICE_WIDTH [23:23] */
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_MASK                         0x00800000
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_SHIFT                        23
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_8                  0
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_16                 1
+
+/* NAND :: CONFIG :: reserved0 [22:22] */
+#define BCHP_NAND_CONFIG_reserved0_MASK                            0x00400000
+#define BCHP_NAND_CONFIG_reserved0_SHIFT                           22
+
+/* NAND :: CONFIG :: PAGE_SIZE [21:20] */
+#define BCHP_NAND_CONFIG_PAGE_SIZE_MASK                            0x00300000
+#define BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT                           20
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512                     0
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB                     1
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB                     2
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB                     3
+
+/* NAND :: CONFIG :: reserved1 [19:19] */
+#define BCHP_NAND_CONFIG_reserved1_MASK                            0x00080000
+#define BCHP_NAND_CONFIG_reserved1_SHIFT                           19
+
+/* NAND :: CONFIG :: FUL_ADR_BYTES [18:16] */
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_MASK                        0x00070000
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_SHIFT                       16
+
+/* NAND :: CONFIG :: reserved2 [15:15] */
+#define BCHP_NAND_CONFIG_reserved2_MASK                            0x00008000
+#define BCHP_NAND_CONFIG_reserved2_SHIFT                           15
+
+/* NAND :: CONFIG :: COL_ADR_BYTES [14:12] */
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_MASK                        0x00007000
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_SHIFT                       12
+
+/* NAND :: CONFIG :: reserved3 [11:11] */
+#define BCHP_NAND_CONFIG_reserved3_MASK                            0x00000800
+#define BCHP_NAND_CONFIG_reserved3_SHIFT                           11
+
+/* NAND :: CONFIG :: BLK_ADR_BYTES [10:08] */
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_MASK                        0x00000700
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_SHIFT                       8
+
+/* NAND :: CONFIG :: reserved4 [07:00] */
+#define BCHP_NAND_CONFIG_reserved4_MASK                            0x000000ff
+#define BCHP_NAND_CONFIG_reserved4_SHIFT                           0
+
+/***************************************************************************
+ *TIMING_1 - Nand Flash Timing Parameters 1
+ ***************************************************************************/
+/* NAND :: TIMING_1 :: tWP [31:28] */
+#define BCHP_NAND_TIMING_1_tWP_MASK                                0xf0000000
+#define BCHP_NAND_TIMING_1_tWP_SHIFT                               28
+
+/* NAND :: TIMING_1 :: tWH [27:24] */
+#define BCHP_NAND_TIMING_1_tWH_MASK                                0x0f000000
+#define BCHP_NAND_TIMING_1_tWH_SHIFT                               24
+
+/* NAND :: TIMING_1 :: tRP [23:20] */
+#define BCHP_NAND_TIMING_1_tRP_MASK                                0x00f00000
+#define BCHP_NAND_TIMING_1_tRP_SHIFT                               20
+
+/* NAND :: TIMING_1 :: tREH [19:16] */
+#define BCHP_NAND_TIMING_1_tREH_MASK                               0x000f0000
+#define BCHP_NAND_TIMING_1_tREH_SHIFT                              16
+
+/* NAND :: TIMING_1 :: tCS [15:12] */
+#define BCHP_NAND_TIMING_1_tCS_MASK                                0x0000f000
+#define BCHP_NAND_TIMING_1_tCS_SHIFT                               12
+
+/* NAND :: TIMING_1 :: tCLH [11:08] */
+#define BCHP_NAND_TIMING_1_tCLH_MASK                               0x00000f00
+#define BCHP_NAND_TIMING_1_tCLH_SHIFT                              8
+
+/* NAND :: TIMING_1 :: tALH [07:04] */
+#define BCHP_NAND_TIMING_1_tALH_MASK                               0x000000f0
+#define BCHP_NAND_TIMING_1_tALH_SHIFT                              4
+
+/* NAND :: TIMING_1 :: tADL [03:00] */
+#define BCHP_NAND_TIMING_1_tADL_MASK                               0x0000000f
+#define BCHP_NAND_TIMING_1_tADL_SHIFT                              0
+
+/***************************************************************************
+ *TIMING_2 - Nand Flash Timing Parameters 2
+ ***************************************************************************/
+/* NAND :: TIMING_2 :: CLK_SELECT [31:31] */
+#define BCHP_NAND_TIMING_2_CLK_SELECT_MASK                         0x80000000
+#define BCHP_NAND_TIMING_2_CLK_SELECT_SHIFT                        31
+#define BCHP_NAND_TIMING_2_CLK_SELECT_CLK_108                      0
+#define BCHP_NAND_TIMING_2_CLK_SELECT_CLK_216                      1
+
+/* NAND :: TIMING_2 :: reserved0 [30:13] */
+#define BCHP_NAND_TIMING_2_reserved0_MASK                          0x7fffe000
+#define BCHP_NAND_TIMING_2_reserved0_SHIFT                         13
+
+/* NAND :: TIMING_2 :: tWB [12:09] */
+#define BCHP_NAND_TIMING_2_tWB_MASK                                0x00001e00
+#define BCHP_NAND_TIMING_2_tWB_SHIFT                               9
+
+/* NAND :: TIMING_2 :: tWHR [08:04] */
+#define BCHP_NAND_TIMING_2_tWHR_MASK                               0x000001f0
+#define BCHP_NAND_TIMING_2_tWHR_SHIFT                              4
+
+/* NAND :: TIMING_2 :: tREAD [03:00] */
+#define BCHP_NAND_TIMING_2_tREAD_MASK                              0x0000000f
+#define BCHP_NAND_TIMING_2_tREAD_SHIFT                             0
+
+/***************************************************************************
+ *SEMAPHORE - Semaphore
+ ***************************************************************************/
+/* NAND :: SEMAPHORE :: reserved0 [31:08] */
+#define BCHP_NAND_SEMAPHORE_reserved0_MASK                         0xffffff00
+#define BCHP_NAND_SEMAPHORE_reserved0_SHIFT                        8
+
+/* NAND :: SEMAPHORE :: semaphore_ctrl [07:00] */
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_MASK                    0x000000ff
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_SHIFT                   0
+
+/***************************************************************************
+ *FLASH_DEVICE_ID - Nand Flash Device ID
+ ***************************************************************************/
+/* NAND :: FLASH_DEVICE_ID :: BYTE_0 [31:24] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_MASK                      0xff000000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_SHIFT                     24
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_1 [23:16] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_MASK                      0x00ff0000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_SHIFT                     16
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_2 [15:08] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_MASK                      0x0000ff00
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_SHIFT                     8
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_3 [07:00] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_MASK                      0x000000ff
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_SHIFT                     0
+
+/***************************************************************************
+ *FLASH_DEVICE_ID_EXT - Nand Flash Extended Device ID
+ ***************************************************************************/
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_4 [31:24] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_4_MASK                  0xff000000
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_4_SHIFT                 24
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_5 [23:16] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_5_MASK                  0x00ff0000
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_5_SHIFT                 16
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_6 [15:08] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_6_MASK                  0x0000ff00
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_6_SHIFT                 8
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_7 [07:00] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_7_MASK                  0x000000ff
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_7_SHIFT                 0
+
+/***************************************************************************
+ *BLOCK_LOCK_STATUS - Nand Flash Block Lock Status
+ ***************************************************************************/
+/* NAND :: BLOCK_LOCK_STATUS :: reserved0 [31:08] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_MASK                 0xffffff00
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_SHIFT                8
+
+/* NAND :: BLOCK_LOCK_STATUS :: STATUS [07:00] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_MASK                    0x000000ff
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_SHIFT                   0
+
+/***************************************************************************
+ *INTFC_STATUS - Nand Flash Interface Status
+ ***************************************************************************/
+/* NAND :: INTFC_STATUS :: CTLR_READY [31:31] */
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK                     0x80000000
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_SHIFT                    31
+
+/* NAND :: INTFC_STATUS :: FLASH_READY [30:30] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_MASK                    0x40000000
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_SHIFT                   30
+
+/* NAND :: INTFC_STATUS :: CACHE_VALID [29:29] */
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK                    0x20000000
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_SHIFT                   29
+
+/* NAND :: INTFC_STATUS :: SPARE_AREA_VALID [28:28] */
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK               0x10000000
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_SHIFT              28
+
+/* NAND :: INTFC_STATUS :: ERASED [27:27] */
+#define BCHP_NAND_INTFC_STATUS_ERASED_MASK                         0x08000000
+#define BCHP_NAND_INTFC_STATUS_ERASED_SHIFT                        27
+
+/* NAND :: INTFC_STATUS :: reserved0 [26:08] */
+#define BCHP_NAND_INTFC_STATUS_reserved0_MASK                      0x07ffff00
+#define BCHP_NAND_INTFC_STATUS_reserved0_SHIFT                     8
+
+/* NAND :: INTFC_STATUS :: FLASH_STATUS [07:00] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_MASK                   0x000000ff
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_SHIFT                  0
+
+/***************************************************************************
+ *ECC_CORR_EXT_ADDR - ECC Correctable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: ECC_CORR_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: ECC_CORR_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *ECC_CORR_ADDR - ECC Correctable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *ECC_UNC_EXT_ADDR - ECC Uncorrectable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_MASK                  0xfff80000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_SHIFT                 19
+
+/* NAND :: ECC_UNC_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_MASK                     0x00070000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_SHIFT                    16
+
+/* NAND :: ECC_UNC_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_MASK                0x0000ffff
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_SHIFT               0
+
+/***************************************************************************
+ *ECC_UNC_ADDR - ECC Uncorrectable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_MASK                        0xffffffff
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_SHIFT                       0
+
+/***************************************************************************
+ *READ_ERROR_COUNT - Read Error Count
+ ***************************************************************************/
+/* NAND :: READ_ERROR_COUNT :: READ_ERROR_COUNT [31:00] */
+#define BCHP_NAND_READ_ERROR_COUNT_READ_ERROR_COUNT_MASK           0xffffffff
+#define BCHP_NAND_READ_ERROR_COUNT_READ_ERROR_COUNT_SHIFT          0
+
+/***************************************************************************
+ *CORR_STAT_THRESHOLD - Correctable Error Reporting Threshold
+ ***************************************************************************/
+/* NAND :: CORR_STAT_THRESHOLD :: reserved0 [31:04] */
+#define BCHP_NAND_CORR_STAT_THRESHOLD_reserved0_MASK               0xfffffff0
+#define BCHP_NAND_CORR_STAT_THRESHOLD_reserved0_SHIFT              4
+
+/* NAND :: CORR_STAT_THRESHOLD :: CORR_STAT_THRESHOLD [03:00] */
+#define BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK     0x0000000f
+#define BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT    0
+
+/***************************************************************************
+ *ONFI_STATUS - ONFI Status
+ ***************************************************************************/
+/* NAND :: ONFI_STATUS :: ONFI_DEBUG_SEL [31:28] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_DEBUG_SEL_MASK                  0xf0000000
+#define BCHP_NAND_ONFI_STATUS_ONFI_DEBUG_SEL_SHIFT                 28
+
+/* NAND :: ONFI_STATUS :: reserved0 [27:06] */
+#define BCHP_NAND_ONFI_STATUS_reserved0_MASK                       0x0fffffc0
+#define BCHP_NAND_ONFI_STATUS_reserved0_SHIFT                      6
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG2 [05:05] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG2_MASK              0x00000020
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG2_SHIFT             5
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG1 [04:04] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG1_MASK              0x00000010
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG1_SHIFT             4
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG0 [03:03] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG0_MASK              0x00000008
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG0_SHIFT             3
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG2 [02:02] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG2_MASK              0x00000004
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG2_SHIFT             2
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG1 [01:01] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG1_MASK              0x00000002
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG1_SHIFT             1
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG0 [00:00] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG0_MASK              0x00000001
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG0_SHIFT             0
+
+/***************************************************************************
+ *ONFI_DEBUG_DATA - ONFI Debug Data
+ ***************************************************************************/
+/* NAND :: ONFI_DEBUG_DATA :: ONFI_DEBUG_DATA [31:00] */
+#define BCHP_NAND_ONFI_DEBUG_DATA_ONFI_DEBUG_DATA_MASK             0xffffffff
+#define BCHP_NAND_ONFI_DEBUG_DATA_ONFI_DEBUG_DATA_SHIFT            0
+
+/***************************************************************************
+ *FLASH_READ_EXT_ADDR - Flash Read Data Extended Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_MASK               0xfff80000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_SHIFT              19
+
+/* NAND :: FLASH_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_MASK                  0x00070000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_SHIFT                 16
+
+/* NAND :: FLASH_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_MASK             0x0000ffff
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_SHIFT            0
+
+/***************************************************************************
+ *FLASH_READ_ADDR - Flash Read Data Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *PROGRAM_PAGE_EXT_ADDR - Page Program Extended Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_MASK             0xfff80000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_SHIFT            19
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_MASK                0x00070000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_SHIFT               16
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_MASK           0x0000ffff
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_SHIFT          0
+
+/***************************************************************************
+ *PROGRAM_PAGE_ADDR - Page Program Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_MASK                   0xffffffff
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_SHIFT                  0
+
+/***************************************************************************
+ *COPY_BACK_EXT_ADDR - Copy Back Extended Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_MASK                0xfff80000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_SHIFT               19
+
+/* NAND :: COPY_BACK_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_MASK                   0x00070000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_SHIFT                  16
+
+/* NAND :: COPY_BACK_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_MASK              0x0000ffff
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_SHIFT             0
+
+/***************************************************************************
+ *COPY_BACK_ADDR - Copy Back Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_MASK                      0xffffffff
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_SHIFT                     0
+
+/***************************************************************************
+ *BLOCK_ERASE_EXT_ADDR - Block Erase Extended Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_MASK              0xfff80000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_SHIFT             19
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_MASK                 0x00070000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_SHIFT                16
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_MASK            0x0000ffff
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_SHIFT           0
+
+/***************************************************************************
+ *BLOCK_ERASE_ADDR - Block Erase Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_MASK                    0xffffffff
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_SHIFT                   0
+
+/***************************************************************************
+ *INV_READ_EXT_ADDR - Flash Invalid Data Extended Address
+ ***************************************************************************/
+/* NAND :: INV_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: INV_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: INV_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *INV_READ_ADDR - Flash Invalid Data Address
+ ***************************************************************************/
+/* NAND :: INV_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *BLK_WR_PROTECT - Block Write Protect Enable and Size for EBI_CS0b
+ ***************************************************************************/
+/* NAND :: BLK_WR_PROTECT :: BLK_END_ADDR [31:00] */
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_MASK                 0xffffffff
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_SHIFT                0
+
+/***************************************************************************
+ *ACC_CONTROL_CS1 - Nand Flash Access Control
+ ***************************************************************************/
+/* NAND :: ACC_CONTROL_CS1 :: RD_ECC_EN [31:31] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ECC_EN_MASK                   0x80000000
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ECC_EN_SHIFT                  31
+
+/* NAND :: ACC_CONTROL_CS1 :: WR_ECC_EN [30:30] */
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_ECC_EN_MASK                   0x40000000
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_ECC_EN_SHIFT                  30
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved0 [29:29] */
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved0_MASK                   0x20000000
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved0_SHIFT                  29
+
+/* NAND :: ACC_CONTROL_CS1 :: FAST_PGM_RDIN [28:28] */
+#define BCHP_NAND_ACC_CONTROL_CS1_FAST_PGM_RDIN_MASK               0x10000000
+#define BCHP_NAND_ACC_CONTROL_CS1_FAST_PGM_RDIN_SHIFT              28
+
+/* NAND :: ACC_CONTROL_CS1 :: RD_ERASED_ECC_EN [27:27] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ERASED_ECC_EN_MASK            0x08000000
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ERASED_ECC_EN_SHIFT           27
+
+/* NAND :: ACC_CONTROL_CS1 :: PARTIAL_PAGE_EN [26:26] */
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_MASK             0x04000000
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_SHIFT            26
+
+/* NAND :: ACC_CONTROL_CS1 :: WR_PREEMPT_EN [25:25] */
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_PREEMPT_EN_MASK               0x02000000
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_PREEMPT_EN_SHIFT              25
+
+/* NAND :: ACC_CONTROL_CS1 :: PAGE_HIT_EN [24:24] */
+#define BCHP_NAND_ACC_CONTROL_CS1_PAGE_HIT_EN_MASK                 0x01000000
+#define BCHP_NAND_ACC_CONTROL_CS1_PAGE_HIT_EN_SHIFT                24
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved1 [23:20] */
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved1_MASK                   0x00f00000
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved1_SHIFT                  20
+
+/* NAND :: ACC_CONTROL_CS1 :: ECC_LEVEL [19:16] */
+#define BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_MASK                   0x000f0000
+#define BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_SHIFT                  16
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved2 [15:06] */
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved2_MASK                   0x0000ffc0
+#define BCHP_NAND_ACC_CONTROL_CS1_reserved2_SHIFT                  6
+
+/* NAND :: ACC_CONTROL_CS1 :: SPARE_AREA_SIZE [05:00] */
+#define BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_MASK             0x0000003f
+#define BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_SHIFT            0
+
+/***************************************************************************
+ *CONFIG_CS1 - Nand Flash Config
+ ***************************************************************************/
+/* NAND :: CONFIG_CS1 :: CONFIG_LOCK [31:31] */
+#define BCHP_NAND_CONFIG_CS1_CONFIG_LOCK_MASK                      0x80000000
+#define BCHP_NAND_CONFIG_CS1_CONFIG_LOCK_SHIFT                     31
+
+/* NAND :: CONFIG_CS1 :: BLOCK_SIZE [30:28] */
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_MASK                       0x70000000
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_SHIFT                      28
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_2048KB             6
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_1024KB             5
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_512KB              3
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_128KB              1
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_16KB               0
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_8KB                2
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_256KB              4
+
+/* NAND :: CONFIG_CS1 :: DEVICE_SIZE [27:24] */
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_MASK                      0x0f000000
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_SHIFT                     24
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_4MB              0
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_8MB              1
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_16MB             2
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_32MB             3
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_64MB             4
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_128MB            5
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_256MB            6
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_512MB            7
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_1GB              8
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_2GB              9
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_4GB              10
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_8GB              11
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_16GB             12
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_32GB             13
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_64GB             14
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_128GB            15
+
+/* NAND :: CONFIG_CS1 :: DEVICE_WIDTH [23:23] */
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_MASK                     0x00800000
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_SHIFT                    23
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_DVC_WIDTH_8              0
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_DVC_WIDTH_16             1
+
+/* NAND :: CONFIG_CS1 :: reserved0 [22:22] */
+#define BCHP_NAND_CONFIG_CS1_reserved0_MASK                        0x00400000
+#define BCHP_NAND_CONFIG_CS1_reserved0_SHIFT                       22
+
+/* NAND :: CONFIG_CS1 :: PAGE_SIZE [21:20] */
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_MASK                        0x00300000
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_SHIFT                       20
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_512                 0
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_2KB                 1
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_4KB                 2
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_8KB                 3
+
+/* NAND :: CONFIG_CS1 :: reserved1 [19:19] */
+#define BCHP_NAND_CONFIG_CS1_reserved1_MASK                        0x00080000
+#define BCHP_NAND_CONFIG_CS1_reserved1_SHIFT                       19
+
+/* NAND :: CONFIG_CS1 :: FUL_ADR_BYTES [18:16] */
+#define BCHP_NAND_CONFIG_CS1_FUL_ADR_BYTES_MASK                    0x00070000
+#define BCHP_NAND_CONFIG_CS1_FUL_ADR_BYTES_SHIFT                   16
+
+/* NAND :: CONFIG_CS1 :: reserved2 [15:15] */
+#define BCHP_NAND_CONFIG_CS1_reserved2_MASK                        0x00008000
+#define BCHP_NAND_CONFIG_CS1_reserved2_SHIFT                       15
+
+/* NAND :: CONFIG_CS1 :: COL_ADR_BYTES [14:12] */
+#define BCHP_NAND_CONFIG_CS1_COL_ADR_BYTES_MASK                    0x00007000
+#define BCHP_NAND_CONFIG_CS1_COL_ADR_BYTES_SHIFT                   12
+
+/* NAND :: CONFIG_CS1 :: reserved3 [11:11] */
+#define BCHP_NAND_CONFIG_CS1_reserved3_MASK                        0x00000800
+#define BCHP_NAND_CONFIG_CS1_reserved3_SHIFT                       11
+
+/* NAND :: CONFIG_CS1 :: BLK_ADR_BYTES [10:08] */
+#define BCHP_NAND_CONFIG_CS1_BLK_ADR_BYTES_MASK                    0x00000700
+#define BCHP_NAND_CONFIG_CS1_BLK_ADR_BYTES_SHIFT                   8
+
+/* NAND :: CONFIG_CS1 :: reserved4 [07:00] */
+#define BCHP_NAND_CONFIG_CS1_reserved4_MASK                        0x000000ff
+#define BCHP_NAND_CONFIG_CS1_reserved4_SHIFT                       0
+
+/***************************************************************************
+ *TIMING_1_CS1 - Nand Flash Timing Parameters 1
+ ***************************************************************************/
+/* NAND :: TIMING_1_CS1 :: tWP [31:28] */
+#define BCHP_NAND_TIMING_1_CS1_tWP_MASK                            0xf0000000
+#define BCHP_NAND_TIMING_1_CS1_tWP_SHIFT                           28
+
+/* NAND :: TIMING_1_CS1 :: tWH [27:24] */
+#define BCHP_NAND_TIMING_1_CS1_tWH_MASK                            0x0f000000
+#define BCHP_NAND_TIMING_1_CS1_tWH_SHIFT                           24
+
+/* NAND :: TIMING_1_CS1 :: tRP [23:20] */
+#define BCHP_NAND_TIMING_1_CS1_tRP_MASK                            0x00f00000
+#define BCHP_NAND_TIMING_1_CS1_tRP_SHIFT                           20
+
+/* NAND :: TIMING_1_CS1 :: tREH [19:16] */
+#define BCHP_NAND_TIMING_1_CS1_tREH_MASK                           0x000f0000
+#define BCHP_NAND_TIMING_1_CS1_tREH_SHIFT                          16
+
+/* NAND :: TIMING_1_CS1 :: tCS [15:12] */
+#define BCHP_NAND_TIMING_1_CS1_tCS_MASK                            0x0000f000
+#define BCHP_NAND_TIMING_1_CS1_tCS_SHIFT                           12
+
+/* NAND :: TIMING_1_CS1 :: tCLH [11:08] */
+#define BCHP_NAND_TIMING_1_CS1_tCLH_MASK                           0x00000f00
+#define BCHP_NAND_TIMING_1_CS1_tCLH_SHIFT                          8
+
+/* NAND :: TIMING_1_CS1 :: tALH [07:04] */
+#define BCHP_NAND_TIMING_1_CS1_tALH_MASK                           0x000000f0
+#define BCHP_NAND_TIMING_1_CS1_tALH_SHIFT                          4
+
+/* NAND :: TIMING_1_CS1 :: tADL [03:00] */
+#define BCHP_NAND_TIMING_1_CS1_tADL_MASK                           0x0000000f
+#define BCHP_NAND_TIMING_1_CS1_tADL_SHIFT                          0
+
+/***************************************************************************
+ *TIMING_2_CS1 - Nand Flash Timing Parameters 2
+ ***************************************************************************/
+/* NAND :: TIMING_2_CS1 :: CLK_SELECT [31:31] */
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_MASK                     0x80000000
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_SHIFT                    31
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_CLK_108                  0
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_CLK_216                  1
+
+/* NAND :: TIMING_2_CS1 :: reserved0 [30:13] */
+#define BCHP_NAND_TIMING_2_CS1_reserved0_MASK                      0x7fffe000
+#define BCHP_NAND_TIMING_2_CS1_reserved0_SHIFT                     13
+
+/* NAND :: TIMING_2_CS1 :: tWB [12:09] */
+#define BCHP_NAND_TIMING_2_CS1_tWB_MASK                            0x00001e00
+#define BCHP_NAND_TIMING_2_CS1_tWB_SHIFT                           9
+
+/* NAND :: TIMING_2_CS1 :: tWHR [08:04] */
+#define BCHP_NAND_TIMING_2_CS1_tWHR_MASK                           0x000001f0
+#define BCHP_NAND_TIMING_2_CS1_tWHR_SHIFT                          4
+
+/* NAND :: TIMING_2_CS1 :: tREAD [03:00] */
+#define BCHP_NAND_TIMING_2_CS1_tREAD_MASK                          0x0000000f
+#define BCHP_NAND_TIMING_2_CS1_tREAD_SHIFT                         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_10 - Nand Flash Spare Area Read Bytes 16-19
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_16 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_16_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_16_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_17 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_17_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_17_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_18 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_18_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_18_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_19 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_19_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_19_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_14 - Nand Flash Spare Area Read Bytes 20-23
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_20 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_20_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_20_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_21 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_21_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_21_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_22 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_22_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_22_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_23 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_23_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_23_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_18 - Nand Flash Spare Area Read Bytes 24-27
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_24 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_24_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_24_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_25 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_25_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_25_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_26 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_26_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_26_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_27 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_27_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_27_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_1C - Nand Flash Spare Area Read Bytes 28-31
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_28 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_28_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_28_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_29 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_29_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_29_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_30 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_30_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_30_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_31 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_31_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_31_SHIFT         0
+
+/***************************************************************************
+ *LL_OP - Nand Flash Low Level Operation
+ ***************************************************************************/
+/* NAND :: LL_OP :: RETURN_IDLE [31:31] */
+#define BCHP_NAND_LL_OP_RETURN_IDLE_MASK                           0x80000000
+#define BCHP_NAND_LL_OP_RETURN_IDLE_SHIFT                          31
+
+/* NAND :: LL_OP :: reserved0 [30:20] */
+#define BCHP_NAND_LL_OP_reserved0_MASK                             0x7ff00000
+#define BCHP_NAND_LL_OP_reserved0_SHIFT                            20
+
+/* NAND :: LL_OP :: CLE [19:19] */
+#define BCHP_NAND_LL_OP_CLE_MASK                                   0x00080000
+#define BCHP_NAND_LL_OP_CLE_SHIFT                                  19
+
+/* NAND :: LL_OP :: ALE [18:18] */
+#define BCHP_NAND_LL_OP_ALE_MASK                                   0x00040000
+#define BCHP_NAND_LL_OP_ALE_SHIFT                                  18
+
+/* NAND :: LL_OP :: WE [17:17] */
+#define BCHP_NAND_LL_OP_WE_MASK                                    0x00020000
+#define BCHP_NAND_LL_OP_WE_SHIFT                                   17
+
+/* NAND :: LL_OP :: RE [16:16] */
+#define BCHP_NAND_LL_OP_RE_MASK                                    0x00010000
+#define BCHP_NAND_LL_OP_RE_SHIFT                                   16
+
+/* NAND :: LL_OP :: DATA [15:00] */
+#define BCHP_NAND_LL_OP_DATA_MASK                                  0x0000ffff
+#define BCHP_NAND_LL_OP_DATA_SHIFT                                 0
+
+/***************************************************************************
+ *LL_RDDATA - Nand Flash Low Level Read Data
+ ***************************************************************************/
+/* NAND :: LL_RDDATA :: reserved0 [31:16] */
+#define BCHP_NAND_LL_RDDATA_reserved0_MASK                         0xffff0000
+#define BCHP_NAND_LL_RDDATA_reserved0_SHIFT                        16
+
+/* NAND :: LL_RDDATA :: DATA [15:00] */
+#define BCHP_NAND_LL_RDDATA_DATA_MASK                              0x0000ffff
+#define BCHP_NAND_LL_RDDATA_DATA_SHIFT                             0
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_BASE                          BRCMNAND_CACHE_BASE
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_START                         0
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_END                           127
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_ELEMENT_SIZE                  32
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+/* NAND :: FLASH_CACHEi :: WORD [31:00] */
+#define BCHP_NAND_FLASH_CACHEi_WORD_MASK                           0xffffffff
+#define BCHP_NAND_FLASH_CACHEi_WORD_SHIFT                          0
+
+
+#endif /* #ifndef BCHP_NAND_40_H__ */
+
+/* End of File */
diff --git a/include/linux/mtd/bchp_nand_7x.h b/include/linux/mtd/bchp_nand_7x.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a90965f96701d9e3aa73d9c5b76ce1bec9b7d00
--- /dev/null
+++ b/include/linux/mtd/bchp_nand_7x.h
@@ -0,0 +1,1432 @@
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef BCHP_NAND_7x_H__
+#define BCHP_NAND_7x_H__
+
+#include <bcm_map_part.h>
+#ifdef CONFIG_ARM
+#define BRCMNAND_CTL_BASE                       (NAND_REG_BASE)
+#define BRCMNAND_CACHE_BASE                     (NAND_CACHE_BASE)
+#else
+#define BRCMNAND_CTL_BASE                       (NAND_REG_BASE & 0x0fffffff)
+#define BRCMNAND_CACHE_BASE                     (NAND_CACHE_BASE & 0x0fffffff)
+#endif
+#define BRCMNAND_FLD_ADDR(FLD)                  \
+    (BRCMNAND_CTL_BASE + (offsetof(NandCtrlRegs,FLD)))
+
+#define BCHP_NAND_REG_START                     BRCMNAND_CTL_BASE
+#define BCHP_NAND_REG_END                       (BCHP_NAND_REG_START + \
+                                                 sizeof(NandCtrlRegs))
+
+/***************************************************************************
+ *NAND - Nand Flash Control Registers
+ ***************************************************************************/
+#define BCHP_NAND_REVISION                      BRCMNAND_FLD_ADDR(NandRevision) /* NAND Revision */
+#define BCHP_NAND_CMD_START                     BRCMNAND_FLD_ADDR(NandCmdStart) /* Nand Flash Command Start */
+#define BCHP_NAND_CMD_EXT_ADDRESS               BRCMNAND_FLD_ADDR(NandCmdExtAddr) /* Nand Flash Command Extended Address */
+#define BCHP_NAND_CMD_ADDRESS                   BRCMNAND_FLD_ADDR(NandCmdAddr) /* Nand Flash Command Address */
+#define BCHP_NAND_CMD_END_ADDRESS               BRCMNAND_FLD_ADDR(NandCmdEndAddr) /* Nand Flash Command End Address */
+#define BCHP_NAND_CS_NAND_SELECT                BRCMNAND_FLD_ADDR(NandNandBootConfig) /* Nand Flash EBI CS Select */
+#define BCHP_NAND_CS_NAND_XOR                   BRCMNAND_FLD_ADDR(NandCsNandXor) /* Nand Flash EBI CS Address XOR with 1FC0 Control */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs0) /* Nand Flash Spare Area Read Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs4) /* Nand Flash Spare Area Read Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs8) /* Nand Flash Spare Area Read Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C         BRCMNAND_FLD_ADDR(NandSpareAreaReadOfsC) /* Nand Flash Spare Area Read Bytes 12-15 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs0) /* Nand Flash Spare Area Write Bytes 0-3 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs4) /* Nand Flash Spare Area Write Bytes 4-7 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs8) /* Nand Flash Spare Area Write Bytes 8-11 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C        BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfsC) /* Nand Flash Spare Area Write Bytes 12-15 */
+#define BCHP_NAND_ACC_CONTROL                   BRCMNAND_FLD_ADDR(NandAccControl) /* Nand Flash Access Control */
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_7_0
+#define BCHP_NAND_CONFIG_EXT                    BRCMNAND_FLD_ADDR(NandConfigExt) /* Nand Flash Config Ext */
+#endif
+#define BCHP_NAND_CONFIG                        BRCMNAND_FLD_ADDR(NandConfig) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1                      BRCMNAND_FLD_ADDR(NandTiming1) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2                      BRCMNAND_FLD_ADDR(NandTiming2) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_SEMAPHORE                     BRCMNAND_FLD_ADDR(NandSemaphore) /* Semaphore */
+#define BCHP_NAND_FLASH_DEVICE_ID               BRCMNAND_FLD_ADDR(NandFlashDeviceId) /* Nand Flash Device ID */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT           BRCMNAND_FLD_ADDR(NandFlashDeviceIdExt) /* Nand Flash Extended Device ID */
+#define BCHP_NAND_BLOCK_LOCK_STATUS             BRCMNAND_FLD_ADDR(NandBlockLockStatus) /* Nand Flash Block Lock Status */
+#define BCHP_NAND_INTFC_STATUS                  BRCMNAND_FLD_ADDR(NandIntfcStatus) /* Nand Flash Interface Status */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR             BRCMNAND_FLD_ADDR(NandEccCorrExtAddr) /* ECC Correctable Error Extended Address */
+#define BCHP_NAND_ECC_CORR_ADDR                 BRCMNAND_FLD_ADDR(NandEccCorrAddr) /* ECC Correctable Error Address */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR              BRCMNAND_FLD_ADDR(NandEccUncExtAddr) /* ECC Uncorrectable Error Extended Address */
+#define BCHP_NAND_ECC_UNC_ADDR                  BRCMNAND_FLD_ADDR(NandEccUncAddr) /* ECC Uncorrectable Error Address */
+#define BCHP_NAND_READ_ERROR_COUNT              BRCMNAND_FLD_ADDR(NandReadErrorCount) /* Read Error Count */
+#define BCHP_NAND_CORR_STAT_THRESHOLD           BRCMNAND_FLD_ADDR(NandCorrStatThreshold) /* Correctable Error Reporting Threshold */
+#define BCHP_NAND_ONFI_STATUS                   BRCMNAND_FLD_ADDR(NandOnfiStatus) /* ONFI Status */
+#define BCHP_NAND_ONFI_DEBUG_DATA               BRCMNAND_FLD_ADDR(NandOnfiDebugData) /* ONFI Debug Data */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR           BRCMNAND_FLD_ADDR(NandFlashReadExtAddr) /* Flash Read Data Extended Address */
+#define BCHP_NAND_FLASH_READ_ADDR               BRCMNAND_FLD_ADDR(NandFlashReadAddr) /* Flash Read Data Address */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR         BRCMNAND_FLD_ADDR(NandProgramPageExtAddr) /* Page Program Extended Address */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR             BRCMNAND_FLD_ADDR(NandProgramPageAddr) /* Page Program Address */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR            BRCMNAND_FLD_ADDR(NandCopyBackExtAddr) /* Copy Back Extended Address */
+#define BCHP_NAND_COPY_BACK_ADDR                BRCMNAND_FLD_ADDR(NandCopyBackAddr) /* Copy Back Address */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR          BRCMNAND_FLD_ADDR(NandBlockEraseExtAddr) /* Block Erase Extended Address */
+#define BCHP_NAND_BLOCK_ERASE_ADDR              BRCMNAND_FLD_ADDR(NandBlockEraseAddr) /* Block Erase Address */
+#define BCHP_NAND_INV_READ_EXT_ADDR             BRCMNAND_FLD_ADDR(NandInvReadExtAddr) /* Flash Invalid Data Extended Address */
+#define BCHP_NAND_INV_READ_ADDR                 BRCMNAND_FLD_ADDR(NandInvReadAddr) /* Flash Invalid Data Address */
+#define BCHP_NAND_BLK_WR_PROTECT                BRCMNAND_FLD_ADDR(NandBlkWrProtect) /* Block Write Protect Enable and Size for EBI_CS0b */
+#define BCHP_NAND_ACC_CONTROL_CS1               BRCMNAND_FLD_ADDR(NandAccControlCs1) /* Nand Flash Access Control */
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_7_0
+#define BCHP_NAND_CONFIG_EXT_CS1                BRCMNAND_FLD_ADDR(NandConfigExtCs1) /* Nand Flash Config Ext */
+#endif
+#define BCHP_NAND_CONFIG_CS1                    BRCMNAND_FLD_ADDR(NandConfigCs1) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1_CS1                  BRCMNAND_FLD_ADDR(NandTiming1Cs1) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2_CS1                  BRCMNAND_FLD_ADDR(NandTiming2Cs1) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_ACC_CONTROL_CS2               BRCMNAND_FLD_ADDR(NandAccControlCs2) /* Nand Flash Access Control */
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_7_0
+#define BCHP_NAND_CONFIG_EXT_CS2                BRCMNAND_FLD_ADDR(NandConfigExtCs2) /* Nand Flash Config Ext */
+#endif
+#define BCHP_NAND_CONFIG_CS2                    BRCMNAND_FLD_ADDR(NandConfigCs2) /* Nand Flash Config */
+#define BCHP_NAND_TIMING_1_CS2                  BRCMNAND_FLD_ADDR(NandTiming1Cs2) /* Nand Flash Timing Parameters 1 */
+#define BCHP_NAND_TIMING_2_CS2                  BRCMNAND_FLD_ADDR(NandTiming2Cs2) /* Nand Flash Timing Parameters 2 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs10) /* Nand Flash Spare Area Read Bytes 16-19 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs14) /* Nand Flash Spare Area Read Bytes 20-23 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs18) /* Nand Flash Spare Area Read Bytes 24-27 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs1C) /* Nand Flash Spare Area Read Bytes 28-31 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_20        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs20) /* Nand Flash Spare Area Read Bytes 32-35 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_24        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs24) /* Nand Flash Spare Area Read Bytes 36-39 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_28        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs28) /* Nand Flash Spare Area Read Bytes 40-43 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_2C        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs2C) /* Nand Flash Spare Area Read Bytes 44-47 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_30        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs30) /* Nand Flash Spare Area Read Bytes 48-51 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_34        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs34) /* Nand Flash Spare Area Read Bytes 52-55 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_38        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs38) /* Nand Flash Spare Area Read Bytes 56-59 */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_3C        BRCMNAND_FLD_ADDR(NandSpareAreaReadOfs3C) /* Nand Flash Spare Area Read Bytes 60-63 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_10       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs10) /* Nand Flash Spare Area Write Bytes 16-19 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_14       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs14) /* Nand Flash Spare Area Write Bytes 20-23 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_18       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs18) /* Nand Flash Spare Area Write Bytes 24-27 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_1C       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs1C) /* Nand Flash Spare Area Write Bytes 28-31 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_20       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs20) /* Nand Flash Spare Area Write Bytes 32-35 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_24       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs24) /* Nand Flash Spare Area Write Bytes 36-39 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_28       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs28) /* Nand Flash Spare Area Write Bytes 40-43 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_2C       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs2C) /* Nand Flash Spare Area Write Bytes 44-47 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_30       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs30) /* Nand Flash Spare Area Write Bytes 48-51 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_34       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs34) /* Nand Flash Spare Area Write Bytes 52-55 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_38       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs38) /* Nand Flash Spare Area Write Bytes 56-59 */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_3C       BRCMNAND_FLD_ADDR(NandSpareAreaWriteOfs3C) /* Nand Flash Spare Area Write Bytes 60-63 */
+#define BCHP_NAND_LL_OP                         BRCMNAND_FLD_ADDR(NandLlOpNand) /* Nand Flash Low Level Operation */
+#define BCHP_NAND_LL_RDDATA                     BRCMNAND_FLD_ADDR(NandLlRdData) /* Nand Flash Low Level Read Data */
+
+/***************************************************************************
+ *REVISION - NAND Revision
+ ***************************************************************************/
+/* NAND :: REVISION :: 8KB_PAGE_SUPPORT [31:31] */
+#define BCHP_NAND_REVISION_8KB_PAGE_SUPPORT_MASK                   0x80000000
+#define BCHP_NAND_REVISION_8KB_PAGE_SUPPORT_SHIFT                  31
+
+/* NAND :: REVISION :: reserved0 [30:16] */
+#define BCHP_NAND_REVISION_reserved0_MASK                          0x7fff0000
+#define BCHP_NAND_REVISION_reserved0_SHIFT                         16
+
+/* NAND :: REVISION :: MAJOR [15:08] */
+#define BCHP_NAND_REVISION_MAJOR_MASK                              0x0000ff00
+#define BCHP_NAND_REVISION_MAJOR_SHIFT                             8
+
+/* NAND :: REVISION :: MINOR [07:00] */
+#define BCHP_NAND_REVISION_MINOR_MASK                              0x000000ff
+#define BCHP_NAND_REVISION_MINOR_SHIFT                             0
+
+/***************************************************************************
+ *CMD_START - Nand Flash Command Start
+ ***************************************************************************/
+/* NAND :: CMD_START :: reserved [31:05] */
+#define BCHP_NAND_CMD_START_reserved_MASK                          0xffffffe0
+#define BCHP_NAND_CMD_START_reserved_SHIFT                         5
+
+/* NAND :: CMD_START :: OPCODE [04:00] */
+#define BCHP_NAND_CMD_START_OPCODE_MASK                            0x1f
+#define BCHP_NAND_CMD_START_OPCODE_SHIFT                           0
+#define BCHP_NAND_CMD_START_OPCODE_NULL                            0
+#define BCHP_NAND_CMD_START_OPCODE_PAGE_READ                       1
+#define BCHP_NAND_CMD_START_OPCODE_SPARE_AREA_READ                 2
+#define BCHP_NAND_CMD_START_OPCODE_STATUS_READ                     3
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_PAGE                    4
+#define BCHP_NAND_CMD_START_OPCODE_PROGRAM_SPARE_AREA              5
+#define BCHP_NAND_CMD_START_OPCODE_COPY_BACK                       6
+#define BCHP_NAND_CMD_START_OPCODE_DEVICE_ID_READ                  7
+#define BCHP_NAND_CMD_START_OPCODE_BLOCK_ERASE                     8
+#define BCHP_NAND_CMD_START_OPCODE_FLASH_RESET                     9
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK                     10
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_LOCK_DOWN                11
+#define BCHP_NAND_CMD_START_OPCODE_BLOCKS_UNLOCK                   12
+#define BCHP_NAND_CMD_START_OPCODE_READ_BLOCKS_LOCK_STATUS         13
+#define BCHP_NAND_CMD_START_OPCODE_PARAMETER_READ                  14
+#define BCHP_NAND_CMD_START_OPCODE_PARAMETER_CHANGE_COL            15
+#define BCHP_NAND_CMD_START_OPCODE_LOW_LEVEL_OP                    16
+
+/***************************************************************************
+ *CMD_EXT_ADDRESS - Nand Flash Command Extended Address
+ ***************************************************************************/
+/* NAND :: CMD_EXT_ADDRESS :: reserved0 [31:19] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_MASK                   0xfff80000
+#define BCHP_NAND_CMD_EXT_ADDRESS_reserved0_SHIFT                  19
+
+/* NAND :: CMD_EXT_ADDRESS :: CS_SEL [18:16] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_MASK                      0x00070000
+#define BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT                     16
+
+/* NAND :: CMD_EXT_ADDRESS :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_MASK                 0x0000ffff
+#define BCHP_NAND_CMD_EXT_ADDRESS_EXT_ADDRESS_SHIFT                0
+
+/***************************************************************************
+ *CMD_ADDRESS - Nand Flash Command Address
+ ***************************************************************************/
+/* NAND :: CMD_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_MASK                         0xffffffff
+#define BCHP_NAND_CMD_ADDRESS_ADDRESS_SHIFT                        0
+
+/***************************************************************************
+ *CMD_END_ADDRESS - Nand Flash Command End Address
+ ***************************************************************************/
+/* NAND :: CMD_END_ADDRESS :: ADDRESS [31:00] */
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_CMD_END_ADDRESS_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *CS_NAND_SELECT - Nand Flash EBI CS Select
+ ***************************************************************************/
+/* NAND :: CS_NAND_SELECT :: CS_LOCK [31:31] */
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_MASK                      0x80000000
+#define BCHP_NAND_CS_NAND_SELECT_CS_LOCK_SHIFT                     31
+
+/* NAND :: CS_NAND_SELECT :: AUTO_DEVICE_ID_CONFIG [30:30] */
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK        0x40000000
+#define BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_SHIFT       30
+
+/* NAND :: CS_NAND_SELECT :: NAND_WP [29:29] */
+#define BCHP_NAND_CS_NAND_SELECT_NAND_WP_MASK                    0x20000000
+#define BCHP_NAND_CS_NAND_SELECT_NAND_WP_SHIFT                   29
+
+/* NAND :: CS_NAND_SELECT :: WR_PROTECT_BLK0 [28:28] */
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_MASK              0x10000000
+#define BCHP_NAND_CS_NAND_SELECT_WR_PROTECT_BLK0_SHIFT             28
+
+/* NAND :: CS_NAND_SELECT :: reserved1 [27:16] */
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_MASK                    0x0fff0000
+#define BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT                   16
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_USES_NAND [15:15] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_MASK           0x00008000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_USES_NAND_SHIFT          15
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_USES_NAND [14:14] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_MASK           0x00004000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_USES_NAND_SHIFT          14
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_USES_NAND [13:13] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_MASK           0x00002000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_USES_NAND_SHIFT          13
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_USES_NAND [12:12] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_MASK           0x00001000
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_USES_NAND_SHIFT          12
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_USES_NAND [11:11] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_MASK           0x00000800
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_USES_NAND_SHIFT          11
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_USES_NAND [10:10] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_MASK           0x00000400
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_USES_NAND_SHIFT          10
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_USES_NAND [09:09] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_MASK           0x00000200
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_USES_NAND_SHIFT          9
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_USES_NAND [08:08] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_MASK           0x00000100
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT          8
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_7_SEL [07:07] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_MASK                 0x00000080
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_7_SEL_SHIFT                7
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_6_SEL [06:06] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_MASK                 0x00000040
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_6_SEL_SHIFT                6
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_5_SEL [05:05] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_MASK                 0x00000020
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_5_SEL_SHIFT                5
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_4_SEL [04:04] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_MASK                 0x00000010
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_4_SEL_SHIFT                4
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_3_SEL [03:03] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_MASK                 0x00000008
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_3_SEL_SHIFT                3
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_2_SEL [02:02] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_MASK                 0x00000004
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_2_SEL_SHIFT                2
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_1_SEL [01:01] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_MASK                 0x00000002
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_1_SEL_SHIFT                1
+
+/* NAND :: CS_NAND_SELECT :: EBI_CS_0_SEL [00:00] */
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK                 0x00000001
+#define BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT                0
+
+/***************************************************************************
+ *CS_NAND_XOR - Nand Flash EBI CS Address XOR with 1FC0 Control
+ ***************************************************************************/
+/* NAND :: CS_NAND_XOR :: ONLY_BLOCK_0_1FC0_XOR [31:31] */
+#define BCHP_NAND_CS_NAND_XOR_ONLY_BLOCK_0_1FC0_XOR_MASK           0x80000000
+#define BCHP_NAND_CS_NAND_XOR_ONLY_BLOCK_0_1FC0_XOR_SHIFT          31
+
+/* NAND :: CS_NAND_XOR :: reserved0 [30:08] */
+#define BCHP_NAND_CS_NAND_XOR_reserved0_MASK                       0x7fffff00
+#define BCHP_NAND_CS_NAND_XOR_reserved0_SHIFT                      8
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_7_ADDR_1FC0_XOR [07:07] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_MASK          0x00000080
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_7_ADDR_1FC0_XOR_SHIFT         7
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_6_ADDR_1FC0_XOR [06:06] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_MASK          0x00000040
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_6_ADDR_1FC0_XOR_SHIFT         6
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_5_ADDR_1FC0_XOR [05:05] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_MASK          0x00000020
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_5_ADDR_1FC0_XOR_SHIFT         5
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_4_ADDR_1FC0_XOR [04:04] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_MASK          0x00000010
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_4_ADDR_1FC0_XOR_SHIFT         4
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_3_ADDR_1FC0_XOR [03:03] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_MASK          0x00000008
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_3_ADDR_1FC0_XOR_SHIFT         3
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_2_ADDR_1FC0_XOR [02:02] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_MASK          0x00000004
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_2_ADDR_1FC0_XOR_SHIFT         2
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_1_ADDR_1FC0_XOR [01:01] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_MASK          0x00000002
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_1_ADDR_1FC0_XOR_SHIFT         1
+
+/* NAND :: CS_NAND_XOR :: EBI_CS_0_ADDR_1FC0_XOR [00:00] */
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK          0x00000001
+#define BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_0 - Nand Flash Spare Area Read Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_0_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_1_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_2_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_0_BYTE_OFS_3_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_4 - Nand Flash Spare Area Read Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_4_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_5_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_MASK            0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_6_SHIFT           8
+
+/* NAND :: SPARE_AREA_READ_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_MASK            0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_4_BYTE_OFS_7_SHIFT           0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_8 - Nand Flash Spare Area Read Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_MASK            0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_8_SHIFT           24
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_MASK            0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_9_SHIFT           16
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_10_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_8_BYTE_OFS_11_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_C - Nand Flash Spare Area Read Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_12_SHIFT          24
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_13_SHIFT          16
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_14_SHIFT          8
+
+/* NAND :: SPARE_AREA_READ_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_C_BYTE_OFS_15_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_0 - Nand Flash Spare Area Write Bytes 0-3
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_0 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_0_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_1 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_1_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_2 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_2_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_0 :: BYTE_OFS_3 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_0_BYTE_OFS_3_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_4 - Nand Flash Spare Area Write Bytes 4-7
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_4 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_4_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_5 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_5_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_6 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_MASK           0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_6_SHIFT          8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_4 :: BYTE_OFS_7 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_MASK           0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_4_BYTE_OFS_7_SHIFT          0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_8 - Nand Flash Spare Area Write Bytes 8-11
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_8 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_MASK           0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_8_SHIFT          24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_9 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_MASK           0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_9_SHIFT          16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_10 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_10_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_8 :: BYTE_OFS_11 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_8_BYTE_OFS_11_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_WRITE_OFS_C - Nand Flash Spare Area Write Bytes 12-15
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_12 [31:24] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_12_SHIFT         24
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_13 [23:16] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_13_SHIFT         16
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_14 [15:08] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_14_SHIFT         8
+
+/* NAND :: SPARE_AREA_WRITE_OFS_C :: BYTE_OFS_15 [07:00] */
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_WRITE_OFS_C_BYTE_OFS_15_SHIFT         0
+
+/***************************************************************************
+ *ACC_CONTROL - Nand Flash Access Control
+ ***************************************************************************/
+/* NAND :: ACC_CONTROL :: RD_ECC_EN [31:31] */
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK                       0x80000000
+#define BCHP_NAND_ACC_CONTROL_RD_ECC_EN_SHIFT                      31
+
+/* NAND :: ACC_CONTROL :: WR_ECC_EN [30:30] */
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_MASK                       0x40000000
+#define BCHP_NAND_ACC_CONTROL_WR_ECC_EN_SHIFT                      30
+
+/* NAND :: ACC_CONTROL :: CE_CARE [29:29] */
+#define BCHP_NAND_ACC_CONTROL_CE_CARE_MASK                         0x20000000
+#define BCHP_NAND_ACC_CONTROL_CE_CARE_SHIFT                        29
+
+/* NAND :: ACC_CONTROL :: RESERVED1 [28:28] */
+#define BCHP_NAND_ACC_CONTROL_RESERVED1_MASK                       0x10000000
+#define BCHP_NAND_ACC_CONTROL_RESERVED1_SHIFT                      28
+
+/* NAND :: ACC_CONTROL :: RD_ERASED_ECC_EN [27:27] */
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_MASK                0x08000000
+#define BCHP_NAND_ACC_CONTROL_RD_ERASED_ECC_EN_SHIFT               27
+
+/* NAND :: ACC_CONTROL :: PARTIAL_PAGE_EN [26:26] */
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_MASK                 0x04000000
+#define BCHP_NAND_ACC_CONTROL_PARTIAL_PAGE_EN_SHIFT                26
+
+/* NAND :: ACC_CONTROL :: WR_PREEMPT_EN [25:25] */
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK                   0x02000000
+#define BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_SHIFT                  25
+
+/* NAND :: ACC_CONTROL :: PAGE_HIT_EN [24:24] */
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_MASK                     0x01000000
+#define BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_SHIFT                    24
+
+/* NAND :: ACC_CONTROL :: PREFETCH_EN [23:23] */
+#define BCHP_NAND_ACC_CONTROL_PREFETCH_EN_MASK                     0x00800000
+#define BCHP_NAND_ACC_CONTROL_PREFETCH_EN_SHIFT                    23
+
+/* NAND :: ACC_CONTROL :: CACHE_MODE_EN [22:22] */
+#define BCHP_NAND_ACC_CONTROL_CACHE_MODE_EN_MASK                   0x00400000
+#define BCHP_NAND_ACC_CONTROL_CACHE_MODE_EN_SHIFT                  22
+
+/* NAND :: ACC_CONTROL :: RESERVED2 [21:21] */
+#define BCHP_NAND_ACC_CONTROL_RESERVED2_MASK                       0x00200000
+#define BCHP_NAND_ACC_CONTROL_RESERVED2_SHIFT                      21
+
+/* NAND :: ACC_CONTROL :: ECC_LEVEL [20:16] */
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_MASK                       0x001f0000
+#define BCHP_NAND_ACC_CONTROL_ECC_LEVEL_SHIFT                      16
+
+/* NAND :: ACC_CONTROL :: RESERVED3 [15:08] */
+#define BCHP_NAND_ACC_CONTROL_RESERVED3_MASK                       0x0000ff00
+#define BCHP_NAND_ACC_CONTROL_RESERVED3_SHIFT                      8
+
+/* NAND :: ACC_CONTROL :: SECTOR_SIZE_1K [07:07] */
+#define BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_MASK                  0x00000080
+#define BCHP_NAND_ACC_CONTROL_SECTOR_SIZE_1K_SHIFT                 7
+
+/* NAND :: ACC_CONTROL :: SPARE_AREA_SIZE [06:00] */
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_MASK                 0x0000007f
+#define BCHP_NAND_ACC_CONTROL_SPARE_AREA_SIZE_SHIFT                0
+
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_7_0
+/***************************************************************************
+ *CONFIG_EXT - Nand Flash Config Ext
+ ***************************************************************************/
+/* NAND :: CONFIG_EXT :: BLOCK_SIZE [11:04] */
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_MASK                           0x00000ff0
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT                          4
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8192KB                 10
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_4096KB                 9
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_2048KB                 8
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB                 7
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB                  6
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB                  5
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB                  4
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_64KB                   3
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_32KB                   2
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB                   1
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB                    0
+
+/* NAND :: CONFIG_EXT :: PAGE_SIZE [03:00] */
+#define BCHP_NAND_CONFIG_PAGE_SIZE_MASK                            0x0000000f
+#define BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT                           0
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512                     0
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_1KB                     1
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB                     2
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB                     3
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB                     4
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_16KB                    5 
+
+#endif
+
+/***************************************************************************
+ *CONFIG - Nand Flash Config
+ ***************************************************************************/
+/* NAND :: CONFIG :: CONFIG_LOCK [31:31] */
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_MASK                          0x80000000
+#define BCHP_NAND_CONFIG_CONFIG_LOCK_SHIFT                         31
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_7_0
+/* NAND :: CONFIG :: BLOCK_SIZE [30:28] */
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_MASK                           0x70000000
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_SHIFT                          28
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_2048KB                 6
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_1024KB                 5
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_512KB                  4
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_256KB                  3
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_128KB                  2
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_16KB                   1
+#define BCHP_NAND_CONFIG_BLOCK_SIZE_BK_SIZE_8KB                    0
+#endif
+
+/* NAND :: CONFIG :: DEVICE_SIZE [27:24] */
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_MASK                          0x0f000000
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_SHIFT                         24
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4MB                  0
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8MB                  1
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16MB                 2
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32MB                 3
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64MB                 4
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128MB                5
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_256MB                6
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_512MB                7
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_1GB                  8
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_2GB                  9
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_4GB                  10
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_8GB                  11
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_16GB                 12
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_32GB                 13
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_64GB                 14
+#define BCHP_NAND_CONFIG_DEVICE_SIZE_DVC_SIZE_128GB                15
+
+/* NAND :: CONFIG :: DEVICE_WIDTH [23:23] */
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_MASK                         0x00800000
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_SHIFT                        23
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_8                  0
+#define BCHP_NAND_CONFIG_DEVICE_WIDTH_DVC_WIDTH_16                 1
+
+/* NAND :: CONFIG :: reserved0 [22:22] */
+#define BCHP_NAND_CONFIG_reserved0_MASK                            0x00400000
+#define BCHP_NAND_CONFIG_reserved0_SHIFT                           22
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_7_0
+/* NAND :: CONFIG :: PAGE_SIZE [21:20] */
+#define BCHP_NAND_CONFIG_PAGE_SIZE_MASK                            0x00300000
+#define BCHP_NAND_CONFIG_PAGE_SIZE_SHIFT                           20
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_512                     0
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_2KB                     1
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_4KB                     2
+#define BCHP_NAND_CONFIG_PAGE_SIZE_PG_SIZE_8KB                     3
+#endif
+
+/* NAND :: CONFIG :: reserved1 [19:19] */
+#define BCHP_NAND_CONFIG_reserved1_MASK                            0x00080000
+#define BCHP_NAND_CONFIG_reserved1_SHIFT                           19
+
+/* NAND :: CONFIG :: FUL_ADR_BYTES [18:16] */
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_MASK                        0x00070000
+#define BCHP_NAND_CONFIG_FUL_ADR_BYTES_SHIFT                       16
+
+/* NAND :: CONFIG :: reserved2 [15:15] */
+#define BCHP_NAND_CONFIG_reserved2_MASK                            0x00008000
+#define BCHP_NAND_CONFIG_reserved2_SHIFT                           15
+
+/* NAND :: CONFIG :: COL_ADR_BYTES [14:12] */
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_MASK                        0x00007000
+#define BCHP_NAND_CONFIG_COL_ADR_BYTES_SHIFT                       12
+
+/* NAND :: CONFIG :: reserved3 [11:11] */
+#define BCHP_NAND_CONFIG_reserved3_MASK                            0x00000800
+#define BCHP_NAND_CONFIG_reserved3_SHIFT                           11
+
+/* NAND :: CONFIG :: BLK_ADR_BYTES [10:08] */
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_MASK                        0x00000700
+#define BCHP_NAND_CONFIG_BLK_ADR_BYTES_SHIFT                       8
+
+/* NAND :: CONFIG :: reserved4 [07:00] */
+#define BCHP_NAND_CONFIG_reserved4_MASK                            0x000000ff
+#define BCHP_NAND_CONFIG_reserved4_SHIFT                           0
+
+/***************************************************************************
+ *TIMING_1 - Nand Flash Timing Parameters 1
+ ***************************************************************************/
+/* NAND :: TIMING_1 :: tWP [31:28] */
+#define BCHP_NAND_TIMING_1_tWP_MASK                                0xf0000000
+#define BCHP_NAND_TIMING_1_tWP_SHIFT                               28
+
+/* NAND :: TIMING_1 :: tWH [27:24] */
+#define BCHP_NAND_TIMING_1_tWH_MASK                                0x0f000000
+#define BCHP_NAND_TIMING_1_tWH_SHIFT                               24
+
+/* NAND :: TIMING_1 :: tRP [23:20] */
+#define BCHP_NAND_TIMING_1_tRP_MASK                                0x00f00000
+#define BCHP_NAND_TIMING_1_tRP_SHIFT                               20
+
+/* NAND :: TIMING_1 :: tREH [19:16] */
+#define BCHP_NAND_TIMING_1_tREH_MASK                               0x000f0000
+#define BCHP_NAND_TIMING_1_tREH_SHIFT                              16
+
+/* NAND :: TIMING_1 :: tCS [15:12] */
+#define BCHP_NAND_TIMING_1_tCS_MASK                                0x0000f000
+#define BCHP_NAND_TIMING_1_tCS_SHIFT                               12
+
+/* NAND :: TIMING_1 :: tCLH [11:08] */
+#define BCHP_NAND_TIMING_1_tCLH_MASK                               0x00000f00
+#define BCHP_NAND_TIMING_1_tCLH_SHIFT                              8
+
+/* NAND :: TIMING_1 :: tALH [07:04] */
+#define BCHP_NAND_TIMING_1_tALH_MASK                               0x000000f0
+#define BCHP_NAND_TIMING_1_tALH_SHIFT                              4
+
+/* NAND :: TIMING_1 :: tADL [03:00] */
+#define BCHP_NAND_TIMING_1_tADL_MASK                               0x0000000f
+#define BCHP_NAND_TIMING_1_tADL_SHIFT                              0
+
+/***************************************************************************
+ *TIMING_2 - Nand Flash Timing Parameters 2
+ ***************************************************************************/
+/* NAND :: TIMING_2 :: CLK_SELECT [31:31] */
+#define BCHP_NAND_TIMING_2_CLK_SELECT_MASK                         0x80000000
+#define BCHP_NAND_TIMING_2_CLK_SELECT_SHIFT                        31
+#define BCHP_NAND_TIMING_2_CLK_SELECT_CLK_108                      0
+#define BCHP_NAND_TIMING_2_CLK_SELECT_CLK_216                      1
+
+/* NAND :: TIMING_2 :: reserved0 [30:13] */
+#define BCHP_NAND_TIMING_2_reserved0_MASK                          0x7fffe000
+#define BCHP_NAND_TIMING_2_reserved0_SHIFT                         13
+
+/* NAND :: TIMING_2 :: tWB [12:09] */
+#define BCHP_NAND_TIMING_2_tWB_MASK                                0x00001e00
+#define BCHP_NAND_TIMING_2_tWB_SHIFT                               9
+
+/* NAND :: TIMING_2 :: tWHR [08:04] */
+#define BCHP_NAND_TIMING_2_tWHR_MASK                               0x000001f0
+#define BCHP_NAND_TIMING_2_tWHR_SHIFT                              4
+
+/* NAND :: TIMING_2 :: tREAD [03:00] */
+#define BCHP_NAND_TIMING_2_tREAD_MASK                              0x0000000f
+#define BCHP_NAND_TIMING_2_tREAD_SHIFT                             0
+
+/***************************************************************************
+ *SEMAPHORE - Semaphore
+ ***************************************************************************/
+/* NAND :: SEMAPHORE :: reserved0 [31:08] */
+#define BCHP_NAND_SEMAPHORE_reserved0_MASK                         0xffffff00
+#define BCHP_NAND_SEMAPHORE_reserved0_SHIFT                        8
+
+/* NAND :: SEMAPHORE :: semaphore_ctrl [07:00] */
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_MASK                    0x000000ff
+#define BCHP_NAND_SEMAPHORE_semaphore_ctrl_SHIFT                   0
+
+/***************************************************************************
+ *FLASH_DEVICE_ID - Nand Flash Device ID
+ ***************************************************************************/
+/* NAND :: FLASH_DEVICE_ID :: BYTE_0 [31:24] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_MASK                      0xff000000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_0_SHIFT                     24
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_1 [23:16] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_MASK                      0x00ff0000
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_1_SHIFT                     16
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_2 [15:08] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_MASK                      0x0000ff00
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_2_SHIFT                     8
+
+/* NAND :: FLASH_DEVICE_ID :: BYTE_3 [07:00] */
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_MASK                      0x000000ff
+#define BCHP_NAND_FLASH_DEVICE_ID_BYTE_3_SHIFT                     0
+
+/***************************************************************************
+ *FLASH_DEVICE_ID_EXT - Nand Flash Extended Device ID
+ ***************************************************************************/
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_4 [31:24] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_4_MASK                  0xff000000
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_4_SHIFT                 24
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_5 [23:16] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_5_MASK                  0x00ff0000
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_5_SHIFT                 16
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_6 [15:08] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_6_MASK                  0x0000ff00
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_6_SHIFT                 8
+
+/* NAND :: FLASH_DEVICE_ID_EXT :: BYTE_7 [07:00] */
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_7_MASK                  0x000000ff
+#define BCHP_NAND_FLASH_DEVICE_ID_EXT_BYTE_7_SHIFT                 0
+
+/***************************************************************************
+ *BLOCK_LOCK_STATUS - Nand Flash Block Lock Status
+ ***************************************************************************/
+/* NAND :: BLOCK_LOCK_STATUS :: reserved0 [31:08] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_MASK                 0xffffff00
+#define BCHP_NAND_BLOCK_LOCK_STATUS_reserved0_SHIFT                8
+
+/* NAND :: BLOCK_LOCK_STATUS :: STATUS [07:00] */
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_MASK                    0x000000ff
+#define BCHP_NAND_BLOCK_LOCK_STATUS_STATUS_SHIFT                   0
+
+/***************************************************************************
+ *INTFC_STATUS - Nand Flash Interface Status
+ ***************************************************************************/
+/* NAND :: INTFC_STATUS :: CTLR_READY [31:31] */
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK                     0x80000000
+#define BCHP_NAND_INTFC_STATUS_CTLR_READY_SHIFT                    31
+
+/* NAND :: INTFC_STATUS :: FLASH_READY [30:30] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_MASK                    0x40000000
+#define BCHP_NAND_INTFC_STATUS_FLASH_READY_SHIFT                   30
+
+/* NAND :: INTFC_STATUS :: CACHE_VALID [29:29] */
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_MASK                    0x20000000
+#define BCHP_NAND_INTFC_STATUS_CACHE_VALID_SHIFT                   29
+
+/* NAND :: INTFC_STATUS :: SPARE_AREA_VALID [28:28] */
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK               0x10000000
+#define BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_SHIFT              28
+
+/* NAND :: INTFC_STATUS :: ERASED [27:27] */
+#define BCHP_NAND_INTFC_STATUS_ERASED_MASK                         0x08000000
+#define BCHP_NAND_INTFC_STATUS_ERASED_SHIFT                        27
+
+/* NAND :: INTFC_STATUS :: PLANE_READY [26:26] */
+#define BCHP_NAND_INTFC_STATUS_PLANE_READY_MASK                    0x04000000
+#define BCHP_NAND_INTFC_STATUS_PLANE_READY_SHIFT                   26
+
+/* NAND :: INTFC_STATUS :: reserved0 [25:08] */
+#define BCHP_NAND_INTFC_STATUS_reserved0_MASK                      0x03ffff00
+#define BCHP_NAND_INTFC_STATUS_reserved0_SHIFT                     8
+
+/* NAND :: INTFC_STATUS :: FLASH_STATUS [07:00] */
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_MASK                   0x000000ff
+#define BCHP_NAND_INTFC_STATUS_FLASH_STATUS_SHIFT                  0
+
+/***************************************************************************
+ *ECC_CORR_EXT_ADDR - ECC Correctable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: ECC_CORR_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: ECC_CORR_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_ECC_CORR_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *ECC_CORR_ADDR - ECC Correctable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_CORR_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_ECC_CORR_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *ECC_UNC_EXT_ADDR - ECC Uncorrectable Error Extended Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_MASK                  0xfff80000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_reserved0_SHIFT                 19
+
+/* NAND :: ECC_UNC_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_MASK                     0x00070000
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_CS_SEL_SHIFT                    16
+
+/* NAND :: ECC_UNC_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_MASK                0x0000ffff
+#define BCHP_NAND_ECC_UNC_EXT_ADDR_EXT_ADDRESS_SHIFT               0
+
+/***************************************************************************
+ *ECC_UNC_ADDR - ECC Uncorrectable Error Address
+ ***************************************************************************/
+/* NAND :: ECC_UNC_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_MASK                        0xffffffff
+#define BCHP_NAND_ECC_UNC_ADDR_ADDRESS_SHIFT                       0
+
+/***************************************************************************
+ *READ_ERROR_COUNT - Read Error Count
+ ***************************************************************************/
+/* NAND :: READ_ERROR_COUNT :: READ_ERROR_COUNT [31:00] */
+#define BCHP_NAND_READ_ERROR_COUNT_READ_ERROR_COUNT_MASK           0xffffffff
+#define BCHP_NAND_READ_ERROR_COUNT_READ_ERROR_COUNT_SHIFT          0
+
+/***************************************************************************
+ *CORR_STAT_THRESHOLD - Correctable Error Reporting Threshold
+ ***************************************************************************/
+/* NAND :: CORR_STAT_THRESHOLD :: reserved0 [31:30] */
+#define BCHP_NAND_CORR_STAT_THRESHOLD_reserved0_MASK               0xc0000000
+#define BCHP_NAND_CORR_STAT_THRESHOLD_reserved0_SHIFT              30
+
+/* NAND :: CORR_STAT_THRESHOLD :: CORR_STAT_THRESHOLD [05:00] */
+#define BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_MASK     0x0000003f
+#define BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT    0
+
+/***************************************************************************
+ *ONFI_STATUS - ONFI Status
+ ***************************************************************************/
+/* NAND :: ONFI_STATUS :: ONFI_DEBUG_SEL [31:28] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_DEBUG_SEL_MASK                  0xf0000000
+#define BCHP_NAND_ONFI_STATUS_ONFI_DEBUG_SEL_SHIFT                 28
+
+/* NAND :: ONFI_STATUS :: reserved0 [27:06] */
+#define BCHP_NAND_ONFI_STATUS_reserved0_MASK                       0x0fffffc0
+#define BCHP_NAND_ONFI_STATUS_reserved0_SHIFT                      6
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG2 [05:05] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG2_MASK              0x00000020
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG2_SHIFT             5
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG1 [04:04] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG1_MASK              0x00000010
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG1_SHIFT             4
+
+/* NAND :: ONFI_STATUS :: ONFI_BAD_IDENT_PG0 [03:03] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG0_MASK              0x00000008
+#define BCHP_NAND_ONFI_STATUS_ONFI_BAD_IDENT_PG0_SHIFT             3
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG2 [02:02] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG2_MASK              0x00000004
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG2_SHIFT             2
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG1 [01:01] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG1_MASK              0x00000002
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG1_SHIFT             1
+
+/* NAND :: ONFI_STATUS :: ONFI_CRC_ERROR_PG0 [00:00] */
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG0_MASK              0x00000001
+#define BCHP_NAND_ONFI_STATUS_ONFI_CRC_ERROR_PG0_SHIFT             0
+
+/***************************************************************************
+ *ONFI_DEBUG_DATA - ONFI Debug Data
+ ***************************************************************************/
+/* NAND :: ONFI_DEBUG_DATA :: ONFI_DEBUG_DATA [31:00] */
+#define BCHP_NAND_ONFI_DEBUG_DATA_ONFI_DEBUG_DATA_MASK             0xffffffff
+#define BCHP_NAND_ONFI_DEBUG_DATA_ONFI_DEBUG_DATA_SHIFT            0
+
+/***************************************************************************
+ *FLASH_READ_EXT_ADDR - Flash Read Data Extended Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_MASK               0xfff80000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_reserved0_SHIFT              19
+
+/* NAND :: FLASH_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_MASK                  0x00070000
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_CS_SEL_SHIFT                 16
+
+/* NAND :: FLASH_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_MASK             0x0000ffff
+#define BCHP_NAND_FLASH_READ_EXT_ADDR_EXT_ADDRESS_SHIFT            0
+
+/***************************************************************************
+ *FLASH_READ_ADDR - Flash Read Data Address
+ ***************************************************************************/
+/* NAND :: FLASH_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_MASK                     0xffffffff
+#define BCHP_NAND_FLASH_READ_ADDR_ADDRESS_SHIFT                    0
+
+/***************************************************************************
+ *PROGRAM_PAGE_EXT_ADDR - Page Program Extended Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_MASK             0xfff80000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_reserved0_SHIFT            19
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_MASK                0x00070000
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_CS_SEL_SHIFT               16
+
+/* NAND :: PROGRAM_PAGE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_MASK           0x0000ffff
+#define BCHP_NAND_PROGRAM_PAGE_EXT_ADDR_EXT_ADDRESS_SHIFT          0
+
+/***************************************************************************
+ *PROGRAM_PAGE_ADDR - Page Program Address
+ ***************************************************************************/
+/* NAND :: PROGRAM_PAGE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_MASK                   0xffffffff
+#define BCHP_NAND_PROGRAM_PAGE_ADDR_ADDRESS_SHIFT                  0
+
+/***************************************************************************
+ *COPY_BACK_EXT_ADDR - Copy Back Extended Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_MASK                0xfff80000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_reserved0_SHIFT               19
+
+/* NAND :: COPY_BACK_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_MASK                   0x00070000
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_CS_SEL_SHIFT                  16
+
+/* NAND :: COPY_BACK_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_MASK              0x0000ffff
+#define BCHP_NAND_COPY_BACK_EXT_ADDR_EXT_ADDRESS_SHIFT             0
+
+/***************************************************************************
+ *COPY_BACK_ADDR - Copy Back Address
+ ***************************************************************************/
+/* NAND :: COPY_BACK_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_MASK                      0xffffffff
+#define BCHP_NAND_COPY_BACK_ADDR_ADDRESS_SHIFT                     0
+
+/***************************************************************************
+ *BLOCK_ERASE_EXT_ADDR - Block Erase Extended Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_MASK              0xfff80000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_reserved0_SHIFT             19
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_MASK                 0x00070000
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_CS_SEL_SHIFT                16
+
+/* NAND :: BLOCK_ERASE_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_MASK            0x0000ffff
+#define BCHP_NAND_BLOCK_ERASE_EXT_ADDR_EXT_ADDRESS_SHIFT           0
+
+/***************************************************************************
+ *BLOCK_ERASE_ADDR - Block Erase Address
+ ***************************************************************************/
+/* NAND :: BLOCK_ERASE_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_MASK                    0xffffffff
+#define BCHP_NAND_BLOCK_ERASE_ADDR_ADDRESS_SHIFT                   0
+
+/***************************************************************************
+ *INV_READ_EXT_ADDR - Flash Invalid Data Extended Address
+ ***************************************************************************/
+/* NAND :: INV_READ_EXT_ADDR :: reserved0 [31:19] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_MASK                 0xfff80000
+#define BCHP_NAND_INV_READ_EXT_ADDR_reserved0_SHIFT                19
+
+/* NAND :: INV_READ_EXT_ADDR :: CS_SEL [18:16] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_MASK                    0x00070000
+#define BCHP_NAND_INV_READ_EXT_ADDR_CS_SEL_SHIFT                   16
+
+/* NAND :: INV_READ_EXT_ADDR :: EXT_ADDRESS [15:00] */
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_MASK               0x0000ffff
+#define BCHP_NAND_INV_READ_EXT_ADDR_EXT_ADDRESS_SHIFT              0
+
+/***************************************************************************
+ *INV_READ_ADDR - Flash Invalid Data Address
+ ***************************************************************************/
+/* NAND :: INV_READ_ADDR :: ADDRESS [31:00] */
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_MASK                       0xffffffff
+#define BCHP_NAND_INV_READ_ADDR_ADDRESS_SHIFT                      0
+
+/***************************************************************************
+ *BLK_WR_PROTECT - Block Write Protect Enable and Size for EBI_CS0b
+ ***************************************************************************/
+/* NAND :: BLK_WR_PROTECT :: BLK_END_ADDR [31:00] */
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_MASK                 0xffffffff
+#define BCHP_NAND_BLK_WR_PROTECT_BLK_END_ADDR_SHIFT                0
+
+/***************************************************************************
+ *ACC_CONTROL_CS1 - Nand Flash Access Control
+ ***************************************************************************/
+/* NAND :: ACC_CONTROL_CS1 :: RD_ECC_EN [31:31] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ECC_EN_MASK                   0x80000000
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ECC_EN_SHIFT                  31
+
+/* NAND :: ACC_CONTROL_CS1 :: WR_ECC_EN [30:30] */
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_ECC_EN_MASK                   0x40000000
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_ECC_EN_SHIFT                  30
+
+/* NAND :: ACC_CONTROL_CS1 :: CE_CARE [29:29] */
+#define BCHP_NAND_ACC_CONTROL_CS1_CE_CARE_MASK                     0x20000000
+#define BCHP_NAND_ACC_CONTROL_CS1_CE_CARE_SHIFT                    29
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved1 [28:28] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED1_MASK                   0x10000000
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED1_SHIFT                  28
+
+/* NAND :: ACC_CONTROL_CS1 :: RD_ERASED_ECC_EN [27:27] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ERASED_ECC_EN_MASK            0x08000000
+#define BCHP_NAND_ACC_CONTROL_CS1_RD_ERASED_ECC_EN_SHIFT           27
+
+/* NAND :: ACC_CONTROL_CS1 :: PARTIAL_PAGE_EN [26:26] */
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_MASK             0x04000000
+#define BCHP_NAND_ACC_CONTROL_CS1_PARTIAL_PAGE_EN_SHIFT            26
+
+/* NAND :: ACC_CONTROL_CS1 :: WR_PREEMPT_EN [25:25] */
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_PREEMPT_EN_MASK               0x02000000
+#define BCHP_NAND_ACC_CONTROL_CS1_WR_PREEMPT_EN_SHIFT              25
+
+/* NAND :: ACC_CONTROL_CS1 :: PAGE_HIT_EN [24:24] */
+#define BCHP_NAND_ACC_CONTROL_CS1_PAGE_HIT_EN_MASK                 0x01000000
+#define BCHP_NAND_ACC_CONTROL_CS1_PAGE_HIT_EN_SHIFT                24
+
+/* NAND :: ACC_CONTROL_CS1 :: PREFETCH_EN [23:23] */
+#define BCHP_NAND_ACC_CONTROL_CS1_PREFETCH_EN_MASK                 0x00800000
+#define BCHP_NAND_ACC_CONTROL_CS1_PREFETCH_EN_SHIFT                23
+
+/* NAND :: ACC_CONTROL_CS1 :: CACHE_MODE_EN [22:22] */
+#define BCHP_NAND_ACC_CONTROL_CS1_CACHE_MODE_EN_MASK               0x00400000
+#define BCHP_NAND_ACC_CONTROL_CS1_CACHE_MODE_EN_SHIFT              22
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved2 [21:21] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED2_MASK                   0x00200000
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED2_SHIFT                  21
+
+/* NAND :: ACC_CONTROL_CS1 :: ECC_LEVEL [20:16] */
+#define BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_MASK                   0x001f0000
+#define BCHP_NAND_ACC_CONTROL_CS1_ECC_LEVEL_SHIFT                  16
+
+/* NAND :: ACC_CONTROL_CS1 :: reserved3 [15:08] */
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED3_MASK                   0x0000ff00
+#define BCHP_NAND_ACC_CONTROL_CS1_RESERVED3_SHIFT                  8
+
+/* NAND :: ACC_CONTROL_CS1 :: SECTOR_SIZE_1K [07:07] */
+#define BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_MASK              0x00000080
+#define BCHP_NAND_ACC_CONTROL_CS1_SECTOR_SIZE_1K_SHIFT             7
+
+/* NAND :: ACC_CONTROL_CS1 :: SPARE_AREA_SIZE [06:00] */
+#define BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_MASK             0x0000007f
+#define BCHP_NAND_ACC_CONTROL_CS1_SPARE_AREA_SIZE_SHIFT            0
+
+#if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_7_0
+/***************************************************************************
+ *CONFIG_CS1_EXT - Nand Flash Config Ext
+ ***************************************************************************/
+/* NAND :: CONFIG_CS1_EXT :: BLOCK_SIZE [11:4] */
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_MASK                       0x00000ff0
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_SHIFT                      4
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_8192KB             10
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_4096KB             9
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_2048KB             8
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_1024KB             7
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_512KB              6
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_256KB              5
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_128KB              4
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_64KB               3
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_32KB               2
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_16KB               1
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_8KB                0
+
+/* NAND :: CONFIG_CS1_EXT :: PAGE_SIZE [03:00] */
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_MASK                        0x0000000f
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_SHIFT                       0
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_512                 0
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_1KB                 1
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_2KB                 2
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_4KB                 3
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_8KB                 4
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_16KB                5 
+
+#endif
+
+/***************************************************************************
+ *CONFIG_CS1 - Nand Flash Config
+ ***************************************************************************/
+/* NAND :: CONFIG_CS1 :: CONFIG_LOCK [31:31] */
+#define BCHP_NAND_CONFIG_CS1_CONFIG_LOCK_MASK                      0x80000000
+#define BCHP_NAND_CONFIG_CS1_CONFIG_LOCK_SHIFT                     31
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_7_0
+/* NAND :: CONFIG_CS1 :: BLOCK_SIZE [30:28] */
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_MASK                       0x70000000
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_SHIFT                      28
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_2048KB             6
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_1024KB             5
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_512KB              4
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_256KB              3
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_128KB              2
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_16KB               1
+#define BCHP_NAND_CONFIG_CS1_BLOCK_SIZE_BK_SIZE_8KB                0
+#endif
+
+/* NAND :: CONFIG_CS1 :: DEVICE_SIZE [27:24] */
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_MASK                      0x0f000000
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_SHIFT                     24
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_4MB              0
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_8MB              1
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_16MB             2
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_32MB             3
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_64MB             4
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_128MB            5
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_256MB            6
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_512MB            7
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_1GB              8
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_2GB              9
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_4GB              10
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_8GB              11
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_16GB             12
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_32GB             13
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_64GB             14
+#define BCHP_NAND_CONFIG_CS1_DEVICE_SIZE_DVC_SIZE_128GB            15
+
+/* NAND :: CONFIG_CS1 :: DEVICE_WIDTH [23:23] */
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_MASK                     0x00800000
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_SHIFT                    23
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_DVC_WIDTH_8              0
+#define BCHP_NAND_CONFIG_CS1_DEVICE_WIDTH_DVC_WIDTH_16             1
+
+/* NAND :: CONFIG_CS1 :: reserved0 [22:22] */
+#define BCHP_NAND_CONFIG_CS1_reserved0_MASK                        0x00400000
+#define BCHP_NAND_CONFIG_CS1_reserved0_SHIFT                       22
+
+#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_7_0
+/* NAND :: CONFIG_CS1 :: PAGE_SIZE [21:20] */
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_MASK                        0x00300000
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_SHIFT                       20
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_512                 0
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_2KB                 1
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_4KB                 2
+#define BCHP_NAND_CONFIG_CS1_PAGE_SIZE_PG_SIZE_8KB                 3
+#endif
+
+/* NAND :: CONFIG_CS1 :: reserved1 [19:19] */
+#define BCHP_NAND_CONFIG_CS1_reserved1_MASK                        0x00080000
+#define BCHP_NAND_CONFIG_CS1_reserved1_SHIFT                       19
+
+/* NAND :: CONFIG_CS1 :: FUL_ADR_BYTES [18:16] */
+#define BCHP_NAND_CONFIG_CS1_FUL_ADR_BYTES_MASK                    0x00070000
+#define BCHP_NAND_CONFIG_CS1_FUL_ADR_BYTES_SHIFT                   16
+
+/* NAND :: CONFIG_CS1 :: reserved2 [15:15] */
+#define BCHP_NAND_CONFIG_CS1_reserved2_MASK                        0x00008000
+#define BCHP_NAND_CONFIG_CS1_reserved2_SHIFT                       15
+
+/* NAND :: CONFIG_CS1 :: COL_ADR_BYTES [14:12] */
+#define BCHP_NAND_CONFIG_CS1_COL_ADR_BYTES_MASK                    0x00007000
+#define BCHP_NAND_CONFIG_CS1_COL_ADR_BYTES_SHIFT                   12
+
+/* NAND :: CONFIG_CS1 :: reserved3 [11:11] */
+#define BCHP_NAND_CONFIG_CS1_reserved3_MASK                        0x00000800
+#define BCHP_NAND_CONFIG_CS1_reserved3_SHIFT                       11
+
+/* NAND :: CONFIG_CS1 :: BLK_ADR_BYTES [10:08] */
+#define BCHP_NAND_CONFIG_CS1_BLK_ADR_BYTES_MASK                    0x00000700
+#define BCHP_NAND_CONFIG_CS1_BLK_ADR_BYTES_SHIFT                   8
+
+/* NAND :: CONFIG_CS1 :: reserved4 [07:00] */
+#define BCHP_NAND_CONFIG_CS1_reserved4_MASK                        0x000000ff
+#define BCHP_NAND_CONFIG_CS1_reserved4_SHIFT                       0
+
+/***************************************************************************
+ *TIMING_1_CS1 - Nand Flash Timing Parameters 1
+ ***************************************************************************/
+/* NAND :: TIMING_1_CS1 :: tWP [31:28] */
+#define BCHP_NAND_TIMING_1_CS1_tWP_MASK                            0xf0000000
+#define BCHP_NAND_TIMING_1_CS1_tWP_SHIFT                           28
+
+/* NAND :: TIMING_1_CS1 :: tWH [27:24] */
+#define BCHP_NAND_TIMING_1_CS1_tWH_MASK                            0x0f000000
+#define BCHP_NAND_TIMING_1_CS1_tWH_SHIFT                           24
+
+/* NAND :: TIMING_1_CS1 :: tRP [23:20] */
+#define BCHP_NAND_TIMING_1_CS1_tRP_MASK                            0x00f00000
+#define BCHP_NAND_TIMING_1_CS1_tRP_SHIFT                           20
+
+/* NAND :: TIMING_1_CS1 :: tREH [19:16] */
+#define BCHP_NAND_TIMING_1_CS1_tREH_MASK                           0x000f0000
+#define BCHP_NAND_TIMING_1_CS1_tREH_SHIFT                          16
+
+/* NAND :: TIMING_1_CS1 :: tCS [15:12] */
+#define BCHP_NAND_TIMING_1_CS1_tCS_MASK                            0x0000f000
+#define BCHP_NAND_TIMING_1_CS1_tCS_SHIFT                           12
+
+/* NAND :: TIMING_1_CS1 :: tCLH [11:08] */
+#define BCHP_NAND_TIMING_1_CS1_tCLH_MASK                           0x00000f00
+#define BCHP_NAND_TIMING_1_CS1_tCLH_SHIFT                          8
+
+/* NAND :: TIMING_1_CS1 :: tALH [07:04] */
+#define BCHP_NAND_TIMING_1_CS1_tALH_MASK                           0x000000f0
+#define BCHP_NAND_TIMING_1_CS1_tALH_SHIFT                          4
+
+/* NAND :: TIMING_1_CS1 :: tADL [03:00] */
+#define BCHP_NAND_TIMING_1_CS1_tADL_MASK                           0x0000000f
+#define BCHP_NAND_TIMING_1_CS1_tADL_SHIFT                          0
+
+/***************************************************************************
+ *TIMING_2_CS1 - Nand Flash Timing Parameters 2
+ ***************************************************************************/
+/* NAND :: TIMING_2_CS1 :: CLK_SELECT [31:31] */
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_MASK                     0x80000000
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_SHIFT                    31
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_CLK_108                  0
+#define BCHP_NAND_TIMING_2_CS1_CLK_SELECT_CLK_216                  1
+
+/* NAND :: TIMING_2_CS1 :: reserved0 [30:13] */
+#define BCHP_NAND_TIMING_2_CS1_reserved0_MASK                      0x7fffe000
+#define BCHP_NAND_TIMING_2_CS1_reserved0_SHIFT                     13
+
+/* NAND :: TIMING_2_CS1 :: tWB [12:09] */
+#define BCHP_NAND_TIMING_2_CS1_tWB_MASK                            0x00001e00
+#define BCHP_NAND_TIMING_2_CS1_tWB_SHIFT                           9
+
+/* NAND :: TIMING_2_CS1 :: tWHR [08:04] */
+#define BCHP_NAND_TIMING_2_CS1_tWHR_MASK                           0x000001f0
+#define BCHP_NAND_TIMING_2_CS1_tWHR_SHIFT                          4
+
+/* NAND :: TIMING_2_CS1 :: tREAD [03:00] */
+#define BCHP_NAND_TIMING_2_CS1_tREAD_MASK                          0x0000000f
+#define BCHP_NAND_TIMING_2_CS1_tREAD_SHIFT                         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_10 - Nand Flash Spare Area Read Bytes 16-19
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_16 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_16_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_16_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_17 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_17_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_17_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_18 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_18_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_18_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_10 :: BYTE_OFS_19 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_19_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_10_BYTE_OFS_19_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_14 - Nand Flash Spare Area Read Bytes 20-23
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_20 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_20_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_20_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_21 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_21_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_21_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_22 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_22_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_22_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_14 :: BYTE_OFS_23 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_23_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_14_BYTE_OFS_23_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_18 - Nand Flash Spare Area Read Bytes 24-27
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_24 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_24_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_24_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_25 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_25_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_25_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_26 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_26_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_26_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_18 :: BYTE_OFS_27 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_27_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_18_BYTE_OFS_27_SHIFT         0
+
+/***************************************************************************
+ *SPARE_AREA_READ_OFS_1C - Nand Flash Spare Area Read Bytes 28-31
+ ***************************************************************************/
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_28 [31:24] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_28_MASK          0xff000000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_28_SHIFT         24
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_29 [23:16] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_29_MASK          0x00ff0000
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_29_SHIFT         16
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_30 [15:08] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_30_MASK          0x0000ff00
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_30_SHIFT         8
+
+/* NAND :: SPARE_AREA_READ_OFS_1C :: BYTE_OFS_31 [07:00] */
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_31_MASK          0x000000ff
+#define BCHP_NAND_SPARE_AREA_READ_OFS_1C_BYTE_OFS_31_SHIFT         0
+
+/***************************************************************************
+ *LL_OP - Nand Flash Low Level Operation
+ ***************************************************************************/
+/* NAND :: LL_OP :: RETURN_IDLE [31:31] */
+#define BCHP_NAND_LL_OP_RETURN_IDLE_MASK                           0x80000000
+#define BCHP_NAND_LL_OP_RETURN_IDLE_SHIFT                          31
+
+/* NAND :: LL_OP :: reserved0 [30:20] */
+#define BCHP_NAND_LL_OP_reserved0_MASK                             0x7ff00000
+#define BCHP_NAND_LL_OP_reserved0_SHIFT                            20
+
+/* NAND :: LL_OP :: CLE [19:19] */
+#define BCHP_NAND_LL_OP_CLE_MASK                                   0x00080000
+#define BCHP_NAND_LL_OP_CLE_SHIFT                                  19
+
+/* NAND :: LL_OP :: ALE [18:18] */
+#define BCHP_NAND_LL_OP_ALE_MASK                                   0x00040000
+#define BCHP_NAND_LL_OP_ALE_SHIFT                                  18
+
+/* NAND :: LL_OP :: WE [17:17] */
+#define BCHP_NAND_LL_OP_WE_MASK                                    0x00020000
+#define BCHP_NAND_LL_OP_WE_SHIFT                                   17
+
+/* NAND :: LL_OP :: RE [16:16] */
+#define BCHP_NAND_LL_OP_RE_MASK                                    0x00010000
+#define BCHP_NAND_LL_OP_RE_SHIFT                                   16
+
+/* NAND :: LL_OP :: DATA [15:00] */
+#define BCHP_NAND_LL_OP_DATA_MASK                                  0x0000ffff
+#define BCHP_NAND_LL_OP_DATA_SHIFT                                 0
+
+/***************************************************************************
+ *LL_RDDATA - Nand Flash Low Level Read Data
+ ***************************************************************************/
+/* NAND :: LL_RDDATA :: reserved0 [31:16] */
+#define BCHP_NAND_LL_RDDATA_reserved0_MASK                         0xffff0000
+#define BCHP_NAND_LL_RDDATA_reserved0_SHIFT                        16
+
+/* NAND :: LL_RDDATA :: DATA [15:00] */
+#define BCHP_NAND_LL_RDDATA_DATA_MASK                              0x0000ffff
+#define BCHP_NAND_LL_RDDATA_DATA_SHIFT                             0
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_BASE                          BRCMNAND_CACHE_BASE
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_START                         0
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_END                           127
+#define BCHP_NAND_FLASH_CACHEi_ARRAY_ELEMENT_SIZE                  32
+
+/***************************************************************************
+ *FLASH_CACHE%i - Flash Cache Buffer Read Access
+ ***************************************************************************/
+/* NAND :: FLASH_CACHEi :: WORD [31:00] */
+#define BCHP_NAND_FLASH_CACHEi_WORD_MASK                           0xffffffff
+#define BCHP_NAND_FLASH_CACHEi_WORD_SHIFT                          0
+
+
+#endif /* #ifndef BCHP_NAND_7x_H__ */
+
+/* End of File */
diff --git a/include/linux/mtd/brcmnand.h b/include/linux/mtd/brcmnand.h
new file mode 100644
index 0000000000000000000000000000000000000000..addd4e87f7b04507c03da95f9de87b3014b597f8
--- /dev/null
+++ b/include/linux/mtd/brcmnand.h
@@ -0,0 +1,900 @@
+/*
+ <:copyright-BRCM:2012:GPL/GPL:standard 
+ 
+    Copyright (c) 2012 Broadcom Corporation
+    All Rights Reserved
+ 
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2, as published by
+ the Free Software Foundation (the "GPL").
+ 
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+ 
+ 
+ A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+ writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.
+ 
+ :>
+
+ * drivers/mtd/brcmnand/brcmnand.h
+ *
+ *  
+ *
+ * Data structures for Broadcom NAND controller
+ * 
+ * when     who     what
+ * 20060729 tht     Original coding
+ */
+
+
+#ifndef _BRCM_NAND_H_
+#define _BRCM_NAND_H_
+
+#include <linux/version.h>
+#include <generated/autoconf.h>
+#include <linux/mtd/nand.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#if 0
+/*
+ * Conversion between Kernel Kconfig and Controller version number
+ * Legacy codes 2.6.18
+ */
+
+#define CONFIG_MTD_BRCMNAND_VERS_0_0        0
+#define CONFIG_MTD_BRCMNAND_VERS_0_1        1
+#define CONFIG_MTD_BRCMNAND_VERS_1_0        2
+
+/* The following revs are not implemented for 2.6.12 */
+#define CONFIG_MTD_BRCMNAND_VERS_2_0        3
+#define CONFIG_MTD_BRCMNAND_VERS_2_1        4
+#define CONFIG_MTD_BRCMNAND_VERS_2_2        5
+
+/* Supporting MLC NAND */
+#define CONFIG_MTD_BRCMNAND_VERS_3_0        6
+#define CONFIG_MTD_BRCMNAND_VERS_3_1_0      7   /* RDB reads as 3.0 */
+#define CONFIG_MTD_BRCMNAND_VERS_3_1_1      8   /* RDB reads as 3.0 */
+#define CONFIG_MTD_BRCMNAND_VERS_3_2        9   
+#define CONFIG_MTD_BRCMNAND_VERS_3_3        10  
+#define CONFIG_MTD_BRCMNAND_VERS_3_4		11
+#endif
+
+/*
+ * New way of using version numbers
+ */
+#define BRCMNAND_VERSION(major, minor,int_minor)	((major<<16) | (minor<<8) | int_minor)
+
+/*
+ * BRCMNAND_INT_MINOR: Internal version number, not reflected on the silicon
+ */
+#if defined( CONFIG_BCM7601 ) || defined( CONFIG_BCM7400A0 )
+#define BRCMNAND_INT_MINOR	1
+#else
+#define BRCMNAND_INT_MINOR	0
+#endif
+#define CONFIG_MTD_BRCMNAND_VERSION	\
+	BRCMNAND_VERSION(CONFIG_BRCMNAND_MAJOR_VERS, CONFIG_BRCMNAND_MINOR_VERS, BRCMNAND_INT_MINOR)
+
+
+#define CONFIG_MTD_BRCMNAND_VERS_0_0		BRCMNAND_VERSION(0,0,1) /* (0,0,0) is DONT-CARE */
+#define CONFIG_MTD_BRCMNAND_VERS_0_1		BRCMNAND_VERSION(0,1,0)
+#define CONFIG_MTD_BRCMNAND_VERS_1_0		BRCMNAND_VERSION(1,0,0)
+
+/* The following revs are not implemented for 2.6.12 */
+#define CONFIG_MTD_BRCMNAND_VERS_2_0		BRCMNAND_VERSION(2,0,0)
+#define CONFIG_MTD_BRCMNAND_VERS_2_1		BRCMNAND_VERSION(2,1,0)
+#define CONFIG_MTD_BRCMNAND_VERS_2_2		BRCMNAND_VERSION(2,2,0)
+
+/* Supporting MLC NAND */
+#define CONFIG_MTD_BRCMNAND_VERS_3_0		BRCMNAND_VERSION(3,0,0)
+#define CONFIG_MTD_BRCMNAND_VERS_3_1_0		BRCMNAND_VERSION(3,1,0)	/* RDB reads as 3.0 */
+#define CONFIG_MTD_BRCMNAND_VERS_3_1_1		BRCMNAND_VERSION(3,1,1)	/* RDB reads as 3.0 */
+#define CONFIG_MTD_BRCMNAND_VERS_3_2		BRCMNAND_VERSION(3,2,0)	
+#define CONFIG_MTD_BRCMNAND_VERS_3_3		BRCMNAND_VERSION(3,3,0)	
+#define CONFIG_MTD_BRCMNAND_VERS_3_4		BRCMNAND_VERSION(3,4,0)
+
+/* Supporting ONFI */
+#define CONFIG_MTD_BRCMNAND_VERS_4_0		BRCMNAND_VERSION(4,0,0)
+
+/* Supporting 1KB ECC subpage */
+#define CONFIG_MTD_BRCMNAND_VERS_5_0		BRCMNAND_VERSION(5,0,0)
+
+/* Add 40-bit ECC support. Remove ECC_LEVEL_0 and SPARE_AREA_SIZE_0 fields. 
+  Expand ECC_LEVEL and SPARE_AREA_SIZE field */
+#define CONFIG_MTD_BRCMNAND_VERS_6_0		BRCMNAND_VERSION(6,0,0)
+
+/* Remove FAST_PGM_RDIN bit. Always sets true internally when PARTIAL_PAGE_EN=1.*/
+#define CONFIG_MTD_BRCMNAND_VERS_7_0		BRCMNAND_VERSION(7,0,0)
+#define CONFIG_MTD_BRCMNAND_VERS_7_1		BRCMNAND_VERSION(7,1,0)
+
+#ifdef CONFIG_MTD_BRCMNAND_EDU
+#define CONFIG_MTD_BRCMNAND_USE_ISR		1
+#endif
+
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
+#define MAX_NAND_CS 8 // Upper limit, actual limit varies depending on platform
+
+#else
+#define MAX_NAND_CS 1
+#endif
+
+
+//ST NAND flashes
+#ifndef FLASHTYPE_ST
+    #define FLASHTYPE_ST            0x20
+#endif
+#define ST_NAND128W3A           0x73
+#define ST_NAND256R3A           0x35
+#define ST_NAND256W3A           0x75
+#define ST_NAND256R4A           0x45
+#define ST_NAND256W4A           0x55
+#define ST_NAND512R3A           0x36    //Used on Bcm97400
+#define ST_NAND512W3A           0x76
+#define ST_NAND512R4A           0x46
+#define ST_NAND512W4A           0x56
+#define ST_NAND01GR3A           0x39
+#define ST_NAND01GW3A           0x79
+#define ST_NAND01GR4A           0x49
+#define ST_NAND01GW4A           0x59
+#define ST_NAND01GR3B           0xA1
+#define ST_NAND01GW3B           0xF1
+#define ST_NAND01GR4B           0xB1
+#define ST_NAND01GW4B           0xC1
+#define ST_NAND02GR3B           0xAA
+#define ST_NAND02GW3B           0xDA
+#define ST_NAND02GR4B           0xBA
+#define ST_NAND02GW4B           0xCA
+#define ST_NAND04GR3B           0xAC
+#define ST_NAND04GW3B           0xDC
+#define ST_NAND04GR4B           0xBC
+#define ST_NAND04GW4B           0xCC
+#define ST_NAND08GR3B           0xA3
+#define ST_NAND08GW3B           0xD3
+#define ST_NAND08GR4B           0xB3
+#define ST_NAND08GW4B           0xC3
+
+//Samsung NAND flash
+#define FLASHTYPE_SAMSUNG       0xEC
+#define SAMSUNG_K9F1G08R0A      0xA1
+#define SAMSUNG_K9F1G08U0A      0xF1
+#define SAMSUNG_K9F1G08U0E      0xF1
+#define SAMSUNG_K9F2G08U1A      0xF1
+#define SAMSUNG_K9F2G08U0A      0xDA
+#define SAMSUNG_K9K8G08U0A      0xD3
+#define SAMSUNG_K9F8G08U0M	0xD3
+
+
+//K9F5608(R/U/D)0D
+#define SAMSUNG_K9F5608R0D      0x35
+#define SAMSUNG_K9F5608U0D      0x75
+#define SAMSUNG_K9F5608D0D      0x75
+//K9F1208(R/B/U)0B
+#define SAMSUNG_K9F1208R0B      0x36
+#define SAMSUNG_K9F1208B0B      0x76
+#define SAMSUNG_K9F1208U0B      0x76
+
+/*--------- Chip ID decoding for Samsung MLC NAND flashes -----------------------*/
+#define SAMSUNG_K9LBG08U0M	0xD7	/* 55h, B6h, 78h */
+#define SAMSUNG_K9LBG08U0D	0xD7	/* D5h, 29h, 41h */
+#define SAMSUNG_K9LBG08U0E	0xD7	/* C5h, 72h, 54h, 42h */
+
+#define SAMSUNG_K9GAG08U0D	0xD5	/* 94h, 29h, 34h */
+#define SAMSUNG_K9GAG08U0E	0xD5	/* 84h, 72h, 50h, 42h */
+
+#define SAMSUNG_3RDID_INT_CHIPNO_MASK   NAND_CI_CHIPNR_MSK
+
+#define SAMSUNG_3RDID_CELLTYPE_MASK NAND_CI_CELLTYPE_MSK
+#define SAMSUNG_3RDID_CELLTYPE_SLC  0x00
+#define SAMSUNG_3RDID_CELLTYPE_4LV  0x04
+#define SAMSUNG_3RDID_CELLTYPE_8LV  0x08
+#define SAMSUNG_3RDID_CELLTYPE_16LV 0x0C
+
+// Low level MLC test as compared to the high level test in mtd-abi.h
+#define NAND_IS_MLC(chip) ((chip)->cellinfo & NAND_CI_CELLTYPE_MSK)
+
+#define SAMSUNG_3RDID_NOP_MASK		0x30
+#define SAMSUNG_3RDID_NOP_1         0x00
+#define SAMSUNG_3RDID_NOP_2         0x10
+#define SAMSUNG_3RDID_NOP_4         0x20
+#define SAMSUNG_3RDID_NOP_8         0x30
+
+#define SAMSUNG_3RDID_INTERLEAVE        0x40
+
+#define SAMSUNG_3RDID_CACHE_PROG        0x80
+
+#define SAMSUNG_4THID_PAGESIZE_MASK 0x03
+#define SAMSUNG_4THID_PAGESIZE_1KB  0x00
+#define SAMSUNG_4THID_PAGESIZE_2KB  0x01
+#define SAMSUNG_4THID_PAGESIZE_4KB  0x02
+#define SAMSUNG_4THID_PAGESIZE_8KB  0x03
+
+#define SAMSUNG_4THID_OOBSIZE_MASK  0x04
+#define SAMSUNG_4THID_OOBSIZE_8B        0x00
+#define SAMSUNG_4THID_OOBSIZE_16B   0x04
+
+#define SAMSUNG_4THID_BLKSIZE_MASK	0x30
+#define SAMSUNG_4THID_BLKSIZE_64KB	0x00
+#define SAMSUNG_4THID_BLKSIZE_128KB	0x10
+#define SAMSUNG_4THID_BLKSIZE_256KB	0x20
+#define SAMSUNG_4THID_BLKSIZE_512KB	0x30
+
+
+
+#define SAMSUNG2_4THID_PAGESIZE_MASK	0x03
+#define SAMSUNG2_4THID_PAGESIZE_2KB	0x00
+#define SAMSUNG2_4THID_PAGESIZE_4KB	0x01
+#define SAMSUNG2_4THID_PAGESIZE_8KB	0x02
+#define SAMSUNG2_4THID_PAGESIZE_RSV	0x03
+
+#define SAMSUNG2_4THID_BLKSIZE_MASK	0xB0
+#define SAMSUNG2_4THID_BLKSIZE_128KB	0x00
+#define SAMSUNG2_4THID_BLKSIZE_256KB	0x10
+#define SAMSUNG2_4THID_BLKSIZE_512KB	0x20
+#define SAMSUNG2_4THID_BLKSIZE_1MB	0x30
+#define SAMSUNG2_4THID_BLKSIZE_RSVD1	0x80
+#define SAMSUNG2_4THID_BLKSIZE_RSVD2	0x90
+#define SAMSUNG2_4THID_BLKSIZE_RSVD3	0xA0
+#define SAMSUNG2_4THID_BLKSIZE_RSVD4	0xB0
+
+#define SAMSUNG2_4THID_OOBSIZE_MASK			0x4c
+#define SAMSUNG2_4THID_OOBSIZE_PERPAGE_128	0x04
+#define SAMSUNG2_4THID_OOBSIZE_PERPAGE_218	0x08 /* 27.4 per 512B */
+#define SAMSUNG2_4THID_OOBSIZE_PERPAGE_400	0x0C /* 16 per 512B */
+#define SAMSUNG2_4THID_OOBSIZE_PERPAGE_436	0x40 /* 27.5 per 512B */
+
+#define SAMSUNG_5THID_NRPLANE_MASK  0x0C
+#define SAMSUNG_5THID_NRPLANE_1     0x00
+#define SAMSUNG_5THID_NRPLANE_2     0x04
+#define SAMSUNG_5THID_NRPLANE_4     0x08
+#define SAMSUNG_5THID_NRPLANE_8     0x0C
+
+#define SAMSUNG_5THID_PLANESZ_MASK  0x70
+#define SAMSUNG_5THID_PLANESZ_64Mb  0x00
+#define SAMSUNG_5THID_PLANESZ_128Mb 0x10
+#define SAMSUNG_5THID_PLANESZ_256Mb 0x20
+#define SAMSUNG_5THID_PLANESZ_512Mb 0x30
+#define SAMSUNG_5THID_PLANESZ_1Gb   0x40
+#define SAMSUNG_5THID_PLANESZ_2Gb   0x50
+#define SAMSUNG_5THID_PLANESZ_4Gb   0x60
+#define SAMSUNG_5THID_PLANESZ_8Gb   0x70
+
+#define SAMSUNG2_5THID_ECCLVL_MASK	0x70
+#define SAMSUNG2_5THID_ECCLVL_1BIT	0x00
+#define SAMSUNG2_5THID_ECCLVL_2BIT	0x10
+#define SAMSUNG2_5THID_ECCLVL_4BIT	0x20
+#define SAMSUNG2_5THID_ECCLVL_8BIT	0x30
+#define SAMSUNG2_5THID_ECCLVL_16BIT	0x40
+#define SAMSUNG2_5THID_ECCLVL_24BIT_1KB	0x50
+
+
+
+
+/*--------- END Samsung MLC NAND flashes -----------------------*/
+
+//Hynix NAND flashes
+#define FLASHTYPE_HYNIX         0xAD
+//Hynix HY27(U/S)S(08/16)561A
+#define HYNIX_HY27US08561A      0x75
+#define HYNIX_HY27US16561A      0x55
+#define HYNIX_HY27SS08561A      0x35
+#define HYNIX_HY27SS16561A      0x45
+//Hynix HY27(U/S)S(08/16)121A
+#define HYNIX_HY27US08121A      0x76
+#define HYNIX_HY27US16121A      0x56
+#define HYNIX_HY27SS08121A      0x36
+#define HYNIX_HY27SS16121A      0x46
+//Hynix HY27(U/S)F(08/16)1G2M
+#define HYNIX_HY27UF081G2M      0xF1
+#define HYNIX_HY27UF161G2M      0xC1
+#define HYNIX_HY27SF081G2M      0xA1
+#define HYNIX_HY27SF161G2M      0xAD
+
+/* This is the new version of HYNIX_HY27UF081G2M .  The 2M version is EOL */
+#define HYNIX_HY27UF081G2A      0xF1
+
+#define HYNIX_HY27UF082G2A      0xDA
+
+// #define HYNIX_HY27UF084G2M     0xDC /* replaced by the next one */
+#define HYNIX_HY27U4G8F2D		0xDC
+
+/* Hynix MLC flashes, same infos as Samsung, except the 5th Byte */
+#define HYNIX_HY27UT088G2A  0xD3
+
+/* Hynix MLC flashes, same infos as Samsung, except the 5th Byte */
+#define HYNIX_HY27UAG8T2M		0xD5	/* 14H, B6H, 44H: 3rd,4th,5th ID bytes */
+
+/* Number of Planes, same as Samsung */
+
+/* Plane Size Type 2 */
+#define HYNIX_5THID_PLANESZ_MASK    0x70
+#define HYNIX_5THID_PLANESZ_512Mb   0x00
+#define HYNIX_5THID_PLANESZ_1Gb 0x10
+#define HYNIX_5THID_PLANESZ_2Gb 0x20
+#define HYNIX_5THID_PLANESZ_4Gb 0x30
+#define HYNIX_5THID_PLANESZ_8Gb 0x40
+#define HYNIX_5THID_PLANESZ_RSVD1   0x50
+#define HYNIX_5THID_PLANESZ_RSVD2   0x60
+#define HYNIX_5THID_PLANESZ_RSVD3   0x70
+
+/* Legacy Hynix on H27U4G8F2D */
+/* Plane Size */
+#define HYNIX_5THID_LEG_PLANESZ_MASK		0x70
+#define HYNIX_5THID_LEG_PLANESZ_64Mb		0x00
+#define HYNIX_5THID_LEG_PLANESZ_128Mb	0x10
+#define HYNIX_5THID_LEG_PLANESZ_256Mb	0x20
+#define HYNIX_5THID_LEG_PLANESZ_512Mb	0x30
+#define HYNIX_5THID_LEG_PLANESZ_1Gb		0x40
+#define HYNIX_5THID_LEG_PLANESZ_2Gb		0x50
+#define HYNIX_5THID_LEG_PLANESZ_4Gb		0x60
+#define HYNIX_5THID_LEG_PLANESZ_8Gb		0x70
+
+
+/*--------- END Hynix MLC NAND flashes -----------------------*/
+
+//Micron flashes
+#define FLASHTYPE_MICRON        0x2C
+//MT29F2G(08/16)AAB
+#define MICRON_MT29F2G08AAB     0xDA
+#define MICRON_MT29F2G16AAB     0xCA
+
+#define MICRON_MT29F1G08ABA	0xF1
+#define MICRON_MT29F2G08ABA	0xDA
+#define MICRON_MT29F4G08ABA	0xDC
+
+#define MICRON_MT29F8G08ABA	0x38
+#define MICRON_MT29F16G08ABA	0x48 /* SLC, 2Ch, 48h, 00h, 26h, 89h */
+
+#define MICRON_MT29F16G08CBA	0x48 /* MLC, 2Ch, 48h, 04h, 46h, 85h
+										have same dev ID as the SLC part, bytes 3,4,5 are different however */
+
+/*
+ * Micron M60A & M68A ID encoding are similar to Samsung Type 1.
+ */
+
+#define MICRON_3RDID_INT_CHIPNO_MASK	NAND_CI_CHIPNR_MSK
+
+#define MICRON_3RDID_CELLTYPE_MASK	NAND_CI_CELLTYPE_MSK
+#define MICRON_3RDID_CELLTYPE_SLC	0x00
+#define MICRON_3RDID_CELLTYPE_4LV	0x04
+//#define MICRON_3RDID_CELLTYPE_8LV	0x08
+//#define MICRON_3RDID_CELLTYPE_16LV	0x0C
+
+
+/* Nbr of simultaneously programmed pages */
+#define MICRON_3RDID_SIMPG_MASK		0x30
+#define MICRON_3RDID_SIMPG_1			0x00
+#define MICRON_3RDID_SIMPG_2			0x10
+//#define MICRON_3RDID_SIM_4			0x20
+//#define MICRON_3RDID_SIM_8			0x30
+
+#define MICRON_3RDID_INTERLEAVE		0x40
+
+#define MICRON_3RDID_CACHE_PROG		0x80
+
+#define MICRON_4THID_PAGESIZE_MASK	0x03
+#define MICRON_4THID_PAGESIZE_1KB		0x00
+#define MICRON_4THID_PAGESIZE_2KB		0x01
+#define MICRON_4THID_PAGESIZE_4KB		0x02
+#define MICRON_4THID_PAGESIZE_8KB		0x03
+
+#define MICRON_4THID_OOBSIZE_MASK	0x04
+#define MICRON_4THID_OOBSIZE_8B		0x00
+#define MICRON_4THID_OOBSIZE_16B		0x04
+
+#define MICRON_4THID_BLKSIZE_MASK		0x30
+#define MICRON_4THID_BLKSIZE_64KB		0x00
+#define MICRON_4THID_BLKSIZE_128KB	0x10
+#define MICRON_4THID_BLKSIZE_256KB	0x20
+#define MICRON_4THID_BLKSIZE_512KB	0x30
+
+/* Required ECC level */
+#define MICRON_5THID_ECCLVL_MASK		0x03
+#define MICRON_5THID_ECCLVL_4BITS		0x02
+
+#define MICRON_5THID_NRPLANE_MASK	0x0C
+#define MICRON_5THID_NRPLANE_1		0x00
+#define MICRON_5THID_NRPLANE_2		0x04
+#define MICRON_5THID_NRPLANE_4		0x08
+//#define SAMSUNG_5THID_NRPLANE_8		0x0C
+
+#define MICRON_5THID_PLANESZ_MASK	0x70
+#define MICRON_5THID_PLANESZ_64Mb	0x00
+#define MICRON_5THID_PLANESZ_128Mb	0x10
+#define MICRON_5THID_PLANESZ_256Mb	0x20
+#define MICRON_5THID_PLANESZ_512Mb	0x30
+#define MICRON_5THID_PLANESZ_1Gb		0x40
+#define MICRON_5THID_PLANESZ_2Gb		0x50
+#define MICRON_5THID_PLANESZ_4Gb		0x60
+#define MICRON_5THID_PLANESZ_8Gb		0x70
+
+#define MICRON_5THID_INT_ECC_MASK	0x80
+#define MICRON_5THID_INT_ECC_ENA		0x80
+
+
+/*
+ * Micron M61A ID encoding will be phased out in favor of ONFI
+ */
+ #define MICRON_M61A_2NDID_VOLTAGE_MASK		0x0F
+ #define MICRON_M61A_2NDID_3_3V				0x08
+
+/* Not strictly followed, must rely on 5th ID byte for density */
+#define MICRON_M61A_2NDID_DENSITY_MASK		0xF0
+#define MICRON_M61A_2NDID_2Gb					0x10
+#define MICRON_M61A_2NDID_4Gb					0x20 
+#define MICRON_M61A_2NDID_8Gb					0x30 
+#define MICRON_M61A_2NDID_16Gb				0x40 
+
+/* M61A_3RDID_SLC is same as standard Samsung Type 1 */
+/* M61A_4THID_PAGESIZE same as standard Samsung Type 1 */
+
+#define MICRON_M61A_4THID_OOBSIZE_MASK		0x0C
+#define MICRON_M61A_4THID_OOBSIZE_28B		0x04	/* 224 per 4KB page */
+
+/* Pages per block ==> Block Size */
+#define MICRON_M61A_4THID_PGPBLK_MASK		0x70
+#define MICRON_M61A_4THID_128PG_PERBLK		0x20	/* 128 pages per block =512KB blkSize*/
+
+#define MICRON_M61A_4THID_MULTI_LUN_MASK	0x80
+#define MICRON_M61A_4THID_MLUN_SUPPORTED	0x80	/* 128 pages per block */
+
+
+#define MICRON_M61A_5THID_PLN_PER_LUN_MASK	0x03
+#define MICRON_M61A_5THID_2PLN				0x01	/* 2 planes per LUN */
+
+#define MICRON_M61A_5THID_BLK_PER_LUN_MASK	0x1C
+#define MICRON_M61A_5THID_2048BLKS			0x04	/* 2048 blks per LUN */
+
+//Spansion flashes
+#ifndef FLASHTYPE_SPANSION
+    #define FLASHTYPE_SPANSION      0x01
+#endif
+/* Large Page */
+#define SPANSION_S30ML01GP_08   0xF1    //x8 mode
+#define SPANSION_S30ML01GP_16   0xC1    //x16 mode
+#define SPANSION_S30ML02GP_08   0xDA    //x8 mode
+#define SPANSION_S30ML02GP_16   0xCA    //x16 mode
+#define SPANSION_S30ML04GP_08   0xDC    //x8 mode
+#define SPANSION_S30ML04GP_16   0xCC    //x16 mode
+
+/* Small Page */
+#define SPANSION_S30ML512P_08   0x76    //64MB x8 mode
+#define SPANSION_S30ML512P_16   0x56    //64MB x16 mode
+#define SPANSION_S30ML256P_08   0x75    //32MB x8 mode
+#define SPANSION_S30ML256P_16   0x55    //32MB x16 mode
+#define SPANSION_S30ML128P_08   0x73    //x8 mode
+#define SPANSION_S30ML128P_16   0x53    //x16 mode
+
+
+/* -------- Toshiba NAND E2PROM -----------------*/
+#define FLASHTYPE_TOSHIBA		0x98
+
+#define TOSHIBA_TC58NVG0S3ETA00	0xD1
+#define TOSHIBA_TC58NVG1S3ETAI5	0xDA
+#define TOSHIBA_TC58NVG3S0ETA00	0xD3
+
+/*---------------------------------------------------------------------------------------*/
+
+// Low level MLC test as compared to the high level test in mtd-abi.h
+#define NAND_IS_MLC(chip) ((chip)->cellinfo & NAND_CI_CELLTYPE_MSK)
+
+
+//Command Opcode
+#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_7_0
+#define OP_PAGE_READ                0x01
+#define OP_SPARE_AREA_READ          0x02
+#define OP_STATUS_READ              0x03
+#define OP_PROGRAM_PAGE             0x04
+#define OP_PROGRAM_SPARE_AREA       0x05
+#define OP_COPY_BACK                0x06
+#define OP_DEVICE_ID_READ           0x07
+#define OP_BLOCK_ERASE              0x08
+#define OP_FLASH_RESET              0x09
+#define OP_BLOCKS_LOCK              0x0A
+#define OP_BLOCKS_LOCK_DOWN         0x0B
+#define OP_BLOCKS_UNLOCK            0x0C
+#define OP_READ_BLOCKS_LOCK_STATUS  0x0D
+#define OP_PARAMETER_READ           0x0E
+#define OP_PARAMETER_CHANGE_COL     0x0F
+#define OP_LOW_LEVEL_OP             0x10
+#else
+#define OP_PAGE_READ                0x01000000
+#define OP_SPARE_AREA_READ          0x02000000
+#define OP_STATUS_READ              0x03000000
+#define OP_PROGRAM_PAGE             0x04000000
+#define OP_PROGRAM_SPARE_AREA       0x05000000
+#define OP_COPY_BACK                0x06000000
+#define OP_DEVICE_ID_READ           0x07000000
+#define OP_BLOCK_ERASE              0x08000000
+#define OP_FLASH_RESET              0x09000000
+#define OP_BLOCKS_LOCK              0x0A000000
+#define OP_BLOCKS_LOCK_DOWN         0x0B000000
+#define OP_BLOCKS_UNLOCK            0x0C000000
+#define OP_READ_BLOCKS_LOCK_STATUS  0x0D000000
+#define OP_PARAMETER_READ           0x0E000000
+#define OP_PARAMETER_CHANGE_COL     0x0F000000
+#define OP_LOW_LEVEL_OP             0x10000000
+#endif
+
+//NAND flash controller 
+#define NFC_FLASHCACHE_SIZE     512
+
+#if CONFIG_MTD_BRCMNAND_VERSION <=  CONFIG_MTD_BRCMNAND_VERS_3_2
+#define BCHP_NAND_LAST_REG		BCHP_NAND_BLK_WR_PROTECT
+
+#elif CONFIG_MTD_BRCMNAND_VERSION <=  CONFIG_MTD_BRCMNAND_VERS_3_3
+  #ifdef BCHP_NAND_TIMING_2_CS3
+#define BCHP_NAND_LAST_REG		BCHP_NAND_TIMING_2_CS3
+  #else
+#define BCHP_NAND_LAST_REG		BCHP_NAND_TIMING_2_CS2
+  #endif
+#elif CONFIG_MTD_BRCMNAND_VERSION <=  CONFIG_MTD_BRCMNAND_VERS_5_0
+#define BCHP_NAND_LAST_REG		BCHP_NAND_SPARE_AREA_READ_OFS_1C 
+#else
+#define BCHP_NAND_LAST_REG		BCHP_NAND_SPARE_AREA_WRITE_OFS_1C 
+#endif
+
+#define BRCMNAND_CTRL_REGS		(BCHP_NAND_REVISION)
+#define BRCMNAND_CTRL_REGS_END		(BCHP_NAND_LAST_REG)
+
+
+/**
+ * brcmnand_state_t - chip states
+ * Enumeration for BrcmNAND flash chip state
+ */
+typedef enum {
+    BRCMNAND_FL_READY = FL_READY,
+    BRCMNAND_FL_READING = FL_READING,
+    BRCMNAND_FL_WRITING = FL_WRITING,
+    BRCMNAND_FL_ERASING = FL_ERASING,
+    BRCMNAND_FL_SYNCING = FL_SYNCING,
+    BRCMNAND_FL_CACHEDPRG = FL_CACHEDPRG,
+    BRCMNAND_FL_UNLOCKING = FL_UNLOCKING,
+    BRCMNAND_FL_LOCKING = FL_LOCKING,
+    BRCMNAND_FL_RESETING = FL_RESETING,
+    BRCMNAND_FL_OTPING = FL_OTPING,
+    BRCMNAND_FL_PM_SUSPENDED = FL_PM_SUSPENDED,
+    BRCMNAND_FL_EXCLUSIVE = FL_UNKNOWN+10,  // Exclusive access to NOR flash, prevent all NAND accesses.
+    BRCMNAND_FL_XIP,            // Exclusive access to XIP part of the flash
+} brcmnand_state_t;
+
+//#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
+/*
+ * ECC levels, corresponding to BCHP_NAND_ACC_CONTROL_ECC_LEVEL
+ */
+typedef enum {
+    BRCMNAND_ECC_DISABLE    = 0u,
+    BRCMNAND_ECC_BCH_1      = 1u,
+    BRCMNAND_ECC_BCH_2      = 2u,
+    BRCMNAND_ECC_BCH_3      = 3u,
+    BRCMNAND_ECC_BCH_4      = 4u,
+    BRCMNAND_ECC_BCH_5      = 5u,
+    BRCMNAND_ECC_BCH_6      = 6u,
+    BRCMNAND_ECC_BCH_7      = 7u,
+    BRCMNAND_ECC_BCH_8      = 8u,
+    BRCMNAND_ECC_BCH_9      = 9u,
+    BRCMNAND_ECC_BCH_10     = 10u,
+    BRCMNAND_ECC_BCH_11     = 11u,
+    BRCMNAND_ECC_BCH_12     = 12u,
+    BRCMNAND_ECC_RESVD_1    = 13u,
+    BRCMNAND_ECC_RESVD_2    = 14u,
+    BRCMNAND_ECC_HAMMING    = 15u,
+} brcmnand_ecc_level_t;
+
+/*
+ * Number of required ECC bytes per 512B slice
+ */
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+static const unsigned int brcmnand_eccbytes[16] = {
+    [BRCMNAND_ECC_DISABLE]  = 0,
+    [BRCMNAND_ECC_BCH_1]    = 2,
+    [BRCMNAND_ECC_BCH_2]    = 4,
+    [BRCMNAND_ECC_BCH_3]    = 5,
+    [BRCMNAND_ECC_BCH_4]    = 7,
+    [BRCMNAND_ECC_BCH_5]    = 9,
+    [BRCMNAND_ECC_BCH_6]    = 10,
+    [BRCMNAND_ECC_BCH_7]    = 12,
+    [BRCMNAND_ECC_BCH_8]    = 13,
+    [BRCMNAND_ECC_BCH_9]    = 15,
+    [BRCMNAND_ECC_BCH_10]   = 17,
+    [BRCMNAND_ECC_BCH_11]   = 18,
+    [BRCMNAND_ECC_BCH_12]   = 20,
+    [BRCMNAND_ECC_RESVD_1]  = 0,
+    [BRCMNAND_ECC_RESVD_2]  = 0,
+    [BRCMNAND_ECC_HAMMING]  = 3,
+};
+#else
+static const unsigned int brcmnand_eccbytes[16] = {
+    [BRCMNAND_ECC_DISABLE]  = 0,
+    [BRCMNAND_ECC_BCH_1]    = 2,
+    [BRCMNAND_ECC_BCH_2]    = 4,
+    [BRCMNAND_ECC_BCH_3]    = 6,
+    [BRCMNAND_ECC_BCH_4]    = 7,
+    [BRCMNAND_ECC_BCH_5]    = 9,
+    [BRCMNAND_ECC_BCH_6]    = 11,
+    [BRCMNAND_ECC_BCH_7]    = 13,
+    [BRCMNAND_ECC_BCH_8]    = 14,
+    [BRCMNAND_ECC_BCH_9]    = 16,
+    [BRCMNAND_ECC_BCH_10]   = 18,
+    [BRCMNAND_ECC_BCH_11]   = 20,
+    [BRCMNAND_ECC_BCH_12]   = 21,
+    [BRCMNAND_ECC_RESVD_1]  = 0,
+    [BRCMNAND_ECC_RESVD_2]  = 0,
+    [BRCMNAND_ECC_HAMMING]  = 3,
+};
+
+#endif
+
+//#endif
+
+
+/**
+ * struct brcmnand_chip - BrcmNAND Private Flash Chip Data
+ * @param base      [BOARDSPECIFIC] address to access Broadcom NAND controller
+ * @param chipsize  [INTERN] the size of one chip for multichip arrays
+ * @param device_id [INTERN] device ID
+ * @param version_id   [INTERN] version ID
+ * @param options   [BOARDSPECIFIC] various chip options. They can partly be set to inform brcmnand_scan about
+ * @param erase_shift   [INTERN] number of address bits in a block
+ * @param page_shift    [INTERN] number of address bits in a page
+ * @param ppb_shift [INTERN] number of address bits in pages per block
+ * @param page_mask [INTERN] a page per block mask
+ * @cellinfo:           [INTERN] MLC/multichip data from chip ident
+ * @param readw     [REPLACEABLE] hardware specific function for read short
+ * @param writew    [REPLACEABLE] hardware specific function for write short
+ * @param command   [REPLACEABLE] hardware specific function for writing commands to the chip
+ * @param wait      [REPLACEABLE] hardware specific function for wait on ready
+ * @param read_bufferram    [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @param write_bufferram   [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @param read_word [REPLACEABLE] hardware specific function for read register of BrcmNAND
+ * @param write_word    [REPLACEABLE] hardware specific function for write register of BrcmNAND
+ * @param scan_bbt  [REPLACEABLE] hardware specific function for scanning Bad block Table
+ * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip
+ * @param wq        [INTERN] wait queue to sleep on if a BrcmNAND operation is in progress
+ * @param state     [INTERN] the current state of the BrcmNAND device
+ * @param autooob   [REPLACEABLE] the default (auto)placement scheme
+ * @param bbm       [REPLACEABLE] pointer to Bad Block Management
+ * @param priv      [OPTIONAL] pointer to private chip date
+ */
+
+/*
+ * Global members, shared by all ChipSelect, one per controller
+ */
+struct brcmnand_ctrl {
+	spinlock_t			chip_lock;
+	//atomic_t			semCount; // Used to lock out NAND access for NOR, TBD
+	wait_queue_head_t		wq;
+	brcmnand_state_t		state;
+	
+	struct nand_buffers* 	buffers; // THT 2.6.18-5.3: Changed to pointer to accommodate EDU
+#define BRCMNAND_OOBBUF(pbuf) (&((pbuf)->databuf[NAND_MAX_PAGESIZE]))
+
+	unsigned int		numchips; // Always 1 in v0.0 and 0.1, up to 8 in v1.0+
+	int 				CS[MAX_NAND_CS];	// Value of CS selected one per chip, in ascending order of chip Select (enforced)..
+										// Say, user uses CS0, CS2, and CS5 for NAND, then the first 3 entries
+										// have the values 0, 2 and 5, and numchips=3.
+};
+
+struct brcmnand_chip {
+
+	
+	/* Shared by all Chip select */
+	struct brcmnand_ctrl* ctrl;
+
+	/*
+	 *	Private members, 
+	 *
+	  */
+    //unsigned long     regs;   /* Register page */
+    unsigned char __iomem       *vbase; /* Virtual address of start of flash */
+    unsigned long       pbase; // Physical address of vbase
+    unsigned long       device_id;
+
+    //THT: In BrcmNAND, the NAND controller  keeps track of the 512B Cache
+    // so there is no need to manage the buffer ram.
+    //unsigned int      bufferram_index;
+    //struct brcmnand_bufferram bufferram;
+
+    int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
+    int (*wait)(struct mtd_info *mtd, int state, uint32_t* pStatus);
+    
+    unsigned short (*read_word)(void __iomem *addr);
+    void (*write_word)(unsigned short value, void __iomem *addr);
+
+    // THT: Sync Burst Read, not supported.
+    //void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+
+    // Private methods exported from BBT
+    int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);    
+    int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+    int (*scan_bbt)(struct mtd_info *mtd);
+    int (*erase_bbt)(struct mtd_info *mtd, struct erase_info *instr, int allowbbt, int doNotUseBBT);
+
+    uint32_t (*ctrl_read) (uint32_t command);
+    void (*ctrl_write) (uint32_t command, uint32_t val);
+    uint32_t (*ctrl_writeAddr)(struct brcmnand_chip* chip, loff_t addr, int cmdEndAddr);
+
+    /*
+     * THT: Private methods exported to BBT, equivalent to the methods defined in struct ecc_nand_ctl
+     * The caller is responsible to hold locks before calling these routines
+     * Input and output buffers __must__ be aligned on a DW boundary (enforced inside the driver).
+     * EDU may require that the buffer be aligned on a 512B boundary.
+     */
+    int (*read_page)(struct mtd_info *mtd,  
+        uint8_t *outp_buf, uint8_t* outp_oob, uint64_t page);
+    int (*write_page)(struct mtd_info *mtd, 
+        const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page);
+    int (*read_page_oob)(struct mtd_info *mtd, uint8_t* outp_oob, uint64_t page);
+    int (*write_page_oob)(struct mtd_info *mtd,  const uint8_t* inp_oob, uint64_t page, int isMarkBadBlock);
+    
+    int (*write_is_complete)(struct mtd_info *mtd, int* outp_needBBT);
+
+    /*
+     * THT: Same as the mtd calls with same name, except that locks are 
+     * expected to be already held by caller.  Mostly used by BBT codes
+     */
+    int (*read_oob) (struct mtd_info *mtd, loff_t from,
+             struct mtd_oob_ops *ops);
+    int (*write_oob) (struct mtd_info *mtd, loff_t to,
+             struct mtd_oob_ops *ops);
+
+    uint64_t            chipSize;
+	
+    int                 directAccess;       // For v1,0+, use directAccess or EBI address   
+	int				xor_disable;	// Value of  !NAND_CS_NAND_XOR:00
+	int				csi; /* index into the CS array.  chip->CS[chip->csi] yield the value of HW ChipSelect */
+
+    unsigned int        chip_shift; // How many bits should be shifted.
+    uint64_t            mtdSize;    // Total size of NAND flash, 64 bit integer for V1.0.  This supersedes mtd->size which is
+                                // currently defined as a uint32_t.
+
+    /* THT Added */
+    unsigned int        busWidth, pageSize, blockSize; /* Actually page size from chip, as reported by the controller */
+
+    unsigned int        erase_shift;
+    unsigned int        page_shift;
+    int                 phys_erase_shift;   
+    int                 bbt_erase_shift;
+    //unsigned int      ppb_shift;  /* Pages per block shift */
+    unsigned int        page_mask;
+    //int               subpagesize;
+    uint8_t             cellinfo;
+	uint8_t			nop;
+
+    //u_char*           data_buf;   // Replaced by buffers
+    //u_char*           oob_buf;
+    int                 oobdirty;
+    uint8_t*            data_poi;
+    uint8_t*            oob_poi;
+    unsigned int        options;
+    int                 badblockpos;
+    
+    //unsigned long     chipsize;
+    int                 pagemask;
+    int64_t             pagebuf; /* Cached page number.  This can be a 36 bit signed integer. 
+                          * -1LL denotes NULL/invalidated page cache. */
+    int                 oobavail; // Number of free bytes per page
+    int                 disableECC; /* Turn on for 100% valid chips that don't need ECC 
+                         * might need in future for Spansion flash */
+                
+    struct nand_ecclayout *ecclayout;
+
+	
+	int			reqEccLevel;	/* Required ECC level, from chipID string (Samsung Type 2, Micron) 
+								 * or from datasheet otherwise */
+
+    // THT Used in lieu of struct nand_ecc_ctrl ecc;
+	brcmnand_ecc_level_t ecclevel;	// Actual ECC scheme used, must be >= reqEccLevel
+	int			ecctotal; // total number of ECC bytes per page, 3 for Small pages, 12 for large pages.
+    int                 eccsize; // Size of the ECC block, always 512 for Brcm Nand Controller
+	int			eccbytes; // How many bytes are used for ECC per eccsize (3 for Hamming)
+	int			eccsteps; // How many ECC blocks per page (4 for 2K page, 1 for 512B page, 8 for 4K page, etc.)
+	int			eccOobSize; // # of oob byte per ECC step, mostly 16, 27 for BCH-8
+
+	int			eccSectorSize; // Sector size, not necessarily 512B for new flashes
+	
+    
+    //struct nand_hw_control hwcontrol;
+
+    struct mtd_oob_ops  ops;
+
+    
+    uint8_t             *bbt;
+	uint32_t		bbtSize;
+    int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt);
+    struct nand_bbt_descr   *bbt_td;
+    struct nand_bbt_descr   *bbt_md;
+    struct nand_bbt_descr   *badblock_pattern;
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+    struct brcmnand_cet_descr *cet;     /* CET descriptor */
+#endif
+
+    void                *priv;
+};
+
+#ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
+
+#define BRCMNAND_CET_DISABLED   0x01    /* CET is disabled due to a serious error */
+#define BRCMNAND_CET_LAZY   0x02    /* Reload CET when needed */
+#define BRCMNAND_CET_LOADED 0x04    /* CET is in memory */
+/*
+ * struct brcmnand_cet_descr - Correctable Error Table (CET) descriptor
+ * @offs        Offset in OOB where the CET signature begins
+ * @len         Length (in bytes) of the CET signature
+ * @startblk        Block address starting where CET begins
+ * @sign        Growth of CET (top->down or down->top) 
+ *          Inverse direct of BBT's sign
+ * @flags       Status of CET disabled/lazy/loaded
+ * @cerr_count      Total correctable errors encountered so far
+ * @numblks     Number of blocks that CET spans
+ * @maxblks     Maximum blocks that CET can have 2*numblks
+ * @brcmnand_cet_memtable   Pointer to in-memory CET
+ * @pattern     Identifier used to recognize CET
+ * @cet_flush       Kernel work queue to handle flush of in-mem
+ *          CET to the flash 
+ */
+struct brcmnand_cet_descr {
+    uint8_t offs;       
+    uint8_t len;        
+    int startblk;   
+    char sign;      /* 1 => bottom->top -1 => top->bottom - inverse of BBT */
+    char flags;     
+    uint32_t cerr_count;    
+    int numblks;        
+    int maxblks;        
+    struct brcmnand_cet_memtable  *memtbl;  
+    char *pattern;      
+	struct mtd_info *mtd;
+    struct delayed_work cet_flush;
+};
+
+/*
+ * Copy of the CET in memory for faster access and easy rewrites
+ * @isdirty     dirty = true => flush data to flash 
+ * @blk         the physical block# (flash) that this bitvec belongs to
+ * @bitvec      pointer to one block (blk#) of data
+ */
+struct brcmnand_cet_memtable {
+    char isdirty;       
+    int blk;        
+    char *bitvec;       
+};
+#endif
+
+
+/*
+ * Options bits
+ */
+#define BRCMNAND_CONT_LOCK      (0x0001)
+
+
+//extern void brcmnand_prepare_reboot(void);
+
+/*
+ * @ mtd        The MTD interface handle from opening "/dev/mtd<n>" or "/dev/mtdblock<n>"
+ * @ buff       Buffer to hold the data read from the NOR flash, must be able to hold len bytes, and aligned on
+ *          word boundary.
+ * @ offset Offset of the data from CS0 (on NOR flash), must be on word boundary.
+ * @ len        Number of bytes to be read, must be even number.
+ *
+ * returns 0 on success, negative error codes on failure.
+ *
+ * The caller thread may block until access to the NOR flash can be granted.
+ * Further accesses to the NAND flash (from other threads) will be blocked until this routine returns.
+ * The routine performs the required swapping of CS0/CS1 under the hood.
+ */
+extern int brcmnand_readNorFlash(struct mtd_info *mtd, void* buff, unsigned int offset, int len);
+
+#if (CONFIG_BRCMNAND_MAJOR_VERS == 7)
+#include "bchp_nand_7x.h"
+#elif (CONFIG_BRCMNAND_MAJOR_VERS == 4)
+#include "bchp_nand_40.h"
+#elif (CONFIG_BRCMNAND_MAJOR_VERS == 2)
+#include "bchp_nand_21_22.h"
+#endif
+
+#endif
diff --git a/include/linux/mtd/mtd64.h b/include/linux/mtd/mtd64.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff0e74b70536fc8a006345c99976a9c635e60517
--- /dev/null
+++ b/include/linux/mtd/mtd64.h
@@ -0,0 +1,331 @@
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/*
+ * include/linux/mtd/mtd64.h
+ *
+ <:copyright-BRCM:2012:DUAL/GPL:standard
+ 
+    Copyright (c) 2012 Broadcom Corporation
+    All Rights Reserved
+ 
+ Unless you and Broadcom execute a separate written software license
+ agreement governing use of this software, this software is licensed
+ to you under the terms of the GNU General Public License version 2
+ (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+ with the following added to such license:
+ 
+    As a special exception, the copyright holders of this software give
+    you permission to link this software with independent modules, and
+    to copy and distribute the resulting executable under terms of your
+    choice, provided that you also meet, for each linked independent
+    module, the terms and conditions of the license of that module.
+    An independent module is a module which is not derived from this
+    software.  The special exception does not apply to any modifications
+    of the software.
+ 
+ Not withstanding the above, under no circumstances may you combine
+ this software in any way with any other Broadcom software provided
+ under a license other than the GPL, without Broadcom's express prior
+ written consent.
+ 
+ :> 
+ *
+ * Data structures for MTD 64 bit ops (Borrowed heavily from broadcom NAND
+ * controller brcmnand_priv.h)
+ * 
+ * when		who		what
+ * 20080805	sidc		Original coding
+ */
+
+#ifndef _MTD64_H_
+#define _MTD64_H_
+
+#include <generated/autoconf.h>
+
+/*
+ * 64 bit arithmetic
+ */
+#include <asm-generic/gcclib.h>
+#include <asm-generic/longlong.h>
+#include <linux/bitmap.h>
+
+#define LONGLONG_TO_BITS (sizeof(uint64_t)*BITS_PER_UNIT)
+
+/*
+ * Create a 64 bit number out of two 32 bit halves (high and low words)
+ */
+static inline int64_t mtd64_constructor(long hi, unsigned long low) 
+{
+	DIunion ull;
+
+	ull.s.high = hi;
+	ull.s.low = low;
+
+	return ull.ll;
+}
+
+
+/*
+ * Allow inline printing of 64 bit integer values
+ */
+static inline char *mtd64_sprintf(char* msg, int64_t offset)
+{
+	DIunion llw;
+
+	llw.ll = offset;
+	sprintf(msg, "%08x:%08x", llw.s.high, llw.s.low);
+	return msg;
+}
+
+static inline int mtd64_is_positive(int64_t ll)
+{
+	DIunion u;
+
+	u.ll = ll;
+	return ((int) u.s.high > 0 || (((int) u.s.high) == 0 && ((unsigned int) u.s.low) > 0));
+}
+
+static inline int mtd64_is_positiveorzero(int64_t ll)
+{
+	DIunion u;
+
+	u.ll = ll;
+	return ((int) u.s.high >= 0 || (((int) u.s.high) == 0 && ((unsigned int) u.s.low) >= 0));
+}
+
+/*
+ * Returns low DWord
+ */
+static inline uint32_t mtd64_ll_low(int64_t ll)
+{
+	DIunion ull;
+
+	ull.ll = ll;
+	return (uint32_t) ull.s.low;
+}
+
+/*
+ * Returns high DWord
+ */
+static inline int32_t mtd64_ll_high(int64_t ll)
+{
+	DIunion ull;
+
+	ull.ll = ll;
+	return (int32_t) ull.s.high;
+}
+  
+static inline int mtd64_ll_ffs(uint64_t ll)
+{
+	DIunion ull;
+	int res;
+
+	ull.ll = ll;
+	res = ffs(ull.s.low);
+	if (res)
+		return res;
+	res = ffs(ull.s.high);
+	return (32 + res);
+}
+
+#if 0
+/*
+ * Returns (ll >> shift)
+ */
+static inline uint64_t mtd64_rshft(uint64_t ll, int shift)
+{
+	DIunion src, res;
+
+	src.ll = ll;
+	bitmap_shift_right((unsigned long*) &res, (unsigned long*) &src, shift, LONGLONG_TO_BITS);
+	return res.ll;
+}
+#define mtd64_rshft32(ll,s) mtd64_rshft(ll, s)
+
+/*
+ * Returns (ul << shift) with ul a 32-bit unsigned integer.  Returned value is a 64bit integer
+ */
+static inline uint64_t mtd64_lshft32(uint64_t ll, int shift)
+{
+	DIunion src, res;
+
+	src.ll = ll;
+	bitmap_shift_left((unsigned long*) &res, (unsigned long*) &src, shift, LONGLONG_TO_BITS);
+	return res.ll;
+}
+
+/* 
+ * returns (left + right)
+ */
+static inline int64_t mtd64_add(int64_t left, int64_t right)
+{
+	DIunion l, r, sum;
+
+	l.ll = left;
+	r.ll = right;
+
+	add_ssaaaa(sum.s.high, sum.s.low, l.s.high, l.s.low, r.s.high, r.s.low);
+	return sum.ll;
+}
+
+/*
+ * returns (left + right), with right being a 32-bit integer
+ */
+static inline int64_t mtd64_add32(int64_t left, int right)
+{
+	DIunion l, r, sum;
+
+	l.ll = left;
+	r.s.high = 0;
+	r.s.low = right;
+
+	add_ssaaaa(sum.s.high, sum.s.low, l.s.high, l.s.low, r.s.high, r.s.low);
+	return sum.ll;
+}
+
+/*
+ * returns (left - right)
+ */
+static inline int64_t mtd64_sub(int64_t left, int64_t right)
+{
+	DIunion l, r, diff;
+
+	l.ll = left;
+	r.ll = right;
+
+	sub_ddmmss(diff.s.high, diff.s.low, l.s.high, l.s.low, r.s.high, r.s.low);
+	return diff.ll;
+}
+
+/*
+ * returns (left - right)
+ */
+static inline int64_t mtd64_sub32(int64_t left, int  right)
+{
+	DIunion l, r, diff;
+
+	l.ll = left;
+	r.s.low = right;
+	r.s.high = 0;
+
+	sub_ddmmss(diff.s.high, diff.s.low, l.s.high, l.s.low, r.s.high, r.s.low);
+	return diff.ll;
+}
+
+static inline int mtd64_notequals(int64_t left, int64_t right)
+{
+	DIunion l, r;
+
+	l.ll = left;
+	r.ll = right;
+
+	if (l.s.high == r.s.high && l.s.low == r.s.low) 
+		return 0;
+	return 1;
+}
+
+static inline int mtd64_equals(int64_t left, int64_t right)
+{
+	DIunion l, r;
+
+	l.ll = left;
+	r.ll = right;
+
+	if (l.s.high == r.s.high && l.s.low == r.s.low) 
+		return 1;
+	return 0;
+}
+
+static inline int mtd64_is_greater(int64_t left, int64_t right)
+{
+	return mtd64_is_positive(mtd64_sub(left, right));
+}
+
+static inline int mtd64_is_gteq(int64_t left, int64_t right)
+{
+	return mtd64_is_positiveorzero(mtd64_sub(left, right));
+}
+
+static inline int mtd64_is_less(int64_t left, int64_t right)
+{
+	return mtd64_is_positive(mtd64_sub(right, left));
+}
+
+static inline int mtd64_is_lteq(int64_t left, int64_t right)
+{
+	return mtd64_is_positiveorzero(mtd64_sub(right, left));
+}
+
+/*
+ * Returns (left & right)
+ */
+static inline uint64_t mtd64_and(uint64_t left, uint64_t right)
+{
+	uint64_t res;
+	bitmap_and((unsigned long*) &res, (unsigned long*) &left, (unsigned long*) &right, LONGLONG_TO_BITS);
+	return res;
+}
+
+static inline uint64_t mtd64_or(uint64_t left, uint64_t right)
+{
+	uint64_t res;
+	bitmap_or((unsigned long *) &res, (unsigned long *) &left, (unsigned long *) &right, LONGLONG_TO_BITS);
+	return res;
+}
+
+/*
+ * Multiply 2 32-bit integer, result is 64bit
+ */
+static inline uint64_t mtd64_mul(unsigned int left, unsigned int right)
+{
+	DIunion llw;
+	
+	umul_ppmm(llw.s.high, llw.s.low, left, right);
+	return llw.ll;
+}
+
+/*
+ * res 		Result
+ * high:low 	u64 bit 
+ * base		Divisor
+ * rem		Remainder
+ */
+#define do_mtd_div64_32(res, high, low, base, rem) ({ \
+        unsigned long __quot, __mod; \
+        unsigned long __cf, __tmp, __tmp2, __i; \
+        \
+        __asm__(".set   push\n\t" \
+                ".set   noat\n\t" \
+                ".set   noreorder\n\t" \
+                "move   %2, $0\n\t" \
+                "move   %3, $0\n\t" \
+                "b      1f\n\t" \
+                " li    %4, 0x21\n" \
+                "0:\n\t" \
+                "sll    $1, %0, 0x1\n\t" \
+                "srl    %3, %0, 0x1f\n\t" \
+                "or     %0, $1, %5\n\t" \
+                "sll    %1, %1, 0x1\n\t" \
+                "sll    %2, %2, 0x1\n" \
+                "1:\n\t" \
+                "bnez   %3, 2f\n\t" \
+                " sltu  %5, %0, %z6\n\t" \
+                "bnez   %5, 3f\n" \
+                "2:\n\t" \
+                " addiu %4, %4, -1\n\t" \
+                "subu   %0, %0, %z6\n\t" \
+                "addiu  %2, %2, 1\n" \
+                "3:\n\t" \
+                "bnez   %4, 0b\n\t" \
+                " srl   %5, %1, 0x1f\n\t" \
+                ".set   pop" \
+                : "=&r" (__mod), "=&r" (__tmp), "=&r" (__quot), "=&r" (__cf), \
+                  "=&r" (__i), "=&r" (__tmp2) \
+                : "Jr" (base), "0" (high), "1" (low)); \
+        \
+        (res) = __quot; \
+        (rem) = __mod; \
+        __quot; })
+
+#endif
+#endif
+#endif // CONFIG_BCM_KF_MTD_BCMNAND
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 1482340d3d9f5e0a6ba5d61a020b112320618362..eada5f4c978088b5d31a1116be8d9ff938d47dcd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -229,6 +229,11 @@ typedef enum {
 /* Chip may not exist, so silence any errors in scan */
 #define NAND_SCAN_SILENT_NODEV	0x00040000
 
+#if defined(CONFIG_BCM_KF_MTD_BCMNAND)
+/* For Hynix MLC flashes, the BI are written to last and (last-2) pages. */
+#define NAND_SCAN_BI_3RD_PAGE   0x00100000
+#endif
+
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
 #define NAND_CONTROLLER_ALLOC	0x80000000
@@ -559,6 +564,9 @@ struct nand_chip {
 #define NAND_MFR_MICRON		0x2c
 #define NAND_MFR_AMD		0x01
 #define NAND_MFR_MACRONIX	0xc2
+#if defined(CONFIG_BCM_KF_NAND)
+#define NAND_MFR_GIGADEVICE	0xc8
+#endif
 
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
diff --git a/include/linux/nbuff.h b/include/linux/nbuff.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccebe2c519a7d0995fcec10bc2c9f16d4055b8
--- /dev/null
+++ b/include/linux/nbuff.h
@@ -0,0 +1,1791 @@
+#if defined(CONFIG_BCM_KF_NBUFF)
+
+#ifndef __NBUFF_H_INCLUDED__
+#define __NBUFF_H_INCLUDED__
+
+
+/*
+<:copyright-BRCM:2013:DUAL/GPL:standard
+
+   Copyright (c) 2013 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : nbuff.h
+ * Description: Definition of a network buffer to support various forms of
+ *      network buffer, to include Linux socket buff (SKB), lightweight
+ *      fast kernel buff (FKB), BRCM Free Pool buffer (FPB), and traffic
+ *      generator support buffer (TGB)
+ *
+ *      nbuff.h may also be used to provide an interface to common APIs 
+ *      available on other OS (in particular BSD style mbuf).
+ *
+ * Common APIs provided: pushing, pulling, reading, writing, cloning, freeing
+ *
+ * Implementation Note:
+ *
+ * One may view NBuff as a base class from which other buff types are derived.
+ * Examples of derived network buffer types are sk_buff, fkbuff, fpbuff, tgbuff
+ *
+ * A pointer to a buffer is converted to a pointer to a special (derived) 
+ * network buffer type by encoding the type into the least significant 2 bits
+ * of a word aligned buffer pointer. pBuf points to the real network 
+ * buffer and pNBuff refers to pBuf ANDed with the Network Buffer Type.
+ * This is analogous to a C++ 'this' pointer to a virtual class (vtable based virtual function thunks).
+ *
+ * Thunk functions to redirect the calls to the appropriate buffer type, e.g.
+ * SKB or FKB uses the Network Buffer Pointer type information.
+ *
+ * This file also implements the Fast Kernel Buffer API. The fast kernel buffer
+ * carries a minimal context of the received buffer and associated buffer
+ * recycling information.
+ *
+ ******************************************************************************* */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#include <linux/types.h>            /* include ISO C99 inttypes.h             */
+#include <linux/skbuff.h>           /* include corresponding BSD style mbuf   */
+#include <linux/blog.h>
+#include <bcm_pkt_lengths.h>
+
+#define NBUFF_VERSION              "v1.0"
+
+/* Engineering Constants for Fast Kernel Buffer Global Pool (used for clones) */
+#define SUPPORT_FKB_EXTEND
+#if defined(CONFIG_BCM_KF_WL)
+#define FKBC_POOL_SIZE_ENGG         (2080)  /*1280 more to be allocated for wireless*/
+#else
+#define FKBC_POOL_SIZE_ENGG         800
+#endif
+#define FKBC_EXTEND_SIZE_ENGG       32      /* Number of FkBuf_t per extension*/
+#define FKBC_EXTEND_MAX_ENGG        16      /* Maximum extensions allowed     */
+
+#define FKBM_POOL_SIZE_ENGG         128
+#define FKBM_EXTEND_SIZE_ENGG       2
+#define FKBM_EXTEND_MAX_ENGG        200     /* Assuming one unshare           */
+
+/*
+ * Network device drivers ported to NBUFF must ensure that the headroom is at
+ * least 186 bytes in size. Remove this dependency (TBD).
+ */
+// #define CC_FKB_HEADROOM_AUDIT
+
+/* Conditional compile of FKB functional APIs as inlined or non-inlined */
+#define CC_CONFIG_FKB_FN_INLINE
+#ifdef CC_CONFIG_FKB_FN_INLINE
+#define FKB_FN(fn_name, fn_signature, body)                                    \
+static inline fn_signature { body; }    /* APIs inlined in header file */
+#else
+#ifdef FKB_IMPLEMENTATION_FILE
+#define FKB_FN(fn_name, fn_signature, body)                                    \
+fn_signature { body; }                                                         \
+EXPORT_SYMBOL(fn_name);                 /* APIs declared in implementation */
+#else
+#define FKB_FN(fn_name, fn_signature, body)                                    \
+extern fn_signature;
+#endif  /* !defined(FKB_IMPLEMENTATION_FILE) */
+#endif  /* !defined(FKB_FN) */
+
+/* LAB ONLY: Design development */
+// #define CC_CONFIG_FKB_STATS
+// #define CC_CONFIG_FKB_COLOR
+// #define CC_CONFIG_FKB_DEBUG
+// #define CC_CONFIG_FKB_AUDIT
+// #define CC_CONFIG_FKB_STACK
+
+// #include <linux/smp.h>       /* smp_processor_id() CC_CONFIG_FKB_AUDIT */
+
+#if defined(CC_CONFIG_FKB_STATS)
+#define FKB_STATS(stats_code)   do { stats_code } while(0)
+#else
+#define FKB_STATS(stats_code)   NULL_STMT
+#endif
+
+#if defined(CC_CONFIG_FKB_STACK)
+extern void dump_stack(void);
+#define DUMP_STACK()            dump_stack()
+#else
+#define DUMP_STACK()            NULL_STMT
+#endif
+
+#if defined(CC_CONFIG_FKB_AUDIT)
+#define FKB_AUDIT(audit_code)   do { audit_code } while(0)
+#else
+#define FKB_AUDIT(audit_code)   NULL_STMT
+#endif
+
+extern int nbuff_dbg;
+#if defined(CC_CONFIG_FKB_DEBUG)
+#define fkb_dbg(lvl, fmt, arg...) \
+    if (nbuff_dbg >= lvl) printk( "FKB %s :" fmt "[<%08x>]\n", \
+        __FUNCTION__, ##arg, (int)__builtin_return_address(0) )
+#define FKB_DBG(debug_code)     do { debug_code } while(0)
+#else
+#define fkb_dbg(lvl, fmt, arg...)      do {} while(0)
+#define FKB_DBG(debug_code)     NULL_STMT
+#endif
+
+#define CC_NBUFF_FLUSH_OPTIMIZATION
+
+/* CACHE OPERATIONS */
+#define FKB_CACHE_FLUSH         0
+#define FKB_CACHE_INV           1
+
+/* OS Specific Section Begin */
+#if defined(__KERNEL__)     /* Linux MIPS Cache Specific */
+/*
+ *------------------------------------------------------------------------------
+ * common cache operations:
+ *
+ * - addr is rounded down to the cache line
+ * - end is rounded up to cache line.
+ *
+ * - if ((addr == end) and (addr was cache aligned before rounding))
+ *       no operation is performed.
+ *   else
+ *       flush data cache line UPTO but NOT INCLUDING rounded up end.
+ *
+ * Note:
+ * if before rounding, (addr == end)  AND addr was not cache aligned,
+ *      we would flush at least one line.
+ *
+ * Uses: L1_CACHE_BYTES
+ *------------------------------------------------------------------------------
+ */
+#include <asm/cache.h>
+#ifdef CONFIG_MIPS
+#include <asm/r4kcache.h>
+#endif  /* CONFIG_MIPS */
+
+/*
+ * Macros to round down and up, an address to a cachealigned address
+ */
+#define ADDR_ALIGN_DN(addr, align)  ( (addr) & ~((align) - 1) )
+#define ADDR_ALIGN_UP(addr, align)  ( ((addr) + (align) - 1) & ~((align) - 1) )
+
+#ifdef CONFIG_MIPS
+/*
+ *------------------------------------------------------------------------------
+ * Function   : cache_flush_region
+ * Description: 
+ * Writeback flush, then invalidate a region demarcated by addr to end.
+ * Cache line following rounded up end is not flushed.
+ *------------------------------------------------------------------------------
+ */
+static inline void cache_flush_region(void *addr, void *end)
+{
+    unsigned long a = ADDR_ALIGN_DN( (unsigned long)addr, L1_CACHE_BYTES );
+    unsigned long e = ADDR_ALIGN_UP( (unsigned long)end, L1_CACHE_BYTES );
+    while ( a < e )
+    {
+        flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
+        a += L1_CACHE_BYTES;    /* next cache line base */
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : cache_flush_len
+ * Description: 
+ * Writeback flush, then invalidate a region given an address and a length.
+ * The demarcation end is computed by applying length to address before
+ * rounding down address. End is rounded up.
+ * Cache line following rounded up end is not flushed.
+ *------------------------------------------------------------------------------
+ */
+static inline void cache_flush_len(void *addr, int len)
+{
+    unsigned long a = ADDR_ALIGN_DN( (unsigned long)addr, L1_CACHE_BYTES );
+    unsigned long e = ADDR_ALIGN_UP( ((unsigned long)addr + len),
+                                     L1_CACHE_BYTES );
+    while ( a < e )
+    {
+        flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
+        a += L1_CACHE_BYTES;    /* next cache line base */
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : cache_invalidate_region
+ * Description: 
+ * invalidate a region demarcated by addr to end.
+ * Cache line following rounded up end is not invalidated.
+ *------------------------------------------------------------------------------
+ */
+static inline void cache_invalidate_region(void *addr, void *end)
+{
+    unsigned long a = ADDR_ALIGN_DN( (unsigned long)addr, L1_CACHE_BYTES );
+    unsigned long e = ADDR_ALIGN_UP( (unsigned long)end, L1_CACHE_BYTES );
+    while ( a < e )
+    {
+        invalidate_dcache_line(a);   /* Hit_Invalidate_D */
+        a += L1_CACHE_BYTES;    /* next cache line base */
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : cache_invalidate_len
+ * Description: 
+ * invalidate a region given an address and a length.
+ * The demarcation end is computed by applying length to address before
+ * rounding down address. End is rounded up.
+ * Cache line following rounded up end is not invalidated.
+ *------------------------------------------------------------------------------
+ */
+static inline void cache_invalidate_len(void *addr, int len)
+{
+    unsigned long a = ADDR_ALIGN_DN( (unsigned long)addr, L1_CACHE_BYTES );
+    unsigned long e = ADDR_ALIGN_UP( ((unsigned long)addr + len),
+                                     L1_CACHE_BYTES );
+    while ( a < e )
+    {
+        invalidate_dcache_line(a);   /* Hit_Invalidate_D */
+        a += L1_CACHE_BYTES;    /* next cache line base */
+    }
+}
+
+#define cache_invalidate_region_outer_first(a, b)	cache_invalidate_region(a, b)
+#define cache_invalidate_len_outer_first(a, b)		cache_invalidate_len(a, b)
+
+#elif defined(CONFIG_ARM)
+#include <asm/cacheflush.h>
+#if defined(CONFIG_ARM_L1_CACHE_SHIFT)
+#define L1_CACHE_LINE_SIZE	(0x1 << CONFIG_ARM_L1_CACHE_SHIFT)
+#else
+#warning There is no L1 cache line size defined!
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+
+#if defined(CONFIG_CACHE_L2X0)
+#define L2_CACHE_LINE_SIZE	32
+#endif
+
+#if defined(L2_CACHE_LINE_SIZE) && (L1_CACHE_LINE_SIZE != L2_CACHE_LINE_SIZE)
+#warning  L1 Cache line size is different from L2 cache line size!
+#endif
+
+#define CONFIG_OPTIMIZED_CACHE_FLUSH	1
+#endif
+
+/* the following functions are optimized such that they do NOT support
+ * HIGHMEM in 32-bit system, please make sure buffer allocated
+ * are in memory zone 'Normal' or before */
+static inline void cache_invalidate_len_outer_first(void *virt_addr, int len)
+{
+	unsigned long start_vaddr = (unsigned long)virt_addr;
+	unsigned long end_vaddr = start_vaddr + len;
+#if defined(CONFIG_OUTER_CACHE)
+	unsigned long start_paddr = virt_to_phys(virt_addr);
+	unsigned long end_paddr = start_paddr + len;
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+	outer_spin_lock_irqsave();
+#endif
+	/* 1st, flush & invalidate if start addr and / or end addr are not
+	 * cache line aligned */
+	if (start_vaddr & (L1_CACHE_LINE_SIZE - 1)) {
+		start_vaddr &= ~(L1_CACHE_LINE_SIZE - 1);
+		__cpuc_flush_line(start_vaddr);
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+#endif
+		start_vaddr += L1_CACHE_LINE_SIZE;
+	}
+
+#if defined(CONFIG_OUTER_CACHE)
+	if (start_paddr & (L2_CACHE_LINE_SIZE - 1)) {
+		start_paddr &= ~(L2_CACHE_LINE_SIZE - 1);
+		outer_flush_line_no_lock(start_paddr);
+		outer_sync_no_lock();
+		start_paddr += L2_CACHE_LINE_SIZE;
+	}
+#endif
+
+	if (end_vaddr & (L1_CACHE_LINE_SIZE - 1)) {
+		end_vaddr &= ~(L1_CACHE_LINE_SIZE - 1);
+		__cpuc_flush_line(end_vaddr);
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+#endif
+	}
+
+#if defined(CONFIG_OUTER_CACHE)
+	if (end_paddr & (L2_CACHE_LINE_SIZE - 1)) {
+		end_paddr &= ~(L2_CACHE_LINE_SIZE - 1);
+		outer_flush_line_no_lock(end_paddr);
+		outer_sync_no_lock();
+	}
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+	/* now do the real invalidation jobs */
+	while (start_paddr < end_paddr) {
+		outer_inv_line_no_lock(start_paddr);
+		start_paddr += L2_CACHE_LINE_SIZE;
+	}
+	outer_sync_no_lock();
+#endif
+
+	/* now do the real invalidation jobs */
+	while (start_vaddr < end_vaddr) {
+		__cpuc_inv_line(start_vaddr);
+		start_vaddr += L1_CACHE_LINE_SIZE;
+	}
+
+	dsb();
+#if defined(CONFIG_OUTER_CACHE)
+	outer_spin_unlock_irqrestore();
+#endif
+
+	if ((len >= PAGE_SIZE) && (((unsigned long)virt_addr & ~PAGE_MASK) == 0))
+		set_bit(PG_dcache_clean, &phys_to_page(virt_to_phys(virt_addr))->flags);
+}
+
+static inline void cache_invalidate_region_outer_first(void *virt_addr, void *end)
+{
+	cache_invalidate_len_outer_first(virt_addr,
+			(unsigned long)end - (unsigned long)virt_addr);
+}
+
+static inline void cache_invalidate_len(void *virt_addr, int len)
+{
+	unsigned long start_vaddr = (unsigned long)virt_addr;
+	unsigned long end_vaddr = start_vaddr + len;
+#if defined(CONFIG_OUTER_CACHE)
+	unsigned long start_paddr = virt_to_phys(virt_addr);
+	unsigned long end_paddr = start_paddr + len;
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+	outer_spin_lock_irqsave();
+#endif
+	/* 1st, flush & invalidate if start addr and / or end addr are not
+	 * cache line aligned */
+	if (start_vaddr & (L1_CACHE_LINE_SIZE - 1)) {
+		start_vaddr &= ~(L1_CACHE_LINE_SIZE - 1);
+		__cpuc_flush_line(start_vaddr);
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+#endif
+		start_vaddr += L1_CACHE_LINE_SIZE;
+	}
+
+#if defined(CONFIG_OUTER_CACHE)
+	if (start_paddr & (L2_CACHE_LINE_SIZE - 1)) {
+		start_paddr &= ~(L2_CACHE_LINE_SIZE - 1);
+		outer_flush_line_no_lock(start_paddr);
+		start_paddr += L2_CACHE_LINE_SIZE;
+	}
+#endif
+
+	if (end_vaddr & (L1_CACHE_LINE_SIZE - 1)) {
+		end_vaddr &= ~(L1_CACHE_LINE_SIZE - 1);
+		__cpuc_flush_line(end_vaddr);
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+#endif
+	}
+
+#if defined(CONFIG_OUTER_CACHE)
+	if (end_paddr & (L2_CACHE_LINE_SIZE - 1)) {
+		end_paddr &= ~(L2_CACHE_LINE_SIZE - 1);
+		outer_flush_line_no_lock(end_paddr);
+	}
+#endif
+
+	/* now do the real invalidation jobs */
+	while (start_vaddr < end_vaddr) {
+		__cpuc_inv_line(start_vaddr);
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+		outer_inv_line_no_lock(start_paddr);
+		start_paddr += L2_CACHE_LINE_SIZE;
+#endif
+		start_vaddr += L1_CACHE_LINE_SIZE;
+	}
+#if defined(CONFIG_OUTER_CACHE)
+	outer_sync_no_lock();
+	outer_spin_unlock_irqrestore();
+#else
+	dsb();
+#endif
+
+	if ((len >= PAGE_SIZE) && (((unsigned long)virt_addr & ~PAGE_MASK) == 0))
+		set_bit(PG_dcache_clean, &phys_to_page(virt_to_phys(virt_addr))->flags);
+}
+
+static inline void cache_invalidate_region(void *virt_addr, void *end)
+{
+	cache_invalidate_len(virt_addr,
+			(unsigned long)end - (unsigned long)virt_addr);
+}
+
+static inline void cache_flush_len(void *addr, int len)
+{
+	unsigned long start_vaddr = (unsigned long)addr & ~(L1_CACHE_LINE_SIZE - 1);
+	unsigned long end_vaddr = (unsigned long)addr + len;
+#if defined(CONFIG_OUTER_CACHE)
+	unsigned long start_paddr = (unsigned long)virt_to_phys((void *)start_vaddr);
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+	outer_spin_lock_irqsave();
+#endif
+#if defined(CONFIG_OPTIMIZED_CACHE_FLUSH)
+	/* this function has been optimized in a non-recommended way, if any
+	 * type of packet error occurs, please try undefining
+	 * CONFIG_OPTIMIZED_CACHE_FLUSH to use the recommended algorithm
+	 * provided by ARM cache document.
+	 * Usually, when we have multiple levels of cache, in a cache_flush
+	 * case, we do L1_clean -> L2_clean -> L2_invalidate -> L1_clean
+	 * -> L1_invalidate, we can optimize this sequence to L1_clean ->
+	 * L2_flush -> L1_flush.  This is our original approach.  However,
+	 * this will introduce 3 loops of cache operation.
+	 * This optimized method will do L1_flush -> L2_flush.  This will only
+	 * introduce 2 loops of cache operation, but it also puts us into
+	 * danger that L2 cache might update L1 cache on the cache line
+	 * that should have been invalidated. */
+
+	while (start_vaddr < end_vaddr) {
+		__cpuc_flush_line(start_vaddr);
+		start_vaddr += L1_CACHE_LINE_SIZE;
+#if defined(CONFIG_OUTER_CACHE)
+		dsb();
+		outer_flush_line_no_lock(start_paddr);
+		start_paddr += L2_CACHE_LINE_SIZE;
+#endif
+	}
+#if defined(CONFIG_OUTER_CACHE)
+	outer_sync_no_lock();
+#else
+	wmb();
+#endif
+#else	/* the non-optimized cache_flush */
+	while (start_vaddr < end_vaddr) {
+#if defined(CONFIG_OUTER_CACHE)
+		__cpuc_clean_line(start_vaddr);
+		dsb();
+		outer_flush_line_no_lock(start_paddr);
+		start_paddr += L2_CACHE_LINE_SIZE;
+		outer_sync_no_lock();
+#endif
+		__cpuc_flush_line(start_vaddr);
+		start_vaddr += L1_CACHE_LINE_SIZE;
+	}
+	wmb();
+#endif
+#if defined(CONFIG_OUTER_CACHE)
+	outer_spin_unlock_irqrestore();
+#endif
+}
+
+static inline void cache_flush_region(void *addr, void *end)
+{
+	cache_flush_len(addr, (unsigned long)end - (unsigned long)addr);
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : _is_kptr_
+ * Description: Test whether a variable can be a pointer to a kernel space.
+ *              This form of variable overloading may only be used for denoting
+ *              pointers to kernel space or as a variable where the most
+ *              significant nibble is unused.
+ *              In 32bit Linux kernel, a pointer to a KSEG0, KSEG1, KSEG2 will
+ *              have 0x8, 0xA or 0xC in the most significant nibble.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _is_kptr_(const void * vptr)
+{
+    return ( (uint32_t)vptr > 0x0FFFFFFF );
+}
+#endif  /* defined(__KERNEL__) Linux MIPS Cache Specific */
+/* OS Specific Section End */
+
+
+/*
+ * For BSD style mbuf with FKB : 
+ * generate nbuff.h by replacing "SKBUFF" to "BCMMBUF", and,
+ * use custom arg1 and arg2 instead of mark and priority, respectively.
+ */
+ 
+#ifdef TRACE_COMPILE
+#pragma message "got here 4"
+#endif
+
+struct sk_buff;
+struct blog_t;
+struct net_device;
+typedef int (*HardStartXmitFuncP) (struct sk_buff *skb,
+                                   struct net_device *dev);
+
+struct fkbuff;
+typedef struct fkbuff FkBuff_t;
+
+#define FKB_NULL                    ((FkBuff_t *)NULL)
+
+#include <linux/nbuff_types.h>
+
+/*
+ *------------------------------------------------------------------------------
+ *
+ * Pointer conversion between pBuf and pNBuff encoded buffer pointers
+ * uint8_t * pBuf;
+ * pNBuff_t  pNBuff;
+ * ...
+ * // overlays FKBUFF_PTR into pointer to build a virtual pNBuff_t
+ * pNBuff = PBUF_2_PNBUFF(pBuf,FKBUFF_PTR);
+ * ...
+ * // extracts a real uint8_t * from a virtual pNBuff_t
+ * pBuf = PNBUFF_2_PBUF(pNBuff);
+ *
+ *------------------------------------------------------------------------------
+ */
+#define PBUF_2_PNBUFF(pBuf,realType) \
+            ( (pNBuff_t) ((uint32_t)(pBuf)   | (uint32_t)(realType)) )
+#define PNBUFF_2_PBUF(pNBuff)       \
+            ( (uint8_t*) ((uint32_t)(pNBuff) & (uint32_t)NBUFF_PTR_MASK) )
+
+#if (MUST_BE_ZERO != 0)
+#error  "Design assumption SKBUFF_PTR == 0"
+#endif
+#define PNBUFF_2_SKBUFF(pNBuff)     ((struct sk_buff *)(pNBuff))
+
+#define SKBUFF_2_PNBUFF(skb)        ((pNBuff_t)(skb)) /* see MUST_BE_ZERO */
+#define FKBUFF_2_PNBUFF(fkb)        PBUF_2_PNBUFF(fkb,FKBUFF_PTR)
+
+/*
+ *------------------------------------------------------------------------------
+ *
+ * Cast from/to virtual "pNBuff_t" to/from real typed pointers
+ *
+ *  pNBuff_t pNBuff2Skb, pNBuff2Fkb;    // "void *" with NBuffPtrType_t
+ *  struct sk_buff * skb_p;
+ *  struct fkbuff  * fkb_p;
+ *  ...
+ *  pNBuff2Skb = CAST_REAL_TO_VIRT_PNBUFF(skb_p,SKBUFF_PTR);
+ *  pNBuff2Fkb = CAST_REAL_TO_VIRT_PNBUFF(fkb_p,FKBUFF_PTR);
+ *  ...
+ *  skb_p = CAST_VIRT_TO_REAL_PNBUFF(pNBuff2Skb, struct sk_buff *);
+ *  fkb_p = CAST_VIRT_TO_REAL_PNBUFF(pNBuff2Fkb, struct fkbuff  *);
+ * or,
+ *  fkb_p = PNBUFF_2_FKBUFF(pNBuff2Fkb);  
+ *------------------------------------------------------------------------------
+ */
+
+#define CAST_REAL_TO_VIRT_PNBUFF(pRealNBuff,realType) \
+            ( (pNBuff_t) (PBUF_2_PNBUFF((pRealNBuff),(realType))) )
+
+#define CAST_VIRT_TO_REAL_PNBUFF(pVirtNBuff,realType) \
+            ( (realType) PNBUFF_2_PBUF(pVirtNBuff) )
+
+#define PNBUFF_2_FKBUFF(pNBuff) CAST_VIRT_TO_REAL_PNBUFF((pNBuff),struct fkbuff*)
+
+/*
+ *------------------------------------------------------------------------------
+ *  FKB: Fast Kernel Buffers placed directly into Rx DMA Buffer
+ *  May be used ONLY for common APIs such as those available in BSD-Style mbuf
+ *------------------------------------------------------------------------------
+ */
+
+struct fkbuff
+{
+    /* List pointer must be the first field */
+    union {
+        FkBuff_t  * list;           /* SLL of free FKBs for cloning           */
+        FkBuff_t  * master_p;       /* Clone FKB to point to master FKB       */
+        atomic_t  users;            /* (private) # of references to FKB       */
+    };
+    union {                         /* Use _is_kptr_ to determine if ptr      */
+        union {
+            void          *ptr;
+            struct blog_t *blog_p;  /* Pointer to a blog                      */
+            uint8_t       *dirty_p; /* Pointer to packet payload dirty incache*/
+            uint32_t       flags;   /* Access all flags                       */
+        };
+        /*
+         * First nibble denotes a pointer or flag usage.
+         * Lowest two significant bits denote the type of pointer
+         * Remaining 22 bits may be used as flags
+         */
+        struct {
+            uint32_t   ptr_type : 8;/* Identifies whether pointer             */
+            uint32_t   unused   :21;/* Future use for flags                   */
+            uint32_t   in_skb   : 1;/* flag: FKB passed inside a SKB          */
+            uint32_t   other_ptr: 1;/* future use, to override another pointer*/
+            uint32_t   dptr_tag : 1;/* Pointer type is a dirty pointer        */
+        };
+    };
+    uint8_t       * data;           /* Pointer to packet data                 */
+
+    union {
+        /* here the bits 31-24 are valid only for native fkbs's
+         * these bits bits will be cleared when using fkbInSkb 
+         * Note that it is critical to have the Little Endian/Big endian 
+         * declaration since FKB will use length as bit field and SKB will use  
+         * length as a word  Need to maintain the same bit positions across MIPS 
+         * and ARM.
+         */
+        struct{
+            BE_DECL(
+                uint32_t  rx_csum_verified:1;
+                uint32_t  reserved:7;
+                uint32_t  len:24;              /* Packet length               */
+            )
+            LE_DECL(
+                uint32_t  len:24;
+                uint32_t  reserved:7;
+                uint32_t  rx_csum_verified:1;
+            )
+        };
+        uint32_t len_word;
+    };
+
+    union {
+        uint32_t  mark;             /* Custom arg1, e.g. tag or mark field    */
+        void      * queue;          /* Single link list queue of FKB | SKB    */
+        void      *dst_entry;       /* rtcache entry for locally termiated pkts */
+    };
+    union {
+        uint32_t    priority;       /* Custom arg2, packet priority, tx info  */
+        wlFlowInf_t wl;             /* WLAN Flow Info */
+        uint32_t    flowid;           /* used for locally terminated pkts */
+    };
+
+    RecycleFuncP  recycle_hook;   /* Nbuff recycle handler   */
+    union {
+             /* recycle hook for Clone FKB is used in DHD pointing to extra info
+	      * BE CAREFULL when using this recyle_context for free etc....  
+	      */ 
+	    void *dhd_pkttag_info_p;		  
+	    uint32_t recycle_context;     /* Rx network device/channel or pool */
+    };
+
+} ____cacheline_aligned;   /* 2 cache lines wide */
+
+#define FKB_CLEAR_LEN_WORD_FLAGS(len_word) (len_word &= 0x00FFFFFF)
+
+
+/*
+ *------------------------------------------------------------------------------
+ * An fkbuff may be referred to as a:
+ *  master - a pre-allocated rxBuffer, placed ahead of the headroom.
+ *  cloned - allocated from a free pool of fkbuff and points to a master.
+ *
+ *  in_skb - when a FKB is passed as a member of a SKB structure.
+ *------------------------------------------------------------------------------
+ */
+#define FKB_IN_SKB                  (1 << 2)    /* Bit#2 is in_skb */
+
+/* Return flags with the in_skb tag set */
+static inline uint32_t _set_in_skb_tag_(uint32_t flags)
+{
+    return (flags | FKB_IN_SKB);
+}
+
+/* Fetch the in_skb tag in flags (nonzero iff set, not normalized to 0/1) */
+static inline uint32_t _get_in_skb_tag_(uint32_t flags)
+{
+    return (flags & FKB_IN_SKB);
+}
+
+/* Determine whether the in_skb tag is set in flags; returns 0 or 1 */
+static inline uint32_t _is_in_skb_tag_(uint32_t flags)
+{
+    return ( _get_in_skb_tag_(flags) ? 1 : 0 );
+}
+
+#define CHK_IQ_PRIO                  (1 << 3)    /* Bit#3 is check IQ Prio */
+
+/* Return flags with the in_skb_tag and chk_iq_prio set */
+static inline uint32_t _set_in_skb_n_chk_iq_prio_tag_(uint32_t flags)
+{
+    return (flags | FKB_IN_SKB | CHK_IQ_PRIO);
+}
+
+/* Return flags with the chk_iq_prio set */
+static inline uint32_t _set_chk_iq_prio_tag_(uint32_t flags)
+{
+    return (flags | CHK_IQ_PRIO);
+}
+
+/* Fetch the chk_iq_prio tag in flags (nonzero iff set, not normalized) */
+static inline uint32_t _get_chk_iq_prio_tag_(uint32_t flags)
+{
+    return (flags & CHK_IQ_PRIO);
+}
+
+/* Determine whether the chk_iq_prio tag is set in flags; returns 0 or 1 */
+static inline uint32_t _is_chk_iq_prio_tag_(uint32_t flags)
+{
+    return ( _get_chk_iq_prio_tag_(flags) ? 1 : 0 );
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * APIs to convert between a real kernel pointer and a dirty pointer.
+ *------------------------------------------------------------------------------
+ */
+
+#define FKB_DPTR_TAG                (1 << 0)    /* Bit#0 is dptr_tag */
+
+/* Test whether a pointer is a dirty pointer type; returns 0 or 1 */
+static inline uint32_t is_dptr_tag_(uint8_t * ptr)
+{
+    return ( ( (uint32_t) ((uint32_t)ptr & FKB_DPTR_TAG) ) ? 1 : 0);
+}
+
+/* Encode a real kernel pointer to a dirty pointer type */
+static inline uint8_t * _to_dptr_from_kptr_(uint8_t * kernel_ptr)
+{
+    /* NOTE(review): an odd kernel_ptr (bit#0 already set) is bumped by one
+     * byte so the tag bit does not destroy address information — confirm
+     * this rounding is intended for odd payload addresses. */
+    if((uint32_t)(kernel_ptr) & FKB_DPTR_TAG)
+        kernel_ptr++;
+    /* Tag a kernel pointer's dirty_ptr bit, to denote a FKB dirty pointer */
+    return ( (uint8_t*) ((uint32_t)(kernel_ptr) | FKB_DPTR_TAG) );
+}
+
+/* Decode a dirty pointer type into a real kernel pointer */
+static inline uint8_t * _to_kptr_from_dptr_(uint8_t * dirty_ptr)
+{
+    FKB_AUDIT(
+        if ( dirty_ptr && !is_dptr_tag_(dirty_ptr) )
+            printk("FKB ASSERT %s !is_dptr_tag_(0x%08x)\n",
+                   __FUNCTION__, (int)dirty_ptr); );
+
+    /* Fetch kernel pointer from encoded FKB dirty_ptr,
+       by clearing dirty_ptr bit */
+    return ( (uint8_t*) ((uint32_t)(dirty_ptr) & (~FKB_DPTR_TAG)) );
+}
+
+#define FKB_OPTR_TAG                (1<<1)      /* Bit#1 other_ptr tag */
+
+#define FKB_BLOG_TAG_MASK           (FKB_DPTR_TAG | FKB_OPTR_TAG)
+
+/* Verify whether a FKB pointer is pointing to a Blog:
+ * a kernel pointer with neither the dirty nor the other_ptr tag set. */
+#define _IS_BPTR_(fkb_ptr) \
+         ( _is_kptr_(fkb_ptr) && ! ((uint32_t)(fkb_ptr) & FKB_BLOG_TAG_MASK) )
+
+
+/*
+ *------------------------------------------------------------------------------
+ *
+ *                  Types of preallocated FKB pools
+ * 
+ *  - A Master FKB object contains memory for the rx buffer, with a FkBuff_t
+ *    placed at the head of the buffer. A Master FKB object may serve to
+ *    replenish a network devices receive ring, when packet buffers are not
+ *    promptly recycled. A Master FKB may also be used for packet replication
+ *    where in one of the transmitted packet replicas may need a unique
+ *    modification distinct from other replicas. In such a case, the FKB must
+ *    be first "unshared" by a deep packet buffer copy into a Master Fkb.
+ *    A Free Pool of Master FKB objects is maintained. Master FKB may be
+ *    allocated and recycled from this Master FKB Pool.
+ *    The Master FKB Pool may also be used for replenishing a network device
+ *    driver's rx buffer ring.
+ *
+ *  - A Cloned FKB object does not contain memory for the rx buffer.
+ *    Used by fkb_clone, to create multiple references to a packet buffer.
+ *    Multiple references to a packet buffer may be used for packet replication.
+ *    A FKB allocated from the FKB Cloned Pool will have master_p pointing to
+ *    a Master FKB and the recycle_hook member set to NULL.
+ *
+ *------------------------------------------------------------------------------
+ */
+typedef enum {
+    FkbMasterPool_e = 0,            /* Pool of Master FKBs (own rx buffer)    */
+    FkbClonedPool_e = 1,            /* Pool of Cloned FKBs (reference Master) */
+    FkbMaxPools_e
+} FkbObject_t;
+
+/*
+ * Function   : _get_master_users_
+ * Description: Given a pointer to a Master FKB, fetch the users count
+ * Caution    : Does not check whether the FKB is a Master or not!
+ *              An allocated Master must have users > 0; the audit below
+ *              reports a zero count but still returns it.
+ */
+static inline uint32_t _get_master_users_(FkBuff_t * fkbM_p)
+{
+    uint32_t users;
+    users = atomic_read(&fkbM_p->users);
+
+    FKB_AUDIT(
+        if ( users == 0 )
+            printk("FKB ASSERT cpu<%u> %s(0x%08x) users == 0, recycle<%pS>\n",
+                   smp_processor_id(), __FUNCTION__,
+                   (int)fkbM_p, fkbM_p->recycle_hook); );
+    return users;
+}
+
+/*
+ * Function   : _is_fkb_cloned_pool_
+ * Description: Test whether an "allocated" FKB is from the FKB Cloned Pool.
+ *              A Cloned FKB has master_p set to a kernel pointer and a NULL
+ *              recycle_hook. Returns 1 if cloned, else 0 (also 0 when the
+ *              audit detects an inconsistent Master).
+ */
+static inline uint32_t _is_fkb_cloned_pool_(FkBuff_t * fkb_p)
+{
+    if ( _is_kptr_(fkb_p->master_p)
+         && (fkb_p->recycle_hook == (RecycleFuncP)NULL) )
+    {
+        FKB_AUDIT(
+            /* ASSERT if the FKB is actually linked in a FKB pool */
+            if ( _is_kptr_(fkb_p->master_p->list) )
+            {
+                printk("FKB ASSERT cpu<%u> %s :"
+                       " _is_kptr_((0x%08x)->0x%08x->0x%08x)"
+                       " master<0x%08x>.recycle<%pS>\n",
+                       smp_processor_id(), __FUNCTION__, (int)fkb_p,
+                       (int)fkb_p->master_p, (int)fkb_p->master_p->list,
+                       (int)fkb_p->master_p,
+                       fkb_p->master_p->recycle_hook);
+            }
+            /* ASSERT that Master FKB users count is greater than 0 */
+            if ( _get_master_users_(fkb_p->master_p) == 0 )
+            {
+                printk("FKB ASSERT cpu<%u> %s :"
+                       " _get_master_users_(0x%08x->0x%08x) == 0\n",
+                       smp_processor_id(), __FUNCTION__,
+                       (int)fkb_p, (int)fkb_p->master_p);
+                return 0;
+            } );
+
+        return 1;   /* Allocated FKB is from the FKB Cloned Pool */
+    }
+    else
+        return 0;
+}
+
+/*
+ * Function   : _get_fkb_users_
+ * Description: Given a pointer to a FKB (Master or Cloned), fetch users count.
+ *              For a Cloned FKB the count is read from its Master.
+ */
+static inline uint32_t _get_fkb_users_(FkBuff_t * fkb_p)
+{
+    if ( _is_kptr_(fkb_p->master_p) )       /* Cloned FKB */
+    {
+        FKB_AUDIT(
+            if ( !_is_fkb_cloned_pool_(fkb_p) ) /* double check Cloned FKB */
+            {
+                printk("FKB ASSERT cpu<%u> %s :"
+                       " !_is_fkb_cloned_pool_(0x%08x)"
+                       " master<0x%08x>.recycle<%pS>\n",
+                       smp_processor_id(), __FUNCTION__,
+                       (int)fkb_p, (int)fkb_p->master_p,
+                       fkb_p->master_p->recycle_hook);
+                return 0;
+            } );
+
+        return _get_master_users_(fkb_p->master_p);
+    }
+    else                                    /* Master FKB */
+        return _get_master_users_(fkb_p);
+}
+
+/*
+ * Function   : _get_fkb_master_ptr_
+ * Description: Fetch the pointer to the Master FKB. For a Cloned FKB this is
+ *              its master_p; a Master FKB returns itself. Under FKB_AUDIT an
+ *              inconsistent FKB yields FKB_NULL.
+ */
+static inline FkBuff_t * _get_fkb_master_ptr_(FkBuff_t * fkb_p)
+{
+    if ( _is_kptr_(fkb_p->master_p) )       /* Cloned FKB */
+    {
+        FKB_AUDIT( 
+            if ( !_is_fkb_cloned_pool_(fkb_p) ) /* double check Cloned FKB */
+            {
+                printk("FKB ASSERT cpu<%u> %s "
+                       " !_is_fkb_cloned_pool_(0x%08x)"
+                       " master<0x%08x>.recycle<%pS>\n",
+                       smp_processor_id(), __FUNCTION__,
+                       (int)fkb_p, (int)fkb_p->master_p,
+                       fkb_p->master_p->recycle_hook);
+                return FKB_NULL;
+            } );
+
+        return fkb_p->master_p;
+    }
+    else                                    /* Master FKB */
+    {
+        FKB_AUDIT( 
+            if ( _get_master_users_(fkb_p) == 0 )  /* assert Master FKB users */
+            {
+                printk("FKB ASSERT cpu<%u> %s "
+                       " _get_master_users_(0x%08x) == 0\n",
+                       smp_processor_id(), __FUNCTION__, (int)fkb_p);
+                return FKB_NULL;
+            } );
+
+        return fkb_p;
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Placement of a FKB object in the Rx DMA buffer:
+ *
+ * RX DMA Buffer:   |----- FKB ----|--- reserve headroom ---|---...... 
+ *                  ^              ^                        ^
+ *                pFkb           pHead                    pData
+ *                pBuf
+ *------------------------------------------------------------------------------
+ */
+#define PFKBUFF_PHEAD_OFFSET        sizeof(FkBuff_t)
+#define PFKBUFF_TO_PHEAD(pFkb)      ((uint8_t*)((FkBuff_t*)(pFkb) + 1))
+
+/* Map a data pointer back to the FKB that precedes (headroom + FKB) bytes */
+#define PDATA_TO_PFKBUFF(pData,headroom)    \
+            (FkBuff_t *)((uint8_t*)(pData)-(headroom)-PFKBUFF_PHEAD_OFFSET)
+/* Map an FKB pointer forward to its data pointer, past FKB and headroom */
+#define PFKBUFF_TO_PDATA(pFkb,headroom)     \
+            (uint8_t*)((uint8_t*)(pFkb) + PFKBUFF_PHEAD_OFFSET + (headroom))
+
+
+#define NBUFF_ALIGN_MASK_8   0x07
+/* Align the nbuff's data pointer per alignMask; implementation elsewhere.
+ * NOTE(review): presumably may return a different pNBuff handle — confirm. */
+pNBuff_t nbuff_align_data(pNBuff_t pNBuff, uint8_t **data_pp,
+                          uint32_t len, uint32_t alignMask);
+
+/*
+ *------------------------------------------------------------------------------
+ *  FKB Functional Interfaces
+ *------------------------------------------------------------------------------
+ */
+
+/*
+ * Function   : fkb_in_skb_test
+ * Description: Verifies that the layout of SKB member fields corresponding to
+ *              a FKB have the same layout. This allows a FKB to be passed via
+ *              a SKB.
+ */
+
+extern int fkb_in_skb_test( int fkb_in_skb_offset,
+                            int list_offset, int blog_p_offset,
+                            int data_offset, int len_offset, int mark_offset,
+                            int priority_offset, int recycle_hook_offset,
+                            int recycle_context_offset );
+
+/*
+ * Global FKB Subsystem Constructor
+ * fkb_construct() validates that the layout of fkbuff members in sk_buff
+ * is the same. An sk_buff contains an fkbuff and permits a quick translation
+ * to and from a fkbuff. It also preallocates the pools of FKBs.
+ */
+extern int fkb_construct(int fkb_in_skb_offset);
+
+/*
+ * Function   : fkb_stats
+ * Description: Report FKB Pool statistics, see CC_CONFIG_FKB_STATS
+ */
+extern void fkb_stats(void);
+
+/*
+ * Function   : fkb_alloc
+ * Description: Allocate a Cloned/Master FKB object from preallocated pool.
+ *              May return FKB_NULL when the pool is exhausted.
+ */
+extern FkBuff_t * fkb_alloc( FkbObject_t object );
+
+/*
+ * Function   : fkb_free
+ * Description: Free a FKB object to its respective preallocated pool.
+ */
+extern void fkb_free(FkBuff_t * fkb_p);
+
+/*
+ * Function   : fkb_unshare
+ * Description: If a FKB is pointing to a buffer with multiple references
+ * to this buffer, then create a copy of the buffer and return a FKB with a
+ * single reference to this buffer.
+ */
+extern FkBuff_t * fkb_unshare(FkBuff_t * fkb_p);
+
+/*
+ * Function   : fkbM_borrow
+ * Description: Allocate a Master FKB object from the pre-allocated pool.
+ */
+extern FkBuff_t * fkbM_borrow(void);
+
+/*
+ * Function   : fkbM_return
+ * Description: Return a Master FKB object to a pre-allocated pool.
+ */
+extern void fkbM_return(FkBuff_t * fkbM_p);
+
+/*
+ * Function   : fkb_set_ref
+ * Description: Set reference count of an FKB to an absolute value.
+ */
+static inline void _fkb_set_ref(FkBuff_t * fkb_p, const int count)
+{
+    atomic_set(&fkb_p->users, count);
+}
+FKB_FN( fkb_set_ref,
+        void fkb_set_ref(FkBuff_t * fkb_p, const int count),
+        _fkb_set_ref(fkb_p, count) )
+
+/*
+ * Function   : fkb_inc_ref
+ * Description: Increment reference count to an FKB.
+ */
+static inline void _fkb_inc_ref(FkBuff_t * fkb_p)
+{
+    atomic_inc(&fkb_p->users);
+}
+FKB_FN( fkb_inc_ref,
+        void fkb_inc_ref(FkBuff_t * fkb_p),
+        _fkb_inc_ref(fkb_p) )
+
+/*
+ * Function   : fkb_dec_ref
+ * Description: Decrement reference count to an FKB. No underflow check.
+ */
+static inline void _fkb_dec_ref(FkBuff_t * fkb_p)
+{
+    atomic_dec(&fkb_p->users);
+    /* For debug, may want to assert that users does not become negative */
+}
+FKB_FN( fkb_dec_ref,
+        void fkb_dec_ref(FkBuff_t * fkb_p),
+        _fkb_dec_ref(fkb_p) )
+
+
+/*
+ * Function   : fkb_preinit
+ * Description: A network device driver may use this function to place a
+ * FKB object into rx buffers, when they are created. The FKB object precedes
+ * the reserved headroom. Resets all fields and sets the refcount to 0.
+ */
+static inline void fkb_preinit(uint8_t * pBuf, RecycleFuncP recycle_hook,
+                               uint32_t recycle_context)
+{
+    FkBuff_t * fkb_p = (FkBuff_t *)pBuf;
+    fkb_p->recycle_hook = recycle_hook;         /* never modified */
+    fkb_p->recycle_context = recycle_context;   /* never modified */
+
+    fkb_p->ptr  = (void*)NULL;                  /* resets dirty_p, blog_p */
+    fkb_p->data = (uint8_t*)NULL;
+    fkb_p->len_word  = fkb_p->mark  = fkb_p->priority = 0;
+    fkb_set_ref( fkb_p, 0 );
+}
+
+/*
+ * Function   : fkb_init
+ * Description: Initialize the FKB context for a received packet. Invoked by a
+ * network device when extracting the packet from a buffer descriptor and
+ * associating a FKB context to the received packet. Sets the refcount to 1.
+ */
+static inline FkBuff_t * _fkb_init(uint8_t * pBuf, uint32_t headroom,
+                                   uint8_t * pData, uint32_t len)
+{
+    FkBuff_t * fkb_p = PDATA_TO_PFKBUFF(pBuf, headroom);
+    fkb_dbg( 1, "fkb_p<0x%08x> pBuf<0x%08x> headroom<%u> pData<0x%08x> len<%d>",
+              (int)fkb_p, (int)pBuf, (int)headroom, (int)pData, len );
+
+#if defined(CC_FKB_HEADROOM_AUDIT)
+    if ( headroom < BCM_PKT_HEADROOM )
+        printk("NBUFF: Insufficient headroom <%u>, need <%u> %-10s\n",
+               headroom, BCM_PKT_HEADROOM, __FUNCTION__);
+#endif
+
+    fkb_p->data = pData;
+    fkb_p->len_word = 0;/*clear flags */
+    fkb_p->len  = len;
+    fkb_p->ptr  = (void*)NULL;   /* resets dirty_p, blog_p */
+
+    fkb_set_ref( fkb_p, 1 );
+
+    return fkb_p;
+}
+FKB_FN( fkb_init,
+        FkBuff_t * fkb_init(uint8_t * pBuf, uint32_t headroom,
+                            uint8_t * pData, uint32_t len),
+        return _fkb_init(pBuf, headroom, pData, len) )
+
+/*
+ * Function   : fkb_qinit
+ * Description: Same as fkb_init, with the exception that a recycle queue
+ * context is associated with the FKB, each time the packet is received.
+ */
+static inline FkBuff_t * _fkb_qinit(uint8_t * pBuf, uint32_t headroom,
+                    uint8_t * pData, uint32_t len, uint32_t qcontext)
+{
+    FkBuff_t * fkb_p = PDATA_TO_PFKBUFF(pBuf, headroom);
+    fkb_dbg(1, "fkb_p<0x%08x> qcontext<0x%08x>", (int)fkb_p, qcontext );
+    fkb_p->recycle_context = qcontext;
+
+    return _fkb_init(pBuf, headroom, pData, len);
+}
+FKB_FN( fkb_qinit,
+        FkBuff_t * fkb_qinit(uint8_t * pBuf, uint32_t headroom,
+                             uint8_t * pData, uint32_t len, uint32_t qcontext),
+        return _fkb_qinit(pBuf, headroom, pData, len, qcontext) )
+
+/*
+ * Function   : fkb_release
+ * Description: Release any associated blog and set ref count to 0. A fkb
+ * may be released multiple times (it does not decrement the reference count,
+ * it resets it to 0).
+ */
+void blog_put(struct blog_t * blog_p);
+static inline void _fkb_release(FkBuff_t * fkb_p)
+{
+    fkb_dbg(1, "fkb_p<0x%08x> fkb_p->blog_p<0x%08x>",
+            (int)fkb_p, (int)fkb_p->blog_p );
+    if ( _IS_BPTR_( fkb_p->blog_p ) )
+        blog_put(fkb_p->blog_p);
+    fkb_p->ptr = (void*)NULL;   /* reset dirty_p, blog_p */
+
+    fkb_set_ref( fkb_p, 0 );    /* fkb_release may be invoked multiple times */
+}
+FKB_FN( fkb_release,
+        void fkb_release(FkBuff_t * fkb_p),
+        _fkb_release(fkb_p) )
+
+/*
+ * Function   : fkb_headroom
+ * Description: Determine available headroom for the packet in the buffer:
+ * the distance from the end of the FKB header to the current data pointer.
+ */
+static inline int _fkb_headroom(const FkBuff_t *fkb_p)
+{
+    return (int)( (uint32_t)(fkb_p->data) - (uint32_t)(fkb_p+1) );
+}
+FKB_FN( fkb_headroom,
+        int fkb_headroom(const FkBuff_t *fkb_p),
+        return _fkb_headroom(fkb_p) )
+
+/*
+ * Function   : fkb_init_headroom
+ * Description: The available headroom the packet in the buffer at fkb_init
+ * time (the build-configured BCM_PKT_HEADROOM).
+ */
+static inline int _fkb_init_headroom(void)
+{
+    return BCM_PKT_HEADROOM;
+}
+FKB_FN( fkb_init_headroom,
+        int fkb_init_headroom(void),
+        return _fkb_init_headroom() )
+
+
+/*
+ * Function   : fkb_push
+ * Description: Prepare space for data at head of the packet buffer.
+ * CAUTION    : No check that sufficient headroom exists.
+ * Returns the new data pointer.
+ */
+static inline uint8_t * _fkb_push(FkBuff_t * fkb_p, uint32_t len)
+{
+    fkb_p->len  += len;
+    fkb_p->data -= len;
+    return fkb_p->data;
+}
+FKB_FN( fkb_push,
+        uint8_t * fkb_push(FkBuff_t * fkb_p, uint32_t len),
+        return _fkb_push(fkb_p, len) )
+
+/*
+ * Function   : fkb_pull
+ * Description: Delete data from the head of packet buffer.
+ * Returns the new data pointer. No check that len <= fkb_p->len.
+ */
+static inline uint8_t * _fkb_pull(FkBuff_t * fkb_p, uint32_t len)
+{
+    fkb_p->len  -= len;
+    fkb_p->data += len;
+    return fkb_p->data;
+}
+FKB_FN( fkb_pull,
+        uint8_t * fkb_pull(FkBuff_t * fkb_p, uint32_t len),
+        return _fkb_pull(fkb_p, len) )
+
+/*
+ * Function   : fkb_put
+ * Description: Prepare space for data at tail of the packet buffer.
+ * Returns a pointer to the previous tail (start of the appended region).
+ * CAUTION    : No check that sufficient tailroom exists.
+ */
+static inline uint8_t * _fkb_put(FkBuff_t * fkb_p, uint32_t len)
+{
+    uint8_t * tail_p = fkb_p->data + fkb_p->len; 
+    fkb_p->len  += len;
+    return tail_p;
+}
+FKB_FN( fkb_put,
+        uint8_t * fkb_put(FkBuff_t * fkb_p, uint32_t len),
+        return _fkb_put(fkb_p, len) )
+
+/*
+ * Function   : fkb_pad
+ * Description: Pad the packet tail with `padding` zero bytes.
+ * Returns the new packet length. No tailroom check.
+ */
+static inline uint32_t _fkb_pad(FkBuff_t * fkb_p, uint32_t padding)
+{
+    memset((uint8_t *)(fkb_p->data + fkb_p->len), 0, padding);
+    fkb_p->len  += padding;
+    return fkb_p->len;
+}
+FKB_FN( fkb_pad,
+        uint32_t fkb_pad(FkBuff_t * fkb_p, uint32_t padding),
+        return _fkb_pad(fkb_p, padding) )
+
+/*
+ * Function   : fkb_len
+ * Description: Determine the length of the packet.
+ */
+static inline uint32_t _fkb_len(FkBuff_t * fkb_p)
+{
+    return fkb_p->len;
+}
+FKB_FN( fkb_len,
+        uint32_t fkb_len(FkBuff_t * fkb_p),
+        return _fkb_len(fkb_p) )
+
+/*
+ * Function   : fkb_data
+ * Description: Fetch the start of the packet.
+ */
+static inline uint8_t * _fkb_data(FkBuff_t * fkb_p)
+{
+    return fkb_p->data;
+}
+FKB_FN( fkb_data,
+        uint8_t * fkb_data(FkBuff_t * fkb_p),
+        return _fkb_data(fkb_p) )
+
+/*
+ * Function   : fkb_blog
+ * Description: Fetch the associated blog pointer (may not be a valid blog;
+ * caller should test with _IS_BPTR_ before dereferencing).
+ */
+static inline struct blog_t * _fkb_blog(FkBuff_t * fkb_p)
+{
+    return fkb_p->blog_p;
+}
+FKB_FN( fkb_blog,
+        struct blog_t * fkb_blog(FkBuff_t * fkb_p),
+        return _fkb_blog(fkb_p) )
+
+/*
+ * Function   : fkb_clone
+ * Description: Allocate a FKB from the Cloned Pool and make it reference the
+ * same packet. Increments the Master's refcount on success.
+ * Returns FKB_NULL when the Cloned Pool is exhausted.
+ */
+static inline FkBuff_t * _fkb_clone(FkBuff_t * fkbM_p)
+{
+    FkBuff_t * fkbC_p;
+
+    FKB_AUDIT( 
+        if ( smp_processor_id() )
+            printk("FKB ASSERT %s not supported on CP 1\n", __FUNCTION__); );
+
+    /* Fetch a pointer to the Master FKB */
+    fkbM_p = _get_fkb_master_ptr_( fkbM_p );
+
+    fkbC_p = fkb_alloc( FkbClonedPool_e );  /* Allocate FKB from Cloned pool */
+
+    /* Pool allocation succeeding is the common case: hint with likely().
+     * (Previously annotated unlikely(), which inverted the branch hint.) */
+    if ( likely(fkbC_p != FKB_NULL) )
+    {
+        fkb_inc_ref( fkbM_p );          /* clone holds a ref on the Master */
+        fkbC_p->master_p   = fkbM_p;
+        fkbC_p->ptr        = fkbM_p->ptr;   /* share blog/dirty ptr state */
+
+        fkbC_p->data       = fkbM_p->data;
+        fkbC_p->len_word   = fkbM_p->len_word;
+        fkbC_p->mark       = fkbM_p->mark;
+        fkbC_p->priority   = fkbM_p->priority;
+    }
+
+    fkb_dbg(1, "fkbC_p<0x%08x> ---> fkbM_p<0x%08x>", (int)fkbC_p, (int)fkbM_p );
+
+    return fkbC_p;       /* May be null */
+}
+FKB_FN( fkb_clone,
+        FkBuff_t * fkb_clone(FkBuff_t * fkbM_p),
+        return _fkb_clone(fkbM_p) )
+
+/*
+ * Function   : fkb_flush
+ * Description: Flush a FKB from current data or received packet data up to
+ * the dirty_p. When Flush Optimization is disabled, the entire length.
+ * cache_op selects flush vs invalidate (FKB_CACHE_FLUSH or not).
+ */
+static inline void _fkb_flush(FkBuff_t * fkb_p, uint8_t * data_p, int len, 
+    int cache_op)
+{
+    uint8_t * fkb_data_p;
+
+    /* Start of packet data: derived from the Master FKB for clones */
+    if ( _is_fkb_cloned_pool_(fkb_p) )
+        fkb_data_p = PFKBUFF_TO_PDATA(fkb_p->master_p, BCM_PKT_HEADROOM);
+    else
+        fkb_data_p = PFKBUFF_TO_PDATA(fkb_p, BCM_PKT_HEADROOM);
+
+    /* headers may have been popped */
+    if ( (uint32_t)data_p < (uint32_t)fkb_data_p )
+        fkb_data_p = data_p;
+
+    {
+#if defined(CC_NBUFF_FLUSH_OPTIMIZATION)
+    uint8_t * dirty_p;  /* Flush only L1 dirty cache lines */
+    dirty_p = _to_kptr_from_dptr_(fkb_p->dirty_p);  /* extract kernel pointer */
+
+    fkb_dbg(1, "fkb_p<0x%08x> fkb_data<0x%08x> dirty_p<0x%08x> len<%d>",
+            (int)fkb_p, (int)fkb_data_p, (int)dirty_p, len);
+
+    if (cache_op == FKB_CACHE_FLUSH)
+        cache_flush_region(fkb_data_p, dirty_p);
+    else
+        cache_invalidate_region(fkb_data_p, dirty_p);
+#else
+    uint32_t data_offset;
+    data_offset = (uint32_t)data_p - (uint32_t)fkb_data_p;
+
+    fkb_dbg(1, "fkb_p<0x%08x> fkb_data<0x%08x> data_offset<%d> len<%d>",
+            (int)fkb_p, (int)fkb_data_p, data_offset, len);
+
+    /* Use the same selector constant as the optimized branch above
+     * (was FKB_FLUSH, inconsistent with FKB_CACHE_FLUSH) */
+    if (cache_op == FKB_CACHE_FLUSH)
+        cache_flush_len(fkb_data_p, data_offset + len);
+    else
+        cache_invalidate_len(fkb_data_p, data_offset + len);
+#endif
+    }
+}
+FKB_FN( fkb_flush,
+        void fkb_flush(FkBuff_t * fkb_p, uint8_t * data, int len, int cache_op),
+        _fkb_flush(fkb_p, data, len, cache_op) )
+
+/*
+ *------------------------------------------------------------------------------
+ * Virtual accessors to common members of network kernel buffer
+ *------------------------------------------------------------------------------
+ */
+
+/* __BUILD_NBUFF_SET_ACCESSOR: generates function nbuff_set_MEMBER() that
+ * dispatches on the nbuff type tag to the SKB or FKB member of that name. */
+#define __BUILD_NBUFF_SET_ACCESSOR( TYPE, MEMBER )                             \
+static inline void nbuff_set_##MEMBER(pNBuff_t pNBuff, TYPE MEMBER) \
+{                                                                              \
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);                                       \
+    if ( IS_SKBUFF_PTR(pNBuff) )                                               \
+        ((struct sk_buff *)pBuf)->MEMBER = MEMBER;                             \
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */                         \
+    else                                                                       \
+        ((FkBuff_t *)pBuf)->MEMBER = MEMBER;                                   \
+}
+
+/* __BUILD_NBUFF_GET_ACCESSOR: generates function nbuff_get_MEMBER() */
+#define __BUILD_NBUFF_GET_ACCESSOR( TYPE, MEMBER )                             \
+static inline TYPE nbuff_get_##MEMBER(pNBuff_t pNBuff)                         \
+{                                                                              \
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);                                       \
+    if ( IS_SKBUFF_PTR(pNBuff) )                                               \
+        return (TYPE)(((struct sk_buff *)pBuf)->MEMBER);                       \
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */                         \
+    else                                                                       \
+        return (TYPE)(((FkBuff_t *)pBuf)->MEMBER);                             \
+}
+
+/*
+ * Common set/get accessor of base network buffer fields:
+ * nbuff_set_data(), nbuff_set_len(), nbuff_set_mark(), nbuff_set_priority()
+ * nbuff_get_data(), nbuff_get_len(), nbuff_get_mark(), nbuff_get_priority()
+ */
+__BUILD_NBUFF_SET_ACCESSOR(uint8_t *, data) 
+__BUILD_NBUFF_SET_ACCESSOR(uint32_t, len) 
+__BUILD_NBUFF_SET_ACCESSOR(uint32_t, mark)      /* Custom network buffer arg1 */
+__BUILD_NBUFF_SET_ACCESSOR(void *, queue)     /* SLL queue of FKB | SKB */
+__BUILD_NBUFF_SET_ACCESSOR(uint32_t, priority)  /* Custom network buffer arg2 */
+
+__BUILD_NBUFF_GET_ACCESSOR(uint8_t *, data)
+__BUILD_NBUFF_GET_ACCESSOR(uint32_t, len)
+__BUILD_NBUFF_GET_ACCESSOR(uint32_t, mark)      /* Custom network buffer arg1 */
+__BUILD_NBUFF_GET_ACCESSOR(void *, queue)     /* SLL queue of FKB | SKB */
+__BUILD_NBUFF_GET_ACCESSOR(uint32_t, priority)  /* Custom network buffer arg2 */
+
+/*
+ * Function   : nbuff_get_context
+ * Description: Extracts the data and len fields from a pNBuff_t.
+ * Returns the untagged buffer pointer, or NULL (outputs untouched) for a
+ * NULL handle.
+ */
+static inline void * nbuff_get_context(pNBuff_t pNBuff,
+                                     uint8_t ** data_p, uint32_t *len_p)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( pBuf == (void*) NULL )
+        return pBuf;
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        *data_p     = ((struct sk_buff *)pBuf)->data;
+        *len_p      = ((struct sk_buff *)pBuf)->len;
+    }
+    else
+    {
+        *data_p     = ((FkBuff_t *)pBuf)->data;
+        *len_p      = ((FkBuff_t *)pBuf)->len;
+    }
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> data_p<0x%08x>",
+           (int)pNBuff, (int)pBuf, (int)*data_p );
+    return pBuf;
+}
+
+/*
+ * Function   : nbuff_get_params
+ * Description: Extracts the data, len, mark and priority field from a network
+ * buffer. Returns the untagged buffer pointer, or NULL (outputs untouched)
+ * for a NULL handle.
+ */
+static inline void * nbuff_get_params(pNBuff_t pNBuff,
+                                     uint8_t ** data_p, uint32_t *len_p,
+                                     uint32_t * mark_p, uint32_t *priority_p)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( pBuf == (void*) NULL )
+        return pBuf;
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        *data_p     = ((struct sk_buff *)pBuf)->data;
+        *len_p      = ((struct sk_buff *)pBuf)->len;
+        *mark_p     = ((struct sk_buff *)pBuf)->mark;
+        *priority_p = ((struct sk_buff *)pBuf)->priority;
+    }
+    else
+    {
+        *data_p     = ((FkBuff_t *)pBuf)->data;
+        *len_p      = ((FkBuff_t *)pBuf)->len;
+        *mark_p     = ((FkBuff_t *)pBuf)->mark;
+        *priority_p = ((FkBuff_t *)pBuf)->priority;
+    }
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> data_p<0x%08x>",
+            (int)pNBuff, (int)pBuf, (int)*data_p );
+    return pBuf;
+}
+    
+/* adds recycle flags/context to nbuff_get_params used in impl4 enet */
+/*
+ * Function   : nbuff_get_params_ext
+ * Description: Extracts the data, len, mark, priority and 
+ * recycle flags/context field from a network buffer.
+ * Note: for a SKB *rflags_p receives recycle_flags; for a FKB it receives
+ * recycle_context (only when CONFIG_BCM_KF_BLOG && CONFIG_BLOG, otherwise
+ * *rflags_p is left untouched).
+ */
+static inline void * nbuff_get_params_ext(pNBuff_t pNBuff, uint8_t **data_p, 
+                                          uint32_t *len_p, uint32_t *mark_p, 
+                                          uint32_t *priority_p, 
+                                          uint32_t *rflags_p)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( pBuf == (void*) NULL )
+        return pBuf;
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        *data_p     = ((struct sk_buff *)pBuf)->data;
+        *len_p      = ((struct sk_buff *)pBuf)->len;
+        *mark_p     = ((struct sk_buff *)pBuf)->mark;
+        *priority_p = ((struct sk_buff *)pBuf)->priority;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        *rflags_p   = ((struct sk_buff *)pBuf)->recycle_flags;
+#endif
+    }
+    else
+    {
+        *data_p     = ((FkBuff_t *)pBuf)->data;
+        *len_p      = ((FkBuff_t *)pBuf)->len;
+        *mark_p     = ((FkBuff_t *)pBuf)->mark;
+        *priority_p = ((FkBuff_t *)pBuf)->priority;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        *rflags_p   = ((FkBuff_t *)pBuf)->recycle_context;
+#endif
+    }
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> data_p<0x%08x>",
+            (int)pNBuff, (int)pBuf, (int)*data_p );
+    return pBuf;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Virtual common functional apis of a network kernel buffer
+ *------------------------------------------------------------------------------
+ */
+
+/*
+ * Function   : nbuff_push
+ * Description: Make space at the start of a network buffer; dispatches to
+ * skb_push or fkb_push by nbuff type tag. Returns the new data pointer.
+ * CAUTION    : In the case of a FKB, no check for headroom is done.
+ */
+static inline uint8_t * nbuff_push(pNBuff_t pNBuff, uint32_t len)
+{
+    uint8_t * data;
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+        data = skb_push(((struct sk_buff *)pBuf), len);
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        data = fkb_push((FkBuff_t*)pBuf, len);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> data<0x%08x> len<%u>",
+            (int)pNBuff,(int)pBuf, (int)data, len );
+    return data;
+}
+
+/*
+ * Function   : nbuff_pull
+ * Description: Delete data from start of a network buffer; dispatches to
+ * skb_pull or fkb_pull. Returns the new data pointer.
+ */
+static inline uint8_t * nbuff_pull(pNBuff_t pNBuff, uint32_t len)
+{
+    uint8_t * data;
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+        data = skb_pull(((struct sk_buff *)pBuf), len);
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        data = fkb_pull((FkBuff_t *)pBuf, len);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> data<0x%08x> len<%u>",
+            (int)pNBuff,(int)pBuf, (int)data, len );
+    return data;
+}
+
+/*
+ * Function   : nbuff_put
+ * Description: Make space at the tail of a network buffer; dispatches to
+ * skb_put or fkb_put. Returns a pointer to the start of the appended region.
+ * CAUTION: In the case of a FKB, no check for tailroom is done.
+ */
+static inline uint8_t * nbuff_put(pNBuff_t pNBuff, uint32_t len)
+{
+    uint8_t * tail;
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+        tail = skb_put(((struct sk_buff *)pBuf), len);
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        tail = fkb_put((FkBuff_t *)pBuf, len);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x> tail<0x%08x> len<%u>",
+            (int)pNBuff,(int)pBuf, (int)tail, len );
+    return tail;
+}
+
+/*
+ * Function   : nbuff_free
+ * Description: Free/recycle a network buffer and associated data
+ *
+ * Freeing may involve a recycling of the network buffer into its respective
+ * pool (per network device driver pool, kernel cache or FKB pool). Likewise
+ * the associated buffer may be recycled if there are no other network buffers
+ * referencing it.
+ */
+
+extern void dev_kfree_skb_thread(struct sk_buff *skb);
+extern void dev_kfree_skb_irq(struct sk_buff *skb);
+
+static inline void nbuff_free(pNBuff_t pNBuff)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x>", (int)pNBuff,(int)pBuf);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+#if defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848)
+        dev_kfree_skb_irq((struct sk_buff *)pBuf);
+#else
+        dev_kfree_skb_thread((struct sk_buff *)pBuf);
+#endif
+    }
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        fkb_free(pBuf);
+    fkb_dbg(2, "<<");
+}
+
+/*
+ * Function   : nbuff_unshare
+ * Description: If there are more than one references to the data buffer
+ * associated with the network buffer, create a deep copy of the data buffer
+ * and return a network buffer context to it. The returned network buffer
+ * may then be used to modify the data packet without impacting the original
+ * network buffer and its data buffer.
+ *
+ * If the data packet had a single network buffer referencing it, then the
+ * original network buffer is returned.
+ */
+static inline pNBuff_t nbuff_unshare(pNBuff_t pNBuff)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x>", (int)pNBuff,(int)pBuf);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        struct sk_buff *skb_p;
+        skb_p = skb_unshare( (struct sk_buff *)pBuf, GFP_ATOMIC);
+        pNBuff = SKBUFF_2_PNBUFF(skb_p);
+    }
+    else
+    {
+        FkBuff_t * fkb_p;
+        fkb_p = fkb_unshare( (FkBuff_t *)pBuf );
+        pNBuff = FKBUFF_2_PNBUFF(fkb_p);
+    }
+
+    fkb_dbg(2, "<<");
+    return pNBuff;
+}
+
+/*
+ * Function   : nbuff_flush
+ * Description: Flush (Hit_Writeback_Inv_D) a network buffer's packet data.
+ */
+static inline void nbuff_flush(pNBuff_t pNBuff, uint8_t * data, int len)
+{
+    fkb_dbg(1, "pNBuff<0x%08x> data<0x%08x> len<%d>",
+            (int)pNBuff, (int)data, len);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+        cache_flush_len(data, len);
+    else
+    {
+        FkBuff_t * fkb_p = (FkBuff_t *)PNBUFF_2_PBUF(pNBuff);
+        fkb_flush(fkb_p, data, len, FKB_CACHE_FLUSH); 
+    }
+    fkb_dbg(2, "<<");
+}
+
+/*
+ * Function   : nbuff_flushfree
+ * Description: Flush (Hit_Writeback_Inv_D) and free/recycle a network buffer.
+ * If the data buffer was referenced by a single network buffer, then the data
+ * buffer will also be freed/recycled. 
+ */
+static inline void nbuff_flushfree(pNBuff_t pNBuff)
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x>", (int)pNBuff,(int)pBuf);
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        struct sk_buff * skb_p = (struct sk_buff *)pBuf;
+        cache_flush_len(skb_p->data, skb_p->len);
+#if defined(CONFIG_BCM96838) || defined(CONFIG_BCM96848)
+        dev_kfree_skb_irq((struct sk_buff *)pBuf);
+#else
+        dev_kfree_skb_thread((struct sk_buff *)pBuf);
+#endif
+    }
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+    {
+        FkBuff_t * fkb_p = (FkBuff_t *)pBuf;
+        fkb_flush(fkb_p, fkb_p->data, fkb_p->len, FKB_CACHE_FLUSH);
+        fkb_free(fkb_p);
+    }
+    fkb_dbg(2, "<<");
+}
+
+/*
+ * Function   : nbuff_xlate
+ * Description: Convert a FKB to a SKB. The SKB is data filled with the
+ * data, len, mark, priority, and recycle hook and context. 
+ *
+ * Other SKB fields for SKB API manipulation are also initialized.
+ * SKB fields for network stack manipulation are NOT initialized.
+ *
+ * This function is typically used only in a network device drivers' hard
+ * start xmit function handler. A hard start xmit function handler may receive
+ * a network buffer of a FKB type and may not wish to rework the implementation
+ * to use nbuff APIs. In such an event, a nbuff may be translated to a skbuff.
+ */
+struct sk_buff * fkb_xlate(FkBuff_t * fkb_p);
+static inline struct sk_buff * nbuff_xlate( pNBuff_t pNBuff )
+{
+    void * pBuf = PNBUFF_2_PBUF(pNBuff);
+    fkb_dbg(1, "pNBuff<0x%08x> pBuf<0x%08x>", (int)pNBuff,(int)pBuf);
+
+    if ( IS_SKBUFF_PTR(pNBuff) )
+        return (struct sk_buff *)pBuf;
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        return fkb_xlate( (FkBuff_t *)pBuf );
+}
+
+
+/* Miscellaneous helper routines */
+static inline void u16cpy( void * dst_p, const void * src_p, uint32_t bytes )
+{
+    uint16_t * dst16_p = (uint16_t*)dst_p;
+    uint16_t * src16_p = (uint16_t*)src_p;
+    do { // assuming: (bytes % sizeof(uint16_t) == 0 !!!
+        *dst16_p++ = *src16_p++;
+    } while ( bytes -= sizeof(uint16_t) );
+}
+
+static inline void u16datacpy( void * dst_p, const void * src_p, uint32_t bytes )
+{
+    uint16_t * dst16_p = (uint16_t*)dst_p;
+    uint16_t * src16_p = (uint16_t*)src_p;
+    do { // assuming: (bytes % sizeof(uint16_t) == 0 !!!
+        *dst16_p++ = htons (*src16_p++);
+    } while ( bytes -= sizeof(uint16_t) );
+}
+
+static inline int u16cmp( void * dst_p, const void * src_p,
+                          uint32_t bytes )
+{
+    uint16_t * dst16_p = (uint16_t*)dst_p;
+    uint16_t * src16_p = (uint16_t*)src_p;
+    do { // assuming: (bytes % sizeof(uint16_t) == 0 !!!
+        if ( *dst16_p++ != *src16_p++ )
+            return -1;
+    } while ( bytes -= sizeof(uint16_t) );
+
+    return 0;
+}
+
+static inline int nbuff_pad(pNBuff_t pNBuff, int padLen)
+{
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        skb_pad((struct sk_buff *)pNBuff, padLen);
+    }
+    else
+    {
+        fkb_pad(PNBUFF_2_FKBUFF(pNBuff), padLen);
+    }
+    return 0;
+}
+
+#ifdef DUMP_DATA
+/* dumpHexData dump out the hex base binary data */
+static inline void dumpHexData1(uint8_t *pHead, uint32_t len)
+{
+    uint32_t i;
+    uint8_t *c = pHead;
+    for (i = 0; i < len; ++i) {
+        if (i % 16 == 0)
+            printk("\n");
+        printk("0x%02X, ", *c++);
+    }
+    printk("\n");
+}
+
+static inline void dump_pkt(const char * fname, uint8_t * pBuf, uint32_t len)
+{
+    //int dump_len = ( len < 64) ? len : 64;
+    int dump_len = len ;
+    printk("%s: data<0x%08x> len<%u>", fname, (int)pBuf, len);
+    dumpHexData1(pBuf, dump_len);
+    cache_flush_len((void*)pBuf, dump_len);
+}
+#define DUMP_PKT(pBuf,len)      dump_pkt(__FUNCTION__, (pBuf), (len))
+#else   /* !defined(DUMP_DATA) */
+#define DUMP_PKT(pBuf,len)      do {} while(0)
+#endif
+
+#endif  /* defined(__NBUFF_H_INCLUDED__) */
+
+#endif
diff --git a/include/linux/nbuff_types.h b/include/linux/nbuff_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ef5220ec1c5e9976e1c59c014f32d15293063b2
--- /dev/null
+++ b/include/linux/nbuff_types.h
@@ -0,0 +1,60 @@
+#ifndef __NBUFF_TYPES_H_INCLUDED__
+#define __NBUFF_TYPES_H_INCLUDED__
+
+/*
+<:copyright-gpl
+
+ Copyright 2011 Broadcom Corp. All Rights Reserved.
+
+ This program is free software; you can distribute it and/or modify it
+ under the terms of the GNU General Public License (Version 2) as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : nbuff_types.h
+ * Description: Simple nbuff type defines.
+ *
+ ******************************************************************************* */
+
+#define MUST_BE_ZERO                0
+
+/* virtual network buffer pointer to SKB|FPB|TGB|FKB  */
+typedef void * pNBuff_t;
+#define PNBUFF_NULL                 ((pNBuff_t)NULL)
+
+typedef enum NBuffPtrType
+{
+    SKBUFF_PTR = MUST_BE_ZERO,      /* Default Linux networking socket buffer */
+    FPBUFF_PTR,                     /* Experimental BRCM IuDMA freepool buffer*/
+    TGBUFF_PTR,                     /* LAB Traffic generated network buffer   */
+    FKBUFF_PTR,                     /* Lightweight fast kernel network buffer */
+    /* Do not add new ptr types */
+} NBuffPtrType_t;
+
+                                    /* 2lsbits in pointer encode NbuffType_t  */
+#define NBUFF_TYPE_MASK             0x3u
+#define NBUFF_PTR_MASK              (~NBUFF_TYPE_MASK)
+#define NBUFF_PTR_TYPE(pNBuff)      ((uint32_t)(pNBuff) & NBUFF_TYPE_MASK)
+
+
+#define IS_SKBUFF_PTR(pNBuff)       ( NBUFF_PTR_TYPE(pNBuff) == SKBUFF_PTR )
+#define IS_FPBUFF_PTR(pNBuff)       ( NBUFF_PTR_TYPE(pNBuff) == FPBUFF_PTR )
+#define IS_TGBUFF_PTR(pNBuff)       ( NBUFF_PTR_TYPE(pNBuff) == TGBUFF_PTR )
+#define IS_FKBUFF_PTR(pNBuff)       ( NBUFF_PTR_TYPE(pNBuff) == FKBUFF_PTR )
+
+
+#endif  /* defined(__NBUFF_TYPES_H_INCLUDED__) */
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 5ac32123035a5159c7c017a3ac29aec8ee9291af..cfaefa65a65d5024384d1b6f78743a513fb9e5dd 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -57,6 +57,10 @@ enum {
 	NETIF_F_RXFCS_BIT,		/* Append FCS to skb pkt data */
 	NETIF_F_RXALL_BIT,		/* Receive errored frames too */
 
+#if defined(CONFIG_BCM_KF_BLOG)
+	NETIF_F_EXTSTATS_BIT,		/* Support extended statistics */
+#endif
+
 	/*
 	 * Add your fresh new feature above and remember to update
 	 * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -103,6 +107,10 @@ enum {
 #define NETIF_F_RXFCS		__NETIF_F(RXFCS)
 #define NETIF_F_RXALL		__NETIF_F(RXALL)
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+#define NETIF_F_EXTSTATS	__NETIF_F(EXTSTATS)
+#endif
+
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
 #define NETIF_F_NEVER_CHANGE	(NETIF_F_VLAN_CHALLENGED | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1fcc9ba39d15efb5b1ae3b4de2f625809d42cca5..e6faf7c5dc428b7226c1298dcc29cbe840471313 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -55,6 +55,19 @@
 
 #include <linux/netdev_features.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#include <linux/bcm_dslcpe_wlan_info.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_MODULE_OWNER)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif
+#endif /* CONFIG_BCM_KF_MODULE_OWNER */
+
+
 struct netpoll_info;
 struct device;
 struct phy_device;
@@ -179,6 +192,8 @@ struct net_device_stats {
 	unsigned long	rx_dropped;
 	unsigned long	tx_dropped;
 	unsigned long	multicast;
+
+
 	unsigned long	collisions;
 	unsigned long	rx_length_errors;
 	unsigned long	rx_over_errors;
@@ -193,6 +208,16 @@ struct net_device_stats {
 	unsigned long	tx_window_errors;
 	unsigned long	rx_compressed;
 	unsigned long	tx_compressed;
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	unsigned long   tx_multicast_packets;  /* multicast packets transmitted */
+	unsigned long   rx_multicast_bytes;  /* multicast bytes received */
+	unsigned long   tx_multicast_bytes;  /* multicast bytes transmitted */
+	unsigned long   rx_broadcast_packets;  /* broadcast packets received */
+	unsigned long   tx_broadcast_packets;  /* broadcast packets transmitted */
+	/* NOTE: Unicast packets are not counted but are instead calculated as needed
+		using total - (broadcast + multicast) */
+	unsigned long   rx_unknown_packets;  /* unknown protocol packets received */
+#endif
 };
 
 #endif  /*  __KERNEL__  */
@@ -524,7 +549,7 @@ enum netdev_queue_state_t {
 #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
 			      (1 << __QUEUE_STATE_STACK_XOFF))
 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
-					(1 << __QUEUE_STATE_FROZEN))
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 /*
  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
@@ -1004,6 +1029,32 @@ struct net_device_ops {
 	void			(*ndo_neigh_destroy)(struct neighbour *n);
 };
 
+
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+#define NETDEV_PATH_HW_SUBPORTS_MAX  CONFIG_BCM_MAX_GEM_PORTS
+struct netdev_path
+{
+        /* this pointer is used to create lists of interfaces that belong
+           to the same interface path in Linux. It points to the next
+           interface towards the physical interface (the root interface) */
+        struct net_device *next_dev;
+        /* this reference counter indicates the number of interfaces
+           referencing this interface */
+        int refcount;
+        /* indicates the hardware port number associated to the
+           interface */
+        unsigned int hw_port;
+        /* hardware port type, must be set to one of the types defined in
+           BlogPhy_t  */
+        unsigned int hw_port_type;
+        /* some device drivers support virtual subports within a hardware
+		   port. hw_subport_mcast is used to map a multicast hw subport
+		   to a hw port. */
+        unsigned int hw_subport_mcast_idx;
+};
+#endif
+
+
 /*
  *	The DEVICE structure.
  *	Actually, this whole structure is a big mistake.  It mixes I/O
@@ -1064,6 +1115,23 @@ struct net_device {
 	int			iflink;
 
 	struct net_device_stats	stats;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        /* Update the bstats */
+        void (*put_stats)(struct net_device *dev_p, BlogStats_t * bStats_p);
+	/* Get stats pointer by type */
+        void* (*get_stats_pointer)(struct net_device *dev_p, char type);
+        /* Clear the stats information */
+        void (*clr_stats)(struct net_device *dev_p);
+	/* runner multicast acceleration hook,to be enclosed in different MACRO??? */
+	wlan_client_get_info_t  wlan_client_get_info;
+#endif
+
+
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+    struct netdev_path path;
+#endif
+
 	atomic_long_t		rx_dropped; /* dropped packets by core network
 					     * Do not use this in drivers.
 					     */
@@ -2016,7 +2084,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  * Check individual transmit queue of a device with multiple transmit queues.
  */
 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
-					    u16 queue_index)
+					 u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 
@@ -2024,7 +2092,7 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev,
 }
 
 static inline bool netif_subqueue_stopped(const struct net_device *dev,
-					  struct sk_buff *skb)
+					 struct sk_buff *skb)
 {
 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 }
@@ -2094,12 +2162,34 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 #endif
 }
 
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+typedef enum {
+BCM_GSO_LOOPBACK_NONE=0, /*null device for error protection*/
+BCM_GSO_LOOPBACK_WL0,   /* wlan interface 0 */
+BCM_GSO_LOOPBACK_WL1,   /* wlan interface 1 */
+BCM_GSO_LOOPBACK_MAXDEVS
+} gso_loopback_devids;
+
+extern int (*bcm_gso_loopback_hw_offload)(struct sk_buff *skb,  unsigned int txDevId);
+extern inline unsigned int bcm_is_gso_loopback_dev(void *dev);
+extern unsigned int bcm_gso_loopback_devptr2devid(void *dev);
+extern struct net_device * bcm_gso_loopback_devid2devptr(unsigned int devId);
+#endif
+
 /* Use this variant when it is known for sure that it
  * is executing from hardware interrupt context or with hardware interrupts
  * disabled.
  */
 extern void dev_kfree_skb_irq(struct sk_buff *skb);
 
+
+#if defined(CONFIG_BCM_KF_SKB_DEFINES) && defined(CONFIG_SMP)
+/* put the skb on a queue, and wake up the skbfreeTask to free it later,
+ * to save some cycles now
+ */
+extern void dev_kfree_skb_thread(struct sk_buff *skb);
+#endif
+
 /* Use this variant in places where it could be invoked
  * from either hardware interrupt or other context, with hardware interrupts
  * either disabled or enabled.
@@ -2623,6 +2713,10 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb);
 
+#if defined(CONFIG_BCM_KF_SPDSVC) && defined(CONFIG_BCM_SPDSVC_SUPPORT)
+int skb_bypass_hw_features(struct sk_buff *skb);
+#endif
+
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
 	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2670,6 +2764,57 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+
+/* Returns TRUE when _dev is a member of a path, otherwise FALSE */
+#define netdev_path_is_linked(_dev) ( (_dev)->path.next_dev != NULL )
+
+/* Returns TRUE when _dev is the leaf in a path, otherwise FALSE */
+#define netdev_path_is_leaf(_dev) ( (_dev)->path.refcount == 0 )
+
+/* Returns TRUE when _dev is the root of a path, otherwise FALSE. The root
+   device is the physical device */
+#define netdev_path_is_root(_dev) ( (_dev)->path.next_dev == NULL )
+
+/* Returns a pointer to the next device in a path, towards the root
+   (physical) device */
+#define netdev_path_next_dev(_dev) ( (_dev)->path.next_dev )
+
+#define netdev_path_set_hw_port(_dev, _hw_port, _hw_port_type)  \
+    do {                                                        \
+        (_dev)->path.hw_port = (_hw_port);                      \
+        (_dev)->path.hw_port_type = (_hw_port_type);            \
+    } while(0)
+
+#define netdev_path_set_hw_port_only(_dev, _hw_port)            \
+    do {                                                        \
+        (_dev)->path.hw_port = (_hw_port);                      \
+    } while(0)
+
+#define netdev_path_get_hw_port(_dev) ( (_dev)->path.hw_port )
+
+#define netdev_path_get_hw_port_type(_dev) ( (_dev)->path.hw_port_type )
+
+#define netdev_path_get_hw_subport_mcast_idx(_dev) ( (_dev)->path.hw_subport_mcast_idx )
+
+static inline struct net_device *netdev_path_get_root(struct net_device *dev)
+{
+    for (; !netdev_path_is_root(dev); dev = netdev_path_next_dev(dev));
+    return dev;
+}
+
+int netdev_path_add(struct net_device *new_dev, struct net_device *next_dev);
+
+int netdev_path_remove(struct net_device *dev);
+
+void netdev_path_dump(struct net_device *dev);
+
+int netdev_path_set_hw_subport_mcast_idx(struct net_device *dev,
+									 unsigned int subport_idx);
+
+#endif /* CONFIG_BCM_KF_NETDEV_PATH */
+
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* netdev_printk helpers, similar to dev_printk */
@@ -2801,5 +2946,9 @@ do {								\
 #endif
 
 #endif /* __KERNEL__ */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+extern struct net_device_stats * net_dev_collect_stats(struct net_device  *net_p);
+extern void net_dev_clear_stats(struct net_device * dev_p);
+#endif
 
 #endif	/* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0d3dd66322ecbb24529303f6634f36e5ce6f390d..76d70b905b290228cb8517a14bd7e9ff47e0bccf 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -83,6 +83,12 @@ enum ip_conntrack_status {
 	/* Conntrack is a fake untracked entry */
 	IPS_UNTRACKED_BIT = 12,
 	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    /* Conntrack eligible for Blogging */
+    IPS_BLOG_BIT = 13,
+    IPS_BLOG = (1 << IPS_BLOG_BIT),
+#endif
 };
 
 /* Connection tracking event types */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 26f9226ea72b18ff379e2d553ccdc00112c95617..0430a055e86bf3e82ec96060a2a591251f5dd083 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -12,6 +12,23 @@
 /* This structure exists only once per master */
 struct nf_ct_h323_master {
 
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	enum{
+		/* tpkt header and payload are wrapped in one packet */
+		DIVTYPE_NORMAL = 0x00,
+		/* tpkt header is in the first packet and payload is the
+		 * next one
+		 */
+		DIVTYPE_TPKTHDR	= 0x01,
+		/* tpkt packet (size may be more than several kbytes) is
+		 * separated into several parts by the tcp protocol. This
+		 * dividing method is different from the second one.
+		 */
+		DIVTYPE_Q931 = 0x02,
+	}div_type[IP_CT_DIR_MAX]; 
+#endif
+
 	/* Original and NATed Q.931 or H.245 signal ports */
 	__be16 sig_port[IP_CT_DIR_MAX];
 
@@ -29,6 +46,11 @@ struct nf_ct_h323_master {
 
 struct nf_conn;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+extern int have_direct_route(union nf_inet_addr *src, union nf_inet_addr *dst,
+			     int family);
+#endif 
+
 extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 			 TransportAddress *taddr,
 			 union nf_inet_addr *addr, __be16 *port);
@@ -36,6 +58,10 @@ extern void nf_conntrack_h245_expect(struct nf_conn *new,
 				     struct nf_conntrack_expect *this);
 extern void nf_conntrack_q931_expect(struct nf_conn *new,
 				     struct nf_conntrack_expect *this);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+extern int (*set_addr_bf_hook)(struct sk_buff **pskb,
+		       	       unsigned char **data, int datalen, int dataoff);
+#endif
 extern int (*set_h245_addr_hook) (struct sk_buff *skb,
 				  unsigned char **data, int dataoff,
 				  H245_TransportAddress *taddr,
diff --git a/include/linux/netfilter/nf_conntrack_ipsec.h b/include/linux/netfilter/nf_conntrack_ipsec.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a709a8fb94afbf9af69f423d7e5735c12130bc5
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_ipsec.h
@@ -0,0 +1,43 @@
+/* IPSEC constants and structs */
+#ifndef _NF_CONNTRACK_IPSEC_H
+#define _NF_CONNTRACK_IPSEC_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+/* conntrack private data */
+struct nf_ct_ipsec_master 
+{
+   __be32 initcookie;  /* initcookie of ISAKMP */
+   __be32 lan_ip;        /* LAN IP */
+};
+
+struct nf_nat_ipsec 
+{
+   __be32 lan_ip;   /* LAN IP */
+};
+
+#ifdef __KERNEL__
+
+#define IPSEC_PORT   500
+#define MAX_VPN_CONNECTION 8  
+
+struct isakmp_pkt_hdr 
+{
+   __be32 initcookie;
+};
+
+
+/* crap needed for nf_conntrack_compat.h */
+struct nf_conn;
+struct nf_conntrack_expect;
+
+extern int
+(*nf_nat_ipsec_hook_outbound)(struct sk_buff *skb,
+                           struct nf_conn *ct, enum ip_conntrack_info ctinfo);
+
+extern int
+(*nf_nat_ipsec_hook_inbound)(struct sk_buff *skb, struct nf_conn *ct,
+                             enum ip_conntrack_info ctinfo, __be32 lan_ip);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_IPSEC_H */
diff --git a/include/linux/netfilter/nf_conntrack_proto_esp.h b/include/linux/netfilter/nf_conntrack_proto_esp.h
new file mode 100644
index 0000000000000000000000000000000000000000..a35aca605fa6972bb618014548ffe1a2a77b3ebd
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_proto_esp.h
@@ -0,0 +1,20 @@
+#ifndef _CONNTRACK_PROTO_ESP_H
+#define _CONNTRACK_PROTO_ESP_H
+#include <asm/byteorder.h>
+
+/* ESP PROTOCOL HEADER */
+
+struct esphdr {
+	__u32	spi;
+};
+
+struct nf_ct_esp {
+	unsigned int stream_timeout;
+	unsigned int timeout;
+};
+
+#ifdef __KERNEL__
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+#endif /* __KERNEL__ */
+#endif /* _CONNTRACK_PROTO_ESP_H */
diff --git a/include/linux/netfilter/nf_conntrack_pt.h b/include/linux/netfilter/nf_conntrack_pt.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c0ad96e292c471ccd65e55448377dcdc35a636b
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_pt.h
@@ -0,0 +1,18 @@
+#ifndef _NF_CONNTRACK_PT_H
+#define _NF_CONNTRACK_PT_H
+/* PT tracking. */
+#define PT_MAX_ENTRIES	100
+#define PT_MAX_PORTS	1000
+#define PT_MAX_EXPECTED	1000
+#define PT_TIMEOUT	180
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#define PT_PROTO_TCP 	1
+#define PT_PROTO_UDP 	2
+#define PT_PROTO_ALL 	(PT_PROTO_TCP|PT_PROTO_UDP)
+#define PT_PROTO_ALL_IN	0
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#endif /* _NF_CONNTRACK_PT_H */
diff --git a/include/linux/netfilter/nf_conntrack_rtsp.h b/include/linux/netfilter/nf_conntrack_rtsp.h
new file mode 100644
index 0000000000000000000000000000000000000000..8978ad0ceb6da4f2f814c868cefa5eabe8d6b5d6
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_rtsp.h
@@ -0,0 +1,89 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+
+/*
+* <:copyright-BRCM:2012:DUAL/GPL:standard
+* 
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+#ifndef _NF_CONNTRACK_RTSP_H
+#define _NF_CONNTRACK_RTSP_H
+
+#ifdef __KERNEL__
+
+/* This structure exists only once per master */
+struct nf_ct_rtsp_master {
+	/* The client has sent PAUSE message and not replied */
+	int paused;
+};
+
+/* Single data channel */
+extern int (*nat_rtsp_channel_hook) (struct sk_buff *skb,
+				     struct nf_conn *ct,
+				     enum ip_conntrack_info ctinfo,
+				     unsigned int matchoff,
+				     unsigned int matchlen,
+				     struct nf_conntrack_expect *exp,
+				     int *delta);
+
+/* A pair of data channels (RTP/RTCP) */
+extern int (*nat_rtsp_channel2_hook) (struct sk_buff *skb,
+				      struct nf_conn *ct,
+				      enum ip_conntrack_info ctinfo,
+				      unsigned int matchoff,
+				      unsigned int matchlen,
+				      struct nf_conntrack_expect *rtp_exp,
+				      struct nf_conntrack_expect *rtcp_exp,
+				      char dash, int *delta);
+
+/* Modify parameters like client_port in Transport for single data channel */
+extern int (*nat_rtsp_modify_port_hook) (struct sk_buff *skb,
+					 struct nf_conn *ct,
+			      	  	 enum ip_conntrack_info ctinfo,
+			      	  	 unsigned int matchoff,
+					 unsigned int matchlen,
+			      	  	 __be16 rtpport, int *delta);
+
+/* Modify parameters like client_port in Transport for multiple data channels*/
+extern int (*nat_rtsp_modify_port2_hook) (struct sk_buff *skb,
+					  struct nf_conn *ct,
+			       	   	  enum ip_conntrack_info ctinfo,
+			       	   	  unsigned int matchoff,
+					  unsigned int matchlen,
+			       	   	  __be16 rtpport, __be16 rtcpport,
+				   	  char dash, int *delta);
+
+/* Modify parameters like destination in Transport */
+extern int (*nat_rtsp_modify_addr_hook) (struct sk_buff *skb,
+					 struct nf_conn *ct,
+				 	 enum ip_conntrack_info ctinfo,
+					 int matchoff, int matchlen,
+					 int *delta);
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_RTSP_H */
+
+#endif
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0ce91d56a5f264c989ee5b17a027b489eaabc593..11bbe2df9f550b044598e7bbd879f1244656b64f 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -5,6 +5,54 @@
 #define SIP_PORT	5060
 #define SIP_TIMEOUT	3600
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+
+/* Classes defined by Broadcom */
+#define SIP_EXPECT_CLASS_SIGNALLING	0
+#define SIP_EXPECT_CLASS_AUDIO		1
+#define SIP_EXPECT_CLASS_VIDEO		2
+#define SIP_EXPECT_CLASS_OTHER		3
+#define SIP_EXPECT_CLASS_MAX		3
+
+enum sip_header_pos {
+	POS_VIA,
+	POS_CONTACT,
+	POS_CONTENT,
+	POS_OWNER_IP4,
+	POS_CONNECTION_IP4,
+	POS_ANAT,
+	POS_MEDIA_AUDIO,
+	POS_MEDIA_VIDEO,
+};
+
+extern int (*nf_nat_addr_hook)(struct sk_buff *skb, unsigned int protoff,
+			       struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo, char **dptr,
+			       int *dlen, char **addr_begin, int *addr_len,
+			       struct nf_conntrack_man *addr);
+
+extern int (*nf_nat_rtp_hook)(struct sk_buff *skb, unsigned int protoff,
+			      struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo, char **dptr,
+			      int *dlen, struct nf_conntrack_expect *exp,
+			      char **port_begin, int *port_len);
+
+extern int (*nf_nat_snat_hook)(struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo,
+			       struct nf_conntrack_expect *exp);
+
+extern int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
+			      struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo, char **dptr,
+			      int *dlen, struct nf_conntrack_expect *exp,
+			      char **addr_begin, int *addr_len);
+
+struct nf_ct_sip_master {
+	unsigned int	register_cseq;
+	unsigned int	invite_cseq;
+};
+#else /* CONFIG_BCM_KF_NETFILTER */
+
 struct nf_ct_sip_master {
 	unsigned int	register_cseq;
 	unsigned int	invite_cseq;
@@ -174,6 +222,6 @@ extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
 				 enum sdp_header_types type,
 				 enum sdp_header_types term,
 				 unsigned int *matchoff, unsigned int *matchlen);
-
+#endif /* CONFIG_BCM_KF_NETFILTER */
 #endif /* __KERNEL__ */
 #endif /* __NF_CONNTRACK_SIP_H__ */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
index 2f6bbc5b812543de9f3afefdba2a7d67ba581a02..d7101c708fe9fae02faa9ad1a22f1ea3f5e94a75 100644
--- a/include/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -32,6 +32,12 @@ union nf_conntrack_man_proto {
 	struct {
 		__be16 key;	/* GRE key is 32bit, PPtP only uses 16bit */
 	} gre;
+#if defined(CONFIG_BCM_KF_PROTO_ESP) && \
+	(defined(CONFIG_NF_CT_PROTO_ESP) || defined(CONFIG_NF_CT_PROTO_ESP_MODULE))
+	struct {
+		__be32 spi;
+	} esp;
+#endif
 };
 
 #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 8d674a78674461c44422ed67bf45a59be270d198..5760a0dcdb2ef774e16dca52fb2fe1418994e059 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -124,6 +124,25 @@ struct xt_counters_info {
 #define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */
 
 #ifndef __KERNEL__
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+#define XT_MATCH_ITERATE(type, e, fn, args...)			\
+({								\
+	unsigned int __i;					\
+	int __ret = 0, __rval = 0;						\
+	struct xt_entry_match *__m;				\
+								\
+	for (__i = sizeof(type);				\
+	     __i < (e)->target_offset;				\
+	     __i += __m->u.match_size) {			\
+		__m = (void *)e + __i;				\
+								\
+		__ret = fn(__m , ## args);			\
+		if (__ret != 0)					\
+			__rval = __ret;					\
+	}							\
+	__rval;							\
+})
+#else
 /* fn returns 0 to continue iteration */
 #define XT_MATCH_ITERATE(type, e, fn, args...)			\
 ({								\
@@ -142,6 +161,7 @@ struct xt_counters_info {
 	}							\
 	__ret;							\
 })
+#endif
 
 /* fn returns 0 to continue iteration */
 #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
diff --git a/include/linux/netfilter/xt_layer7.h b/include/linux/netfilter/xt_layer7.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f5fc3cb7c5cd6dd9fd44409cd572d382d6bc28a
--- /dev/null
+++ b/include/linux/netfilter/xt_layer7.h
@@ -0,0 +1,19 @@
+#ifndef _XT_LAYER7_H
+#define _XT_LAYER7_H
+
+#if 0
+#define MAX_PATTERN_LEN 8192
+#define MAX_PROTOCOL_LEN 256
+#else
+#define MAX_PATTERN_LEN 256
+#define MAX_PROTOCOL_LEN 64
+#endif
+
+struct xt_layer7_info {
+    char protocol[MAX_PROTOCOL_LEN];
+    char pattern[MAX_PATTERN_LEN];
+    u_int8_t invert;
+    u_int8_t pkt;
+};
+
+#endif /* _XT_LAYER7_H */
diff --git a/include/linux/netfilter_bridge/ebt_ftos_t.h b/include/linux/netfilter_bridge/ebt_ftos_t.h
new file mode 100644
index 0000000000000000000000000000000000000000..721e9ce9cabdc7f4e1a888a7affaec64fa52ba4c
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_ftos_t.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_BRIDGE_EBT_FTOS_T_H
+#define __LINUX_BRIDGE_EBT_FTOS_T_H
+
+struct ebt_ftos_t_info
+{
+    int           ftos_set;
+	unsigned char ftos;
+	// EBT_ACCEPT, EBT_DROP or EBT_CONTINUE or EBT_RETURN
+	int target;
+};
+#define EBT_FTOS_TARGET "ftos"
+
+#define FTOS_TARGET       0x01
+#define FTOS_SETFTOS      0x02
+#define FTOS_WMMFTOS      0x04
+#define FTOS_8021QFTOS    0x08
+
+#define DSCP_MASK_SHIFT   5
+#define PRIO_LOC_NFMARK   16
+#define PRIO_LOC_NFMASK   7
+
+#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index c4bbc41b0ea47c576d4649262fc337f445fb0601..bfe99c062fb3c789e0aaa7bd8d04255c69c316f3 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -23,8 +23,14 @@
 #define EBT_IP_PROTO 0x08
 #define EBT_IP_SPORT 0x10
 #define EBT_IP_DPORT 0x20
+#if defined(CONFIG_BCM_KF_NETFILTER) || !defined(CONFIG_BCM_IN_KERNEL)
+#define EBT_IP_DSCP  0x40
+#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
+ EBT_IP_SPORT | EBT_IP_DPORT | EBT_IP_DSCP )
+#else 
 #define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
  EBT_IP_SPORT | EBT_IP_DPORT )
+#endif
 #define EBT_IP_MATCH "ip"
 
 /* the same values are used for the invflags */
@@ -34,6 +40,9 @@ struct ebt_ip_info {
 	__be32 smsk;
 	__be32 dmsk;
 	__u8  tos;
+#if defined(CONFIG_BCM_KF_NETFILTER) || !defined(CONFIG_BCM_IN_KERNEL)
+	__u8  dscp;
+#endif
 	__u8  protocol;
 	__u8  bitmask;
 	__u8  invflags;
diff --git a/include/linux/netfilter_bridge/ebt_mark_t.h b/include/linux/netfilter_bridge/ebt_mark_t.h
index 7d5a268a43111401644615474570b96a88e295d6..87925c4b332bd47074a14df345ed13ec78dd5df3 100644
--- a/include/linux/netfilter_bridge/ebt_mark_t.h
+++ b/include/linux/netfilter_bridge/ebt_mark_t.h
@@ -12,6 +12,10 @@
 #define MARK_OR_VALUE  (0xffffffe0)
 #define MARK_AND_VALUE (0xffffffd0)
 #define MARK_XOR_VALUE (0xffffffc0)
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#define VTAG_SET_VALUE (0xffffffb0)
+#endif
+
 
 struct ebt_mark_t_info {
 	unsigned long mark;
diff --git a/include/linux/netfilter_bridge/ebt_time.h b/include/linux/netfilter_bridge/ebt_time.h
new file mode 100644
index 0000000000000000000000000000000000000000..f47b531d7b9b8322d2fa71527c37950a054ec23c
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_time.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_BRIDGE_EBT_TIME_H
+#define __LINUX_BRIDGE_EBT_TIME_H
+
+
+struct ebt_time_info {
+	u_int8_t  days_match;   /* 1 bit per day. -SMTWTFS                      */
+	u_int16_t time_start;   /* 0 < time_start < 23*60+59 = 1439             */
+	u_int16_t time_stop;    /* 0:0 < time_stop < 23:59                      */
+	u_int8_t  kerneltime;   /* ignore skb time (and use kerneltime) or not. */
+};
+
+#define EBT_TIME_MATCH "time"
+
+#endif /* __LINUX_BRIDGE_EBT_TIME_H */
diff --git a/include/linux/netfilter_bridge/ebt_wmm_mark_t.h b/include/linux/netfilter_bridge/ebt_wmm_mark_t.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9ecef75277d15f9697a0b465b361166932f107f
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_wmm_mark_t.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_BRIDGE_EBT_WMM_MARK_T_H
+#define __LINUX_BRIDGE_EBT_WMM_MARK_T_H
+
+#define WMM_MARK_DSCP		1
+#define WMM_MARK_8021D		2
+
+#define WMM_MARK_DSCP_STR	"dscp"
+#define WMM_MARK_8021D_STR	"vlan"
+
+#define PRIO_LOC_NFMARK		16
+#define PRIO_LOC_NFMASK		7	
+
+#define WMM_DSCP_MASK_SHIFT	5
+#define WMM_MARK_VALUE_NONE	-1
+
+
+struct ebt_wmm_mark_t_info
+{
+	int mark; 
+	int markpos;
+	int markset;
+	/* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */
+	int target;
+};
+#define EBT_WMM_MARK_TARGET "wmm-mark"
+
+#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index a2092f582a784f6df9443f6a773cff660288a9a7..3029f8f72373800868970c31300c41487c618863 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -29,6 +29,22 @@
 
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#define NETLINK_BRCM_MONITOR 25 /*send events to userspace monitor task(broadcom specific)*/
+#define NETLINK_BRCM_EPON       26
+#endif
+#if defined(CONFIG_BCM_KF_MLD) || defined(CONFIG_BCM_KF_IGMP)
+#define NETLINK_MCPD            30       /* for multicast */
+#endif
+
+#if defined(CONFIG_BCM_KF_WL)
+#define NETLINK_WLCSM            31       /*  for brcm wireless cfg[nvram]/statics/management extention */
+#endif
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#define NETLINK_DPI            27       /* for dpictl */
+#endif
+
 #define MAX_LINKS 32		
 
 struct sockaddr_nl {
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 81733d12cbeaa822e965bc6d9fc7222b50c325cf..008d20a1d846422963ac3f61272d009fd130dc54 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -64,6 +64,9 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc,
 				u32 *flags);
 
 #else /* CONFIG_OF_GPIO */
+#if defined(CONFIG_BCM_KF_KERN_WARNING)
+struct gpio_chip;
+#endif
 
 /* Drivers may not strictly depend on the GPIO support, so let them link. */
 static inline int of_get_named_gpio_flags(struct device_node *np,
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index f48bfc80cb4bfc7736173f6687550867ed0fc882..af9cd2dbace8c3797cbdf7c68be35eeeba709088 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -26,6 +26,15 @@
 #include <linux/types.h>
 #include <linux/socket.h>
 
+#ifdef CONFIG_BCM_KF_PHONET
+/* Phonet media types */
+#define PN_MEDIA_ROUTING        0x00
+#define PN_MEDIA_USB            0x1B
+#define PN_MEDIA_DEFAULT        0x25
+#define PN_MEDIA_MODEM_HOST_IF  0x26
+#define PN_MEDIA_AUX_HOST_HOST_IF  0x27
+#endif /* CONFIG_BCM_KF_PHONET */
+
 /* Automatic protocol selection */
 #define PN_PROTO_TRANSPORT	0
 /* Phonet datagram socket */
@@ -44,12 +53,21 @@
 #define PNADDR_BROADCAST	0xFC
 #define PNPORT_RESOURCE_ROUTING	0
 
+#ifdef CONFIG_BCM_KF_PHONET
+/* define object for multicast */
+#define PNOBJECT_MULTICAST      0x20
+#endif /* CONFIG_BCM_KF_PHONET */
+
 /* Values for PNPIPE_ENCAP option */
 #define PNPIPE_ENCAP_NONE	0
 #define PNPIPE_ENCAP_IP		1
 
 /* ioctls */
 #define SIOCPNGETOBJECT		(SIOCPROTOPRIVATE + 0)
+#ifdef CONFIG_BCM_KF_PHONET
+#define SIOCCONFIGTYPE          (SIOCPROTOPRIVATE + 1)
+#define SIOCCONFIGSUBTYPE       (SIOCPROTOPRIVATE + 2)
+#endif
 #define SIOCPNENABLEPIPE	(SIOCPROTOPRIVATE + 13)
 #define SIOCPNADDRESOURCE	(SIOCPROTOPRIVATE + 14)
 #define SIOCPNDELRESOURCE	(SIOCPROTOPRIVATE + 15)
diff --git a/include/linux/ppp-ioctl.h b/include/linux/ppp-ioctl.h
index 2d9a8859550a00a16cd3178ccd0b8444bd861d60..121941455e2008dae48a7e1add568a0ab8a79fa7 100644
--- a/include/linux/ppp-ioctl.h
+++ b/include/linux/ppp-ioctl.h
@@ -77,6 +77,11 @@ struct pppol2tp_ioc_stats {
 	__aligned_u64	rx_errors;
 };
 
+#if defined(CONFIG_BCM_KF_PPP)
+/* PPP device name type */
+typedef char	ppp_real_dev_name[IFNAMSIZ];
+#endif
+
 /*
  * Ioctl definitions.
  */
@@ -111,6 +116,9 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCATTCHAN	_IOW('t', 56, int)	/* attach to ppp channel */
 #define PPPIOCGCHAN	_IOR('t', 55, int)	/* get ppp channel number */
 #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
+#if defined(CONFIG_BCM_KF_PPP)
+#define	PPPIOCSREALDEV	_IOW('t', 53, ppp_real_dev_name) /* set real device name */
+#endif
 
 #define SIOCGPPPSTATS   (SIOCDEVPRIVATE + 0)
 #define SIOCGPPPVER     (SIOCDEVPRIVATE + 1)	/* NEVER change this!! */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5e712854fc8d868044ae2eb95a267d47e78c4d51..e93ae42a1948e89d80c4d805ed2db26ed95619b6 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -119,6 +119,10 @@ do { \
 # ifdef CONFIG_SMP
    extern void migrate_disable(void);
    extern void migrate_enable(void);
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+# define migrate_disable_preempt_on()	migrate_disable()
+# define migrate_enable_preempt_on()	migrate_enable()
+#endif
 # else /* CONFIG_SMP */
 #  define migrate_disable()		do { } while (0)
 #  define migrate_enable()		do { } while (0)
@@ -130,6 +134,10 @@ do { \
 # define preempt_enable_nort()		preempt_enable()
 # define migrate_disable()		preempt_disable()
 # define migrate_enable()		preempt_enable()
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+   extern void migrate_disable_preempt_on(void);
+   extern void migrate_enable_preempt_on(void);
+#endif
 #endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index a3bfbdf63d32a9da2d2b3227967303c5e87e2aba..0da1d182e6175f7e8833673e16b6b5d23c2861bb 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -61,4 +61,19 @@ static inline void prefetch_range(void *addr, size_t len)
 #endif
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_KF_ARM_PLD)
+#if defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)
+static inline void bcm_prefetch(const void * addr, const int cachelines)
+{
+    switch (cachelines) {
+        case 4: __asm__ __volatile__("pld\t%a0" : : "p"(addr + ((0x1 << CONFIG_ARM_L1_CACHE_SHIFT) * 3)) : "cc");
+        case 3: __asm__ __volatile__("pld\t%a0" : : "p"(addr + ((0x1 << CONFIG_ARM_L1_CACHE_SHIFT) * 2)) : "cc");
+        case 2: __asm__ __volatile__("pld\t%a0" : : "p"(addr + ((0x1 << CONFIG_ARM_L1_CACHE_SHIFT) * 1)) : "cc");
+        case 1: __asm__ __volatile__("pld\t%a0" : : "p"(addr + ((0x1 << CONFIG_ARM_L1_CACHE_SHIFT) * 0)) : "cc");
+    }
+}
+#else
+static inline void bcm_prefetch(const void * addr, const int cachelines) { }
+#endif /* CONFIG_BCM963138 || defined(CONFIG_BCM963148) */
+#endif
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index aaf8b7dc360caebc90201fa0e73dd4e987f0db4b..e1347b9ad19bf3e867c4c1fcfeceb494f3b16757 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -940,7 +940,11 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 {
 	typedef void (*rcu_callback)(struct rcu_head *);
 
+#if defined(CONFIG_BCM_KF_RCU_CONSTANT_BUG)
+        // this causes a bug if optimization is disabled
+#else
 	BUILD_BUG_ON(!__builtin_constant_p(offset));
+#endif
 
 	/* See the kfree_rcu() header comment. */
 	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 12a92fc758a4b150a29b54769621c0d0e4d5a4d6..66ed0d6e2570dcb89292bdf0ff4f249b4ea6be72 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -171,6 +171,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+extern void proc_schedaudit_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_schedaudit_set_task(struct task_struct *p, uint32_t setindex,
+         uint32_t trig_latency, uint32_t trig_runtime, uint32_t trig_printk);
+#endif
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -807,6 +813,23 @@ static inline int sched_info_on(void)
 #endif
 }
 
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+struct bcm_schedaudit {
+	uint32_t trig_latency; /* rw, in us, if 0 schedaudit is totally disabled */
+	uint32_t _start_tstamp; /* internal bookkeeping: start point for timing */
+	uint32_t trig_runtime; /* rw, in us */
+	uint32_t trig_printk;  /* rw, if 1 violations will be noted with printk */
+	uint32_t conforming_latency; /* ro */
+	uint32_t conforming_runtime; /* ro */
+	uint32_t latency_violations; /* ro */
+	uint32_t runtime_violations; /* ro */
+	uint32_t max_latency;        /* ro, in us */
+	uint32_t max_runtime;        /* ro, in us */
+};
+#define BCM_SCHEDAUDIT_QUEUED(p)  if (p->bcm_saudit.trig_latency > 0) { \
+                           p->bcm_saudit._start_tstamp = bcm_tstamp_read();}
+#endif  /* CONFIG_BCM_KF_SCHEDAUDIT */
+
 enum cpu_idle_type {
 	CPU_IDLE,
 	CPU_NOT_IDLE,
@@ -1303,12 +1326,19 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+	int migrate_disable;
+#ifdef CONFIG_SCHED_DEBUG
+	int migrate_disable_atomic;
+#endif
+#else
 #ifdef CONFIG_PREEMPT_RT_FULL
 	int migrate_disable;
 #ifdef CONFIG_SCHED_DEBUG
 	int migrate_disable_atomic;
 #endif
 #endif
+#endif	/* CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON */
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1326,6 +1356,9 @@ struct task_struct {
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
+#if defined(CONFIG_BCM_KF_SCHEDAUDIT)
+	struct bcm_schedaudit bcm_saudit;
+#endif
 
 	struct list_head tasks;
 #ifdef CONFIG_SMP
@@ -2799,22 +2832,33 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 static inline int __migrate_disabled(struct task_struct *p)
 {
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+	return p->migrate_disable;
+#else
 #ifdef CONFIG_PREEMPT_RT_FULL
 	return p->migrate_disable;
 #else
 	return 0;
 #endif
+#endif /* CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON */
 }
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
 {
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+	if (p->migrate_disable)
+		return cpumask_of(task_cpu(p));
+
+	return &p->cpus_allowed;
+#else
 #ifdef CONFIG_PREEMPT_RT_FULL
 	if (p->migrate_disable)
 		return cpumask_of(task_cpu(p));
 #endif
 
 	return &p->cpus_allowed;
+#endif	/* CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON */
 }
 
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2249b110274b0f738177bf3b65d99a19d172c700..852cfd9ff7cabc32c153d34707d5f731a87cc127 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,6 +29,11 @@
 #include <net/checksum.h>
 #include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
@@ -109,6 +114,39 @@ struct net_device;
 struct scatterlist;
 struct pipe_inode_info;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+// This is required even if blog is not defined, so it falls
+// under the nbuff category
+struct blog_t;					/* defined(CONFIG_BLOG) */
+
+#ifndef NULL_STMT
+#define NULL_STMT		do { /* NULL BODY */ } while (0)
+#endif
+
+typedef void (*RecycleFuncP)(void *nbuff_p, unsigned context, unsigned flags);
+#define SKB_DATA_RECYCLE	(1 << 0)
+#define SKB_RECYCLE		(1 << 1)
+#define SKB_DATA_NO_RECYCLE	(~SKB_DATA_RECYCLE)	/* to mask out */
+#define SKB_NO_RECYCLE		(~SKB_RECYCLE)		/* to mask out */
+#define SKB_RECYCLE_NOFREE	(1 << 2)		/* do not use */
+
+struct fkbuff;
+
+extern void skb_frag_xmit4(struct sk_buff *origskb, struct net_device *txdev,
+			   uint32_t is_pppoe, uint32_t minMtu, void *ip_p);
+extern void skb_frag_xmit6(struct sk_buff *origskb, struct net_device *txdev,
+			   uint32_t is_pppoe, uint32_t minMtu, void *ip_p);
+extern struct sk_buff * skb_xlate(struct fkbuff *fkb_p);
+extern struct sk_buff * skb_xlate_dp(struct fkbuff *fkb_p, uint8_t *dirty_p);
+extern int skb_avail_headroom(const struct sk_buff *skb);
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+#define SKB_VLAN_MAX_TAGS	4
+#endif
+
+#define CONFIG_SKBSHINFO_HAS_DIRTYP	1
+#endif // CONFIG_BCM_KF_NBUFF
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack {
 	atomic_t use;
@@ -249,6 +287,11 @@ struct ubuf_info {
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/* to preserve compat with binary only modules, do not change the
+	 * position of this field relative to the start of the structure. */
+	__u8		*dirty_p;
+#endif	/* defined(CONFIG_BCM_KF_NBUFF) */
 	unsigned char	nr_frags;
 	__u8		tx_flags;
 	unsigned short	gso_size;
@@ -272,6 +315,20 @@ struct skb_shared_info {
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+typedef struct bl_buffer_info {
+	unsigned char	*buffer;		/* address of the buffer from bpm */
+	unsigned char	*packet;		/* address of the data */
+	unsigned int	buffer_len;		/* size of the buffer */
+	unsigned int	packet_len;		/* size of the data packet */
+	unsigned int	buffer_number;	/* the buffer location in the bpm */
+	unsigned int	port;			/* the port */
+} bl_skbuff_info;
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
 /* We divide dataref into two halves.  The higher 16 bits hold references
  * to the payload part of skb->data.  The lower 16 bits hold references to
  * the entire skb->data.  A clone of a headerless skb holds the length of
@@ -323,6 +380,62 @@ typedef unsigned char *sk_buff_data_t;
 #define NET_SKBUFF_NF_DEFRAG_NEEDED 1
 #endif
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+typedef union wlFlowInf
+{
+	uint32_t u32;
+	union 
+	{
+		union
+		{
+			struct
+			{
+				/* Start - Shared fields between ucast and mcast */
+				uint32_t is_ucast:1;
+				/* wl_prio is 4 bits for nic and 3 bits for dhd. Plan is
+				to make NIC as 3 bits after more analysis */
+				uint32_t wl_prio:4;
+				/* End - Shared fields between ucast and mcast */
+				uint32_t nic_reserved1:11;
+				uint32_t nic_reserved2:8;
+				uint32_t wl_chainidx:8;
+			};
+			struct
+			{
+				uint32_t overlayed_field:16;
+				uint32_t ssid_dst:16; /* For bridged traffic we don't have chainidx (0xFE) */
+			};
+		}nic;
+
+		struct
+		{
+			/* Start - Shared fields between ucast and mcast */
+			uint32_t is_ucast:1;
+			uint32_t wl_prio:4;
+			/* End - Shared fields between ucast and mcast */
+			/* Start - Shared fields between dhd ucast and dhd mcast */
+			uint32_t flowring_idx:10;
+			/* End - Shared fields between dhd ucast and dhd mcast */
+			uint32_t dhd_reserved:13;
+			uint32_t ssid:4;
+		}dhd;
+	}ucast;
+	struct 
+	{
+		/* Start - Shared fields between ucast and mcast */
+		/* for multicast, WFD does not need to populate this flowring_idx, it is used internally by dhd driver */ 
+		uint32_t is_ucast:1; 
+		uint32_t wl_prio:4;
+		/* End - Shared fields between ucast and mcast */
+		/* Start - Shared fields between dhd ucast and dhd mcast */
+		uint32_t flowring_idx:10;
+		/* End - Shared fields between dhd ucast and dhd mcast */
+		uint32_t mcast_reserved:1;
+		uint32_t ssid_vector:16;
+	}mcast;
+}wlFlowInf_t;
+#endif
+
 /** 
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -391,11 +504,109 @@ struct sk_buff {
 	struct sk_buff		*next;
 	struct sk_buff		*prev;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/* tstamp and sk are moved for BLOG to maximize cache performance */
+#else
 	ktime_t			tstamp;
-
 	struct sock		*sk;
+#endif
+
 	struct net_device	*dev;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	struct ip_tunnel	*tunl;
+
+	unsigned int		recycle_flags;	/* 3 bytes unused */
+	sk_buff_data_t		tail;
+	sk_buff_data_t		end;
+	unsigned char		*head;
+
+	/*
+	 * Several skb fields have been regrouped together for better data locality
+	 * cache performance, 16byte cache line proximity.
+	 */
+
+/*--- members common to fkbuff: begin here ---*/
+	union {
+		void 			*fkbInSkb;		/* see fkb_in_skb_test() */
+		struct sk_buff_head	*list;
+	};		/* ____cacheline_aligned */
+	struct blog_t		*blog_p;	/* defined(CONFIG_BLOG), use blog_ptr() */
+	unsigned char		*data;
+
+    /* The len is fkb is only 24 bits other 8 bits are used as internal flags
+     * when fkbInSkb is used the max len can be only 24 bits, the bits 31-24  
+     * are cleared
+     * currently we don't have a case where len can be >24 bits.
+     */ 
+	union{
+		unsigned int		len;
+		__u32			len_word;/* used for fkb_in_skb test */
+	};
+
+	union {
+		__u32		mark;
+		__u32		dropcount;
+		__u32		avail_size;
+		void		*queue;
+	};
+	union {
+		__u32		priority;
+		wlFlowInf_t	wl;
+	};
+	RecycleFuncP		recycle_hook;	/* Recycle preallocated skb or data */
+	union {
+		unsigned int	recycle_context;
+		struct sk_buff	*next_free;
+	};
+/*--- members common to fkbuff: end here ---*/
+
+	void				(*destructor)(struct sk_buff *skb);
+	atomic_t			users;
+	struct nf_conntrack	*nfct;			/* CONFIG_NETFILTER */
+	struct sk_buff		*nfct_reasm;	/* CONFIG_NF_CONNTRACK MODULE*/
+
+/*
+ * ------------------------------- CAUTION!!! ---------------------------------
+ * Do NOT add a new field or modify any existing field before this line
+ * to the beginning of the struct sk_buff. Doing so will cause struct sk_buff
+ * to be incompatible with the compiled binaries and may cause the binary to
+ * crash.
+ * ---------------------------------------------------------------------------
+ */
+
+	unsigned char		*clone_wr_head; /* indicates drivers(ex:enet)about writable headroom in aggregated skb*/
+	unsigned char		*clone_fc_head; /* indicates fcache about writable headroom in aggregated skb */
+
+	unsigned short		queue_mapping;
+	unsigned short		vlan_tci;
+	union {
+		unsigned int	vtag_word;
+		struct 		{ unsigned short vtag, vtag_save; };
+	};
+	union {			/* CONFIG_NET_SCHED CONFIG_NET_CLS_ACT*/
+		unsigned int	tc_word;
+		struct		{ unsigned short tc_index, tc_verd; };
+	};
+
+	sk_buff_data_t		transport_header;
+	sk_buff_data_t		network_header;
+	sk_buff_data_t		mac_header;
+	int			skb_iif;
+
+	/* These two are not BLOG specific, but have been moved for cache performance */
+	ktime_t			tstamp;
+	struct sock		*sk;
+
+#endif /* CONFIG_BCM_KF_NBUFF */
+#if defined(CONFIG_BCM_KF_WL)
+	/* These two are for WLAN pktc use */
+	unsigned char		pktc_cb[8];
+	__u32			pktc_flags;
+        void                    *wlan_rx_handle;
+        __u32                   wlan_rx_data;
+#endif
+
 	/*
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
@@ -408,8 +619,12 @@ struct sk_buff {
 #ifdef CONFIG_XFRM
 	struct	sec_path	*sp;
 #endif
+#if defined(CONFIG_BCM_KF_NBUFF)
+	unsigned int		data_len;
+#else
 	unsigned int		len,
 				data_len;
+#endif
 	__u16			mac_len,
 				hdr_len;
 	union {
@@ -419,7 +634,10 @@ struct sk_buff {
 			__u16	csum_offset;
 		};
 	};
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else
 	__u32			priority;
+#endif
 	kmemcheck_bitfield_begin(flags1);
 	__u8			local_df:1,
 				cloned:1,
@@ -434,6 +652,8 @@ struct sk_buff {
 	kmemcheck_bitfield_end(flags1);
 	__be16			protocol;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else /* CONFIG_BCM_KF_NBUFF */
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
@@ -441,10 +661,16 @@ struct sk_buff {
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	struct sk_buff		*nfct_reasm;
 #endif
+#endif /* CONFIG_BCM_KF_NBUFF */
+
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
 
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+	__u32			rxhash;
+#else /* CONFIG_BCM_KF_NBUFF */
 	int			skb_iif;
 
 	__u32			rxhash;
@@ -459,6 +685,7 @@ struct sk_buff {
 #endif
 
 	__u16			queue_mapping;
+#endif /* CONFIG_BCM_KF_NBUFF */
 	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
@@ -471,17 +698,72 @@ struct sk_buff {
 	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	__u16			bl_alloc;		/* true for runnerpacket buffer allocation */
+	__u16			bl_buffer_number;	/* the buffer location in the bpm */
+	__u16			bl_port;		/* the port */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32			secmark;
 #endif
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else
 	union {
 		__u32		mark;
 		__u32		dropcount;
 		__u32		avail_size;
 	};
+#endif /* CONFIG_BCM_KF_NBUFF */
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	__u16			vlan_count;
+	__u16			vlan_tpid;
+	__u32			vlan_header[SKB_VLAN_MAX_TAGS];
+	struct net_device	*rxdev;
+#endif // CONFIG_BCM_KF_VLAN
+#if defined(CONFIG_BLOG_FEATURE)
+	union {
+		__u32		u32[BLOG_MAX_PARAM_NUM];
+		__u16		u16[BLOG_MAX_PARAM_NUM * 2];
+		__u8		u8[BLOG_MAX_PARAM_NUM * 4];
+	} ipt_log;
+	__u32 ipt_check;
+#define IPT_MATCH_LENGTH	(1 << 1)
+#define IPT_MATCH_TCP		(1 << 2)
+#define IPT_MATCH_UDP		(1 << 3)
+#define IPT_MATCH_TOS		(1 << 4)
+#define IPT_MATCH_DSCP		(1 << 5)
+#define IPT_TARGET_CLASSIFY	(1 << 6)
+#define IPT_TARGET_CONNMARK	(1 << 7)
+#define IPT_TARGET_CONNSECMARK	(1 << 8)
+#define IPT_TARGET_DSCP		(1 << 9)
+#define IPT_TARGET_HL		(1 << 10)
+#define IPT_TARGET_LED		(1 << 11)
+#define IPT_TARGET_MARK		(1 << 12)
+#define IPT_TARGET_NFLOG	(1 << 13)
+#define IPT_TARGET_NFQUEUE	(1 << 14)
+#define IPT_TARGET_NOTRACK	(1 << 15)
+#define IPT_TARGET_RATEEST	(1 << 16)
+#define IPT_TARGET_SECMARK	(1 << 17)
+#define IPT_TARGET_SKIPLOG	(1 << 18)
+#define IPT_TARGET_TCPMSS	(1 << 19)
+#define IPT_TARGET_TCPOPTSTRIP	(1 << 20)
+#define IPT_TARGET_TOS		(1 << 21)
+#define IPT_TARGET_TPROXY	(1 << 22)
+#define IPT_TARGET_TRACE	(1 << 23)
+#define IPT_TARGET_TTL		(1 << 24)
+#define IPT_TARGET_CHECK	(1 << 25)
+#endif
+	/* DO NOT MOVE!!! alloc_skb assumes this field is at or near the end of structure */
+	unsigned int		truesize;
+#else /* CONFIG_BCM_KF_NBUFF */
 
 	sk_buff_data_t		transport_header;
 	sk_buff_data_t		network_header;
@@ -493,6 +775,7 @@ struct sk_buff {
 				*data;
 	unsigned int		truesize;
 	atomic_t		users;
+#endif /* CONFIG_BCM_KF_NBUFF */
 };
 
 #ifdef __KERNEL__
@@ -560,6 +843,15 @@ extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+extern struct sk_buff* bl_dev_alloc_skb(bl_skbuff_info *buff_info);
+extern void bl_kfree_skb_structure(struct sk_buff *skb);
+extern void bl_kfree_skb_structure_irq(struct sk_buff *skb);
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 extern struct sk_buff *build_skb(void *data);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
@@ -656,6 +948,43 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 	return &skb_shinfo(skb)->hwtstamps;
 }
 
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+/* Returns size of struct sk_buff */
+extern size_t skb_size(void);
+extern size_t skb_aligned_size(void);
+extern int skb_layout_test(int head_offset, int tail_offset, int end_offset);
+
+/**
+ *	skb_headerinit	-	initialize a socket buffer header
+ *	@headroom: reserved headroom size
+ *	@datalen: data buffer size, data buffer is allocated by caller
+ *	@skb: skb allocated by caller
+ *	@data: data buffer allocated by caller
+ *	@recycle_hook: callback function to free data buffer and skb
+ *	@recycle_context: context value passed to recycle_hook, param1
+ *  @blog_p: pass a blog to a skb for logging
+ *
+ *	Initializes the socket buffer and assigns the data buffer to it.
+ *	Both the sk_buff and the pointed data buffer are pre-allocated.
+ *
+ */
+void skb_headerinit(unsigned int headroom, unsigned int datalen,
+		    struct sk_buff *skb, unsigned char *data,
+		    RecycleFuncP recycle_hook, unsigned int recycle_context,
+		    struct blog_t * blog_p);
+
+/* Wrapper function to skb_headerinit() with no Blog association */
+static inline void skb_hdrinit(unsigned int headroom, unsigned int datalen,
+			       struct sk_buff *skb, unsigned char * data,
+			       RecycleFuncP recycle_hook,
+			       unsigned int recycle_context)
+{
+	skb_headerinit(headroom, datalen, skb, data, recycle_hook, recycle_context,
+			(struct blog_t *)NULL);	/* No associated Blog object */
+}
+#endif  /* CONFIG_BCM_KF_NBUFF */
+
 /**
  *	skb_queue_empty - check if a queue is empty
  *	@list: queue head
@@ -795,6 +1124,26 @@ static inline void skb_header_release(struct sk_buff *skb)
 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 }
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+/**
+ *	skb_clone_headers_set - set the clone_fc_head and clone_wr_head in
+ *  an aggregated skb(ex: used in USBNET RX packet aggregation)
+ *	@skb: buffer to operate on
+ *  @len: length of writable clone headroom
+ *
+ *  when this pointer is set you can still modify the cloned packet and also
+ *  expand the packet till clone_wr_head. This is used in cases on packet aggregation.
+ */
+static inline void skb_clone_headers_set(struct sk_buff *skb, unsigned int len)
+{
+	skb->clone_fc_head = skb->data - len;
+	if (skb_cloned(skb))
+		skb->clone_wr_head = skb->data - len;
+	else
+		skb->clone_wr_head = NULL;
+}
+#endif
+
 /**
  *	skb_shared - is the buffer shared
  *	@skb: buffer to check
@@ -1360,6 +1709,30 @@ static inline unsigned int skb_headroom(const struct sk_buff *skb)
 	return skb->data - skb->head;
 }
 
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+/**
+ *	skb_writable_headroom - bytes preceding skb->data that are writable(even on some
+ *  cloned skb's);
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of writable free space preceding the skb->data of an &sk_buff.
+ *  note:skb->cloned_wr_head is used to indicate the padding between 2 packets when multiple packets
+ *  are present in buffer pointed by skb->head(ex: used in USBNET RX packet aggregation)
+ *
+ */
+static inline unsigned int skb_writable_headroom(const struct sk_buff *skb)
+{
+	if (skb_cloned(skb)) {
+		if (skb->clone_wr_head)
+			return skb->data - skb->clone_wr_head;
+		else if (skb->clone_fc_head)
+			return 0;
+	}
+
+	return skb_headroom(skb);
+}
+#endif
+
 /**
  *	skb_tailroom - bytes at buffer end
  *	@skb: buffer to check
@@ -2107,6 +2480,11 @@ extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
 					       int offset, struct iovec *to,
 					       int size);
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+extern int	       skb_copy_datagram_to_kernel_iovec(const struct sk_buff *from,
+					       int offset, struct iovec *to,
+					       int size, unsigned int *dma_cookie);
+#endif
 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a595dce6b0c7596d1481e2c87a2b55028c66a449..338dca0ac49353dbada379a700d671546d64b5d2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,6 +21,9 @@
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define SLAB_CACHE_ACP		0x00008000UL	/* Use GFP_ACP memory */
+#endif
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fbd1117fdfde9fce2b059a4e9283b803d14f43d6..072db5a707afc50f5946381df73bab8a6d482820 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -103,6 +103,9 @@ struct cache_sizes {
 #ifdef CONFIG_ZONE_DMA
 	struct kmem_cache	*cs_dmacachep;
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	struct kmem_cache	*cs_acpcachep;
+#endif
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -145,6 +148,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #undef CACHE
 		return NULL;
 found:
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+		if (flags & GFP_ACP)
+			cachep = malloc_sizes[i].cs_acpcachep;
+		else
+#endif
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
 			cachep = malloc_sizes[i].cs_dmacachep;
@@ -198,6 +206,11 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #undef CACHE
 		return NULL;
 found:
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+		if (flags & GFP_ACP)
+			cachep = malloc_sizes[i].cs_acpcachep;
+		else
+#endif
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
 			cachep = malloc_sizes[i].cs_dmacachep;
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c2f8c8bc56edd08183dd0dd22b4f7d3b39f1c63d..d78c365b7cea469275d0d1e0234402536e90f420 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -142,6 +142,9 @@ struct kmem_cache {
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define SLUB_ACP __GFP_ACP
+#endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b84bbd48b874b22d40d22c3cc8d57cc58346b86f..49d1fb5416b77ae08d4aa99d7c03f85b9b297fcd 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -195,7 +195,14 @@ struct ucred {
 #define AF_CAIF		37	/* CAIF sockets			*/
 #define AF_ALG		38	/* Algorithm sockets		*/
 #define AF_NFC		39	/* NFC sockets			*/
+#ifdef CONFIG_BCM_KF_MHI
+#define AF_VSOCK	40	/* vSockets			*/
+#define AF_MHI          41      /* MHI sockets                  */
+#define AF_RAW          42      /* RAW sockets                  */
+#define AF_MAX          43      /* For now.. */
+#else
 #define AF_MAX		40	/* For now.. */
+#endif
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -238,6 +245,11 @@ struct ucred {
 #define PF_CAIF		AF_CAIF
 #define PF_ALG		AF_ALG
 #define PF_NFC		AF_NFC
+#ifdef CONFIG_BCM_KF_MHI
+#define PF_VSOCK	AF_VSOCK
+#define PF_RAW          AF_RAW
+#define PF_MHI          AF_MHI
+#endif
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
@@ -266,6 +278,10 @@ struct ucred {
 #define MSG_MORE	0x8000	/* Sender will send more */
 #define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
 #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+#define MSG_KERNSPACE	0x40000
+#define MSG_NOCATCHSIG	0x80000
+#endif
 #define MSG_EOF         MSG_FIN
 
 #define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exit for file
@@ -330,6 +346,10 @@ extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_sto
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+extern void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len,
+						unsigned int *dma_cookie);
+#endif
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index 7997a506ad4105fb145a9ddc120286a8431a1502..f7dfe1bc8baedf382d9b4f2174f19d6200b48296 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -127,6 +127,17 @@
 /* hardware time stamping: parameters in linux/net_tstamp.h */
 #define SIOCSHWTSTAMP   0x89b0
 
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_MISC_IOCTLS) || defined(CONFIG_BCM_KF_NETFILTER)
+/***********************BRCM global ioctl calls*****************************/
+#define SIOC_BRCM_GLOBAL_BASE    0x89c0
+#define SIOCGIFTRANSSTART  (SIOC_BRCM_GLOBAL_BASE+0)    /* Used by SNMP */
+#define SIOCCIFSTATS  (SIOC_BRCM_GLOBAL_BASE+1)  /* Clean up the Stats of a device */
+#endif
+#if !defined(CONFIG_BCM_IN_KERNEL) || defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD)
+#define SIOCDEVISWANDEV (SIOC_BRCM_GLOBAL_BASE+2)
+#define SIOCDEVISBRDEV  (SIOC_BRCM_GLOBAL_BASE+3)
+#endif
+
 /* Device private ioctl calls */
 
 /*
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index fa702aeb5038d40b096e61cc65ba1b8d54d7ef39..c0f9df807ccbc8b014ac1a5782e7b2f3b0be1ab5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -507,6 +507,39 @@ struct spi_transfer {
 	u16		delay_usecs;
 	u32		speed_hz;
 
+#if defined(CONFIG_BCM_KF_SPI)
+    /* added for controllers that support an ignore count for read
+       operations. This is useful if the read requires command bytes
+       and you want to ignore the read data on the bus during the 
+       transmission of those bytes. Note that only prepend_cnt bytes
+       of data will be written from tx_buf.
+    */
+    u8  prepend_cnt;
+    
+    /* added for multibit support 
+       @multi_bit_en - enable multibit operation for this transfer
+       @multi_bit_start_offset - start offset for multibit data
+    */
+    u8  multi_bit_en;
+    u8  multi_bit_start_offset;
+    
+    /* added for controllers that do not support large transfers
+       the controller will break up the transfer into smaller
+       transfers to avoid additional data copies
+       Note that hdr_len should not be included in len
+       @hdr_len - length of header
+       @unit_size - data for each transfer will be divided into multiples of 
+                   unit_size
+       @addr_len - length of address field (max 4 bytes)
+       @addr_offset - offset of first addr byte in header
+    */
+    u8  hdr_len;
+    u8  unit_size;
+    u8  addr_len;
+    u8  addr_offset;
+#endif
+
+
 	struct list_head transfer_list;
 };
 
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 09a545a7dfa39bcd7f736f446358d4c3b112aae4..5e3cd36741c88c0e10011a31e046c31a1372efff 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -58,6 +58,16 @@ struct splice_pipe_desc {
 	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
 };
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+struct recvfile_ctl_blk
+{
+	struct page *rv_page;
+	loff_t rv_pos;
+	size_t rv_count;
+	void *rv_fsdata;
+};
+#endif
+
 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
 			   struct splice_desc *);
 typedef int (splice_direct_actor)(struct pipe_inode_info *,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 4fbc9f78be20a2d8aeb88900b8ff1c6f7a4e2b31..52a57d21e31c546f786249f2435bf815eba5fdc8 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -153,6 +153,9 @@ enum
 	KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */
 	KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && defined(CONFIG_BCM_PRINTK_INT_ENABLED)
+	KERN_PRINTK_WITH_INTERRUPTS_ENABLED=77, /* int: print with interrupts enabled */
+#endif
 };
 
 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6c62d2943805c52411d7d0c881831dee5905577..da75471f54aac1c4daca8228368320a114142e3e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -54,7 +54,11 @@ struct tcphdr {
 	__be16	window;
 	__sum16	check;
 	__be16	urg_ptr;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
 };
+#endif
 
 /*
  *	The union cast uses a gcc extension to avoid aliasing problems
@@ -64,7 +68,11 @@ struct tcphdr {
 union tcp_word_hdr { 
 	struct tcphdr hdr;
 	__be32 		  words[5];
-}; 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+} LINUX_NET_PACKED;
+#else
+};
+#endif
 
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 
 
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 9f47ab540f65e997b79b0a16c52332c564354234..1f16653fe1d323750da891c62a92c5b0b23dcc0d 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -34,6 +34,11 @@
 #define N_TI_WL		22	/* for TI's WL BT, FM, GPS combo chips */
 #define N_TRACESINK	23	/* Trace data routing for MIPI P1149.7 */
 #define N_TRACEROUTER	24	/* Trace data routing for MIPI P1149.7 */
+#ifdef CONFIG_BCM_KF_PHONET
+#define N_BRCM_HCI	25	/* Broadcom Bluetooth HCI */
+#define N_PHONET	26	/* PHONET over USB/ACM */
+#define N_LDTMODEM	27	/* Line discipline for Thin Modem support */
+#endif
 
 #ifdef __KERNEL__
 #include <linux/fs.h>
diff --git a/include/linux/types.h b/include/linux/types.h
index 7f480db60231a714b9e520f3a16856c5d4e4a5e1..c45237fcdfa03cf23c47beb095ced60e59b97e7b 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -256,5 +256,15 @@ struct rcu_head {
 };
 
 #endif	/* __KERNEL__ */
+
+
+#if defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+#if defined(CONFIG_MIPS_BCM963XX)
+#define LINUX_NET_PACKED __attribute__((packed))
+#else
+#define LINUX_NET_PACKED 
+#endif /* CONFIG_MIPS_BCM963XX */
+#endif /* CONFIG_BCM_KF_UNALIGNED_EXCEPTION  */
+
 #endif /*  __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
diff --git a/include/linux/urlinfo.h b/include/linux/urlinfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..003e67c57cbeb371cb8675b067cf25d52c280469
--- /dev/null
+++ b/include/linux/urlinfo.h
@@ -0,0 +1,41 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#ifndef __URLINFO
+#define __URLINFO
+
+#include <linux/brcm_dll.h>
+
+//#define CC_URLINFO_SUPPORT_DEBUG
+#define DPI_URL_RECORD
+#ifndef URLINFO_NULL_STMT
+#define URLINFO_NULL_STMT                   do { /* NULL BODY */ } while (0)
+#endif
+
+#define URLINFO_HTABLE_SIZE 2048
+#define URLINFO_MAX_ENTRIES 8192
+#define URLINFO_MAX_HOST_LEN 64
+#define URLINFO_IX_INVALID 0
+#define URLINFO_NULL ((UrlInfo_t*)NULL)
+#define URLINFO_DONE 1
+
+typedef struct urlinfo_entry_t {
+    uint16_t idx;
+    uint16_t hostlen;
+    char host[URLINFO_MAX_HOST_LEN];
+
+    /* In the future, URI and refer may be needed */
+} UrlInfoEntry_t;
+
+typedef struct urlinfo_t {
+    struct dll_t node;
+    struct urlinfo_t *chain_p;
+
+    UrlInfoEntry_t entry;
+} __attribute__((packed)) UrlInfo_t;
+
+
+extern uint16_t urlinfo_lookup( const UrlInfoEntry_t *url );
+extern void urlinfo_get( uint16_t idx, UrlInfoEntry_t *entry );
+extern void urlinfo_set( const UrlInfoEntry_t *entry );
+extern int urlinfo_init( void );
+#endif
+#endif
diff --git a/include/linux/vlanctl_bind.h b/include/linux/vlanctl_bind.h
new file mode 100644
index 0000000000000000000000000000000000000000..f67341a790d675442e27761cc02a59d98e3818e6
--- /dev/null
+++ b/include/linux/vlanctl_bind.h
@@ -0,0 +1,107 @@
+#if defined(CONFIG_BCM_KF_VLANCTL_BIND)
+/*
+*    Copyright (c) 2003-2014 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2014:DUAL/GPL:standard 
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef _VLANCTL_BIND_H_INCLUDED_
+#define _VLANCTL_BIND_H_INCLUDED_
+
+typedef enum {
+#if defined(CONFIG_BCM_KF_FAP)
+        VLANCTL_BIND_CLIENT_FAP,
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+        VLANCTL_BIND_CLIENT_RUNNER,
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+        VLANCTL_BIND_CLIENT_MAX
+} vlanctl_bind_client_t;
+
+
+/*
+ * vlanctl_bind defines three(!) hooks:
+ *  NotifHook: When blog_notify is invoked, the bound hook is invoked. Based on
+ *           event type the bound Blog client may perform a custom action.
+ *  SC Hook: If this hook is defined, blog_activate() will pass a blog with
+ *           necessary information for statical configuration.
+ *  SD Hook: If this hook is defined, blog_deactivate() will pass a pointer
+ *           to a network object with BlogActivateKey information. The
+ *           respective flow entry will be deleted.
+ */
+typedef union {
+    struct {
+        uint8_t         unused      : 5;
+        uint8_t         SN_HOOK     : 1;
+        uint8_t         SC_HOOK     : 1;
+        uint8_t         SD_HOOK     : 1;
+    } bmap;
+    uint8_t             hook_info;
+} vlanctl_bind_t;
+
+typedef enum {
+        VLANCTL_BIND_TPID_NOTIFY     /* set interface tpid */
+} vlanctl_bind_Notify_t;
+
+
+typedef uint32_t (* vlanctl_bind_ScHook_t)(Blog_t * blog_p, BlogTraffic_t traffic);
+
+typedef Blog_t * (* vlanctl_bind_SdHook_t)(uint32_t key, BlogTraffic_t traffic);
+
+typedef void     (* vlanctl_bind_SnHook_t)(vlanctl_bind_Notify_t event, void *ptr);
+
+void vlanctl_bind_config(vlanctl_bind_ScHook_t vlanctl_bind_sc, 
+	                     vlanctl_bind_SdHook_t vlanctl_bind_sd,  
+	                     vlanctl_bind_SnHook_t vlanctl_bind_sn,  
+	                     vlanctl_bind_client_t client, 
+                         vlanctl_bind_t bind);
+
+
+int vlanctl_bind_activate(vlanctl_bind_client_t client);
+
+int	vlanctl_notify(vlanctl_bind_Notify_t event, void *ptr, vlanctl_bind_client_t client);
+
+/*
+ *------------------------------------------------------------------------------
+ *  vlanctl_activate(): static configuration function of blog application
+ *             pass a filled blog to the hook for configuration
+ *------------------------------------------------------------------------------
+ */
+extern uint32_t vlanctl_activate( Blog_t * blog_p,  vlanctl_bind_client_t client );
+
+/*
+ *------------------------------------------------------------------------------
+ *  vlanctl_deactivate(): static deconfiguration function of blog application
+ *------------------------------------------------------------------------------
+ */
+extern Blog_t * vlanctl_deactivate( uint32_t key,  vlanctl_bind_client_t client );
+
+
+#endif /* ! _VLANCTL_BIND_H_INCLUDED_ */
+
+#endif /* CONFIG_BCM_KF_VLANCTL_BIND */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 06f8e38582512eb7be8713f5579887cdd559a813..84ea3a136a7cd71d7c58f970379655f3b2004268 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -12,6 +12,9 @@
 #else
 #define DMA32_ZONE(xx)
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define ACP_ZONE(xx) xx##_ACP,
+#endif
 
 #ifdef CONFIG_HIGHMEM
 #define HIGHMEM_ZONE(xx) , xx##_HIGH
@@ -19,7 +22,11 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) ACP_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+#else
 #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+#endif
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1b3f2efd3ad002337b817dfb2c5a48042ae8754f..6b5a02092a14283264516c01ed1174aa6dde51ce 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -164,6 +164,9 @@ static inline unsigned long node_page_state(int node,
 #ifdef CONFIG_ZONE_DMA32
 		zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+		zone_page_state(&zones[ZONE_ACP], item) +
+#endif
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
diff --git a/include/mtd/brcmnand_oob.h b/include/mtd/brcmnand_oob.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b901ce26c9c9e89fb9c910756fd027316349793
--- /dev/null
+++ b/include/mtd/brcmnand_oob.h
@@ -0,0 +1,883 @@
+/*
+ *  include/mtd/brcmnand_oob.h
+ *
+<:copyright-BRCM:2002:GPL/GPL:standard
+
+   Copyright (c) 2002 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+ */
+
+#ifndef __BRCMNAND_OOB_H
+#define __BRCMNAND_OOB_H
+
+#include <linux/version.h>
+#include <generated/autoconf.h>
+
+#ifndef CONFIG_BRCMNAND_MTD_EXTENSION
+#define UNDERSIZED_ECCPOS_API	1
+#endif
+
+
+/*
+ * Assuming proper include that precede this has the typedefs for struct nand_oobinfo
+ */
+
+/**
+ * brcmnand_oob oob info for 2K page
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18))
+
+/**
+ * brcmnand_oob oob info for 512 page
+ */
+static struct nand_ecclayout brcmnand_oob_16 = {
+	.eccbytes	= 3,
+	.eccpos		= {
+		6,7,8
+		},
+	.oobfree	= { {.offset=0, .length=5}, 
+				{.offset=9,.length=7}, /* Byte 5 (6th byte) used for BI */
+				{.offset=0, .length=0}		/* End marker */
+			   }
+			/* THT Bytes offset 4&5 are used by BBT.  Actually only byte 5 is used, but in order to accommodate
+			 * a 16 bit bus width, byte 4 is also not used.  If we only use a byte-wide chip (we did),
+			 * then we can also use byte 4 as free bytes.
+			 */
+};
+
+static struct nand_ecclayout brcmnand_oob_64 = {
+	.eccbytes	= 12,
+	.eccpos		= {
+		6,7,8,
+		22,23,24,
+		38,39,40,
+		54,55,56
+		},
+	.oobfree	= { /* 0-1 used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 2 bytes for BBT */
+				{.offset=2, .length=4}, 
+				{.offset=9,.length=13}, 		/* First slice {9,7} 2nd slice {16,6}are combined */ 
+									/* ST uses 6th byte (offset=5) as Bad Block Indicator, 
+									  * in addition to the 1st byte, and will be adjusted at run time */
+				{.offset=25, .length=13},				/* 2nd slice  */
+				{.offset=41, .length=13},				/* 3rd slice */
+				{.offset=57, .length=7},				/* 4th slice */
+	            {.offset=0, .length=0}				/* End marker */
+			}
+};
+
+
+/*
+ * 4K page SLC with Hamming ECC 
+ */
+static struct nand_ecclayout brcmnand_oob_128 = {
+	.eccbytes	= 24,
+	.eccpos		= {
+		6,7,8,
+		22,23,24,
+		38,39,40,
+		54,55,56,
+		70,71,72,
+		86,87,88,
+		102,103,104,
+		118,119,120
+		},
+	.oobfree	= { /* 0-1 used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 2 bytes for BBT */
+				{.offset=2, .length=4}, 
+				{.offset=9,.length=13}, 		
+				{.offset=25, .length=13},				/* 2nd slice  */
+				{.offset=41, .length=13},				/* 3rd slice */
+				{.offset=57, .length=13},				/* 4th slice */
+				{.offset=73, .length=13},				/* 5th slice  */
+				{.offset=89, .length=13},				/* 6th slice */
+				{.offset=105, .length=13},				/* 7th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=121, .length=7},				/* 8th slice */
+	            {.offset=0, .length=0}				/* End marker */
+#endif
+			}
+};
+
+
+/* Small page with BCH-4 */
+static struct nand_ecclayout brcmnand_oob_bch4_512 = {
+	.eccbytes	= 7,
+	.eccpos		= {
+		9,10,11,12,13,14,15
+		},
+	.oobfree	= { 	{.offset=0, .length=5}, 
+				{.offset=6,.length=3}, /* Byte 5 (6th byte) used for BI */
+				{.offset=0, .length=0}		/* End marker */
+			   }
+};
+
+/*
+ * 2K page SLC/MLC with BCH-4 ECC, uses 7 ECC bytes per 512B ECC step
+ */
+static struct nand_ecclayout brcmnand_oob_bch4_2k = {
+	.eccbytes	= 7*4,  /* 7*4 = 28 bytes */
+	.eccpos		= { 
+		9,10,11,12,13,14,15,
+		25,26,27,28,29,30,31,
+		41,42,43,44,45,46,47,
+		57,58,59,60,61,62,63
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=8}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=9}, 		/* 2nd slice  */
+				{.offset=32, .length=9},		/* 3rd slice  */
+				{.offset=48, .length=9},		/* 4th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 4K page MLC with BCH-4 ECC, uses 7 ECC bytes per 512B ECC step
+ */
+static struct nand_ecclayout brcmnand_oob_bch4_4k = {
+	.eccbytes	= 7*8,  /* 7*8 = 56 bytes */
+	.eccpos		= { 
+		9,10,11,12,13,14,15,
+		25,26,27,28,29,30,31,
+		41,42,43,44,45,46,47,
+		57,58,59,60,61,62,63,
+		73,74,75,76,77,78,79,
+		89,90,91,92,93,94,95,
+		105,106,107,108,109,110,111,
+		121,122,123,124,125,126,127
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=8}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=9}, 		/* 2nd slice  */
+				{.offset=32, .length=9},		/* 3rd slice  */
+				{.offset=48, .length=9},		/* 4th slice */
+				{.offset=64, .length=9},		/* 5th slice */
+				{.offset=80, .length=9},		/* 6th slice */
+				{.offset=96, .length=9},		/* 7th slice */
+				{.offset=112, .length=9},		/* 8th slice */
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page MLC with BCH-4 ECC, uses 7 ECC bytes per 512B ECC step
+ */
+static struct nand_ecclayout brcmnand_oob_bch4_8k = {
+	.eccbytes	= 7*16,  /* 7*16 = 112 bytes */
+	.eccpos		= { 
+		9,10,11,12,13,14,15,
+		25,26,27,28,29,30,31,
+		41,42,43,44,45,46,47,
+		57,58,59,60,61,62,63,
+		73,74,75,76,77,78,79,
+		89,90,91,92,93,94,95,
+		105,106,107,108,109,110,111,
+		121,122,123,124,125,126,127,
+#if ! defined(UNDERSIZED_ECCPOS_API)		
+		137,138,139,140,141,142,142,
+		153,154,155,156,157,158,159,
+		169,170,171,172,173,174,175,
+		185,186,187,188,189,190,191,
+		201,202,203,204,205,206,207,
+		217,208,209,210,211,212,218,
+		233,204,205,206,207,208,209,
+		249,250,251,252,253,254,255
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=8}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=9}, 		/* 2nd slice  */
+				{.offset=32, .length=9},		/* 3rd slice  */
+				{.offset=48, .length=9},		/* 4th slice */
+				{.offset=64, .length=9},		/* 5th slice */
+				{.offset=80, .length=9},		/* 6th slice */
+				{.offset=96, .length=9},		/* 7th slice */
+				{.offset=112, .length=9},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)	
+				{.offset=128, .length=9},		/* 9th slice */
+				{.offset=144, .length=9},		/* 10th slice */
+				{.offset=160, .length=9},		/* 11th slice */
+				{.offset=176, .length=9},		/* 12th slice */
+				{.offset=192, .length=9},		/* 13th slice */
+				{.offset=208, .length=9},		/* 14th slice */
+				{.offset=240, .length=9},		/* 15th slice */	
+#endif
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/* For NAND controller REV 7.0 or later, it  use new ECC algorithm that requires more ECC
+   bytes. ECC_Bytes_Reqd (per 512 data Bytes) = roundup(ECC_LEVEL * M/8) where
+   M is the BCH finite field order. For early chip, M is 13. For 63138, M is 14.
+   It does not affect Hamming and BCH4. But for BCH8 and BCH12, 63138 uses
+   one more byte. On 63138, for BCH8 2K page size, there is not enough spare area 
+   for cleanmarker if spare area is 16 bytes. So only the NAND part with 27 bytes 
+   spare area is supported   */
+
+#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_7_0
+/*
+ * 2K page SLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, and only have 16B OOB
+ * Rely on the fact that the UBI/UBIFS layer does not store anything in the OOB
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_16_2k = {
+	.eccbytes	= 13*4,  /* 13*4 = 52 bytes */
+	.eccpos		= { 
+		3,4,5,6,7,8,9,10,11,12,13,14,15,
+		19,20,21,22,23,24,25,26,27,28,29,30,31,
+		35,36,37,38,39,40,41,42,43,44,45,46,47,
+		51,52,53,54,55,56,57,58,59,60,61,62,63,
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=2}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=3}, 		/* 2nd slice  */
+				{.offset=32, .length=3},		/* 3rd slice  */
+				{.offset=48, .length=3},		/* 4th slice */
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 2K page SLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, 27B+ OOB size 
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_2k = {
+	.eccbytes	= 13*4,  /* 13*4 = 52 bytes */
+	.eccpos		= { 
+        	14,15,16,17,18,19,20,21,22,23,24,25,26,
+		41,42,43,44,45,46,47,48,49,50,51,52,53,
+		68,69,70,71,72,73,74,75,76,77,78,79,80,
+		95,96,97,98,99,100,101,102,103,104,105,106,107,
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=13}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=14}, 		/* 2nd slice  */
+				{.offset=54, .length=14},		/* 3rd slice  */
+				{.offset=81, .length=14},		/* 4th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page SLC/MLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, and only have 16B OOB
+ * Rely on the fact that the UBI/UBIFS layer does not store anything in the OOB
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_16_4k = {
+	.eccbytes	= 13*8,  /* 13*8 = 104 bytes */
+	.eccpos		= { 
+		3,4,5,6,7,8,9,10,11,12,13,14,15,
+		19,20,21,22,23,24,25,26,27,28,29,30,31,
+		35,36,37,38,39,40,41,42,43,44,45,46,47,
+		51,52,53,54,55,56,57,58,59,60,61,62,63,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		67,68,69,70,71,72,73,74,75,76,77,78,79,
+		83,84,85,86,87,88,89,90,91,92,93,94,95,
+		99,100,101,102,103,104,105,106,107,108,109,110,111,
+		115,116,117,118,119,120,121,122,123,124,125,126,127,
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=2}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=3}, 		/* 2nd slice  */
+				{.offset=32, .length=3},		/* 3rd slice  */
+				{.offset=48, .length=3},		/* 4th slice */
+				{.offset=64, .length=3},		/* 5th slice */
+				{.offset=80, .length=3},		/* 6th slice */
+				{.offset=96, .length=3},		/* 7th slice */
+				{.offset=112, .length=3},		/* 8th slice */
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 4K page MLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_4k = {
+	.eccbytes	= 13*8,  /* 13*8 = 104 bytes */
+	.eccpos		= { 
+		14,15,16,17,18,19,20,21,22,23,24,25,26,
+		41,42,43,44,45,46,47,48,49,50,51,52,53,
+		68,69,70,71,72,73,74,75,76,77,78,79,80,
+		95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		122,123,124,125,126,127,128,129,130,131,132,133,134,
+		149,150,151,152,153,154,155,156,157,158,159,160,161,
+		176,177,178,179,180,181,182,183,184,185,186,187,188,
+		203,204,205,206,207,208,209,210,211,212,213,214,215
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=13}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=14}, 		/* 2nd slice  */
+				{.offset=54, .length=14},		/* 3rd slice  */
+				{.offset=81, .length=14},		/* 4th slice */
+				{.offset=108, .length=14},		/* 5th slice */
+				{.offset=135, .length=14},		/* 6th slice */
+				{.offset=162, .length=14},		/* 7th slice */
+				{.offset=189, .length=14},		/* 8th slice */
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page SLC/MLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, and only have 16B OOB
+ * Rely on the fact that the UBI/UBIFS layer does not store anything in the OOB
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_16_8k = {
+	.eccbytes	= 13*16,  /* 13*8 = 208 bytes */
+	.eccpos		= { 
+		3,4,5,6,7,8,9,10,11,12,13,14,15,
+		19,20,21,22,23,24,25,26,27,28,29,30,31,
+		35,36,37,38,39,40,41,42,43,44,45,46,47,
+		51,52,53,54,55,56,57,58,59,60,61,62,63,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		67,68,69,70,71,72,73,74,75,76,77,78,79,
+		83,84,85,86,87,88,89,90,91,92,93,94,95,
+		99,100,101,102,103,104,105,106,107,108,109,110,111,
+		115,116,117,118,119,120,121,122,123,124,125,126,127,
+
+		131,132,133,134,135,136,137,138,139,140,141,142,143,
+		147,148,149,150,151,152,153,154,155,156,157,158,159,
+		163,164,165,166,167,168,169,170,171,172,173,174,175,
+		179,180,181,182,183,184,185,186,187,188,189,190,191,
+		195,196,197,198,199,200,201,202,203,204,205,206,207,
+		211,212,213,214,215,216,217,218,219,220,221,222,223,
+		227,228,229,230,231,232,233,234,235,236,237,238,239,
+		243,244,245,246,247,248,249,250,251,252,253,254,255
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=2}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=3}, 		/* 2nd slice  */
+				{.offset=32, .length=3},		/* 3rd slice  */
+				{.offset=48, .length=3},		/* 4th slice */
+				{.offset=64, .length=3},		/* 5th slice */
+				{.offset=80, .length=3},		/* 6th slice */
+				{.offset=96, .length=3},		/* 7th slice */
+				{.offset=112, .length=3},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=128, .length=3},		/* 9th slice */
+				{.offset=144, .length=3},		/* 10th slice */
+				{.offset=160, .length=3},		/* 11th slice */
+				{.offset=176, .length=3},		/* 12th slice */
+				{.offset=192, .length=3},		/* 13th slice */
+				{.offset=208, .length=3},		/* 14th slice */
+				{.offset=224, .length=3},		/* 15th slice */
+				{.offset=240, .length=3},		/* 16th slice */
+#endif
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 8K page MLC with BCH-8 ECC, uses 13 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_8k = {
+	.eccbytes	= 13*16,  /* 13*16 = 208 bytes */
+	.eccpos		= { 
+		14,15,16,17,18,19,20,21,22,23,24,25,26,
+		41,42,43,44,45,46,47,48,49,50,51,52,53,
+		68,69,70,71,72,73,74,75,76,77,78,79,80,
+		95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		122,123,124,125,126,127,128,129,130,131,132,133,134,
+		149,150,151,152,153,154,155,156,157,158,159,160,161,
+		176,177,178,179,180,181,182,183,184,185,186,187,188,
+		203,204,205,206,207,208,209,210,211,212,213,214,215,
+
+		230,231,232,233,234,235,236,237,238,239,240,241,242,
+		257,258,259,260,261,262,263,264,265,266,267,268,269,
+		284,285,286,287,288,289,290,291,292,293,294,295,296,
+		311,312,313,314,315,316,317,318,319,320,321,322,323,
+		338,339,340,341,342,343,344,345,346,347,348,349,350,
+		365,366,367,368,369,370,371,372,373,374,375,376,377,
+		392,393,394,395,396,397,398,399,400,401,402,403,404,
+		419,420,421,422,423,424,425,426,427,428,429,430,431
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=13}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=14}, 		/* 2nd slice  */
+				{.offset=54, .length=14},		/* 3rd slice  */
+				{.offset=81, .length=14},		/* 4th slice */
+				{.offset=108, .length=14},		/* 5th slice */
+				{.offset=135, .length=14},		/* 6th slice */
+				{.offset=162, .length=14},		/* 7th slice */
+				{.offset=189, .length=14},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=216, .length=14},		/* 9th slice */
+				{.offset=243, .length=14},		/* 10th slice */
+				{.offset=270, .length=14},		/* 11th slice */
+				{.offset=297, .length=14},		/* 12th slice */
+				{.offset=324, .length=14},		/* 13th slice */
+				{.offset=351, .length=14},		/* 14th slice */
+				{.offset=378, .length=14},		/* 15th slice */
+				{.offset=405, .length=14},		/* 16th slice */
+#endif
+			}
+};
+
+/*
+ * 2K page with BCH-12 ECC, uses 20 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_2k = {
+	.eccbytes	= 20*4,  /* 20*4 = 80 bytes */
+	.eccpos		= { 
+		 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=6}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=7}, 		/* 2nd slice  */
+				{.offset=54, .length=7},		/* 3rd slice  */
+				{.offset=81, .length=7},		/* 4th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page MLC with BCH-12 ECC, uses 20 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_4k = {
+	.eccbytes	= 20*8,  /* 20*8 = 160 bytes */
+	.eccpos		= { 
+		 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+		196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=6}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=7}, 		/* 2nd slice  */
+				{.offset=54, .length=7},		/* 3rd slice  */
+				{.offset=81, .length=7},		/* 4th slice */
+				{.offset=108, .length=7},		/* 5th slice */
+				{.offset=135, .length=7},		/* 6th slice */
+				{.offset=162, .length=7},		/* 7th slice */
+				{.offset=189, .length=7},		/* 8th slice */
+	            		//{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 8K page MLC with BCH-12 ECC, uses 20 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_8k = {
+	.eccbytes	= 20*16,  /* 20*16 = 320 bytes */
+	.eccpos		= { 
+		 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+		196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+
+		223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,
+		250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,
+		277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,
+		304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,
+		331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,
+		358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,
+		385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,
+		412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=6}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=7}, 		/* 2nd slice  */
+				{.offset=54, .length=7},		/* 3rd slice  */
+				{.offset=81, .length=7},		/* 4th slice */
+				{.offset=108, .length=7},		/* 5th slice */
+				{.offset=135, .length=7},		/* 6th slice */
+				{.offset=162, .length=7},		/* 7th slice */
+				{.offset=189, .length=7},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=216, .length=7},		/* 9th slice */
+				{.offset=243, .length=7},		/* 10th slice */
+				{.offset=270, .length=7},		/* 11th slice */
+				{.offset=297, .length=7},		/* 12th slice */
+				{.offset=324, .length=7},		/* 13th slice */
+				{.offset=351, .length=7},		/* 14th slice */
+				{.offset=378, .length=7},		/* 15th slice */
+				{.offset=405, .length=7},		/* 16th slice */
+#endif
+			}
+};
+#else //CONFIG_MTD_BRCMNAND_VERSION = CONFIG_MTD_BRCMNAND_VERS_7_0
+/*
+ * 2K page SLC with BCH-8 ECC, uses 14 ECC bytes per 512B ECC step, 27B+ OOB size 
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_2k = {
+	.eccbytes	= 14*4,  /* 14*4 = 56 bytes */
+	.eccpos		= { 
+		13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=12}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=13}, 		/* 2nd slice  */
+				{.offset=54, .length=13},		/* 3rd slice  */
+				{.offset=81, .length=13},		/* 4th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page SLC/MLC with BCH-8 ECC, uses 14 ECC bytes per 512B ECC step, and only have 16B OOB
+ * Rely on the fact that the UBI/UBIFS layer does not store anything in the OOB
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_16_4k = {
+	.eccbytes	= 14*8,  /* 14*8 = 112 bytes */
+	.eccpos		= { 
+		2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+		18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+		34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+		50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+		82,83,84,85,86,87,88,89,90,91,92,93,94,95,
+		98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+		114,115,116,117,118,119,120,121,122,123,124,125,126,127,
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=1}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=2}, 		/* 2nd slice  */
+				{.offset=32, .length=2},		/* 3rd slice  */
+				{.offset=48, .length=2},		/* 4th slice */
+				{.offset=64, .length=2},		/* 5th slice */
+				{.offset=80, .length=2},		/* 6th slice */
+				{.offset=96, .length=2},		/* 7th slice */
+				{.offset=112, .length=2},		/* 8th slice */
+				{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 4K page MLC with BCH-8 ECC, uses 14 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_4k = {
+	.eccbytes	= 14*8,  /* 14*8 = 112 bytes */
+	.eccpos		= { 
+		13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+		202,203,204,205,206,207,208,209,210,211,212,213,214,215
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=12}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=13}, 		/* 2nd slice  */
+				{.offset=54, .length=13},		/* 3rd slice  */
+				{.offset=81, .length=13},		/* 4th slice */
+				{.offset=108, .length=13},		/* 5th slice */
+				{.offset=135, .length=13},		/* 6th slice */
+				{.offset=162, .length=13},		/* 7th slice */
+				{.offset=189, .length=13},		/* 8th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 8K page SLC/MLC with BCH-8 ECC, uses 14 ECC bytes per 512B ECC step, and only has 16B OOB
+ * Rely on the fact that the UBI/UBIFS layer does not store anything in the OOB
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_16_8k = {
+	.eccbytes	= 14*16,  /* 14*16 = 224 bytes */
+	.eccpos		= { 
+		2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+		18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+		34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+		50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+		82,83,84,85,86,87,88,89,90,91,92,93,94,95,
+		98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+		114,115,116,117,118,119,120,121,122,123,124,125,126,127,
+
+		130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+		146,147,148,149,150,151,152,153,154,155,156,157,158,159,
+		162,163,164,165,166,167,168,169,170,171,172,173,174,175,
+		178,179,180,181,182,183,184,185,186,187,188,189,190,191,
+		194,195,196,197,198,199,200,201,202,203,204,205,206,207,
+		210,211,212,213,214,215,216,217,218,219,220,221,222,223,
+		226,227,228,229,230,231,232,233,234,235,236,237,238,239,
+		242,243,244,245,246,247,248,249,250,251,252,253,254,255
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=1}, 		/* 1st slice loses byte 0 */
+				{.offset=16,.length=2}, 		/* 2nd slice  */
+				{.offset=32, .length=2},		/* 3rd slice  */
+				{.offset=48, .length=2},		/* 4th slice */
+				{.offset=64, .length=2},		/* 5th slice */
+				{.offset=80, .length=2},		/* 6th slice */
+				{.offset=96, .length=2},		/* 7th slice */
+				{.offset=112, .length=2},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=128, .length=2},		/* 9th slice */
+				{.offset=144, .length=2},		/* 10th slice */
+				{.offset=160, .length=2},		/* 11th slice */
+				{.offset=176, .length=2},		/* 12th slice */
+				{.offset=192, .length=2},		/* 13th slice */
+				{.offset=208, .length=2},		/* 14th slice */
+				{.offset=224, .length=2},		/* 15th slice */
+				{.offset=240, .length=2},		/* 16th slice */
+#endif
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 8K page MLC with BCH-8 ECC, uses 14 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch8_27_8k = {
+	.eccbytes	= 14*16,  /* 14*16 = 224 bytes */
+	.eccpos		= { 
+		13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+		202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+
+		229,230,231,232,233,234,235,236,237,238,239,240,241,242,
+		256,257,258,259,260,261,262,263,264,265,266,267,268,269,
+		283,284,285,286,287,288,289,290,291,292,293,294,295,296,
+		310,311,312,313,314,315,316,317,318,319,320,321,322,323,
+		337,338,339,340,341,342,343,344,345,346,347,348,349,350,
+		364,365,366,367,368,369,370,371,372,373,374,375,376,377,
+		391,392,393,394,395,396,397,398,399,400,401,402,403,404,
+		418,419,420,421,422,423,424,425,426,427,428,429,430,431
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=12}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=13}, 		/* 2nd slice  */
+				{.offset=54, .length=13},		/* 3rd slice  */
+				{.offset=81, .length=13},		/* 4th slice */
+				{.offset=108, .length=13},		/* 5th slice */
+				{.offset=135, .length=13},		/* 6th slice */
+				{.offset=162, .length=13},		/* 7th slice */
+				{.offset=189, .length=13},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=216, .length=13},		/* 9th slice */
+				{.offset=243, .length=13},		/* 10th slice */
+				{.offset=270, .length=13},		/* 11th slice */
+				{.offset=297, .length=13},		/* 12th slice */
+				{.offset=324, .length=13},		/* 13th slice */
+				{.offset=351, .length=13},		/* 14th slice */
+				{.offset=378, .length=13},		/* 15th slice */
+				{.offset=405, .length=13},		/* 16th slice */
+#endif
+			}
+};
+
+/*
+ * 2K page with BCH-12 ECC, uses 21 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_2k = {
+	.eccbytes	= 21*4,  /* 21*4 = 84 bytes */
+	.eccpos		= { 
+		 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=5}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=6}, 		/* 2nd slice  */
+				{.offset=54, .length=6},		/* 3rd slice  */
+				{.offset=81, .length=6},		/* 4th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+/*
+ * 4K page MLC with BCH-12 ECC, uses 21 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_4k = {
+	.eccbytes	= 21*8,  /* 21*8 = 168 bytes */
+	.eccpos		= { 
+		 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+  		87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+		195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=5}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=6}, 		/* 2nd slice  */
+				{.offset=54, .length=6},		/* 3rd slice  */
+				{.offset=81, .length=6},		/* 4th slice */
+				{.offset=108, .length=6},		/* 5th slice */
+				{.offset=135, .length=6},		/* 6th slice */
+				{.offset=162, .length=6},		/* 7th slice */
+				{.offset=189, .length=6},		/* 8th slice */
+	            		{.offset=0, .length=0}			/* End marker */
+			}
+};
+
+
+/*
+ * 8K page MLC with BCH-12 ECC, uses 21 ECC bytes per 512B ECC step, and requires OOB size of 27B+
+ */
+static struct nand_ecclayout brcmnand_oob_bch12_27_8k = {
+	.eccbytes	= 21*16,  /* 21*16 = 336 bytes */
+	.eccpos		= { 
+		 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,
+		33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+		60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,
+		87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,
+#if ! defined(UNDERSIZED_ECCPOS_API)
+		114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
+		141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+		168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
+                195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+		222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,
+		249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,
+		276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,
+		303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,
+		330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,
+		357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,
+		384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,
+		411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431
+#endif
+		},
+	.oobfree	= { /* 0  used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 1 byte for BBT */
+				{.offset=1, .length=5}, 		/* 1st slice loses byte 0 */
+				{.offset=27,.length=6}, 		/* 2nd slice  */
+				{.offset=54, .length=6},		/* 3rd slice  */
+				{.offset=81, .length=6},		/* 4th slice */
+				{.offset=108, .length=6},		/* 5th slice */
+				{.offset=135, .length=6},		/* 6th slice */
+				{.offset=162, .length=6},		/* 7th slice */
+				{.offset=189, .length=6},		/* 8th slice */
+#if ! defined(UNDERSIZED_ECCPOS_API)
+				{.offset=216, .length=6},		/* 9th slice */
+				{.offset=243, .length=6},		/* 10th slice */
+				{.offset=270, .length=6},		/* 11th slice */
+				{.offset=297, .length=6},		/* 12th slice */
+				{.offset=324, .length=6},		/* 13th slice */
+				{.offset=351, .length=6},		/* 14th slice */
+				{.offset=378, .length=6},		/* 15th slice */
+				{.offset=405, .length=6},		/* 16th slice */
+#endif
+			}
+};
+#endif
+
+#else
+/* MLC not supported in 2.6.12 */
+
+static struct nand_oobinfo brcmnand_oob_64 = {
+	.useecc		= MTD_NANDECC_AUTOPLACE,
+	.eccbytes	= 12,
+	.eccpos		= {
+		6,7,8,
+		22,23,24,
+		38,39,40,
+		54,55,56
+		},
+	.oobfree	= { /* 0-1 used for BBT and/or manufacturer bad block marker, 
+	                    * first slice loses 2 bytes for BBT */
+				{2, 4}, {9,13}, 		/* First slice {9,7} 2nd slice {16,6}are combined */ 
+									/* ST uses 6th byte (offset=5) as Bad Block Indicator, 
+									  * in addition to the 1st byte, and will be adjusted at run time */
+				{25, 13},				/* 2nd slice  */
+				{41, 13},				/* 3rd slice */
+				{57, 7},				/* 4th slice */
+	                   {0, 0}				/* End marker */
+			}
+};
+
+
+/**
+ * brcmnand_oob oob info for 512 page
+ */
+static struct nand_oobinfo brcmnand_oob_16 = {
+	.useecc		= MTD_NANDECC_AUTOPLACE,
+	.eccbytes	= 3,
+	.eccpos		= {
+		6,7,8
+		},
+	.oobfree	= { {0, 5}, {9,7}, /* Byte 5 (6th byte) used for BI */
+				{0, 0}		/* End marker */
+			   }
+			/* THT Bytes offset 4&5 are used by BBT.  Actually only byte 5 is used, but in order to accomodate
+			 * for 16 bit bus width, byte 4 is also not used.  If we only use byte-width chip, (We did)
+			 * then we can also use byte 4 as free bytes.
+			 */
+};
+
+
+#endif /* 2.6.17 or earlier */
+#endif
+
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 36eace03b2ac79229984b09254db9aed3e3b8d5f..3259e27e03b5058620e15f604afe57745afeebd2 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -134,9 +134,9 @@ struct mtd_info_user {
 
 struct region_info_user {
 	__u32 offset;		/* At which this region starts,
-				 * from the beginning of the MTD */
-	__u32 erasesize;	/* For this region */
-	__u32 numblocks;	/* Number of blocks in this region */
+					 * from the beginning of the MTD */
+	__u32 erasesize;		/* For this region */
+	__u32 numblocks;		/* Number of blocks in this region */
 	__u32 regionindex;
 };
 
@@ -220,7 +220,7 @@ struct nand_oobfree {
 };
 
 #define MTD_MAX_OOBFREE_ENTRIES	8
-#define MTD_MAX_ECCPOS_ENTRIES	64
+#define MTD_MAX_ECCPOS_ENTRIES	64	
 /*
  * OBSOLETE: ECC layout control structure. Exported to user-space via ioctl
  * ECCGETLAYOUT for backwards compatbility and should not be mistaken as a
diff --git a/include/net/af_mhi.h b/include/net/af_mhi.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ae13a82d421dbe9843465cd1e637b60f89f6d59
--- /dev/null
+++ b/include/net/af_mhi.h
@@ -0,0 +1,53 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: net/af_mhi.h
+ *
+ * MHI Protocol Family kernel definitions
+ */
+
+#ifndef __LINUX_NET_AFMHI_H
+#define __LINUX_NET_AFMHI_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+
+extern int mhi_register_protocol(int protocol);
+extern int mhi_unregister_protocol(int protocol);
+extern int mhi_protocol_registered(int protocol);
+
+extern int mhi_skb_send(struct sk_buff *skb, struct net_device *dev, u8 proto);
+
+
+#endif /* __LINUX_NET_AFMHI_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/bl_ops.h b/include/net/bl_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f82355b7c100d91116a636812bb3a706005e129
--- /dev/null
+++ b/include/net/bl_ops.h
@@ -0,0 +1,49 @@
+#ifndef BL_OPS_H
+#define BL_OPS_H
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RUNNER_RG) || defined(CONFIG_BCM_RUNNER_RG_MODULE)
+
+#include <linux/types.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct bl_ops_t {
+	void (*net_sched_sch_tbf_tbf_change)(void *_q, void *_sch, void *_qopt, int max_size);
+	void (*net_sched_sch_tbf_tbf_destroy)(void *_q, void *_sch);
+	void (*net_sched_sch_prio_prio_classify)(struct sk_buff *skb, u32 band);
+        void (*net_netfilter_nf_conntrack_ftp)(struct nf_conn *ct, int ctinfo, struct nf_conntrack_expect *exp,
+            int ftptype);
+	void (*net_ipv4_netfilter_nf_nat_ftp)(struct nf_conn *ct, u_int16_t port, char buffer[], int ctinfo);
+	void (*net_ipv4_netfilter_nf_nat_sip)(struct nf_conn *ct, u_int16_t port, int dir);
+	void (*net_ipv4_netfilter_nf_nat_rtsp)(int num, int ctinfo, struct nf_conn *ct, u_int16_t loport,
+            u_int16_t hiport);
+	void (*net_ipv4_netfilter_ip_tables_check_match)(void *_m, void *_par, const void *_ip);
+	void (*net_ipv4_netfilter_ip_tables___do_replace)(void *_oldinfo, void *_newinfo);
+	void (*net_ipv4_netfilter_ip_tables_do_replace)(void *_oldinfo);
+	void (*net_netfilter_xt_PORTTRIG_trigger_refresh)(void *_trig);
+	void (*net_netfilter_xt_PORTTRIG_trigger_delete)(void *_trig);
+	void (*net_netfilter_xt_PORTTRIG_trigger_new)(struct nf_conn *ct, __be32 srcip, __be32 dstip, __be16 port_start, __be16 port_end, __be16 protocol);
+	void (*net_netfilter_nf_conntrack_core_destroy_conntrack)(struct nf_conn *ct);
+	int (*net_netfilter_nf_conntrack_core_death_by_timeout)(struct nf_conn *ct);
+	void (*net_netfilter_nf_conntrack_core_nf_conntrack_confirm)(struct nf_conn *ct, struct sk_buff  *skb);
+	void (*net_netfilter_nf_conntrack_core_nf_conntrack_in)(struct nf_conn *ct, struct sk_buff  *skb);
+	void (*net_netfilter_nf_conntrack_core_nf_conntrack_alloc)(struct nf_conn *ct);
+	void (*net_netfilter_nf_conntrack_core_nf_conntrack_free)(struct nf_conn *ct);
+};
+
+#define BL_OPS(op)     { if (bl_ops) bl_ops->op; }
+#define BL_OPS_CR(op)  { if (bl_ops && (bl_ops->op)) return; }
+
+extern struct bl_ops_t *bl_ops;
+
+#else /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+
+#define BL_OPS(op)
+#define BL_OPS_CR(op)
+
+#endif /* CONFIG_BCM_RUNNER_RG || CONFIG_BCM_RUNNER_RG_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#endif /* BL_OPS_H */
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a7a683e30b64e6beb2bc87907c85576d85385007..4967a41581606a1384ed9e0f764d5d994ce0e8d7 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -290,6 +290,9 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
 	unsigned char err_offset = 0;
 	u8 opt_len = opt[1];
 	u8 opt_iter;
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)		
+	u8 tag_len;
+#endif
 
 	if (opt_len < 8) {
 		err_offset = 1;
@@ -302,11 +305,23 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
 	}
 
 	for (opt_iter = 6; opt_iter < opt_len;) {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+/* back ported from 3.8 kernel 
+ * Upstream commit f2e5ddcc0d12f9c4c7b254358ad245c9dddce13b
+ */
+		tag_len = opt[opt_iter + 1];
+		if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+			err_offset = opt_iter + 1;
+			goto out;
+		}
+		opt_iter += tag_len;
+#else
 		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
 			err_offset = opt_iter + 1;
 			goto out;
 		}
 		opt_iter += opt[opt_iter + 1];
+#endif
 	}
 
 out:
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 51a7031b4aa379463a56fca35f69f97dfdf4ffa8..aafe0ea8c8c853290d4d72dd67d0f5151ac4a194 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -18,6 +18,10 @@
 #include <net/snmp.h>
 #include <linux/ipv6.h>
 
+#if defined(CONFIG_BCM_KF_MLD)
+#define CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION
+#endif
+
 /* inet6_dev.if_flags */
 
 #define IF_RA_OTHERCONF	0x80
@@ -117,6 +121,9 @@ struct ifmcaddr6 {
 	struct ip6_sf_list	*mca_sources;
 	struct ip6_sf_list	*mca_tomb;
 	unsigned int		mca_sfmode;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+	unsigned int		mca_osfmode;
+#endif
 	unsigned char		mca_crcount;
 	unsigned long		mca_sfcount[2];
 	struct timer_list	mca_timer;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 16ff29a7bb30cfbf5e1581c1388e321e9d9f2a1b..21dadab462e4dc6ab2b3807aaef44f43483e05b9 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -5,6 +5,10 @@ struct netns_frags {
 	int			nqueues;
 	atomic_t		mem;
 	struct list_head	lru_list;
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-0100*/
+	spinlock_t		lru_lock;
+#endif
 
 	/* sysctls */
 	int			timeout;
@@ -71,4 +75,29 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
 		inet_frag_destroy(q, f, NULL);
 }
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+/*CVE-2014-0100*/
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_move_tail(&q->lru_list, &q->net->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_del(&q->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+				     struct inet_frag_queue *q)
+{
+	spin_lock(&nf->lru_lock);
+	list_add_tail(&q->lru_list, &nf->lru_list);
+	spin_unlock(&nf->lru_lock);
+}
+#endif
+
 #endif
diff --git a/include/net/ipip.h b/include/net/ipip.h
index a32654d52730d751a4c89dd8f8fea98b57add370..2736f09de73f62e6b99bea4e313cbfba10088e94 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -13,6 +13,9 @@ struct ip_tunnel_6rd_parm {
 	__be32			relay_prefix;
 	u16			prefixlen;
 	u16			relay_prefixlen;
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+	__be32			br_addr;
+#endif
 };
 
 struct ip_tunnel {
diff --git a/include/net/mhi/dgram.h b/include/net/mhi/dgram.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d1334f65dd9d033e68ac97c956a98e804ed7fa6
--- /dev/null
+++ b/include/net/mhi/dgram.h
@@ -0,0 +1,55 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi/dgram.h
+ *
+ * MHI DGRAM socket definitions
+ */
+
+#ifndef MHI_DGRAM_H
+#define MHI_DGRAM_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+
+extern int mhi_dgram_sock_create(
+	struct net *net,
+	struct socket *sock,
+	int proto,
+	int kern);
+
+extern int  mhi_dgram_proto_init(void);
+extern void mhi_dgram_proto_exit(void);
+
+#endif /* MHI_DGRAM_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/mhi/mhdp.h b/include/net/mhi/mhdp.h
new file mode 100644
index 0000000000000000000000000000000000000000..2de17f93cd11b59473521273cf6313b540726d24
--- /dev/null
+++ b/include/net/mhi/mhdp.h
@@ -0,0 +1,61 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhdp.h
+ *
+ * Modem-Host Interface (MHI) - MHDP kernel interface
+ */
+
+
+#ifndef __NET_MHI_MHDP_H
+#define __NET_MHI_MHDP_H
+
+struct mhdp_tunnel_parm {
+	char name[IFNAMSIZ];
+	char master[IFNAMSIZ];
+	int  pdn_id;
+	int  sim_id;
+};
+
+struct mhdp_udp_filter {
+
+	unsigned short port_id;
+	unsigned char active;
+};
+
+#define SIOCADDPDNID	(SIOCDEVPRIVATE + 1)
+#define SIOCDELPDNID	(SIOCDEVPRIVATE + 2)
+#define SIOCRESETMHDP	(SIOCDEVPRIVATE + 3)
+#define SIOSETUDPFILTER	(SIOCDEVPRIVATE + 4)
+
+struct net_device *mhdp_get_netdev_by_pdn_id(struct net_device *dev, int pdn_id);
+
+#endif /* __NET_MHI_MHDP_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/mhi/raw.h b/include/net/mhi/raw.h
new file mode 100644
index 0000000000000000000000000000000000000000..06e00e2452f6768e9ffe2de0f30b0c2d8a0cfcc6
--- /dev/null
+++ b/include/net/mhi/raw.h
@@ -0,0 +1,55 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi/raw.h
+ *
+ * MHI RAW socket definitions
+ */
+
+#ifndef MHI_RAW_H
+#define MHI_RAW_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+
+extern int mhi_raw_sock_create(
+	struct net *net,
+	struct socket *sock,
+	int proto,
+	int kern);
+
+extern int  mhi_raw_proto_init(void);
+extern void mhi_raw_proto_exit(void);
+
+#endif /* MHI_RAW_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/mhi/sched.h b/include/net/mhi/sched.h
new file mode 100644
index 0000000000000000000000000000000000000000..940e6cdf870e337d21d02411827ba4a53b6c7cd4
--- /dev/null
+++ b/include/net/mhi/sched.h
@@ -0,0 +1,53 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi/sched.h
+ *
+ * Modem-Host Interface Scheduling
+ */
+
+#ifndef MHI_SCHED_H
+#define MHI_SCHED_H
+
+#define MHI_NOTIFY_QUEUE_LOW     19
+#define MHI_NOTIFY_QUEUE_HIGH    20
+
+extern int
+mhi_register_queue_notifier(struct Qdisc *sch,
+			struct notifier_block *nb,
+			unsigned long cl);
+
+extern int
+mhi_unregister_queue_notifier(struct Qdisc *sch,
+			struct notifier_block *nb,
+			unsigned long cl);
+
+#endif /* MHI_SCHED_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/mhi/sock.h b/include/net/mhi/sock.h
new file mode 100644
index 0000000000000000000000000000000000000000..0613cd551173936a7bcbcd9b57cd5083a9a34d47
--- /dev/null
+++ b/include/net/mhi/sock.h
@@ -0,0 +1,57 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2012:DUAL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi/sock.h
+ *
+ * MHI socket definitions
+ */
+
+#ifndef MHI_SOCK_H
+#define MHI_SOCK_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+
+extern const struct proto_ops mhi_socket_ops;
+
+extern int  mhi_sock_rcv_unicast(struct sk_buff *skb, u8 l3prot, u32 l3len);
+extern int  mhi_sock_rcv_multicast(struct sk_buff *skb, u8 l3prot, u32 l3len);
+
+extern void mhi_sock_hash(struct sock *sk);
+extern void mhi_sock_unhash(struct sock *sk);
+
+extern int  mhi_sock_init(void);
+extern void mhi_sock_exit(void);
+
+#endif /* MHI_SOCK_H */
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 086f8a5b59dc3782014f40edbb10be4d25f51d0a..2e588f25d2477f841b5d32c0a9c9b30e234076f8 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -21,6 +21,9 @@ struct netevent_redirect {
 enum netevent_notif_type {
 	NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
 	NETEVENT_REDIRECT,	   /* arg is struct netevent_redirect ptr */
+#if defined(CONFIG_BCM_KF_BLOG)
+	NETEVENT_ARP_BINDING_CHANGE,   /* arg is struct neighbour ptr */
+#endif
 };
 
 extern int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index ab86036bbf0c2807240afde3cf970329107358b8..27df64592ec440599bd994a703a965c33180f417 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -22,6 +22,11 @@
 #include <linux/netfilter/nf_conntrack_dccp.h>
 #include <linux/netfilter/nf_conntrack_sctp.h>
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
+#if defined(CONFIG_BCM_KF_PROTO_IPSEC) && \
+	(defined(CONFIG_NF_CONNTRACK_IPSEC) || defined(CONFIG_NF_CONNTRACK_IPSEC_MODULE))
+#include <linux/netfilter/nf_conntrack_ipsec.h>
+#include <linux/netfilter/nf_conntrack_proto_esp.h>
+#endif
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 
 #include <net/netfilter/nf_conntrack_tuple.h>
@@ -33,6 +38,10 @@ union nf_conntrack_proto {
 	struct ip_ct_sctp sctp;
 	struct ip_ct_tcp tcp;
 	struct nf_ct_gre gre;
+#if defined(CONFIG_BCM_KF_PROTO_ESP) && \
+	(defined(CONFIG_NF_CT_PROTO_ESP) || defined(CONFIG_NF_CT_PROTO_ESP_MODULE))
+	struct nf_ct_esp esp;
+#endif
 };
 
 union nf_conntrack_expect_proto {
@@ -46,6 +55,14 @@ union nf_conntrack_expect_proto {
 #include <linux/netfilter/nf_conntrack_sane.h>
 #include <linux/netfilter/nf_conntrack_sip.h>
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/netfilter/nf_conntrack_rtsp.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#define NF_ALG_BUFFER_SIZE 2000
+#endif
+
 /* per conntrack: application helper private data */
 union nf_conntrack_help {
 	/* insert conntrack helper private data (master) here */
@@ -64,9 +81,16 @@ union nf_conntrack_help {
     defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
 	struct nf_ct_sane_master ct_sane_info;
 #endif
-#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
+#if defined(CONFIG_BCM_KF_NETFILTER) || defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
 	struct nf_ct_sip_master ct_sip_info;
 #endif
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	struct nf_ct_rtsp_master ct_rtsp_info;
+#if defined(CONFIG_BCM_KF_PROTO_IPSEC) && \
+	(defined(CONFIG_NF_CONNTRACK_IPSEC) || defined(CONFIG_NF_CONNTRACK_IPSEC_MODULE))
+	struct nf_ct_ipsec_master ct_ipsec_info;
+#endif
+#endif
 };
 
 #include <linux/types.h>
@@ -100,6 +124,16 @@ struct nf_conn_help {
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+typedef struct dpi_info
+{
+	unsigned int app_id;
+	uint16_t dev_key;
+	uint16_t flags;
+	unsigned int url_id;
+} dpi_info_t;
+#endif
+
 struct nf_conn {
 	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
            plus 1 for any connection(s) we are `master' for */
@@ -107,15 +141,55 @@ struct nf_conn {
 
 	spinlock_t lock;
 
+#if defined(CONFIG_BCM_KF_BLOG)
+#if defined(CONFIG_BLOG)
+	unsigned int blog_key[2];	/* Associating 2=IP_CT_DIR_MAX blogged flows  */
+    unsigned long idle_jiffies; /* connection idled duration, 0 means active  */
+    unsigned long extra_jiffies;/* connection timeout value                   */
+    unsigned long prev_idle;    /* previous idle state                        */
+	struct timer_list prev_timeout;
+#endif
+	unsigned int iq_prio;	    /* Ingress QoS Prio */
+#endif
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	struct list_head safe_list; /* bugfix for lost connections */
+	struct list_head derived_connections; /* Used by master connection */
+	struct list_head derived_list; /* Used by child connection */
+	unsigned derived_timeout;	/* 0 means no derived_timeout, 0xFFFFFFFF
+								 * means never timeout until master ct is
+								 * disconnected, others means timeout secs */
+
+	/* Have we seen traffic both ways yet? (bitset) */ // bcm version
+	unsigned long status; // moved position for bcm
+
+#if defined(CONFIG_NF_DYNDSCP) || defined(CONFIG_NF_DYNDSCP_MODULE)
+	struct nf_tos_inheritance {
+		u_int16_t status;
+		u_int8_t dscp[2];		/* IP_CT_DIR_MAX */
+	}dyndscp; 
+#endif
+	/*---------- Add any custom fields below this line ----------*/
+
+	/* If we were expected by an expectation, this will be it */
+	struct nf_conn *master;  // moved position for bcm
+#endif /* CONFIG_BCM_KF_NETFILTER */
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	dpi_info_t dpi;
+	uint32_t stats_idx;
+#endif
+
 	/* XXX should I move this to the tail ? - Y.K */
 	/* These are my tuples; original and reply */
 	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
 
+#if !defined(CONFIG_BCM_KF_NETFILTER)
 	/* Have we seen traffic both ways yet? (bitset) */
 	unsigned long status;
 
 	/* If we were expected by an expectation, this will be it */
 	struct nf_conn *master;
+#endif
 
 	/* Timer function; drops refcnt when it goes off. */
 	struct timer_list timeout;
@@ -136,6 +210,28 @@ struct nf_conn {
 
 	/* Storage reserved for other modules, must be the last member */
 	union nf_conntrack_proto proto;
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	void *bl_ctx;
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_XT_MATCH_LAYER7) && \
+	(defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE))
+	struct {
+		/*
+		* e.g. "http". NULL before decision. "unknown" after decision
+		* if no match.
+		*/
+		char *app_proto;
+		/*
+		* application layer data so far. NULL after match decision.
+		*/
+		char *app_data;
+		unsigned int app_data_len;
+	} layer7;
+#endif    
 };
 
 static inline struct nf_conn *
@@ -280,11 +376,21 @@ extern void nf_ct_untracked_status_or(unsigned long bits);
 extern void
 nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
 extern void nf_conntrack_free(struct nf_conn *ct);
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
 extern struct nf_conn *
 nf_conntrack_alloc(struct net *net, u16 zone,
+		   struct sk_buff *skb,
 		   const struct nf_conntrack_tuple *orig,
 		   const struct nf_conntrack_tuple *repl,
 		   gfp_t gfp);
+#else
+extern struct nf_conn *
+nf_conntrack_alloc(struct net *net, u16 zone,
+		   const struct nf_conntrack_tuple *orig,
+		   const struct nf_conntrack_tuple *repl,
+		   gfp_t gfp);
+#endif
 
 static inline int nf_ct_is_template(const struct nf_conn *ct)
 {
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 463ae8e166965908d2fc0a60e92296123b846f2e..c825e23e43abb8cf291651f65fc67e09a0cc19b1 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -13,10 +13,19 @@
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#include <linux/dpistats.h>
+#endif
+
 
 struct nf_conn_counter {
 	atomic64_t packets;
 	atomic64_t bytes;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	unsigned long cum_fast_pkts;
+	unsigned long long cum_fast_bytes;
+	unsigned long ts;
+#endif    
 };
 
 static inline
@@ -45,6 +54,15 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
 extern unsigned int
 seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+extern unsigned int
+seq_print_acct_dpi(struct seq_file *s, const struct nf_conn *ct, int dir);
+extern int 
+conntrack_get_stats( const struct nf_conn *ct, int dir, CtkStats_t *stats_p );
+extern int 
+conntrack_evict_stats( const struct nf_conn *ct, int dir, CtkStats_t *stats_p );
+#endif
+
 /* Check if connection tracking accounting is enabled */
 static inline bool nf_ct_acct_enabled(struct net *net)
 {
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 4619caadd9d1055fd5caf3e7a07a10eb0e263d1b..3ee6d2d938a430e3c1664243e0d18c7dee32a13d 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -42,6 +42,12 @@ struct nf_conntrack_expect {
 	/* Expectation class */
 	unsigned int class;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	unsigned derived_timeout; /* 0 means no derived_timeout, 0xFFFFFFFF
+				   * means never timeout until master ct is
+				   * disconnected, others means timeout secs */
+#endif
+
 #ifdef CONFIG_NF_NAT_NEEDED
 	__be32 saved_ip;
 	/* This is the original per-proto part, used to map the
@@ -67,6 +73,10 @@ struct nf_conntrack_expect_policy {
 
 #define NF_CT_EXPECT_CLASS_DEFAULT	0
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#define NF_CT_EXPECT_DERIVED_TIMEOUT 0x80
+#endif
+
 int nf_conntrack_expect_init(struct net *net);
 void nf_conntrack_expect_fini(struct net *net);
 
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index aea3f8221be08b2208586900447f363af796440b..c392938461407bff8a17ea53615243a2739ba4af 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -61,6 +61,12 @@ struct nf_conntrack_tuple {
 			struct {
 				__be16 key;
 			} gre;
+#if defined(CONFIG_BCM_KF_PROTO_ESP) && \
+	(defined(CONFIG_NF_CT_PROTO_ESP) || defined(CONFIG_NF_CT_PROTO_ESP_MODULE))
+			struct {
+				__be32 spi;
+			} esp;
+#endif
 		} u;
 
 		/* The protocol. */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 68e509750caa4cb60d54b48d62cb23cf109dd17d..00538df3ae9d5adee24ebd11c7fe18170e6551aa 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -38,6 +38,10 @@ struct pn_sock {
 	u16		sobject;
 	u16		dobject;
 	u8		resource;
+#ifdef CONFIG_BCM_KF_PHONET
+	u8		resource_type;
+	u8		resource_subtype;
+#endif
 };
 
 static inline struct pn_sock *pn_sk(struct sock *sk)
@@ -48,6 +52,11 @@ static inline struct pn_sock *pn_sk(struct sock *sk)
 extern const struct proto_ops phonet_dgram_ops;
 
 void pn_sock_init(void);
+#ifdef CONFIG_BCM_KF_PHONET
+struct sock *pn_find_sock_by_sa_and_skb(struct net *net,
+						const struct sockaddr_pn *spn,
+						struct sk_buff *skb);
+#endif
 struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa);
 void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb);
 void phonet_get_local_port_range(int *min, int *max);
@@ -116,4 +125,36 @@ void phonet_sysctl_exit(void);
 int isi_register(void);
 void isi_unregister(void);
 
+#ifdef CONFIG_BCM_KF_PHONET
+#ifdef CONFIG_PHONET_DEBUG
+#define ACTIVATE_PHONET_DEBUG
+#else
+#undef ACTIVATE_PHONET_DEBUG
+#endif
+
+#ifdef ACTIVATE_PHONET_DEBUG
+typedef enum {
+	OFF = 0,
+	ON,
+	DATA,
+} phonet_debug_state ;
+extern phonet_debug_state phonet_dbg_state;
+
+# define PN_PRINTK(...)    if (OFF != phonet_dbg_state) \
+				pr_debug("PHONET: " __VA_ARGS__)
+# define PN_DATA_PRINTK(...)    if (DATA == phonet_dbg_state) \
+				  pr_debug(__VA_ARGS__)
+# define PEP_PRINTK(...)    if (OFF != phonet_dbg_state) \
+				pr_debug("PEP: " __VA_ARGS__)
+# define PEP_DATA_PRINTK(...)    if (DATA == phonet_dbg_state) \
+				  pr_debug(__VA_ARGS__)
+#else
+# define PN_PRINTK(...)
+# define PN_DATA_PRINTK(...)
+# define PEP_PRINTK(...)
+# define PEP_DATA_PRINTK(...)
+
+#endif
+#endif /* CONFIG_BCM_KF_PHONET */
+
 #endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 96239e78e621fa0654d9e436d8cbe50b8ebff5d0..27666c133b7b320cf6926f02f1246a14b5ecae71 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1146,6 +1146,10 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
 	}
 }
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+extern void xfrm_garbage_collect(struct net *net);
+#endif
+
 #else
 
 static inline void xfrm_sk_free_policy(struct sock *sk) {}
diff --git a/init/Kconfig b/init/Kconfig
index c06208bd39ac6bc863ee391fdce2dd49f871a8f1..e21a9a81d7992b260ae0e4ad26d110c0f956b71c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -954,6 +954,13 @@ config SYSCTL
 config ANON_INODES
 	bool
 
+config PANIC_TIMEOUT
+	int "Default panic timeout"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default 0
+	help
+	  Set default panic timeout.
+
 menuconfig EXPERT
 	bool "Configure standard kernel features (expert users)"
 	# Unhide debug options, to make the on-by-default options visible
diff --git a/init/calibrate.c b/init/calibrate.c
index fda0a7b0f06c006467c43d04f658744b146a6266..9db625cd230d66bc4c1d858a3e86737f3ff2393e 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -11,6 +11,20 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 
+#if defined(CONFIG_BCM_KF_IKOS) && defined(CONFIG_BRCM_IKOS)
+void __cpuinit calibrate_delay(void)
+{
+	printk("IKOS bypassing delay loop calibration, using ");
+#if defined(CONFIG_BCM63138_SIM) || defined(CONFIG_BCM63148_SIM)
+	loops_per_jiffy = 800000;
+#else
+	loops_per_jiffy = 500000;
+#endif
+	printk("%lu.%02lu BogoMIPS\n",
+		loops_per_jiffy/(500000/HZ),
+		(loops_per_jiffy/(5000/HZ)) % 100);
+}
+#else
 unsigned long lpj_fine;
 unsigned long preset_lpj;
 static int __init lpj_setup(char *str)
@@ -299,3 +313,4 @@ void __cpuinit calibrate_delay(void)
 	loops_per_jiffy = lpj;
 	printed = true;
 }
+#endif
diff --git a/init/main.c b/init/main.c
index f07f2b0f0c3bf98a9208b30040c8bd1501efc9a2..cac77ca81de79137fc4e6c64646fd3368062addd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -88,6 +88,9 @@ extern void mca_init(void);
 extern void sbus_init(void);
 extern void prio_tree_init(void);
 extern void radix_tree_init(void);
+#if defined(CONFIG_BCM_KF_LOG)
+extern void bcmLog_init(void);
+#endif
 #ifndef CONFIG_DEBUG_RODATA
 static inline void mark_rodata_ro(void) { }
 #endif
@@ -114,6 +117,14 @@ EXPORT_SYMBOL(system_state);
 #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
 #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
 
+#if defined(CONFIG_BCM_KF_DSP)
+extern void __init allocDspModBuffers(void);
+
+#if defined(CONFIG_BCM_KF_GPON_DDRO)
+extern void __init allocGponDDROBuffers(void);
+#endif
+#endif /* CONFIG_BCM_KF_DSP */
+
 extern void time_init(void);
 /* Default late time init is NULL. archs can override this later. */
 void (*__initdata late_time_init)(void);
@@ -218,8 +229,8 @@ static int __init loglevel(char *str)
 	 */
 	if (get_option(&str, &newlevel)) {
 		console_loglevel = newlevel;
-		return 0;
-	}
+	return 0;
+}
 
 	return -EINVAL;
 }
@@ -463,6 +474,10 @@ static void __init mm_init(void)
 	vmalloc_init();
 }
 
+#if defined(CONFIG_BCM_KF_LINKER_WORKAROUND)
+volatile int __attribute__ ((section ("__modver_tmp"))) someModVerVariable = 0;
+#endif
+ 
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
@@ -592,6 +607,21 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
+
+#if defined(CONFIG_BCM_KF_DSP)
+	/*
+	** Allocate boot-time memory for the special DSP module. This allocation
+	** is only possible before mem_init(), so ensure it is always performed
+	** before mem_init() runs.
+	*/
+	allocDspModBuffers();
+
+#if defined(CONFIG_BCM_KF_GPON_DDRO)
+	allocGponDDROBuffers();
+#endif
+#endif /* CONFIG_BCM_KF_DSP */
+
+
 	page_cgroup_init();
 	debug_objects_mem_init();
 	kmemleak_init();
@@ -633,7 +663,9 @@ asmlinkage void __init start_kernel(void)
 	sfi_init_late();
 
 	ftrace_init();
-
+#if defined(CONFIG_BCM_KF_LOG) && defined(CONFIG_BCM_LOG)
+	bcmLog_init();
+#endif
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
 }
@@ -661,7 +693,7 @@ static int __init_or_module do_one_initcall_debug(initcall_t fn)
 	int ret;
 
 	printk(KERN_DEBUG "calling  %pF @ %i\n", fn, task_pid_nr(current));
-	calltime = ktime_get();
+		calltime = ktime_get();
 	ret = fn();
 	rettime = ktime_get();
 	delta = ktime_sub(rettime, calltime);
@@ -670,7 +702,7 @@ static int __init_or_module do_one_initcall_debug(initcall_t fn)
 		ret, duration);
 
 	return ret;
-}
+	}
 
 int __init_or_module do_one_initcall(initcall_t fn)
 {
@@ -788,6 +820,13 @@ static void __init do_pre_smp_initcalls(void)
 		do_one_initcall(*fn);
 }
 
+#if defined(CONFIG_BCM_KF_IKOS) && defined(CONFIG_BRCM_IKOS) && defined(CONFIG_MIPS)
+/*  
+   The IKOS jump_to_kernel_entry function was removed. MIPS head.S uses CONFIG_BOOT_RAW to enable the
+   entry point at 0x80010400; the 96000D profile already enables this option, or enable it in your profile.
+ */
+#endif
+
 static void run_init_process(const char *init_filename)
 {
 	argv_init[0] = init_filename;
@@ -826,11 +865,13 @@ static noinline int init_post(void)
 		printk(KERN_WARNING "Failed to execute %s.  Attempting "
 					"defaults...\n", execute_command);
 	}
+	run_init_process("/etc/preinit");
 	run_init_process("/sbin/init");
 	run_init_process("/etc/init");
 	run_init_process("/bin/init");
 	run_init_process("/bin/sh");
 
+
 	panic("No init found.  Try passing init= option to kernel. "
 	      "See Linux Documentation/init.txt for guidance.");
 }
diff --git a/kernel/Makefile b/kernel/Makefile
index f4bf68a768769fadecf9636a230a98b4561e5b67..f04c85d76d4c2b0ec4336fb3f89af7dfe5dd0c96 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,6 +2,12 @@
 # Makefile for the linux kernel.
 #
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BOUNCE)
+ifdef CONFIG_BCM_BOUNCE_DATAPATH
+EXTRA_CFLAGS += -O0 -finstrument-functions
+endif
+endif # BCM_KF
+
 obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
@@ -105,6 +111,10 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_LOG)
+obj-$(CONFIG_BCM_LOG) += bcm_log.o
+endif # BCM_KF
+
 obj-$(CONFIG_PERF_EVENTS) += events/
 
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
diff --git a/kernel/bcm_log.c b/kernel/bcm_log.c
new file mode 100644
index 0000000000000000000000000000000000000000..dab1c18a2809e0c12bc5423309706a26ae02b6f6
--- /dev/null
+++ b/kernel/bcm_log.c
@@ -0,0 +1,928 @@
+/*
+* <:copyright-BRCM:2010:DUAL/GPL:standard
+* 
+*    Copyright (c) 2010 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+#include <asm/uaccess.h> /*copy_from_user*/
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+
+#include <linux/bcm_log.h>
+
+
+
+#define VERSION     "0.1"
+#define VER_STR     "v" VERSION " " __DATE__ " " __TIME__
+
+#define PROC_ENTRY_NAME "bcmlog"
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+#define MAX_NUM_DATADUMP_IDS 20
+#define MAX_NUM_QIDS 10
+#define PRINTBUF_SIZE 0x10000
+#endif
+
+#define BCM_LOG_CHECK_LOG_ID(_logId)                                    \
+    BCM_ASSERT((_logId) >= 0 && (_logId) < BCM_LOG_ID_MAX);
+
+#define BCM_LOG_CHECK_LOG_LEVEL(_logLevel)                              \
+    BCM_ASSERT((_logLevel) >= 0 && (_logLevel) < BCM_LOG_LEVEL_MAX);
+
+#define BCM_LOG_CHECK_DD_LEVEL(_ddLevel)                                \
+    BCM_ASSERT((_ddLevel) >= 0 && (_ddLevel) < BCM_LOG_DD_MAX);
+
+static bcmLogLevel_t globalLogLevel = BCM_LOG_LEVEL_DEBUG;
+
+static bcmLogModuleInfo_t modInfo[] = BCM_LOG_MODULE_INFO;
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+static bcmLogDataDumpLevel_t globalDataDumpLevel = BCM_LOG_DD_IMPORTANT;
+static Bcm_DataDumpPrintFunc *printFuns[MAX_NUM_DATADUMP_IDS*MAX_NUM_QIDS];
+static char buf[PRINTBUF_SIZE];
+static const char* qids[MAX_NUM_QIDS];
+#endif
+
+static bcmFun_t* funTable[BCM_FUN_ID_MAX];
+static bcmLogSpiCallbacks_t spiFns = { .reserveSlave      = NULL,
+                                       .syncTrans         = NULL,
+                                       .kerSysSlaveWrite  = NULL,
+                                       .kerSysSlaveRead   = NULL,
+                                       .bpGet6829PortInfo = NULL};
+static int spiDev = 0;
+
+/**
+ ** Local Functions
+ **/
+
+static char char2num(char in) {
+    char out;
+
+    if ((in >= '0') && (in <= '9'))
+        out = (in - '0');
+    else if ((in >= 'a') && (in <= 'f'))
+        out = (in - 'a') + 10;
+    else if ((in >= 'A') && (in <= 'F'))
+        out = (in - 'A') + 10;
+    else
+        out = 0;
+
+    return out;
+}
+
+static int ishex(char *str) {
+  return str && (str[0]=='0') && (str[1]=='x');
+}
+
+static uint32_t str2val(char *str) {
+    int i;
+    int value;
+    int base = ishex(str) ? 16 : 10;
+
+    if (str == NULL) return(0);
+
+    for (i=0,value=0; str[i]; i++) {
+        value = (value*base) + char2num(str[i]);
+    }
+
+    return(value);
+}
+
+#define UNIT_SIZE_BYTES 1
+#define UNIT_SIZE_HALFWORDS 2
+#define UNIT_SIZE_WORDS 4
+
+static void setMem(void *start, uint32_t val, uint32_t len, uint32_t unitSize) {
+  int i;
+  uint8_t* curPtr = start;
+
+  BCM_ASSERT((unitSize == UNIT_SIZE_BYTES) ||
+             (unitSize == UNIT_SIZE_HALFWORDS) ||
+             (unitSize == UNIT_SIZE_WORDS));
+  BCM_ASSERT(((uint32_t)start&~(unitSize-1)) == (uint32_t)start);
+
+  for (i = 0; i < len; ++i) {
+      switch (unitSize) {
+        case UNIT_SIZE_BYTES:
+        {
+          *curPtr = (uint8_t)val;
+          break;
+        }
+        case UNIT_SIZE_HALFWORDS:
+        {
+          uint16_t *cur16Ptr = (uint16_t*)curPtr;
+          *cur16Ptr = (uint16_t)val;
+          break;
+        }
+        case UNIT_SIZE_WORDS:
+        {
+          uint32_t *cur32Ptr = (uint32_t*)curPtr;
+          *cur32Ptr = (uint32_t)val;
+          break;
+        }
+        default:
+          break;
+      }
+
+      curPtr += unitSize;
+  }
+}
+
+static void dumpHexData(void *start, uint32_t len, uint32_t unitSize, int bSpiRead)
+{
+    int i;
+    unsigned long temp;
+    /*Force natural alignment*/
+    uint8_t* curPtr;
+
+    BCM_ASSERT((unitSize == UNIT_SIZE_BYTES) ||
+               (unitSize == UNIT_SIZE_HALFWORDS) ||
+               (unitSize == UNIT_SIZE_WORDS));
+
+    curPtr = (uint8_t*)((uint32_t)start&(~(unitSize-1)));
+
+    for (i = 0; i < len; ++i) {
+        if (i % (4/unitSize) == 0)
+            bcmPrint(" ");
+        if (i % (16/unitSize) == 0)
+            bcmPrint("\n0x%08X : ", (unsigned int)curPtr);
+
+        switch (unitSize) {
+          case UNIT_SIZE_BYTES:
+          {
+            if ( bSpiRead )
+            {
+               spiFns.kerSysSlaveRead(spiDev, (unsigned long)curPtr, &temp, unitSize);
+               bcmPrint("%02X ", (unsigned char)temp);
+            }
+            else
+               
+            {
+               bcmPrint("%02X ", *curPtr);
+            }
+            break;
+          }
+          case UNIT_SIZE_HALFWORDS:
+          {
+            uint16_t *cur16Ptr = (uint16_t*)curPtr;
+            if ( bSpiRead )
+            {
+               spiFns.kerSysSlaveRead(spiDev, (unsigned long)curPtr, &temp, unitSize);
+               bcmPrint("%04X ", (unsigned short)temp);
+            }
+            else
+            {
+               bcmPrint("%04X ", *cur16Ptr);
+            }
+            break;
+          }
+          case UNIT_SIZE_WORDS:
+          {
+            uint32_t *cur32Ptr = (uint32_t*)curPtr;
+            if ( bSpiRead )
+            {
+               spiFns.kerSysSlaveRead(spiDev, (unsigned long)curPtr, &temp, unitSize);
+               bcmPrint("%08lX ", (unsigned long)temp);
+            }
+            else
+            {
+               bcmPrint("%08X ", *cur32Ptr);
+            }
+            break;
+          }
+          default:
+            break;
+        }
+
+        curPtr += unitSize;
+    }
+
+    bcmPrint("\n");
+}
+
+static bcmLogModuleInfo_t *getModInfoByName(char *name) {
+    int logId;
+
+    for(logId=0; logId<BCM_LOG_ID_MAX; logId++) {
+        if(!strcmp(modInfo[logId].name, name))
+            return &modInfo[logId];
+    }
+
+    return NULL;
+}
+
+static ssize_t log_proc_read(struct file *f,
+                             char *buf,
+                             size_t cnt,
+                             loff_t *pos) {
+    return 0;
+}
+
+static ssize_t log_proc_write(struct file *f, const char *buf, size_t cnt, loff_t *pos) {
+    int i;
+#define MAX_ARGS 5
+#define MAX_ARG_SIZE 32
+    typedef char arg_t[MAX_ARG_SIZE];
+    arg_t arg[MAX_ARGS];
+    int argc;
+    char cmd;
+    bcmLogModuleInfo_t *pModInfo;
+#define LOG_WR_KBUF_SIZE 128
+    char kbuf[LOG_WR_KBUF_SIZE];
+
+    if ((cnt > LOG_WR_KBUF_SIZE-1) || (copy_from_user(kbuf, buf, cnt) != 0))
+        return -EFAULT;
+
+    kbuf[cnt]=0;
+
+    argc = sscanf(kbuf, "%c %s %s %s %s %s", &cmd, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+    for (i=0; i<MAX_ARGS; ++i) {
+        arg[i][MAX_ARG_SIZE-1] = '\0';
+    }
+
+    BCM_LOG_INFO(BCM_LOG_ID_LOG, "WRITE: cmd: %c, argc: %d", cmd, argc);
+    for (i=0; i<argc-1; ++i) {
+        BCM_LOG_INFO(BCM_LOG_ID_LOG, "arg[%d]: %s ", i, arg[i]);
+    }
+
+    switch ( cmd ) {
+        BCM_LOGCODE(
+            case 'g':
+            {
+                bcmLogLevel_t logLevel = str2val(arg[0]);
+                if(argc == 2 && logLevel >= 0 && logLevel < BCM_LOG_LEVEL_MAX)
+                    bcmLog_setGlobalLogLevel(logLevel);
+                else
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameter '%s'\n", arg[0]);
+                break;
+            } )
+
+        BCM_LOGCODE(
+            case 'r':
+            {
+                bcmPrint ("Global Log Level : %d\n", bcmLog_getGlobalLogLevel());
+                break;
+            } )
+
+        BCM_LOGCODE(
+            case 'i':
+            {
+                if (argc == 1) {
+                  int logId;
+                  for(logId=0; logId<BCM_LOG_ID_MAX; logId++) {
+                    pModInfo = &modInfo[logId];
+                    bcmPrint("Name      : %s\n", pModInfo->name);
+                    bcmPrint("Id        : %d, Log Level : %d\n", pModInfo->logId, bcmLog_getLogLevel(pModInfo->logId));
+                  }
+                }
+                else if((argc==2) && ((pModInfo=getModInfoByName(arg[0])) != NULL)) {
+                    bcmPrint("Name      : %s\n", pModInfo->name);
+                    bcmPrint("Id        : %d, Log Level : %d\n", pModInfo->logId, bcmLog_getLogLevel(pModInfo->logId));
+                } else {
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameter '%s'\n", arg[0]);
+                }
+                break;
+            } )
+
+        BCM_LOGCODE(
+            case 'l':
+            {
+                bcmLogLevel_t logLevel = str2val(arg[1]);
+                if(argc == 3 && ((pModInfo=getModInfoByName(arg[0])) != NULL)) {
+                    if(logLevel >= 0 && logLevel < BCM_LOG_LEVEL_MAX) {
+                        bcmLog_setLogLevel( pModInfo->logId, logLevel);
+                        break;
+                    }
+                }
+
+                BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameters '%s' '%s'\n", arg[0], arg[1]);
+
+                break;
+            } )
+
+        BCM_DATADUMPCODE(
+            case 'd':
+            {
+                bcmLogDataDumpLevel_t ddLevel = str2val(arg[1]);
+                if(argc == 3 && ((pModInfo=getModInfoByName(arg[0])) != NULL)) {
+                    if(ddLevel >= 0 && ddLevel < BCM_LOG_DD_MAX) {
+                        pModInfo->ddLevel = ddLevel;
+                        break;
+                    }
+                }
+
+                BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameters '%s' '%s'\n", arg[0], arg[1]);
+
+                break;
+            } )
+
+        BCM_DATADUMPCODE(
+            case 'e':
+            {
+                if (argc == 1) {
+                  int logId;
+                  for(logId=0; logId<BCM_LOG_ID_MAX; logId++) {
+                    pModInfo = &modInfo[logId];
+                    bcmPrint("Name      : %s\n", pModInfo->name);
+                    bcmPrint("Id        : %d, DataDump Level : %d\n", pModInfo->logId, pModInfo->ddLevel);
+                  }
+                }
+                else if((argc==2) && ((pModInfo=getModInfoByName(arg[0])) != NULL)) {
+                    bcmPrint("Name      : %s\n", pModInfo->name);
+                    bcmPrint("Id        : %d, DataDump Level : %d\n", pModInfo->logId, pModInfo->ddLevel);
+                } else {
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameter '%s'\n", arg[0]);
+                }
+                break;
+            } )
+        BCM_DATADUMPCODE(
+            case 'h':
+            {
+                bcmLogDataDumpLevel_t ddLevel = str2val(arg[0]);
+                if(argc == 2 && ddLevel >= 0 && ddLevel < BCM_LOG_DD_MAX)
+                    globalDataDumpLevel = ddLevel;
+                else
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Parameter '%s'\n", arg[0]);
+                break;
+            } )
+        BCM_LOGCODE(
+            case 's':
+            {
+                bcmPrint ("Global Datadump Level : %d\n", globalDataDumpLevel);
+                break;
+            } )
+        case 'm':
+        {
+          uint32_t addr = 0;
+          uint32_t len = 1;
+          uint32_t unitSize = UNIT_SIZE_BYTES;
+          int cmdValid = 1;
+
+          if ((argc < 3) || (argc > 4)) {
+            cmdValid = 0;
+          }
+          else {
+            if (!ishex(arg[0])) {
+              cmdValid = 0;
+              BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect address: %s Must be in hex., starting with 0x\n", arg[0]);
+            }
+            else {
+              addr = str2val(arg[0]);
+            }
+
+            if (argc >= 3)
+              len = str2val(arg[1]);
+
+            if (argc == 4) {
+              switch (arg[2][0]) {
+              case 'b':
+                unitSize = UNIT_SIZE_BYTES;
+                break;
+              case 'h':
+                unitSize = UNIT_SIZE_HALFWORDS;
+                break;
+              case 'w':
+                unitSize = UNIT_SIZE_WORDS;
+                break;
+              default:
+                BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect unit size '%s', must be 'b', 'h' or 'w'\n", arg[2]);
+                cmdValid = 0;
+              }
+            }
+          }
+
+          if (cmdValid) {
+            dumpHexData((void *)addr, len, unitSize, 0);
+          } else {
+            BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Command: %s", kbuf);
+          }
+          break;
+        }
+
+        case 'w':
+        {
+          uint32_t addr = 0;
+          uint32_t val = 0;
+          uint32_t len = 1;
+          uint32_t unitSize = UNIT_SIZE_BYTES;
+          int cmdValid = 1;
+
+          if ((argc < 3) || (argc > 5)) {
+            cmdValid = 0;
+          }
+          else {
+            if (!ishex(arg[0])) {
+              cmdValid = 0;
+              BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect address: %s Must be in hex., starting with 0x\n", arg[0]);
+            }
+            else {
+              addr = str2val(arg[0]);
+            }
+
+            val = str2val(arg[1]);
+
+            if (argc >= 4) {
+              len = str2val(arg[2]);
+            }
+
+            if (argc == 5) {
+              switch (arg[3][0]) {
+              case 'b':
+                unitSize = UNIT_SIZE_BYTES;
+                break;
+              case 'h':
+                unitSize = UNIT_SIZE_HALFWORDS;
+                break;
+              case 'w':
+                unitSize = UNIT_SIZE_WORDS;
+                break;
+              default:
+                BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect unit size '%s', must be 'b', 'h' or 'w'\n", arg[3]);
+                cmdValid = 0;
+              }
+            }
+          }
+
+          if ((addr&~(unitSize-1)) != addr) {
+            BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect address alignment: 0x%08X\n", addr);
+            cmdValid = 0;
+          }
+
+          if (cmdValid) {
+            setMem((void *)addr, val, len, unitSize);
+            bcmPrint("Done.\n");
+          } else {
+            BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Command: %s", kbuf);
+          }
+          break;
+        }
+        case 'p':
+        {
+            // Generic SPI commands
+            // Should be usable with any SPI device
+            // Arguments: bus type (0 = legacy SPI, 1 = high-speed SPI), chip select (0-3), clock speed, write data (hex), length
+            uint32_t busnum = 0;
+            uint32_t chipsel = 0;
+            uint32_t clkspeed = 0;
+            uint32_t writeint = 0;
+            uint32_t length = 0;
+            unsigned char txbuf[32];
+            unsigned char rxbuf[32];
+            int      cmdValid = 1;
+
+            if (argc != 6) {
+               cmdValid = 0;
+            }
+            else if (spiFns.syncTrans == NULL) {
+               BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Attempt to use spi before registered\n");
+               cmdValid = 0;
+            }   
+            else {
+                if (!ishex(arg[3])) {
+                    cmdValid = 0;
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect write data: %s Must be in hex., starting with 0x\n", arg[3]);
+                }
+                else {
+                    // Pad arg[3] with trailing ASCII '0' characters so the hex string encodes a complete 4-byte word
+                    for (i=0;i<10;++i) {
+                       if(arg[3][i] == 0) {
+                           arg[3][i] = 0x30;
+                       }
+                    }
+                    arg[3][10] = 0;
+
+                    if ((length = str2val(arg[4])) > 32) {
+                        cmdValid = 0;
+                        BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect length: Must be <= 32\n");
+                    }
+                    else {
+                        busnum = str2val(arg[0]);
+                        chipsel  = str2val(arg[1]);
+                        clkspeed  = str2val(arg[2]);
+                        writeint  = str2val(arg[3]);
+                        memset(txbuf,0,sizeof(txbuf));
+                        memset(rxbuf,0,sizeof(txbuf));
+                        txbuf[0] = (writeint >> 24) & 0x000000FF;
+                        txbuf[1] = (writeint >> 16) & 0x000000FF;
+                        txbuf[2] = (writeint >> 8) & 0x000000FF;
+                        txbuf[3] = (writeint >> 0) & 0x000000FF;
+                        if (0 != spiFns.reserveSlave(busnum, chipsel, clkspeed))
+                        {
+                            bcmPrint ("Spi device already reserved, clkspeed parameter ignored\n");
+                        }
+                        spiFns.syncTrans(txbuf, rxbuf, 0, length, busnum, chipsel);
+                        bcmPrint ("Transmitted:\n");
+                        dumpHexData((void *)txbuf, length, UNIT_SIZE_BYTES, 0);
+                        bcmPrint ("Received:\n");
+                        dumpHexData((void *)rxbuf, length, UNIT_SIZE_BYTES, 0);
+                    }
+                }
+            }
+
+            if (0 == cmdValid) {
+              BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Command: %s", kbuf);
+            }
+            break;
+        }
+#if defined(CONFIG_BCM963268)
+        // SPI command to read/write an external BRCM chip configured as an SPI slave device (e.g. 6829 for BHR)
+        case 'u':
+        {
+            unsigned long addr = 0;
+            unsigned long val  = 0;
+            unsigned long rwCount  = 0;
+            int           unitSize = 0;
+            int           loopCount;
+            int           cmdValid = 1;
+            if (spiFns.kerSysSlaveRead == NULL)
+            {
+                BCM_LOG_ERROR(BCM_LOG_ID_LOG, "SPI slave not registered");
+                cmdValid = 0;
+            }
+            if ((0 == cmdValid) || ((argc != 5) && (argc != 6)))
+            {
+               cmdValid = 0;
+            }
+            else
+            {
+            	if( ((spiDev = str2val(arg[0])) < 0) || !ishex(arg[1]) )
+                {
+                    cmdValid = 0;
+                    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect address: %s Must be in hex., starting with 0x or device number %s\n", arg[1], arg[0]);
+                }
+                else
+                {
+                    char  trSize;
+
+                    addr    = str2val(arg[1]);
+                    trSize  = arg[2][0];
+                    rwCount = str2val(arg[3]);
+                    if ( 6 == argc )
+                       val = str2val(arg[4]);
+
+                    switch (trSize)
+                    {
+                        case 'b':
+                            unitSize = 1;
+                            break;
+                        case 'h':
+                            unitSize = 2;
+                            break;
+                        case 'w':
+                            unitSize = 4;
+                            break;
+                        default:
+                           BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Incorrect unit size '%s', must be 'b', 'h' or 'w'\n", arg[2]);
+                           cmdValid = 0;
+                           unitSize = 0;
+                           break;
+                    }
+
+                    if ( 1 == cmdValid )
+                    {
+                        if ( 5 == argc )
+                        {
+                           /* read operation */
+                           dumpHexData((void *)addr, rwCount, unitSize, 1);
+                        }
+                        else
+                        {
+                           /* write operation */
+                           for ( loopCount = 0; loopCount < rwCount; loopCount++ )
+                           {
+                               spiFns.kerSysSlaveWrite(spiDev, addr, val, unitSize);
+                               addr += unitSize;
+                           }
+                        }
+                    }
+                }
+            }
+
+            if (0 == cmdValid)
+            {
+              BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Invalid Command: %s", kbuf);
+            }
+            break;
+        }
+#endif
+        default:
+        {
+          bcmPrint("Usage:\n");
+
+          BCM_LOGCODE(
+            bcmPrint("g <level>               : Set global log level\n");
+            bcmPrint("r                       : Get global log level\n");
+            bcmPrint("l <module_name> <level> : Set the log level of a module\n");
+            bcmPrint("i [<module_name>]       : Get module information\n");
+          )
+
+          BCM_DATADUMPCODE(
+            bcmPrint("h <level>               : Set global datadump level\n");
+            bcmPrint("s                       : Get global datadump level\n");
+            bcmPrint("d <module_name> <level> : Set data dump detail level\n");
+            bcmPrint("e [<module_name>]       : Get data dump detail level\n");
+          )
+
+          bcmPrint("m <hexaddr> [<length> [<unitsize>]]: Dump a memory region\n");
+          bcmPrint("w <hexaddr> <val> [<length> [<unitsize>]]: Write to a memory region\n");
+          break;
+        }
+    }
+
+    return cnt;
+}
+
+static struct file_operations log_proc_fops = {
+    read: log_proc_read,
+    write: log_proc_write
+};
+
+
+
+/**
+ ** Helper Functions
+ **/
+
+bcmLogModuleInfo_t *bcmLog_logIsEnabled(bcmLogId_t logId, bcmLogLevel_t logLevel) {
+    BCM_LOG_CHECK_LOG_ID(logId);
+    BCM_LOG_CHECK_LOG_LEVEL(logLevel);
+
+    if(globalLogLevel >= logLevel &&
+       modInfo[logId].logLevel >= logLevel)
+        return &modInfo[logId];
+
+    return NULL;
+}
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+bcmLogModuleInfo_t *bcmLog_ddIsEnabled(bcmLogId_t logId, bcmLogDataDumpLevel_t ddLevel) {
+    BCM_LOG_CHECK_LOG_ID(logId);
+    BCM_LOG_CHECK_DD_LEVEL(ddLevel);
+
+    if(globalDataDumpLevel >= ddLevel &&
+       modInfo[logId].ddLevel >= ddLevel)
+        return &modInfo[logId];
+
+    return NULL;
+}
+#endif
+
+EXPORT_SYMBOL(bcmLog_logIsEnabled);
+
+/**
+ ** Public API
+ **/
+
+void bcmLog_setGlobalLogLevel(bcmLogLevel_t logLevel) {
+
+    bcmLogId_t logId;
+    bcmLogLevel_t oldGlobalLevel;
+    
+    BCM_LOG_CHECK_LOG_LEVEL(logLevel);
+
+    oldGlobalLevel = globalLogLevel;
+    globalLogLevel = logLevel;
+
+    for (logId = 0; logId < BCM_LOG_ID_MAX; logId++)
+    {
+        if (modInfo[logId].lcCallback)
+        {
+            bcmLogLevel_t oldLevel;
+            bcmLogLevel_t newLevel;
+
+            oldLevel = min(modInfo[logId].logLevel, oldGlobalLevel);
+            newLevel = min(modInfo[logId].logLevel, globalLogLevel);
+            if (oldLevel != newLevel)
+            {
+                modInfo[logId].lcCallback(logId, newLevel, modInfo[logId].lcCallbackCtx);
+            }
+        }
+    }
+
+    BCM_LOG_INFO(BCM_LOG_ID_LOG, "Global log level was set to %d", globalLogLevel);
+}
+
+bcmLogLevel_t bcmLog_getGlobalLogLevel(void) {
+    return globalLogLevel;
+}
+
+void bcmLog_setLogLevel(bcmLogId_t logId, bcmLogLevel_t logLevel) {
+
+    bcmLogLevel_t oldLocalLevel;
+
+    BCM_LOG_CHECK_LOG_ID(logId);
+    BCM_LOG_CHECK_LOG_LEVEL(logLevel);
+    
+    oldLocalLevel = modInfo[logId].logLevel;
+    modInfo[logId].logLevel = logLevel;
+
+    if (modInfo[logId].lcCallback)
+    {
+        bcmLogLevel_t newLevel;
+        bcmLogLevel_t oldLevel;
+       
+        oldLevel = min(oldLocalLevel, globalLogLevel);
+        newLevel = min(modInfo[logId].logLevel, globalLogLevel);   
+        
+        if (oldLevel != newLevel)
+        {
+            modInfo[logId].lcCallback(logId, newLevel, modInfo[logId].lcCallbackCtx);
+        }
+    }
+
+    BCM_LOG_INFO(BCM_LOG_ID_LOG, "Log level of %s was set to %d",
+                 modInfo[logId].name, modInfo[logId].logLevel);
+}
+
+
+void bcmLog_registerLevelChangeCallback(bcmLogId_t logId, bcmLogLevelChangeCallback_t callback, void *ctx) {
+    BCM_LOG_CHECK_LOG_ID(logId);
+
+    modInfo[logId].lcCallback = callback;
+    modInfo[logId].lcCallbackCtx = ctx;
+}
+
+
+bcmLogLevel_t bcmLog_getLogLevel(bcmLogId_t logId) {
+    BCM_LOG_CHECK_LOG_ID(logId);
+    return modInfo[logId].logLevel;
+}
+
+char *bcmLog_getModName(bcmLogId_t logId) {
+    BCM_LOG_CHECK_LOG_ID(logId);
+    return modInfo[logId].name;
+}
+
+#if defined(BCM_DATADUMP_SUPPORTED)
+/*Dummy implementation*/
+void bcm_dataDumpRegPrinter(uint32_t qId, uint32_t dataDumpId, Bcm_DataDumpPrintFunc *printFun) {
+    BCM_ASSERT(qId < MAX_NUM_QIDS);
+    BCM_ASSERT(dataDumpId < MAX_NUM_DATADUMP_IDS);
+    printFuns[qId*MAX_NUM_DATADUMP_IDS + dataDumpId] = printFun;
+}
+
+/*Dummy implementation*/
+void bcm_dataDump(uint32_t qID, uint32_t dataDumpID, const char* dataDumpName, void *ptr, uint32_t numBytes) {
+    Bcm_DataDumpPrintFunc* printFun;
+    BCM_ASSERT( qID < MAX_NUM_QIDS);
+    BCM_ASSERT( dataDumpID < MAX_NUM_DATADUMP_IDS);
+    bcmPrint("---DataDump Start---\n");
+    if (qids[qID] == 0) {
+        BCM_LOG_ERROR(BCM_LOG_ID_LOG, "DataDump qID %d not registered.\n", qID);
+    }
+    else {
+        printFun = printFuns[qID*MAX_NUM_DATADUMP_IDS + dataDumpID];
+        bcmPrint("qID: %s, DataDump ID: %s, numBytes: %d\n", qids[qID], dataDumpName, numBytes);
+        if (printFun) {
+            buf[0]=0;
+            (*printFun)(dataDumpID, ptr, numBytes, buf, PRINTBUF_SIZE);
+            bcmPrint(buf);
+        }
+        else {
+            uint32_t *data = ptr;
+            uint8_t *dataBytes;
+            int i=0;
+
+            while (i+16<=numBytes) {
+                bcmPrint("%4.4x: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", i, data[i/4], data[i/4+1], data[i/4+2], data[i/4+3]);
+                i+=16;
+            }
+
+            if (i+4<=numBytes) {
+                bcmPrint("%4.4x: ", i);
+                while (i+4<=numBytes) {
+                    bcmPrint("0x%8.8x ", data[i/4]);
+                    i+=4;
+                }
+            }
+
+            if (i< numBytes) {
+               if (i % 16 == 0) {
+                   bcmPrint("%4.4x: ", i);
+               }
+
+                dataBytes = (uint8_t*)&data[i/4];
+                bcmPrint("0x");
+                while (i<numBytes) {
+                    bcmPrint("%2.2x", *dataBytes++);
+                    ++i;
+                }
+                bcmPrint("\n");
+            }
+        }
+    }
+    bcmPrint("\n---DataDump End---\n");
+}
+
+uint32_t bcm_dataDumpCreateQ(const char* qName) {
+    int i;
+    for (i=0; i<MAX_NUM_QIDS; ++i) {
+        if (qids[i] == 0) {
+            qids[i] = qName;
+            return i;
+        }
+    }
+
+    BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Can not create dataDump queue. Max. #qids reached.\n");
+    return ~0U;
+}
+
+void bcm_dataDumpDeleteQ(uint32_t qid) {
+    BCM_ASSERT( qid < MAX_NUM_QIDS);
+    if (qids[qid] != 0) {
+        qids[qid] = 0;
+    }
+    else {
+        BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Can not delete dataDump queue. qid unknown.\n");
+    }
+}
+
+void bcmFun_reg(bcmFunId_t funId, bcmFun_t *f) {
+  BCM_ASSERT(f);
+  BCM_ASSERT(funId < BCM_FUN_ID_MAX);
+
+  funTable[funId] = f;
+}
+
+void bcmFun_dereg(bcmFunId_t funId) {
+  BCM_ASSERT(funId < BCM_FUN_ID_MAX);
+
+  funTable[funId] = 0;
+}
+
+bcmFun_t* bcmFun_get(bcmFunId_t funId) {
+  BCM_ASSERT(funId < BCM_FUN_ID_MAX);
+
+  return funTable[funId];
+}
+
+
+
+
+void __init bcmLog_init( void ) {
+    struct proc_dir_entry *p;
+
+    p = create_proc_entry(PROC_ENTRY_NAME, 0, 0);
+    if (!p) {
+        bcmPrint("bcmlog: unable to create /proc/%s!\n", PROC_ENTRY_NAME);
+        return;
+    }
+    p->proc_fops = &log_proc_fops;
+
+    bcmPrint("Broadcom Logger %s\n", VER_STR);
+	
+}
+
+void bcmLog_registerSpiCallbacks(bcmLogSpiCallbacks_t callbacks) 
+{
+    spiFns = callbacks;
+    BCM_ASSERT(spiFns.reserveSlave != NULL);
+    BCM_ASSERT(spiFns.syncTrans != NULL); 
+}
+
+
+EXPORT_SYMBOL(bcmLog_ddIsEnabled);
+EXPORT_SYMBOL(bcm_dataDumpRegPrinter);
+EXPORT_SYMBOL(bcm_dataDump);
+EXPORT_SYMBOL(bcm_dataDumpCreateQ);
+EXPORT_SYMBOL(bcm_dataDumpDeleteQ);
+EXPORT_SYMBOL(bcmFun_reg);
+EXPORT_SYMBOL(bcmFun_dereg);
+EXPORT_SYMBOL(bcmFun_get);
+
+#endif /*defined(BCM_DATADUMP_SUPPORTED)*/
+
+EXPORT_SYMBOL(bcmLog_setGlobalLogLevel);
+EXPORT_SYMBOL(bcmLog_getGlobalLogLevel);
+EXPORT_SYMBOL(bcmLog_setLogLevel);
+EXPORT_SYMBOL(bcmLog_getLogLevel);
+EXPORT_SYMBOL(bcmLog_getModName);
+EXPORT_SYMBOL(bcmLog_registerSpiCallbacks);
+EXPORT_SYMBOL(bcmLog_registerLevelChangeCallback);
+
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3e722c08fad9b5317df981d2ad3af083c03822d2..78ee16efd41707462a634a4d59656e4b56d0000c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -499,11 +499,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
 	set_cpus_allowed_ptr(current, cpumask);
 	free_cpumask_var(cpumask);
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+	migrate_disable_preempt_on();
+#else
 	migrate_disable();
+#endif
 	mycpu = smp_processor_id();
 	if (mycpu == cpu) {
 		printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+		migrate_enable_preempt_on();
+#else
 		migrate_enable();
+#endif
 		return -EBUSY;
 	}
 
@@ -556,7 +564,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 out_release:
 	cpu_unplug_done(cpu);
 out_cancel:
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON)
+	migrate_enable_preempt_on();
+#else
 	migrate_enable();
+#endif
 	cpu_hotplug_done();
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
diff --git a/kernel/fork.c b/kernel/fork.c
index ec2ff23674dba8087c1a8d130b30e41724d6e714..138f3dda1f0fdf12af72cfc3caa1654e682d939f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1156,6 +1156,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)	
+	/*CVE-2013-1858*/
+	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+		return ERR_PTR(-EINVAL);
+#endif
+
 	/*
 	 * Thread groups must share signals as well, and detached threads
 	 * can only be started up within the thread group.
@@ -1784,6 +1790,15 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	 */
 	if (unshare_flags & CLONE_NEWNS)
 		unshare_flags |= CLONE_FS;
+
+
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)	
+	/*CVE-2013-1858*/
+	if (unshare_flags & CLONE_NEWUSER)
+		unshare_flags |= CLONE_THREAD | CLONE_FS;
+#endif
+	
+
 	/*
 	 * CLONE_NEWIPC must also detach from the undolist: after switching
 	 * to a new ipc namespace, the semaphore arrays from the old
diff --git a/kernel/futex.c b/kernel/futex.c
index a6abeb7cbe29ad1fa40bb5e8a3212692adaa69be..7c14f4b8868fa5b5df636bd6f989e70b237d6966 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1255,6 +1255,15 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	u32 curval2;
 
 	if (requeue_pi) {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+		/*CVE-2014-3153*/
+		/*
+		 * Requeue PI only works on two distinct uaddrs. This
+		 * check is only valid for private futexes. See below.
+		 */
+		if (uaddr1 == uaddr2)
+			return -EINVAL;
+#endif
 		/*
 		 * requeue_pi requires a pi_state, try to allocate it now
 		 * without any locks in case it fails.
@@ -1293,6 +1302,18 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-3153*/
+	/*
+	 * The check above which compares uaddrs is not sufficient for
+	 * shared futexes. We need to compare the keys:
+	 */
+	if (requeue_pi && match_futex(&key1, &key2)) {
+		ret = -EINVAL;
+		goto out_put_keys;
+	}
+#endif
+
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
 
@@ -2320,6 +2341,18 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	if (ret)
 		goto out_key2;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-3153*/
+	/*
+	 * The check above which compares uaddrs is not sufficient for
+	 * shared futexes. We need to compare the keys:
+	 */
+	if (match_futex(&q.key, &key2)) {
+		ret = -EINVAL;
+		goto out_put_keys;
+	}
+#endif
+
 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
 	futex_wait_queue_me(hb, &q, to);
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 311c4e62d1aa0ba556f34efda4535ee62971cdd3..a2915df25b508779c76b46b8000bf2cfbe63b63b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -20,6 +20,10 @@
 
 #include "internals.h"
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -29,6 +33,10 @@
  */
 void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_BAD, irq);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	print_irq_desc(irq, desc);
 	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
@@ -129,17 +137,95 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	wake_up_process(action->thread);
 }
 
+#if defined(CONFIG_BCM_KF_HARDIRQ_CYCLES)
+/* see the description in arch/mips/bcm963xx/Kconfig */
+struct kernel_stat_shadow {
+	struct cpu_usage_stat last_cpustat;  /* cpustat when we started accumulating */
+	unsigned int start_cnt;            /**< c0 count when starting hardirq */
+	unsigned int accumulated_cnt;      /**< cycles accumulated so far */
+	unsigned int intrs;     /**< debug only, how many intrs accumulate whole tick */
+	/* we could even expand this structure to keep track of cycle counts on a
+	 * per interrupt basis and find out which interrupt is using too many
+	 * cycles.  Surprisingly, the timer interrupt seems to take about 10-15us.
+	 */
+};
+
+DEFINE_PER_CPU(struct kernel_stat_shadow, kstat_shadow);
+static unsigned int cycles_per_tick;
+extern unsigned int mips_hpt_frequency;
+
+static void start_hardirq_count(void)
+{
+	struct kernel_stat_shadow *ks_shadow = &per_cpu(kstat_shadow, smp_processor_id());
+	ks_shadow->start_cnt = read_c0_count();
+}
+
+static void stop_hardirq_count(void)
+{
+	unsigned int end_cnt = read_c0_count();
+	struct kernel_stat_shadow *ks_shadow;
+	ks_shadow = &per_cpu(kstat_shadow, smp_processor_id());
+	ks_shadow->intrs++;
+	if (end_cnt > ks_shadow->start_cnt)
+		ks_shadow->accumulated_cnt += end_cnt - ks_shadow->start_cnt;
+	else
+		//counter rolled over
+		ks_shadow->accumulated_cnt += (UINT_MAX - ks_shadow->start_cnt) + end_cnt;
+
+	if (cycles_per_tick == 0) {
+		cycles_per_tick = mips_hpt_frequency/HZ;
+	}
+
+	// See if we have accumulated a whole tick
+	if (ks_shadow->accumulated_cnt >= cycles_per_tick) {
+		struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+		cputime64_t user_delta = cpustat->user - ks_shadow->last_cpustat.user;
+		cputime64_t system_delta = cpustat->system - ks_shadow->last_cpustat.system;
+		cputime64_t softirq_delta = cpustat->softirq - ks_shadow->last_cpustat.softirq;
+		cputime64_t idle_delta = cpustat->idle - ks_shadow->last_cpustat.idle;
+
+//		printk("TICK on %d in %d intrs!\n", smp_processor_id(), ks_shadow->intrs);
+		cpustat->irq++;
+		// subtract 1 tick from the field that has incremented the most
+		if (user_delta > system_delta && user_delta > softirq_delta && user_delta > idle_delta)
+			cpustat->user--;
+		else if (system_delta > user_delta && system_delta > softirq_delta && system_delta > idle_delta)
+			cpustat->system--;
+		else if (softirq_delta > user_delta && softirq_delta > system_delta && softirq_delta > idle_delta)
+			cpustat->softirq--;
+		else
+			cpustat->idle--;
+
+		ks_shadow->accumulated_cnt -= cycles_per_tick;
+		ks_shadow->intrs = 0;
+		ks_shadow->last_cpustat = *cpustat;
+	}
+}
+#endif
+
+
 irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
 	unsigned int flags = 0, irq = desc->irq_data.irq;
 
+#if defined(CONFIG_BCM_KF_HARDIRQ_CYCLES)
+	start_hardirq_count();
+#endif
+
 	do {
 		irqreturn_t res;
 
 		trace_irq_handler_entry(irq, action);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log2(BUZZZ_KEVT_ID_IRQ_ENTRY, irq, (int)(action->handler));
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
 		res = action->handler(irq, action->dev_id);
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log2(BUZZZ_KEVT_ID_IRQ_EXIT, irq, (int)(action->handler));
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
 		trace_irq_handler_exit(irq, action, res);
 
 		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
@@ -179,6 +265,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
+
+#if defined(CONFIG_BCM_KF_HARDIRQ_CYCLES)
+	stop_hardirq_count();
+#endif
+		
 	return retval;
 }
 
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 14dd5761e8c96e1508c72499e12c93edeb45a7ab..a9dca90aaa707e083b6f1e6f6f9cf7971c038178 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -20,6 +20,10 @@
 
 #include "internals.h"
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif    /*  CONFIG_BUZZZ_KEVT */
+
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
@@ -35,6 +39,11 @@ static void resend_irqs(unsigned long arg)
 
 	while (!bitmap_empty(irqs_resend, nr_irqs)) {
 		irq = find_first_bit(irqs_resend, nr_irqs);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_RESEND, irq);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 		clear_bit(irq, irqs_resend);
 		desc = irq_to_desc(irq);
 		local_irq_disable();
@@ -55,6 +64,10 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
  */
 void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 {
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_CHECK, irq);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	/*
 	 * We do not resend level type interrupts. Level type
 	 * interrupts are resent by hardware when they are still
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d1c80fa035b782defb86edc381bf22cb1edeee0c..021b7b994df89cd295d21e5af42a5ea18ef02e01 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -16,6 +16,11 @@
 
 #include "internals.h"
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif  /*  CONFIG_BUZZZ */
+
+
 static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
@@ -65,6 +70,10 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	irqreturn_t ret = IRQ_NONE;
 	struct irqaction *action;
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_MISROUTED, irq);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	raw_spin_lock(&desc->lock);
 
 	/* PER_CPU and nested thread interrupts are never polled */
diff --git a/kernel/module.c b/kernel/module.c
index 78ac6ec1e425f8dc03fe27eccd9beee2a233d90a..e60f9f94ffbf319d842461396535109fc5342c2a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -66,6 +66,33 @@
 #define ARCH_SHF_SMALL 0
 #endif
 
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+/*
+** These are pointers to memory chunks allocated for the DSP module. The memory is allocated in
+** start_kernel() during initialization. 
+*/
+extern void *dsp_core;
+extern void *dsp_init;
+
+/* Size of the DSP core and init buffers. */
+static unsigned long dsp_core_size;
+static unsigned long dsp_init_size;
+
+/*
+ * is_dsp_module - is this the DSP module?
+ * @addr: the module to check.
+ */
+#define is_dsp_module(mod) (strcmp(mod->name, "dspdd") == 0)
+
+/*
+ * is_dsp_module_address - is this address inside the DSP module?
+ * @addr: the address to check.
+ */
+#define is_dsp_module_address(addr) \
+	(dsp_core && ((unsigned long*)addr >= (unsigned long*)dsp_core) && ((unsigned long*)addr < ((unsigned long*)dsp_core) + dsp_core_size)) || \
+	(dsp_init && ((unsigned long*)addr >= (unsigned long*)dsp_init) && ((unsigned long*)addr < ((unsigned long*)dsp_init) + dsp_init_size))
+
+#endif /* defined(CONFIG_BCM_KF_DSP) */
 /*
  * Modules' sections will be aligned on page boundaries
  * to ensure complete separation of code and data, but
@@ -788,6 +815,12 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		goto out;
 	}
 
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+    /* This check is not needed for the DSP module */
+	if ( !is_dsp_module(mod) )
+	{
+#endif
+
 	if (!list_empty(&mod->source_list)) {
 		/* Other modules depend on us: get rid of them first. */
 		ret = -EWOULDBLOCK;
@@ -825,6 +858,14 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 	if (!forced && module_refcount(mod) != 0)
 		wait_for_zero_refcount(mod);
 
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	}
+	else
+	{
+		ret = 0;
+	}
+#endif
+
 	mutex_unlock(&module_mutex);
 	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
@@ -911,7 +952,7 @@ void __module_get(struct module *module)
 		__this_cpu_inc(module->refptr->incs);
 		trace_module_get(module, _RET_IP_);
 		preempt_enable();
-	}
+}
 }
 EXPORT_SYMBOL(__module_get);
 
@@ -1820,6 +1861,10 @@ static void free_module(struct module *mod)
 
 	/* This may be NULL, but that's OK */
 	unset_module_init_ro_nx(mod);
+
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	if ( !is_dsp_module(mod) )
+#endif
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
 	percpu_modfree(mod);
@@ -1829,6 +1874,9 @@ static void free_module(struct module *mod)
 
 	/* Finally, free the core (containing the module structure) */
 	unset_module_core_ro_nx(mod);
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	if ( !is_dsp_module(mod) )
+#endif
 	module_free(mod, mod->module_core);
 
 #ifdef CONFIG_MPU
@@ -2661,6 +2709,14 @@ static int move_module(struct module *mod, struct load_info *info)
 	void *ptr;
 
 	/* Do the allocs. */
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	if ( is_dsp_module(mod) )
+	{
+		ptr = dsp_core;
+		dsp_core_size = dsp_core ? mod->core_size : 0;
+	}
+	else
+#endif
 	ptr = module_alloc_update_bounds(mod->core_size);
 	/*
 	 * The pointer to this block is stored in the module structure
@@ -2674,6 +2730,14 @@ static int move_module(struct module *mod, struct load_info *info)
 	memset(ptr, 0, mod->core_size);
 	mod->module_core = ptr;
 
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	if ( is_dsp_module(mod) )
+	{
+		ptr = dsp_init;
+		dsp_init_size = dsp_init ? mod->init_size : 0;
+	}
+	else
+#endif
 	ptr = module_alloc_update_bounds(mod->init_size);
 	/*
 	 * The pointer to this block is stored in the module structure
@@ -3048,9 +3112,17 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 		module_put(mod);
 		blocking_notifier_call_chain(&module_notify_list,
 					     MODULE_STATE_GOING, mod);
-		free_module(mod);
-		wake_up(&module_wq);
-		return ret;
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+        /* Only if not the dsp module */
+		if ( !is_dsp_module(mod) )
+		{
+#endif /* defined(CONFIG_BCM_KF_DSP) */
+			free_module(mod);
+			wake_up(&module_wq);
+			return ret;
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+		}
+#endif /* defined(CONFIG_BCM_KF_DSP) */
 	}
 	if (ret > 0) {
 		printk(KERN_WARNING
@@ -3080,11 +3152,14 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	mod->strtab = mod->core_strtab;
 #endif
 	unset_module_init_ro_nx(mod);
+#if    ! ( defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE) )
 	module_free(mod, mod->module_init);
 	mod->module_init = NULL;
 	mod->init_size = 0;
 	mod->init_ro_size = 0;
 	mod->init_text_size = 0;
+#endif
+
 	mutex_unlock(&module_mutex);
 
 	return 0;
@@ -3364,6 +3439,10 @@ static int m_show(struct seq_file *m, void *p)
 	/* Used by oprofile and other similar tools. */
 	seq_printf(m, " 0x%pK", mod->module_core);
 
+#if ( defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE) )
+	seq_printf(m, " 0x%p", mod->module_init);
+#endif
+
 	/* Taints info */
 	if (mod->taints)
 		seq_printf(m, " %s", module_flags(mod, buf));
@@ -3456,8 +3535,11 @@ bool is_module_address(unsigned long addr)
 struct module *__module_address(unsigned long addr)
 {
 	struct module *mod;
-
+#if defined(CONFIG_BCM_KF_DSP) && defined(CONFIG_BCM_BCMDSP_MODULE)
+	if ((!is_dsp_module_address(addr)) && (addr < module_addr_min || addr > module_addr_max))
+#else
 	if (addr < module_addr_min || addr > module_addr_max)
+#endif
 		return NULL;
 
 	list_for_each_entry_rcu(mod, &modules, list)
@@ -3517,7 +3599,18 @@ void print_modules(void)
 	/* Most callers should already have preempt disabled, but make sure */
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list)
+#if defined(CONFIG_BCM_KF_EXTRA_DEBUG) 
+	{
 		printk(" %s%s", mod->name, module_flags(mod, buf));
+		printk(" init_addr(%p - %p), core_addr(%p - %p)\n",
+			mod->module_init,
+			mod->module_init+mod->init_text_size,
+			mod->module_core, 
+			mod->module_core+mod->core_text_size);
+	}
+#else
+		printk(" %s%s", mod->name, module_flags(mod, buf));
+#endif
 	preempt_enable();
 	if (last_unloaded_module[0])
 		printk(" [last unloaded: %s]", last_unloaded_module);
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 7e3443fe1f48a1dbea11f7287fd3a362f1adb2a7..4d277bd4f5f17d2ac2a0bc826d74e0c9b31ffb78 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -71,6 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+#if defined(CONFIG_BCM_KF_MUTEX_FIX)
+	/*
+	 * debug_locks is set to 0 by add_taint() when a proprietary module
+	 * is loaded.  But mutex owner is recorded regardless of debug_locks
+	 * or proprietary module.  We just need to clear the owner so that
+	 * our own mutex assert code works.
+	 */
+	if (unlikely(!debug_locks))
+		mutex_clear_owner(lock);
+#endif
+
 	if (unlikely(!debug_locks))
 		return;
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index deb5461e3216905bc6b626da168bc98321e7b21f..76655061f8cb38527d7421a65a1c07416608b70d 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,16 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HAS_WAKELOCK
+	bool
+	default y
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+
+config WAKELOCK
+	bool
+	default y
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+
 config HIBERNATE_CALLBACKS
 	bool
 
@@ -103,6 +113,37 @@ config PM_SLEEP_SMP
 	select HOTPLUG
 	select HOTPLUG_CPU
 
+config PM_AUTOSLEEP
+	bool "Opportunistic sleep"
+	depends on PM_SLEEP
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	---help---
+	Allow the kernel to trigger a system transition into a global sleep
+	state automatically whenever there are no active wakeup sources.
+
+config PM_WAKELOCKS
+	bool "User space wakeup sources interface"
+	depends on PM_SLEEP
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	---help---
+	Allow user space to create, activate and deactivate wakeup source
+	objects with the help of a sysfs-based interface.
+
+config PM_WAKELOCKS_LIMIT
+	int "Maximum number of user space wakeup sources (0 = no limit)"
+	range 0 100000
+	default 100
+	depends on PM_WAKELOCKS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+
+config PM_WAKELOCKS_GC
+	bool "Garbage collector for user space wakeup sources"
+	depends on PM_WAKELOCKS
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+
 config PM_RUNTIME
 	bool "Run-time PM core functionality"
 	depends on !IA64_HP_SIM
@@ -243,3 +284,11 @@ config PM_GENERIC_DOMAINS_RUNTIME
 config CPU_PM
 	bool
 	depends on SUSPEND || CPU_IDLE
+
+config SUSPEND_TIME
+	bool "Log time spent in suspend"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	---help---
+	  Prints the time spent in suspend in the kernel log, and
+	  keeps statistics on the time spent in suspend in
+	  /sys/kernel/debug/suspend_time
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 66d808ec525234bfee1433a4df2c33ff9c399201..2cbc2b09298707ae5e84075a94e745a7f97637cf 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -9,5 +9,17 @@ obj-$(CONFIG_SUSPEND)		+= suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_PM_AUTOSLEEP)	+= autosleep.o
+obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
+obj-$(CONFIG_SUSPEND_TIME)	+= suspend_time.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_ANDROID)	+= wakelock.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/kernel/printk.c b/kernel/printk.c
index 7109711235497820576b194a85ab1c568131ac3c..6469e689416713a273a917907b9943166ec7209e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -54,7 +54,11 @@
 
 /* We show everything that is MORE important than this.. */
 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
+#if defined(CONFIG_BCM_KF_CONSOLE_LOGLEVEL)
+#define DEFAULT_CONSOLE_LOGLEVEL CONFIG_BCM_DEFAULT_CONSOLE_LOGLEVEL
+#else
 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
+#endif
 
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 
@@ -138,6 +142,13 @@ EXPORT_SYMBOL(console_set_on_cmdline);
 /* Flag: console code may call schedule() */
 static int console_may_schedule;
 
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) 
+int printk_with_interrupt_enabled = 0;
+#if defined(CONFIG_PREEMPT_RT_FULL)
+#define CC_CHECK_PRINTK_RT_FULL 1
+#endif
+#endif
+
 #ifdef CONFIG_PRINTK
 
 static char __log_buf[__LOG_BUF_LEN];
@@ -845,11 +856,20 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
 	__releases(&logbuf_lock)
 {
 	int retval = 0, wake = 0;
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && (defined(CONFIG_BCM_PRINTK_INT_ENABLED) || defined(CC_CHECK_PRINTK_RT_FULL))
+	int lock = 1;
+	if(printk_with_interrupt_enabled) {
+		lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+			!(preempt_count() & 0xffff0000);
+		lock |= oops_in_progress;
+	}
+#else
 #ifdef CONFIG_PREEMPT_RT_FULL
 	int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
 		(preempt_count() <= 1);
 #else
 	int lock = 1;
+#endif
 #endif
 
 	if (lock && console_trylock()) {
@@ -1030,6 +1050,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * actually gets the semaphore or not.
 	 */
 	if (console_trylock_for_printk(this_cpu, flags)) {
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && (defined(CONFIG_BCM_PRINTK_INT_ENABLED) || defined(CC_CHECK_PRINTK_RT_FULL))
+		if(printk_with_interrupt_enabled) {
+			raw_local_irq_restore(flags);
+			console_unlock();
+			raw_local_irq_save(flags);
+		} else {
+			console_unlock();
+		}
+#else // CONFIG_BCM_KF_PRINTK_INT_ENABLED
 #ifndef CONFIG_PREEMPT_RT_FULL
 		console_unlock();
 #else
@@ -1037,6 +1066,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 		console_unlock();
 		raw_local_irq_save(flags);
 #endif
+#endif // CONFIG_BCM_KF_PRINTK_INT_ENABLED
 	}
 
 	lockdep_on();
@@ -1360,6 +1390,18 @@ void console_unlock(void)
 		_con_start = con_start;
 		_log_end = log_end;
 		con_start = log_end;		/* Flush */
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && (defined(CONFIG_BCM_PRINTK_INT_ENABLED) || defined(CC_CHECK_PRINTK_RT_FULL))
+		if(printk_with_interrupt_enabled) {
+			raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+			call_console_drivers(_con_start, _log_end);
+		} else {
+			raw_spin_unlock(&logbuf_lock);
+			stop_critical_timings();	/* don't trace print latency */
+			call_console_drivers(_con_start, _log_end);
+			start_critical_timings();
+			local_irq_restore(flags);
+		}
+#else
 #ifndef CONFIG_PREEMPT_RT_FULL
 		raw_spin_unlock(&logbuf_lock);
 		stop_critical_timings();	/* don't trace print latency */
@@ -1369,6 +1411,7 @@ void console_unlock(void)
 #else
 		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 		call_console_drivers(_con_start, _log_end);
+#endif
 #endif
 	}
 	console_locked = 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c85f927b18d3cc8ef8b10eb276a8c2e170b0a7bd..3331ec087decb72e3da6fe94d277870c14c69e32 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -87,6 +87,17 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+/* These global variables are needed to hold prev, next tasks to log context
+ * switch as stack will be invalid after context_switch.
+ * Also per-cpu macros are not needed as these variables are accessed
+ * only inside pre-emption disabled code.
+ */
+struct task_struct *buzzz_prev[NR_CPUS];
+struct task_struct *buzzz_next[NR_CPUS];
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
 	unsigned long delta;
@@ -281,7 +292,13 @@ const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
+#if defined(CONFIG_BCM_KF_SCHED_RT) && defined(CONFIG_BCM_SCHED_RT_PERIOD)
+unsigned int sysctl_sched_rt_period = CONFIG_BCM_SCHED_RT_PERIOD;
+#else
 unsigned int sysctl_sched_rt_period = 1000000;
+#endif
+
+
 
 __read_mostly int scheduler_running;
 
@@ -289,8 +306,12 @@ __read_mostly int scheduler_running;
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
  */
+#if defined(CONFIG_BCM_KF_SCHED_RT) && defined(CONFIG_BCM_SCHED_RT_RUNTIME)
+/* RT task takes 100% of time */
+int sysctl_sched_rt_runtime = CONFIG_BCM_SCHED_RT_RUNTIME;
+#else
 int sysctl_sched_rt_runtime = 950000;
-
+#endif
 
 
 /*
@@ -390,6 +411,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 3)
+	buzzz_kevt_log0(BUZZZ_KEVT_ID_SCHED_HRTICK);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 3 */
+
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
@@ -3169,6 +3194,10 @@ void scheduler_tick(void)
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_SCHED_TICK, jiffies);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	sched_clock_tick();
 
 	raw_spin_lock(&rq->lock);
@@ -3281,6 +3310,123 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 }
 
+#if defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON) && !defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN	(1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p)	((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p)	((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+	const struct cpumask *mask;
+
+	if (likely(!p->migrate_disable))
+		return;
+
+	/* Did we already update affinity? */
+	if (unlikely(migrate_disabled_updated(p)))
+		return;
+
+	/*
+	 * Since this is always current we can get away with only locking
+	 * rq->lock, the ->cpus_allowed value can normally only be changed
+	 * while holding both p->pi_lock and rq->lock, but seeing that this
+	 * is current, we cannot actually be waking up, so all code that
+	 * relies on serialization against p->pi_lock is out of scope.
+	 *
+	 * Having rq->lock serializes us against things like
+	 * set_cpus_allowed_ptr() that can still happen concurrently.
+	 */
+	mask = tsk_cpus_allowed(p);
+
+	if (p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, mask);
+	p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+	/* Let migrate_enable know to fix things back up */
+	p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+}
+
+void migrate_disable_preempt_on(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	p->migrate_disable = 1;
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable_preempt_on);
+
+void migrate_enable_preempt_on(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (migrate_disable_count(p) > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(migrate_disabled_updated(p))) {
+		/*
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
+		 */
+		rq = this_rq();
+		raw_spin_lock_irqsave(&rq->lock, flags);
+
+		/*
+		 * Clearing migrate_disable causes tsk_cpus_allowed to
+		 * show the tasks original cpu affinity.
+		 */
+		p->migrate_disable = 0;
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	} else
+		p->migrate_disable = 0;
+
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable_preempt_on);
+
+#else
 #if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
 #define MIGRATE_DISABLE_SET_AFFIN	(1<<30) /* Can't make a negative */
 #define migrate_disabled_updated(p)	((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
@@ -3400,6 +3546,7 @@ EXPORT_SYMBOL(migrate_enable);
 static inline void update_migrate_disable(struct task_struct *p) { }
 #define migrate_disabled_updated(p)		0
 #endif
+#endif /* defined(CONFIG_BCM_KF_CPU_DOWN_PREEMPT_ON) && !defined(CONFIG_PREEMPT_RT_FULL) */
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
@@ -3488,6 +3635,11 @@ static void __sched __schedule(void)
 		rq->curr = next;
 		++*switch_count;
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_prev[cpu] = prev;
+		buzzz_next[cpu] = next;
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 		context_switch(rq, prev, next); /* unlocks the rq */
 		/*
 		 * The context switch have flipped the stack from under us
@@ -3497,6 +3649,10 @@ static void __sched __schedule(void)
 		 */
 		cpu = smp_processor_id();
 		rq = cpu_rq(cpu);
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log2(BUZZZ_KEVT_ID_SCHEDULE,
+			(uint32_t)buzzz_prev[cpu], (uint32_t)buzzz_next[cpu]);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
 	} else
 		raw_spin_unlock_irq(&rq->lock);
 
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 27afd1e8662d976a861f6eee9a1546f57178653b..bfdc7d9df9a0a805c5ddbe1c85ab0380805244a9 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -71,5 +71,9 @@ SCHED_FEAT(TTWU_QUEUE, false)
 #endif
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
+#if defined(CONFIG_BCM_KF_SCHED_RT_SHARE) && !defined(CONFIG_BCM_SCHED_RT_SHARE)
+SCHED_FEAT(RT_RUNTIME_SHARE, false)
+#else
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+#endif
 SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/signal.c b/kernel/signal.c
index 3d326518c1a78be449428dffd0cd59356ff07b0b..2ed2ba80541c721cb7c283abd28f39c91b5bdc81 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1243,7 +1243,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 }
 
 static void print_fatal_signal(struct pt_regs *regs, int signr)
-{
+{   
 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
 		current->comm, task_pid_nr(current), signr);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 2f8b10ecf75996c1bf40feb78bf2a0c1d5471489..517f813097204612ed996caf045aa633af4866e0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -316,8 +316,26 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	 * send smp call function interrupt to this cpu and as such deadlocks
 	 * can't happen.
 	 */
+#if defined(CONFIG_BCM_KF_BYPASS_SMP_WARNING)
+	/*
+	 * There is a tiny chance that some thread has locked the per-cpu
+	 * csd_data locked but has not called generic_exec_single yet,
+	 * then we come in on an interrupt and also try to lock it, but it
+	 * is already locked.  Hence the warning about deadlock.  The original
+	 * sysrq code played by the rules and deferred the calling of this
+	 * function to a workqueue, which can sleep and allow for the original
+	 * lock holder to complete.  But we want to force stack dump in the
+	 * other cpu from interrupt context instead of from workqueue because
+	 * the bottom half/scheduling on this CPU may be disabled due to
+	 * buggy software.  So pass in a magic cookie in the info variable to
+	 * bypass the warning.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress && (info != (void *)0xeeee));
+#else
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
+#endif
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 34fe1db0d5e08b491d477ecd5bbde52561e7b980..25c92ef58c4a83aa9f779a194132792424ef9605 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -31,6 +31,11 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -153,7 +158,17 @@ static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
 
 		kstat_incr_softirqs_this_cpu(vec_nr);
 		trace_softirq_entry(vec_nr);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log1(BUZZZ_KEVT_ID_SIRQ_ENTRY, (int)h->action);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 		h->action(h);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+		buzzz_kevt_log1(BUZZZ_KEVT_ID_SIRQ_EXIT, (int)h->action);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 		trace_softirq_exit(vec_nr);
 		if (unlikely(prev_count != preempt_count())) {
 			printk(KERN_ERR
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4ab11879aeb4d15baa3d8d25fa753725084ed809..9a3e45443f928fcffaa413b7c40ef797ecd13b37 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -716,6 +716,15 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &two,
 	},
+#if defined(CONFIG_BCM_KF_PRINTK_INT_ENABLED) && defined(CONFIG_BCM_PRINTK_INT_ENABLED)
+	{
+		.procname	= "printk_with_interrupt_enabled",
+		.data		= &printk_with_interrupt_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+#endif
 #endif
 	{
 		.procname	= "ngroups_max",
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
new file mode 100644
index 0000000000000000000000000000000000000000..362da653813da60e339d57c9d28262b0dc7c36cd
--- /dev/null
+++ b/kernel/sysctl_check.c
@@ -0,0 +1,160 @@
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include "../fs/xfs/xfs_sysctl.h"
+#include <linux/sunrpc/debug.h>
+#include <linux/string.h>
+#include <net/ip_vs.h>
+
+
+static int sysctl_depth(struct ctl_table *table)
+{
+	struct ctl_table *tmp;
+	int depth;
+
+	depth = 0;
+	for (tmp = table; tmp->parent; tmp = tmp->parent)
+		depth++;
+
+	return depth;
+}
+
+static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
+{
+	int i;
+
+	for (i = 0; table && i < n; i++)
+		table = table->parent;
+
+	return table;
+}
+
+
+static void sysctl_print_path(struct ctl_table *table)
+{
+	struct ctl_table *tmp;
+	int depth, i;
+	depth = sysctl_depth(table);
+	if (table->procname) {
+		for (i = depth; i >= 0; i--) {
+			tmp = sysctl_parent(table, i);
+			printk("/%s", tmp->procname?tmp->procname:"");
+		}
+	}
+	printk(" ");
+}
+
+static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
+						struct ctl_table *table)
+{
+	struct ctl_table_header *head;
+	struct ctl_table *ref, *test;
+	int depth, cur_depth;
+
+	depth = sysctl_depth(table);
+
+	for (head = __sysctl_head_next(namespaces, NULL); head;
+	     head = __sysctl_head_next(namespaces, head)) {
+		cur_depth = depth;
+		ref = head->ctl_table;
+repeat:
+		test = sysctl_parent(table, cur_depth);
+		for (; ref->procname; ref++) {
+			int match = 0;
+			if (cur_depth && !ref->child)
+				continue;
+
+			if (test->procname && ref->procname &&
+			    (strcmp(test->procname, ref->procname) == 0))
+					match++;
+
+			if (match) {
+				if (cur_depth != 0) {
+					cur_depth--;
+					ref = ref->child;
+					goto repeat;
+				}
+				goto out;
+			}
+		}
+	}
+	ref = NULL;
+out:
+	sysctl_head_finish(head);
+	return ref;
+}
+
+static void set_fail(const char **fail, struct ctl_table *table, const char *str)
+{
+	if (*fail) {
+		printk(KERN_ERR "sysctl table check failed: ");
+		sysctl_print_path(table);
+		printk(" %s\n", *fail);
+		dump_stack();
+	}
+	*fail = str;
+}
+
+static void sysctl_check_leaf(struct nsproxy *namespaces,
+				struct ctl_table *table, const char **fail)
+{
+	struct ctl_table *ref;
+
+	ref = sysctl_check_lookup(namespaces, table);
+	if (ref && (ref != table))
+		set_fail(fail, table, "Sysctl already exists");
+}
+
+int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
+{
+	int error = 0;
+	for (; table->procname; table++) {
+		const char *fail = NULL;
+
+		if (table->parent) {
+			if (!table->parent->procname)
+				set_fail(&fail, table, "Parent without procname");
+		}
+		if (table->child) {
+			if (table->data)
+				set_fail(&fail, table, "Directory with data?");
+			if (table->maxlen)
+				set_fail(&fail, table, "Directory with maxlen?");
+			if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode)
+				set_fail(&fail, table, "Writable sysctl directory");
+			if (table->proc_handler)
+				set_fail(&fail, table, "Directory with proc_handler");
+			if (table->extra1)
+				set_fail(&fail, table, "Directory with extra1");
+			if (table->extra2)
+				set_fail(&fail, table, "Directory with extra2");
+		} else {
+			if ((table->proc_handler == proc_dostring) ||
+			    (table->proc_handler == proc_dointvec) ||
+			    (table->proc_handler == proc_dointvec_minmax) ||
+			    (table->proc_handler == proc_dointvec_jiffies) ||
+			    (table->proc_handler == proc_dointvec_userhz_jiffies) ||
+			    (table->proc_handler == proc_dointvec_ms_jiffies) ||
+			    (table->proc_handler == proc_doulongvec_minmax) ||
+			    (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
+				if (!table->data)
+					set_fail(&fail, table, "No data");
+				if (!table->maxlen)
+					set_fail(&fail, table, "No maxlen");
+			}
+#ifdef CONFIG_PROC_SYSCTL
+			if (!table->proc_handler)
+				set_fail(&fail, table, "No proc_handler");
+#endif
+			sysctl_check_leaf(namespaces, table, &fail);
+		}
+		if (table->mode > 0777)
+			set_fail(&fail, table, "bogus .mode");
+		if (fail) {
+			set_fail(&fail, table, NULL);
+			error = -EINVAL;
+		}
+		if (table->child)
+			error |= sysctl_check_table(namespaces, table->child);
+	}
+	return error;
+}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c2e849efb7d3b78fa106234e4a085c13d7986c29..d73d10a14379744a8e56bbf26a5923570e55c21f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -30,16 +30,19 @@
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
+#if !defined(CONFIG_BCM_KF_KERN_WARNING) || defined(CONFIG_NO_HZ) || defined(CONFIG_HIGH_RES_TIMERS)
 /*
  * The time, when the last jiffy update happened. Protected by xtime_lock.
  */
 static ktime_t last_jiffies_update;
+#endif
 
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
 	return &per_cpu(tick_cpu_sched, cpu);
 }
 
+#if !defined(CONFIG_BCM_KF_KERN_WARNING) || defined(CONFIG_NO_HZ) || defined(CONFIG_HIGH_RES_TIMERS)
 /*
  * Must be called with interrupts disabled !
  */
@@ -101,6 +104,7 @@ static ktime_t tick_init_jiffy_update(void)
 	raw_spin_unlock(&xtime_lock);
 	return period;
 }
+#endif
 
 /*
  * NOHZ - aka dynamic tick functionality
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
index eb51d76e058a401477776e279fba95c95a315a4a..3f42652a6a3749ca2b17dbaf397e483a3f2e6667 100644
--- a/kernel/timeconst.pl
+++ b/kernel/timeconst.pl
@@ -369,10 +369,8 @@ if ($hz eq '--can') {
 		die "Usage: $0 HZ\n";
 	}
 
-	@val = @{$canned_values{$hz}};
-	if (!defined(@val)) {
-		@val = compute_values($hz);
-	}
+	$cv = $canned_values{$hz};
+	@val = defined($cv) ? @$cv : compute_values($hz);
 	output($hz, @val);
 }
 exit 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0fa92f677c9209e2ed046752752de986e34f071e..04e360694a8a5a9550254753d88190afdc3067c7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3660,7 +3660,11 @@ static const struct file_operations ftrace_graph_fops = {
 	.read		= seq_read,
 	.write		= ftrace_graph_write,
 	.release	= ftrace_graph_release,
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	.llseek		= ftrace_regex_lseek,
+#else
 	.llseek		= seq_lseek,
+#endif
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -4243,7 +4247,11 @@ static const struct file_operations ftrace_pid_fops = {
 	.open		= ftrace_pid_open,
 	.write		= ftrace_pid_write,
 	.read		= seq_read,
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	.llseek		= ftrace_regex_lseek,
+#else
 	.llseek		= seq_lseek,
+#endif	
 	.release	= ftrace_pid_release,
 };
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 394783531cbb76d9f7d9ab34ae0b6963d5d14dff..c266a6aa6f239157245b644a93d1cb710923ceab 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -68,6 +68,11 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
+#if defined(CONFIG_BCM_KF_TRACE_CUSTOM)
+#include <linux/bcm_tstamp.h>
+static u64 bcm_tstamp_rollover_base[NR_CPUS];
+static u32 bcm_tstamp_last[NR_CPUS];
+#else
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
@@ -76,9 +81,37 @@ static struct {
 	{
 		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
+#endif
+
 
 u64 notrace trace_clock_global(void)
 {
+#if defined(CONFIG_BCM_KF_TRACE_CUSTOM)
+	u64 ns;
+	u32 tstamp = bcm_tstamp_read();
+	int cpuid = smp_processor_id();
+
+	if (tstamp < bcm_tstamp_last[cpuid]) {
+		// 32 bit counter has wrapped, add to our 64bit base
+		bcm_tstamp_rollover_base[cpuid] += bcm_tstamp2ns(0xffffffff);
+	}
+	bcm_tstamp_last[cpuid] = tstamp;
+
+	/*
+	 * The base value is updated independently on each CPU, but we want
+	 * to report a consistent base from any CPU, so take the larger base.
+	 * The trace buffers seem to require increasing timestamps (no rollover),
+	 * so unfortunately I have to add all this extra code.
+	 */
+#if NR_CPUS > 1
+	ns = (bcm_tstamp_rollover_base[0] > bcm_tstamp_rollover_base[1]) ?
+	      bcm_tstamp_rollover_base[0] : bcm_tstamp_rollover_base[1];
+#else
+	ns = bcm_tstamp_rollover_base[0];
+#endif
+	ns += bcm_tstamp2ns(tstamp);
+	return ns;
+#else /* CONFIG_BCM_KF_TRACE_CUSTOM */
 	unsigned long flags;
 	int this_cpu;
 	u64 now;
@@ -112,6 +145,7 @@ u64 notrace trace_clock_global(void)
 	local_irq_restore(flags);
 
 	return now;
+#endif /* else CONFIG_BCM_KF_TRACE_CUSTOM */
 }
 
 static atomic64_t trace_counter;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e0cce07b745a93dfd5cdff081edfa838d605cd03..ac010995a78b013e383c34b67895360890c8b0e4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,6 +44,10 @@
 
 #include "workqueue_sched.h"
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif  /*  CONFIG_BUZZZ_KEVT */
+
 enum {
 	/* global_cwq flags */
 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
@@ -1856,7 +1860,17 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_WORKQ_ENTRY, (int)f);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	f(work);
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT) && (BUZZZ_KEVT_LVL >= 1)
+	buzzz_kevt_log1(BUZZZ_KEVT_ID_WORKQ_EXIT, (int)f);
+#endif  /*  CONFIG_BUZZZ_KEVT && BUZZZ_KEVT_LVL >= 1 */
+
 	/*
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
diff --git a/lib/Kconfig b/lib/Kconfig
index 4c03fe3b7b1b37c9a808b027238deefd986b1e50..ebd9e1b18cf6e1de37dfb1c6fa6ff45dd7d886ee 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -21,9 +21,11 @@ config GENERIC_FIND_FIRST_BIT
 
 config NO_GENERIC_PCI_IOPORT_MAP
 	bool
+	default y
 
 config GENERIC_PCI_IOMAP
 	bool
+	default y
 
 config GENERIC_IOMAP
 	bool
@@ -179,6 +181,12 @@ config LZO_DECOMPRESS
 
 source "lib/xz/Kconfig"
 
+config LZMA_COMPRESS
+    tristate
+
+config LZMA_DECOMPRESS
+    tristate
+
 #
 # These all provide a common interface (hence the apparent duplication with
 # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1d80795bb4732d9a90ee432241d380405ff42866..e2f47758626046c99b8837291751a6c3e1145d83 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -182,9 +182,23 @@ config LOCKUP_DETECTOR
 	  The frequency of hrtimer and NMI events and the soft and hard lockup
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
+if !BCM_KF_ANDROID
 config HARDLOCKUP_DETECTOR
 	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
 		 !HAVE_NMI_WATCHDOG
+endif
+if BCM_KF_ANDROID
+config HARDLOCKUP_DETECTOR_NMI
+	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
+		 !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+	def_bool LOCKUP_DETECTOR && SMP && !HARDLOCKUP_DETECTOR_NMI && \
+		 !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+	def_bool HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+endif
 
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
@@ -303,6 +317,16 @@ config SCHEDSTATS
 	  application, you can say N to avoid the very slight overhead
 	  this adds.
 
+# BRCM CONFIG_BCM_KF_PROC_BCM
+config BCM_SCHEDAUDIT
+	bool "Broadcom Scheduler Audit features"
+	depends on PROC_FS
+	default n
+	help
+	  If you say Y here, the /proc/<pid>/bcm_schedaudit file will be
+	  provided to allow auditing of scheduler performance.
+# BRCM END
+
 config TIMER_STATS
 	bool "Collect kernel timers statistics"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -675,9 +699,17 @@ config DEBUG_LOCKING_API_SELFTESTS
 	  The following locking APIs are covered: spinlocks, rwlocks,
 	  mutexes and rwsems.
 
+if !BCM_KF_ANDROID
 config STACKTRACE
 	bool
 	depends on STACKTRACE_SUPPORT
+endif
+if BCM_KF_ANDROID
+config STACKTRACE
+	bool "Stacktrace"
+	depends on STACKTRACE_SUPPORT
+	default y
+endif
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
diff --git a/lib/Makefile b/lib/Makefile
index a8da40717a2a6731b29ea71512beed542f44b614..961fa0edcc9eaf1fac060fe9e7e64aec36e8a7d9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,6 +2,16 @@
 # Makefile for some libs needed in the kernel.
 #
 
+ifdef CONFIG_JFFS2_ZLIB
+	CONFIG_ZLIB_INFLATE:=y
+	CONFIG_ZLIB_DEFLATE:=y
+endif
+
+ifdef CONFIG_JFFS2_LZMA
+	CONFIG_LZMA_DECOMPRESS:=y
+	CONFIG_LZMA_COMPRESS:=y
+endif
+
 ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
@@ -76,6 +86,8 @@ obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
 obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
+obj-$(CONFIG_LZMA_COMPRESS) += lzma/
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
 
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
diff --git a/lib/lzma/LzFind.c b/lib/lzma/LzFind.c
new file mode 100644
index 0000000000000000000000000000000000000000..433de25caed20d88f2637a5cc5621766787f6bb0
--- /dev/null
+++ b/lib/lzma/LzFind.c
@@ -0,0 +1,761 @@
+/* LzFind.c -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+#include "LzFind.h"
+#include "LzHash.h"
+
+#define kEmptyHashValue 0
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
+#define kNormalizeMask (~(kNormalizeStepMin - 1))
+#define kMaxHistorySize ((UInt32)3 << 30)
+
+#define kStartMaxLen 3
+
+static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+  if (!p->directInput)
+  {
+    alloc->Free(alloc, p->bufferBase);
+    p->bufferBase = 0;
+  }
+}
+
+/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */
+
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
+{
+  UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
+  if (p->directInput)
+  {
+    p->blockSize = blockSize;
+    return 1;
+  }
+  if (p->bufferBase == 0 || p->blockSize != blockSize)
+  {
+    LzInWindow_Free(p, alloc);
+    p->blockSize = blockSize;
+    p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
+  }
+  return (p->bufferBase != 0);
+}
+
+static Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+static Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
+
+static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+static void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+{
+  p->posLimit -= subValue;
+  p->pos -= subValue;
+  p->streamPos -= subValue;
+}
+
+static void MatchFinder_ReadBlock(CMatchFinder *p)
+{
+  if (p->streamEndWasReached || p->result != SZ_OK)
+    return;
+  if (p->directInput)
+  {
+    UInt32 curSize = 0xFFFFFFFF - p->streamPos;
+    if (curSize > p->directInputRem)
+      curSize = (UInt32)p->directInputRem;
+    p->directInputRem -= curSize;
+    p->streamPos += curSize;
+    if (p->directInputRem == 0)
+      p->streamEndWasReached = 1;
+    return;
+  }
+  for (;;)
+  {
+    Byte *dest = p->buffer + (p->streamPos - p->pos);
+    size_t size = (p->bufferBase + p->blockSize - dest);
+    if (size == 0)
+      return;
+    p->result = p->stream->Read(p->stream, dest, &size);
+    if (p->result != SZ_OK)
+      return;
+    if (size == 0)
+    {
+      p->streamEndWasReached = 1;
+      return;
+    }
+    p->streamPos += (UInt32)size;
+    if (p->streamPos - p->pos > p->keepSizeAfter)
+      return;
+  }
+}
+
+void MatchFinder_MoveBlock(CMatchFinder *p)
+{
+  memmove(p->bufferBase,
+    p->buffer - p->keepSizeBefore,
+    (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
+  p->buffer = p->bufferBase + p->keepSizeBefore;
+}
+
+int MatchFinder_NeedMove(CMatchFinder *p)
+{
+  if (p->directInput)
+    return 0;
+  /* if (p->streamEndWasReached) return 0; */
+  return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
+}
+
+void MatchFinder_ReadIfRequired(CMatchFinder *p)
+{
+  if (p->streamEndWasReached)
+    return;
+  if (p->keepSizeAfter >= p->streamPos - p->pos)
+    MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
+{
+  if (MatchFinder_NeedMove(p))
+    MatchFinder_MoveBlock(p);
+  MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
+{
+  p->cutValue = 32;
+  p->btMode = 1;
+  p->numHashBytes = 4;
+  p->bigHash = 0;
+}
+
+#define kCrcPoly 0xEDB88320
+
+void MatchFinder_Construct(CMatchFinder *p)
+{
+  UInt32 i;
+  p->bufferBase = 0;
+  p->directInput = 0;
+  p->hash = 0;
+  MatchFinder_SetDefaultSettings(p);
+
+  for (i = 0; i < 256; i++)
+  {
+    UInt32 r = i;
+    int j;
+    for (j = 0; j < 8; j++)
+      r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
+    p->crc[i] = r;
+  }
+}
+
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->hash);
+  p->hash = 0;
+}
+
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+  MatchFinder_FreeThisClassMemory(p, alloc);
+  LzInWindow_Free(p, alloc);
+}
+
+static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
+{
+  size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
+  if (sizeInBytes / sizeof(CLzRef) != num)
+    return 0;
+  return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
+}
+
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+    UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+    ISzAlloc *alloc)
+{
+  UInt32 sizeReserv;
+  if (historySize > kMaxHistorySize)
+  {
+    MatchFinder_Free(p, alloc);
+    return 0;
+  }
+  sizeReserv = historySize >> 1;
+  if (historySize > ((UInt32)2 << 30))
+    sizeReserv = historySize >> 2;
+  sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
+
+  p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
+  p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
+  /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */
+  if (LzInWindow_Create(p, sizeReserv, alloc))
+  {
+    UInt32 newCyclicBufferSize = historySize + 1;
+    UInt32 hs;
+    p->matchMaxLen = matchMaxLen;
+    {
+      p->fixedHashSize = 0;
+      if (p->numHashBytes == 2)
+        hs = (1 << 16) - 1;
+      else
+      {
+        hs = historySize - 1;
+        hs |= (hs >> 1);
+        hs |= (hs >> 2);
+        hs |= (hs >> 4);
+        hs |= (hs >> 8);
+        hs >>= 1;
+        hs |= 0xFFFF; /* don't change it! It's required for Deflate */
+        if (hs > (1 << 24))
+        {
+          if (p->numHashBytes == 3)
+            hs = (1 << 24) - 1;
+          else
+            hs >>= 1;
+        }
+      }
+      p->hashMask = hs;
+      hs++;
+      if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
+      if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
+      if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
+      hs += p->fixedHashSize;
+    }
+
+    {
+      UInt32 prevSize = p->hashSizeSum + p->numSons;
+      UInt32 newSize;
+      p->historySize = historySize;
+      p->hashSizeSum = hs;
+      p->cyclicBufferSize = newCyclicBufferSize;
+      p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
+      newSize = p->hashSizeSum + p->numSons;
+      if (p->hash != 0 && prevSize == newSize)
+        return 1;
+      MatchFinder_FreeThisClassMemory(p, alloc);
+      p->hash = AllocRefs(newSize, alloc);
+      if (p->hash != 0)
+      {
+        p->son = p->hash + p->hashSizeSum;
+        return 1;
+      }
+    }
+  }
+  MatchFinder_Free(p, alloc);
+  return 0;
+}
+
+static void MatchFinder_SetLimits(CMatchFinder *p)
+{
+  UInt32 limit = kMaxValForNormalize - p->pos;
+  UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
+  if (limit2 < limit)
+    limit = limit2;
+  limit2 = p->streamPos - p->pos;
+  if (limit2 <= p->keepSizeAfter)
+  {
+    if (limit2 > 0)
+      limit2 = 1;
+  }
+  else
+    limit2 -= p->keepSizeAfter;
+  if (limit2 < limit)
+    limit = limit2;
+  {
+    UInt32 lenLimit = p->streamPos - p->pos;
+    if (lenLimit > p->matchMaxLen)
+      lenLimit = p->matchMaxLen;
+    p->lenLimit = lenLimit;
+  }
+  p->posLimit = p->pos + limit;
+}
+
+static void MatchFinder_Init(CMatchFinder *p)
+{
+  UInt32 i;
+  for (i = 0; i < p->hashSizeSum; i++)
+    p->hash[i] = kEmptyHashValue;
+  p->cyclicBufferPos = 0;
+  p->buffer = p->bufferBase;
+  p->pos = p->streamPos = p->cyclicBufferSize;
+  p->result = SZ_OK;
+  p->streamEndWasReached = 0;
+  MatchFinder_ReadBlock(p);
+  MatchFinder_SetLimits(p);
+}
+
+static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
+{
+  return (p->pos - p->historySize - 1) & kNormalizeMask;
+}
+
+static void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
+{
+  UInt32 i;
+  for (i = 0; i < numItems; i++)
+  {
+    UInt32 value = items[i];
+    if (value <= subValue)
+      value = kEmptyHashValue;
+    else
+      value -= subValue;
+    items[i] = value;
+  }
+}
+
+static void MatchFinder_Normalize(CMatchFinder *p)
+{
+  UInt32 subValue = MatchFinder_GetSubValue(p);
+  MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
+  MatchFinder_ReduceOffsets(p, subValue);
+}
+
+static void MatchFinder_CheckLimits(CMatchFinder *p)
+{
+  if (p->pos == kMaxValForNormalize)
+    MatchFinder_Normalize(p);
+  if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
+    MatchFinder_CheckAndMoveAndRead(p);
+  if (p->cyclicBufferPos == p->cyclicBufferSize)
+    p->cyclicBufferPos = 0;
+  MatchFinder_SetLimits(p);
+}
+
+static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+    UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+    UInt32 *distances, UInt32 maxLen)
+{
+  son[_cyclicBufferPos] = curMatch;
+  for (;;)
+  {
+    UInt32 delta = pos - curMatch;
+    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+      return distances;
+    {
+      const Byte *pb = cur - delta;
+      curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
+      if (pb[maxLen] == cur[maxLen] && *pb == *cur)
+      {
+        UInt32 len = 0;
+        while (++len != lenLimit)
+          if (pb[len] != cur[len])
+            break;
+        if (maxLen < len)
+        {
+          *distances++ = maxLen = len;
+          *distances++ = delta - 1;
+          if (len == lenLimit)
+            return distances;
+        }
+      }
+    }
+  }
+}
+
+static UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+    UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+    UInt32 *distances, UInt32 maxLen)
+{
+  CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+  CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+  UInt32 len0 = 0, len1 = 0;
+  for (;;)
+  {
+    UInt32 delta = pos - curMatch;
+    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+    {
+      *ptr0 = *ptr1 = kEmptyHashValue;
+      return distances;
+    }
+    {
+      CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+      const Byte *pb = cur - delta;
+      UInt32 len = (len0 < len1 ? len0 : len1);
+      if (pb[len] == cur[len])
+      {
+        if (++len != lenLimit && pb[len] == cur[len])
+          while (++len != lenLimit)
+            if (pb[len] != cur[len])
+              break;
+        if (maxLen < len)
+        {
+          *distances++ = maxLen = len;
+          *distances++ = delta - 1;
+          if (len == lenLimit)
+          {
+            *ptr1 = pair[0];
+            *ptr0 = pair[1];
+            return distances;
+          }
+        }
+      }
+      if (pb[len] < cur[len])
+      {
+        *ptr1 = curMatch;
+        ptr1 = pair + 1;
+        curMatch = *ptr1;
+        len1 = len;
+      }
+      else
+      {
+        *ptr0 = curMatch;
+        ptr0 = pair;
+        curMatch = *ptr0;
+        len0 = len;
+      }
+    }
+  }
+}
+
+static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+    UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
+{
+  CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+  CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+  UInt32 len0 = 0, len1 = 0;
+  for (;;)
+  {
+    UInt32 delta = pos - curMatch;
+    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+    {
+      *ptr0 = *ptr1 = kEmptyHashValue;
+      return;
+    }
+    {
+      CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+      const Byte *pb = cur - delta;
+      UInt32 len = (len0 < len1 ? len0 : len1);
+      if (pb[len] == cur[len])
+      {
+        while (++len != lenLimit)
+          if (pb[len] != cur[len])
+            break;
+        {
+          if (len == lenLimit)
+          {
+            *ptr1 = pair[0];
+            *ptr0 = pair[1];
+            return;
+          }
+        }
+      }
+      if (pb[len] < cur[len])
+      {
+        *ptr1 = curMatch;
+        ptr1 = pair + 1;
+        curMatch = *ptr1;
+        len1 = len;
+      }
+      else
+      {
+        *ptr0 = curMatch;
+        ptr0 = pair;
+        curMatch = *ptr0;
+        len0 = len;
+      }
+    }
+  }
+}
+
+#define MOVE_POS \
+  ++p->cyclicBufferPos; \
+  p->buffer++; \
+  if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
+
+#define MOVE_POS_RET MOVE_POS return offset;
+
+static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
+
+#define GET_MATCHES_HEADER2(minLen, ret_op) \
+  UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
+  lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
+  cur = p->buffer;
+
+#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
+#define SKIP_HEADER(minLen)        GET_MATCHES_HEADER2(minLen, continue)
+
+#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
+
+#define GET_MATCHES_FOOTER(offset, maxLen) \
+  offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
+  distances + offset, maxLen) - distances); MOVE_POS_RET;
+
+#define SKIP_FOOTER \
+  SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
+
+static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 offset;
+  GET_MATCHES_HEADER(2)
+  HASH2_CALC;
+  curMatch = p->hash[hashValue];
+  p->hash[hashValue] = p->pos;
+  offset = 0;
+  GET_MATCHES_FOOTER(offset, 1)
+}
+
+static __maybe_unused UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 offset;
+  GET_MATCHES_HEADER(3)
+  HASH_ZIP_CALC;
+  curMatch = p->hash[hashValue];
+  p->hash[hashValue] = p->pos;
+  offset = 0;
+  GET_MATCHES_FOOTER(offset, 2)
+}
+
+static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 hash2Value, delta2, maxLen, offset;
+  GET_MATCHES_HEADER(3)
+
+  HASH3_CALC;
+
+  delta2 = p->pos - p->hash[hash2Value];
+  curMatch = p->hash[kFix3HashSize + hashValue];
+  
+  p->hash[hash2Value] =
+  p->hash[kFix3HashSize + hashValue] = p->pos;
+
+
+  maxLen = 2;
+  offset = 0;
+  if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+  {
+    for (; maxLen != lenLimit; maxLen++)
+      if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+        break;
+    distances[0] = maxLen;
+    distances[1] = delta2 - 1;
+    offset = 2;
+    if (maxLen == lenLimit)
+    {
+      SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+      MOVE_POS_RET;
+    }
+  }
+  GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
+  GET_MATCHES_HEADER(4)
+
+  HASH4_CALC;
+
+  delta2 = p->pos - p->hash[                hash2Value];
+  delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
+  curMatch = p->hash[kFix4HashSize + hashValue];
+  
+  p->hash[                hash2Value] =
+  p->hash[kFix3HashSize + hash3Value] =
+  p->hash[kFix4HashSize + hashValue] = p->pos;
+
+  maxLen = 1;
+  offset = 0;
+  if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+  {
+    distances[0] = maxLen = 2;
+    distances[1] = delta2 - 1;
+    offset = 2;
+  }
+  if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
+  {
+    maxLen = 3;
+    distances[offset + 1] = delta3 - 1;
+    offset += 2;
+    delta2 = delta3;
+  }
+  if (offset != 0)
+  {
+    for (; maxLen != lenLimit; maxLen++)
+      if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+        break;
+    distances[offset - 2] = maxLen;
+    if (maxLen == lenLimit)
+    {
+      SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+      MOVE_POS_RET;
+    }
+  }
+  if (maxLen < 3)
+    maxLen = 3;
+  GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
+  GET_MATCHES_HEADER(4)
+
+  HASH4_CALC;
+
+  delta2 = p->pos - p->hash[                hash2Value];
+  delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
+  curMatch = p->hash[kFix4HashSize + hashValue];
+
+  p->hash[                hash2Value] =
+  p->hash[kFix3HashSize + hash3Value] =
+  p->hash[kFix4HashSize + hashValue] = p->pos;
+
+  maxLen = 1;
+  offset = 0;
+  if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+  {
+    distances[0] = maxLen = 2;
+    distances[1] = delta2 - 1;
+    offset = 2;
+  }
+  if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
+  {
+    maxLen = 3;
+    distances[offset + 1] = delta3 - 1;
+    offset += 2;
+    delta2 = delta3;
+  }
+  if (offset != 0)
+  {
+    for (; maxLen != lenLimit; maxLen++)
+      if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+        break;
+    distances[offset - 2] = maxLen;
+    if (maxLen == lenLimit)
+    {
+      p->son[p->cyclicBufferPos] = curMatch;
+      MOVE_POS_RET;
+    }
+  }
+  if (maxLen < 3)
+    maxLen = 3;
+  offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+    distances + offset, maxLen) - (distances));
+  MOVE_POS_RET
+}
+
+static __maybe_unused UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 offset;
+  GET_MATCHES_HEADER(3)
+  HASH_ZIP_CALC;
+  curMatch = p->hash[hashValue];
+  p->hash[hashValue] = p->pos;
+  offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+    distances, 2) - (distances));
+  MOVE_POS_RET
+}
+
+static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    SKIP_HEADER(2)
+    HASH2_CALC;
+    curMatch = p->hash[hashValue];
+    p->hash[hashValue] = p->pos;
+    SKIP_FOOTER
+  }
+  while (--num != 0);
+}
+
+static __maybe_unused void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    SKIP_HEADER(3)
+    HASH_ZIP_CALC;
+    curMatch = p->hash[hashValue];
+    p->hash[hashValue] = p->pos;
+    SKIP_FOOTER
+  }
+  while (--num != 0);
+}
+
+static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    UInt32 hash2Value;
+    SKIP_HEADER(3)
+    HASH3_CALC;
+    curMatch = p->hash[kFix3HashSize + hashValue];
+    p->hash[hash2Value] =
+    p->hash[kFix3HashSize + hashValue] = p->pos;
+    SKIP_FOOTER
+  }
+  while (--num != 0);
+}
+
+static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    UInt32 hash2Value, hash3Value;
+    SKIP_HEADER(4)
+    HASH4_CALC;
+    curMatch = p->hash[kFix4HashSize + hashValue];
+    p->hash[                hash2Value] =
+    p->hash[kFix3HashSize + hash3Value] = p->pos;
+    p->hash[kFix4HashSize + hashValue] = p->pos;
+    SKIP_FOOTER
+  }
+  while (--num != 0);
+}
+
+static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    UInt32 hash2Value, hash3Value;
+    SKIP_HEADER(4)
+    HASH4_CALC;
+    curMatch = p->hash[kFix4HashSize + hashValue];
+    p->hash[                hash2Value] =
+    p->hash[kFix3HashSize + hash3Value] =
+    p->hash[kFix4HashSize + hashValue] = p->pos;
+    p->son[p->cyclicBufferPos] = curMatch;
+    MOVE_POS
+  }
+  while (--num != 0);
+}
+
+static __maybe_unused void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    SKIP_HEADER(3)
+    HASH_ZIP_CALC;
+    curMatch = p->hash[hashValue];
+    p->hash[hashValue] = p->pos;
+    p->son[p->cyclicBufferPos] = curMatch;
+    MOVE_POS
+  }
+  while (--num != 0);
+}
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
+{
+  vTable->Init = (Mf_Init_Func)MatchFinder_Init;
+  vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
+  vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
+  vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
+  if (!p->btMode)
+  {
+    vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
+    vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
+  }
+  else if (p->numHashBytes == 2)
+  {
+    vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
+    vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
+  }
+  else if (p->numHashBytes == 3)
+  {
+    vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
+    vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
+  }
+  else
+  {
+    vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
+    vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
+  }
+}
diff --git a/lib/lzma/LzmaDec.c b/lib/lzma/LzmaDec.c
new file mode 100644
index 0000000000000000000000000000000000000000..efd541881afce7e0778041e35c442bf13b3ff51e
--- /dev/null
+++ b/lib/lzma/LzmaDec.c
@@ -0,0 +1,999 @@
+/* LzmaDec.c -- LZMA Decoder
+2009-09-20 : Igor Pavlov : Public domain */
+
+#include "LzmaDec.h"
+
+#include <string.h>
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_INIT_SIZE 5
+
+#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
+#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
+#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
+  { UPDATE_0(p); i = (i + i); A0; } else \
+  { UPDATE_1(p); i = (i + i) + 1; A1; }
+#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
+
+#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
+#define TREE_DECODE(probs, limit, i) \
+  { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
+
+/* #define _LZMA_SIZE_OPT */
+
+#ifdef _LZMA_SIZE_OPT
+#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
+#else
+#define TREE_6_DECODE(probs, i) \
+  { i = 1; \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  i -= 0x40; }
+#endif
+
+#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0_CHECK range = bound;
+#define UPDATE_1_CHECK range -= bound; code -= bound;
+#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
+  { UPDATE_0_CHECK; i = (i + i); A0; } else \
+  { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
+#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
+#define TREE_DECODE_CHECK(probs, limit, i) \
+  { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#define LZMA_DIC_MIN (1 << 12)
+
+/* First LZMA-symbol is always decoded.
+And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization
+Out:
+  Result:
+    SZ_OK - OK
+    SZ_ERROR_DATA - Error
+  p->remainLen:
+    < kMatchSpecLenStart : normal remain
+    = kMatchSpecLenStart : finished
+    = kMatchSpecLenStart + 1 : Flush marker
+    = kMatchSpecLenStart + 2 : State Init Marker
+*/
+
+/* Core LZMA decode loop (see the Out: contract in the comment above).
+ * Decodes symbols until the dictionary reaches 'limit' or the input
+ * pointer reaches 'bufLimit'.  All hot state (range coder, reps,
+ * positions) is copied into locals on entry and written back on exit.
+ * The IF_BIT_0/UPDATE_x/NORMALIZE macros implement the binary range
+ * coder; they declare nothing but use ttt, bound, range, code, buf. */
+static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+  CLzmaProb *probs = p->probs;
+
+  unsigned state = p->state;
+  UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
+  /* pb/lp masks select the position/literal context bits from processedPos. */
+  unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
+  unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
+  unsigned lc = p->prop.lc;
+
+  Byte *dic = p->dic;
+  SizeT dicBufSize = p->dicBufSize;
+  SizeT dicPos = p->dicPos;
+  
+  UInt32 processedPos = p->processedPos;
+  UInt32 checkDicSize = p->checkDicSize;
+  unsigned len = 0;
+
+  const Byte *buf = p->buf;
+  UInt32 range = p->range;
+  UInt32 code = p->code;
+
+  do
+  {
+    /* Each iteration decodes one symbol: a literal byte, a new match,
+     * or a repeated-distance match. */
+    CLzmaProb *prob;
+    UInt32 bound;
+    unsigned ttt;
+    unsigned posState = processedPos & pbMask;
+
+    prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+    IF_BIT_0(prob)
+    {
+      /* --- literal --- */
+      unsigned symbol;
+      UPDATE_0(prob);
+      prob = probs + Literal;
+      /* Literal context: low lp bits of position + high lc bits of the
+       * previous dictionary byte.  Skipped for the very first byte. */
+      if (checkDicSize != 0 || processedPos != 0)
+        prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
+        (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
+
+      if (state < kNumLitStates)
+      {
+        /* Plain literal; state transition toward the "literal" states. */
+        state -= (state < 4) ? state : 3;
+        symbol = 1;
+        do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
+      }
+      else
+      {
+        /* Matched literal (previous symbol was a match): decode using
+         * the byte at distance rep0 as extra context until a bit
+         * mismatches. */
+        unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+        unsigned offs = 0x100;
+        state -= (state < 10) ? 3 : 6;
+        symbol = 1;
+        do
+        {
+          unsigned bit;
+          CLzmaProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & offs);
+          probLit = prob + offs + bit + symbol;
+          GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
+        }
+        while (symbol < 0x100);
+      }
+      dic[dicPos++] = (Byte)symbol;
+      processedPos++;
+      continue;
+    }
+    else
+    {
+      UPDATE_1(prob);
+      prob = probs + IsRep + state;
+      IF_BIT_0(prob)
+      {
+        /* --- new match: length then distance are decoded below --- */
+        UPDATE_0(prob);
+        state += kNumStates;
+        prob = probs + LenCoder;
+      }
+      else
+      {
+        /* --- rep match: reuse one of the last four distances --- */
+        UPDATE_1(prob);
+        if (checkDicSize == 0 && processedPos == 0)
+          return SZ_ERROR_DATA;
+        prob = probs + IsRepG0 + state;
+        IF_BIT_0(prob)
+        {
+          UPDATE_0(prob);
+          prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IF_BIT_0(prob)
+          {
+            /* short rep: copy a single byte from distance rep0 */
+            UPDATE_0(prob);
+            dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+            dicPos++;
+            processedPos++;
+            state = state < kNumLitStates ? 9 : 11;
+            continue;
+          }
+          UPDATE_1(prob);
+        }
+        else
+        {
+          /* rep1/rep2/rep3 selection; chosen distance moves to rep0
+           * (move-to-front of the distance history). */
+          UInt32 distance;
+          UPDATE_1(prob);
+          prob = probs + IsRepG1 + state;
+          IF_BIT_0(prob)
+          {
+            UPDATE_0(prob);
+            distance = rep1;
+          }
+          else
+          {
+            UPDATE_1(prob);
+            prob = probs + IsRepG2 + state;
+            IF_BIT_0(prob)
+            {
+              UPDATE_0(prob);
+              distance = rep2;
+            }
+            else
+            {
+              UPDATE_1(prob);
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        state = state < kNumLitStates ? 8 : 11;
+        prob = probs + RepLenCoder;
+      }
+      {
+        /* Length decoder (shared by match and rep): choice bits select
+         * the low/mid/high tree, 'offset' is the base length of that
+         * range. */
+        unsigned limit, offset;
+        CLzmaProb *probLen = prob + LenChoice;
+        IF_BIT_0(probLen)
+        {
+          UPDATE_0(probLen);
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          limit = (1 << kLenNumLowBits);
+        }
+        else
+        {
+          UPDATE_1(probLen);
+          probLen = prob + LenChoice2;
+          IF_BIT_0(probLen)
+          {
+            UPDATE_0(probLen);
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            limit = (1 << kLenNumMidBits);
+          }
+          else
+          {
+            UPDATE_1(probLen);
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            limit = (1 << kLenNumHighBits);
+          }
+        }
+        TREE_DECODE(probLen, limit, len);
+        len += offset;
+      }
+
+      if (state >= kNumStates)
+      {
+        /* New match: decode the distance.  A 6-bit pos-slot tree gives
+         * the magnitude class; slots >= kStartPosModelIndex carry extra
+         * direct and/or aligned bits. */
+        UInt32 distance;
+        prob = probs + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
+        TREE_6_DECODE(prob, distance);
+        if (distance >= kStartPosModelIndex)
+        {
+          unsigned posSlot = (unsigned)distance;
+          int numDirectBits = (int)(((distance >> 1) - 1));
+          distance = (2 | (distance & 1));
+          if (posSlot < kEndPosModelIndex)
+          {
+            /* Mid-range distances: remaining bits come from a reverse
+             * bit-tree in the SpecPos model. */
+            distance <<= numDirectBits;
+            prob = probs + SpecPos + distance - posSlot - 1;
+            {
+              UInt32 mask = 1;
+              unsigned i = 1;
+              do
+              {
+                GET_BIT2(prob + i, i, ; , distance |= mask);
+                mask <<= 1;
+              }
+              while (--numDirectBits != 0);
+            }
+          }
+          else
+          {
+            /* Large distances: raw (equiprobable) direct bits, then
+             * kNumAlignBits modelled low bits from the Align table. */
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              NORMALIZE
+              range >>= 1;
+              
+              {
+                /* Branch-free "code >= range" bit extraction. */
+                UInt32 t;
+                code -= range;
+                t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
+                distance = (distance << 1) + (t + 1);
+                code += range & t;
+              }
+              /*
+              distance <<= 1;
+              if (code >= range)
+              {
+                code -= range;
+                distance |= 1;
+              }
+              */
+            }
+            while (--numDirectBits != 0);
+            prob = probs + Align;
+            distance <<= kNumAlignBits;
+            {
+              unsigned i = 1;
+              GET_BIT2(prob + i, i, ; , distance |= 1);
+              GET_BIT2(prob + i, i, ; , distance |= 2);
+              GET_BIT2(prob + i, i, ; , distance |= 4);
+              GET_BIT2(prob + i, i, ; , distance |= 8);
+            }
+            /* Distance 0xFFFFFFFF is the end-of-stream marker; encode it
+             * in remainLen (>= kMatchSpecLenStart) and stop. */
+            if (distance == (UInt32)0xFFFFFFFF)
+            {
+              len += kMatchSpecLenStart;
+              state -= kNumStates;
+              break;
+            }
+          }
+        }
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        rep0 = distance + 1;
+        /* Reject distances reaching beyond the data decoded so far
+         * (or beyond the declared dictionary size once it is full). */
+        if (checkDicSize == 0)
+        {
+          if (distance >= processedPos)
+            return SZ_ERROR_DATA;
+        }
+        else if (distance >= checkDicSize)
+          return SZ_ERROR_DATA;
+        state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
+      }
+
+      len += kMatchMinLen;
+
+      if (limit == dicPos)
+        return SZ_ERROR_DATA;
+      {
+        /* Copy the match from the (circular) dictionary.  If it would
+         * exceed 'limit', the tail is deferred via p->remainLen and
+         * finished later by LzmaDec_WriteRem. */
+        SizeT rem = limit - dicPos;
+        unsigned curLen = ((rem < len) ? (unsigned)rem : len);
+        SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
+
+        processedPos += curLen;
+
+        len -= curLen;
+        if (pos + curLen <= dicBufSize)
+        {
+          /* Fast path: source run does not wrap; byte-by-byte copy is
+           * required because source and destination may overlap. */
+          Byte *dest = dic + dicPos;
+          ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
+          const Byte *lim = dest + curLen;
+          dicPos += curLen;
+          do
+            *(dest) = (Byte)*(dest + src);
+          while (++dest != lim);
+        }
+        else
+        {
+          /* Slow path: source wraps around the end of the dictionary. */
+          do
+          {
+            dic[dicPos++] = dic[pos];
+            if (++pos == dicBufSize)
+              pos = 0;
+          }
+          while (--curLen != 0);
+        }
+      }
+    }
+  }
+  while (dicPos < limit && buf < bufLimit);
+  NORMALIZE;
+  /* Write the local working copies back into the decoder object. */
+  p->buf = buf;
+  p->range = range;
+  p->code = code;
+  p->remainLen = len;
+  p->dicPos = dicPos;
+  p->processedPos = processedPos;
+  p->reps[0] = rep0;
+  p->reps[1] = rep1;
+  p->reps[2] = rep2;
+  p->reps[3] = rep3;
+  p->state = state;
+
+  return SZ_OK;
+}
+
+/* Flush a match tail deferred by LzmaDec_DecodeReal: copy up to
+ * p->remainLen bytes from distance rep0 into the dictionary, stopping
+ * at 'limit'.  Values of remainLen >= kMatchSpecLenStart are markers,
+ * not byte counts, and are left alone. */
+static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
+{
+  if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
+  {
+    Byte *dic = p->dic;
+    SizeT dicPos = p->dicPos;
+    SizeT dicBufSize = p->dicBufSize;
+    unsigned len = p->remainLen;
+    UInt32 rep0 = p->reps[0];
+    if (limit - dicPos < len)
+      len = (unsigned)(limit - dicPos);
+
+    /* Once we are about to pass dicSize bytes, distance checks switch
+     * from processedPos to the full dictionary size. */
+    if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
+      p->checkDicSize = p->prop.dicSize;
+
+    p->processedPos += len;
+    p->remainLen -= len;
+    while (len-- != 0)
+    {
+      dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+      dicPos++;
+    }
+    p->dicPos = dicPos;
+  }
+}
+
+/* Wrapper around LzmaDec_DecodeReal that, until the dictionary has been
+ * filled once, clamps the per-call limit so checkDicSize flips exactly
+ * when dicSize bytes have been produced, then flushes any deferred
+ * match tail.  Collapses the two end markers (flush / state-init) to
+ * plain kMatchSpecLenStart ("finished"). */
+static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+  do
+  {
+    SizeT limit2 = limit;
+    if (p->checkDicSize == 0)
+    {
+      UInt32 rem = p->prop.dicSize - p->processedPos;
+      if (limit - p->dicPos > rem)
+        limit2 = p->dicPos + rem;
+    }
+    RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
+    if (p->processedPos >= p->prop.dicSize)
+      p->checkDicSize = p->prop.dicSize;
+    LzmaDec_WriteRem(p, limit);
+  }
+  while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
+
+  if (p->remainLen > kMatchSpecLenStart)
+  {
+    p->remainLen = kMatchSpecLenStart;
+  }
+  return 0;
+}
+
+/* Outcome of LzmaDec_TryDummy's trial decode of one symbol. */
+typedef enum
+{
+  DUMMY_ERROR, /* unexpected end of input stream */
+  DUMMY_LIT,   /* a complete literal could be decoded */
+  DUMMY_MATCH, /* a complete new match could be decoded */
+  DUMMY_REP    /* a complete rep-match could be decoded */
+} ELzmaDummy;
+
+/* Trial-decode a single LZMA symbol from buf[0..inSize) WITHOUT
+ * changing decoder state: probabilities are read but never updated
+ * (UPDATE_*_CHECK touch only the local range/code), and nothing is
+ * written to the dictionary.  Returns DUMMY_ERROR if the bytes run out
+ * mid-symbol (caller must buffer more input), otherwise the kind of
+ * symbol a real decode would produce.  Mirrors the decode paths of
+ * LzmaDec_DecodeReal step for step. */
+static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
+{
+  UInt32 range = p->range;
+  UInt32 code = p->code;
+  const Byte *bufLimit = buf + inSize;
+  CLzmaProb *probs = p->probs;
+  unsigned state = p->state;
+  ELzmaDummy res;
+
+  {
+    CLzmaProb *prob;
+    UInt32 bound;
+    unsigned ttt;
+    unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
+
+    prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+    IF_BIT_0_CHECK(prob)
+    {
+      /* --- literal path --- */
+      UPDATE_0_CHECK
+
+      /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
+
+      prob = probs + Literal;
+      if (p->checkDicSize != 0 || p->processedPos != 0)
+        prob += (LZMA_LIT_SIZE *
+          ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
+          (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
+
+      if (state < kNumLitStates)
+      {
+        unsigned symbol = 1;
+        do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
+      }
+      else
+      {
+        /* matched-literal context, same as in the real decoder */
+        unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
+            ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
+        unsigned offs = 0x100;
+        unsigned symbol = 1;
+        do
+        {
+          unsigned bit;
+          CLzmaProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & offs);
+          probLit = prob + offs + bit + symbol;
+          GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
+        }
+        while (symbol < 0x100);
+      }
+      res = DUMMY_LIT;
+    }
+    else
+    {
+      unsigned len;
+      UPDATE_1_CHECK;
+
+      prob = probs + IsRep + state;
+      IF_BIT_0_CHECK(prob)
+      {
+        /* --- new match --- (state = 0 here is only a local marker:
+         * "< 4" later means "distance still to decode") */
+        UPDATE_0_CHECK;
+        state = 0;
+        prob = probs + LenCoder;
+        res = DUMMY_MATCH;
+      }
+      else
+      {
+        /* --- rep match --- */
+        UPDATE_1_CHECK;
+        res = DUMMY_REP;
+        prob = probs + IsRepG0 + state;
+        IF_BIT_0_CHECK(prob)
+        {
+          UPDATE_0_CHECK;
+          prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IF_BIT_0_CHECK(prob)
+          {
+            /* short rep has no length/distance fields: symbol complete */
+            UPDATE_0_CHECK;
+            NORMALIZE_CHECK;
+            return DUMMY_REP;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+          }
+        }
+        else
+        {
+          /* walk the rep1/rep2/rep3 selection bits */
+          UPDATE_1_CHECK;
+          prob = probs + IsRepG1 + state;
+          IF_BIT_0_CHECK(prob)
+          {
+            UPDATE_0_CHECK;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+            prob = probs + IsRepG2 + state;
+            IF_BIT_0_CHECK(prob)
+            {
+              UPDATE_0_CHECK;
+            }
+            else
+            {
+              UPDATE_1_CHECK;
+            }
+          }
+        }
+        state = kNumStates;
+        prob = probs + RepLenCoder;
+      }
+      {
+        /* length decoder, identical structure to the real decoder */
+        unsigned limit, offset;
+        CLzmaProb *probLen = prob + LenChoice;
+        IF_BIT_0_CHECK(probLen)
+        {
+          UPDATE_0_CHECK;
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          limit = 1 << kLenNumLowBits;
+        }
+        else
+        {
+          UPDATE_1_CHECK;
+          probLen = prob + LenChoice2;
+          IF_BIT_0_CHECK(probLen)
+          {
+            UPDATE_0_CHECK;
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            limit = 1 << kLenNumMidBits;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            limit = 1 << kLenNumHighBits;
+          }
+        }
+        TREE_DECODE_CHECK(probLen, limit, len);
+        len += offset;
+      }
+
+      if (state < 4)
+      {
+        /* new match: also consume the distance field */
+        unsigned posSlot;
+        prob = probs + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+            kNumPosSlotBits);
+        TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+
+          /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
+
+          if (posSlot < kEndPosModelIndex)
+          {
+            prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
+          }
+          else
+          {
+            /* raw direct bits (value discarded; only input consumption
+             * matters here) */
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              NORMALIZE_CHECK
+              range >>= 1;
+              code -= range & (((code - range) >> 31) - 1);
+              /* if (code >= range) code -= range; */
+            }
+            while (--numDirectBits != 0);
+            prob = probs + Align;
+            numDirectBits = kNumAlignBits;
+          }
+          {
+            unsigned i = 1;
+            do
+            {
+              GET_BIT_CHECK(prob + i, i);
+            }
+            while (--numDirectBits != 0);
+          }
+        }
+      }
+    }
+  }
+  NORMALIZE_CHECK;
+  return res;
+}
+
+
+/* Initialize the range coder from the first RC_INIT_SIZE (5) stream
+ * bytes: data[0] must be zero (checked by the caller), data[1..4] seed
+ * 'code' big-endian; range starts fully open. */
+static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
+{
+  p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
+  p->range = 0xFFFFFFFF;
+  p->needFlush = 0;
+}
+
+/* Arm the decoder for a (re)start: the next DecodeToDic call will
+ * re-read the 5 range-coder init bytes (needFlush) and, if requested,
+ * reset dictionary progress (initDic) and/or the probability model /
+ * state (initState, applied lazily by LzmaDec_InitStateReal). */
+static void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
+{
+  p->needFlush = 1;
+  p->remainLen = 0;
+  p->tempBufSize = 0;
+
+  if (initDic)
+  {
+    p->processedPos = 0;
+    p->checkDicSize = 0;
+    p->needInitState = 1;
+  }
+  if (initState)
+    p->needInitState = 1;
+}
+
+/* Full reset: rewind the dictionary write position and reinitialize
+ * both dictionary progress and coder state. */
+static void LzmaDec_Init(CLzmaDec *p)
+{
+  p->dicPos = 0;
+  LzmaDec_InitDicAndState(p, True, True);
+}
+
+/* Deferred state init: set every probability to 1/2 (kBitModelTotal/2),
+ * all rep distances to 1 and the state machine to 0. */
+static void LzmaDec_InitStateReal(CLzmaDec *p)
+{
+  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
+  UInt32 i;
+  CLzmaProb *probs = p->probs;
+  for (i = 0; i < numProbs; i++)
+    probs[i] = kBitModelTotal >> 1;
+  p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
+  p->state = 0;
+  p->needInitState = 0;
+}
+
+/* Streaming decode into the decoder's own dictionary buffer, up to
+ * position 'dicLimit'.  On entry *srcLen is the number of available
+ * input bytes; on return it is the number actually consumed.  'status'
+ * reports why decoding stopped (needs input / not finished / finished
+ * with or maybe-without end mark).  Partial symbols at the end of the
+ * input are held in p->tempBuf between calls; LzmaDec_TryDummy decides
+ * whether enough bytes are buffered to decode safely. */
+static SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+    ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+  SizeT inSize = *srcLen;
+  (*srcLen) = 0;
+  LzmaDec_WriteRem(p, dicLimit);
+  
+  *status = LZMA_STATUS_NOT_SPECIFIED;
+
+  while (p->remainLen != kMatchSpecLenStart)
+  {
+      int checkEndMarkNow;
+
+      /* Collect the 5 range-coder init bytes (first byte must be 0). */
+      if (p->needFlush != 0)
+      {
+        for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
+          p->tempBuf[p->tempBufSize++] = *src++;
+        if (p->tempBufSize < RC_INIT_SIZE)
+        {
+          *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+          return SZ_OK;
+        }
+        if (p->tempBuf[0] != 0)
+          return SZ_ERROR_DATA;
+
+        LzmaDec_InitRc(p, p->tempBuf);
+        p->tempBufSize = 0;
+      }
+
+      checkEndMarkNow = 0;
+      if (p->dicPos >= dicLimit)
+      {
+        /* Output space exhausted: either we are cleanly done (code==0),
+         * the caller accepts a partial result, or we must verify that
+         * the next symbol is the end marker. */
+        if (p->remainLen == 0 && p->code == 0)
+        {
+          *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
+          return SZ_OK;
+        }
+        if (finishMode == LZMA_FINISH_ANY)
+        {
+          *status = LZMA_STATUS_NOT_FINISHED;
+          return SZ_OK;
+        }
+        if (p->remainLen != 0)
+        {
+          *status = LZMA_STATUS_NOT_FINISHED;
+          return SZ_ERROR_DATA;
+        }
+        checkEndMarkNow = 1;
+      }
+
+      if (p->needInitState)
+        LzmaDec_InitStateReal(p);
+  
+      if (p->tempBufSize == 0)
+      {
+        /* Feed directly from src.  If fewer than LZMA_REQUIRED_INPUT_MAX
+         * bytes remain (or we must check for the end mark), trial-decode
+         * first; on DUMMY_ERROR stash the tail in tempBuf and ask for
+         * more input. */
+        SizeT processed;
+        const Byte *bufLimit;
+        if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+        {
+          int dummyRes = LzmaDec_TryDummy(p, src, inSize);
+          if (dummyRes == DUMMY_ERROR)
+          {
+            memcpy(p->tempBuf, src, inSize);
+            p->tempBufSize = (unsigned)inSize;
+            (*srcLen) += inSize;
+            *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+            return SZ_OK;
+          }
+          if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+          {
+            *status = LZMA_STATUS_NOT_FINISHED;
+            return SZ_ERROR_DATA;
+          }
+          bufLimit = src;
+        }
+        else
+          bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
+        p->buf = src;
+        if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
+          return SZ_ERROR_DATA;
+        processed = (SizeT)(p->buf - src);
+        (*srcLen) += processed;
+        src += processed;
+        inSize -= processed;
+      }
+      else
+      {
+        /* Previous call left a partial symbol in tempBuf: top it up from
+         * src, decode one symbol out of tempBuf, then account only for
+         * the look-ahead bytes actually consumed. */
+        unsigned rem = p->tempBufSize, lookAhead = 0;
+        while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
+          p->tempBuf[rem++] = src[lookAhead++];
+        p->tempBufSize = rem;
+        if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+        {
+          int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
+          if (dummyRes == DUMMY_ERROR)
+          {
+            (*srcLen) += lookAhead;
+            *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+            return SZ_OK;
+          }
+          if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+          {
+            *status = LZMA_STATUS_NOT_FINISHED;
+            return SZ_ERROR_DATA;
+          }
+        }
+        p->buf = p->tempBuf;
+        if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
+          return SZ_ERROR_DATA;
+        lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
+        (*srcLen) += lookAhead;
+        src += lookAhead;
+        inSize -= lookAhead;
+        p->tempBufSize = 0;
+      }
+  }
+  /* remainLen == kMatchSpecLenStart: the end marker was decoded; the
+   * range coder must have flushed to code == 0 for a valid stream. */
+  if (p->code == 0)
+    *status = LZMA_STATUS_FINISHED_WITH_MARK;
+  return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
+}
+
+/* Decode into a caller-supplied buffer by looping DecodeToDic over the
+ * circular dictionary and memcpy'ing each produced chunk out.  When the
+ * requested output exceeds the space left before the dictionary end,
+ * decoding runs to the end of the dictionary (LZMA_FINISH_ANY) and the
+ * position wraps to 0 on the next pass.  *srcLen / *destLen are
+ * in: capacities, out: amounts consumed / produced. */
+static __maybe_unused SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+  SizeT outSize = *destLen;
+  SizeT inSize = *srcLen;
+  *srcLen = *destLen = 0;
+  for (;;)
+  {
+    SizeT inSizeCur = inSize, outSizeCur, dicPos;
+    ELzmaFinishMode curFinishMode;
+    SRes res;
+    if (p->dicPos == p->dicBufSize)
+      p->dicPos = 0;
+    dicPos = p->dicPos;
+    if (outSize > p->dicBufSize - dicPos)
+    {
+      /* more output wanted than fits before the wrap point: decode to
+       * the dictionary end and come around again */
+      outSizeCur = p->dicBufSize;
+      curFinishMode = LZMA_FINISH_ANY;
+    }
+    else
+    {
+      outSizeCur = dicPos + outSize;
+      curFinishMode = finishMode;
+    }
+
+    res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
+    src += inSizeCur;
+    inSize -= inSizeCur;
+    *srcLen += inSizeCur;
+    outSizeCur = p->dicPos - dicPos;  /* bytes produced this pass */
+    memcpy(dest, p->dic + dicPos, outSizeCur);
+    dest += outSizeCur;
+    outSize -= outSizeCur;
+    *destLen += outSizeCur;
+    if (res != 0)
+      return res;
+    /* stop when no progress was made or the caller's buffer is full */
+    if (outSizeCur == 0 || outSize == 0)
+      return SZ_OK;
+  }
+}
+
+/* Release the probability array (alloc->Free is expected to tolerate
+ * NULL, as the standard SzFree does) and clear the pointer. */
+static void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->probs);
+  p->probs = 0;
+}
+
+/* Release the dictionary buffer and clear the pointer.  Must not be
+ * called when p->dic aliases a caller buffer (see LzmaDecode). */
+static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->dic);
+  p->dic = 0;
+}
+
+/* Release everything owned by the decoder object. */
+static void __maybe_unused LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
+{
+  LzmaDec_FreeProbs(p, alloc);
+  LzmaDec_FreeDict(p, alloc);
+}
+
+/* Parse the 5-byte LZMA properties header:
+ *   data[0]    = (pb * 5 + lp) * 9 + lc   (must be < 225)
+ *   data[1..4] = dictionary size, little-endian
+ * Dictionary sizes below LZMA_DIC_MIN are rounded up. */
+static SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
+{
+  UInt32 dicSize;
+  Byte d;
+  
+  if (size < LZMA_PROPS_SIZE)
+    return SZ_ERROR_UNSUPPORTED;
+  else
+    dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
+ 
+  if (dicSize < LZMA_DIC_MIN)
+    dicSize = LZMA_DIC_MIN;
+  p->dicSize = dicSize;
+
+  d = data[0];
+  if (d >= (9 * 5 * 5))
+    return SZ_ERROR_UNSUPPORTED;
+
+  /* unpack lc / lp / pb from the mixed-radix byte */
+  p->lc = d % 9;
+  d /= 9;
+  p->pb = d / 5;
+  p->lp = d % 5;
+
+  return SZ_OK;
+}
+
+/* (Re)allocate the probability array for the given properties, reusing
+ * the existing allocation when its size already matches. */
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
+{
+  UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
+  if (p->probs == 0 || numProbs != p->numProbs)
+  {
+    LzmaDec_FreeProbs(p, alloc);
+    p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
+    p->numProbs = numProbs;
+    if (p->probs == 0)
+      return SZ_ERROR_MEM;
+  }
+  return SZ_OK;
+}
+
+/* Parse properties and size the probability array accordingly; the
+ * dictionary buffer is NOT allocated here (caller supplies it, as in
+ * LzmaDecode).  Properties are committed only after both steps succeed. */
+static SRes __maybe_unused LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+{
+  CLzmaProps propNew;
+  RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+  RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+  p->prop = propNew;
+  return SZ_OK;
+}
+
+/* Parse properties and allocate BOTH the probability array and a
+ * dictionary buffer of dicSize bytes (reused when the size matches).
+ * On dictionary allocation failure the probs are freed too, leaving
+ * the decoder unallocated rather than half-built. */
+static SRes __maybe_unused LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+{
+  CLzmaProps propNew;
+  SizeT dicBufSize;
+  RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+  RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+  dicBufSize = propNew.dicSize;
+  if (p->dic == 0 || dicBufSize != p->dicBufSize)
+  {
+    LzmaDec_FreeDict(p, alloc);
+    p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
+    if (p->dic == 0)
+    {
+      LzmaDec_FreeProbs(p, alloc);
+      return SZ_ERROR_MEM;
+    }
+  }
+  p->dicBufSize = dicBufSize;
+  p->prop = propNew;
+  return SZ_OK;
+}
+
+/* One-shot decode of a whole LZMA stream.  The caller's 'dest' buffer
+ * is used directly as the dictionary (no dictionary allocation and no
+ * final copy), so only the probability array is allocated/freed here.
+ * In: *srcLen / *destLen are capacities; out: bytes consumed/produced.
+ * NEEDS_MORE_INPUT from the streaming core is promoted to
+ * SZ_ERROR_INPUT_EOF since no more input can ever arrive. */
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+    const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+    ELzmaStatus *status, ISzAlloc *alloc)
+{
+  CLzmaDec p;
+  SRes res;
+  SizeT inSize = *srcLen;
+  SizeT outSize = *destLen;
+  *srcLen = *destLen = 0;
+  if (inSize < RC_INIT_SIZE)
+    return SZ_ERROR_INPUT_EOF;
+
+  LzmaDec_Construct(&p);
+  res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
+  if (res != 0)
+    return res;
+  /* borrow the output buffer as the dictionary */
+  p.dic = dest;
+  p.dicBufSize = outSize;
+
+  LzmaDec_Init(&p);
+  
+  *srcLen = inSize;
+  res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
+
+  if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+    res = SZ_ERROR_INPUT_EOF;
+
+  (*destLen) = p.dicPos;
+  /* p.dic aliases 'dest', so only the probs are freed */
+  LzmaDec_FreeProbs(&p, alloc);
+  return res;
+}
diff --git a/lib/lzma/LzmaEnc.c b/lib/lzma/LzmaEnc.c
new file mode 100644
index 0000000000000000000000000000000000000000..17afd01cc70faa922eb6576e960ebaa6904b5fd8
--- /dev/null
+++ b/lib/lzma/LzmaEnc.c
@@ -0,0 +1,2271 @@
+/* LzmaEnc.c -- LZMA Encoder
+2009-11-24 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+/* #define SHOW_STAT */
+/* #define SHOW_STAT2 */
+
+#if defined(SHOW_STAT) || defined(SHOW_STAT2)
+#include <stdio.h>
+#endif
+
+#include "LzmaEnc.h"
+
+/* disable MT */
+#define _7ZIP_ST
+
+#include "LzFind.h"
+#ifndef _7ZIP_ST
+#include "LzFindMt.h"
+#endif
+
+#ifdef SHOW_STAT
+static int ttt = 0;
+#endif
+
+#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
+
+#define kBlockSize (9 << 10)
+#define kUnpackBlockSize (1 << 18)
+#define kMatchArraySize (1 << 21)
+#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
+
+#define kNumMaxDirectBits (31)
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+#define kProbInitValue (kBitModelTotal >> 1)
+
+#define kNumMoveReducingBits 4
+#define kNumBitPriceShiftBits 4
+#define kBitPrice (1 << kNumBitPriceShiftBits)
+
+void LzmaEncProps_Init(CLzmaEncProps *p)
+{
+  p->level = 5;
+  p->dictSize = p->mc = 0;
+  p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
+  p->writeEndMark = 0;
+}
+
+static void LzmaEncProps_Normalize(CLzmaEncProps *p)
+{
+  int level = p->level;
+  if (level < 0) level = 5;
+  p->level = level;
+  if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
+  if (p->lc < 0) p->lc = 3;
+  if (p->lp < 0) p->lp = 0;
+  if (p->pb < 0) p->pb = 2;
+  if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
+  if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
+  if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
+  if (p->numHashBytes < 0) p->numHashBytes = 4;
+  if (p->mc == 0)  p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
+  if (p->numThreads < 0)
+    p->numThreads =
+      #ifndef _7ZIP_ST
+      ((p->btMode && p->algo) ? 2 : 1);
+      #else
+      1;
+      #endif
+}
+
+static UInt32 __maybe_unused LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+{
+  CLzmaEncProps props = *props2;
+  LzmaEncProps_Normalize(&props);
+  return props.dictSize;
+}
+
+/* #define LZMA_LOG_BSR */
+/* Define it for Intel's CPU */
+
+
+#ifdef LZMA_LOG_BSR
+
+#define kDicLogSizeMaxCompress 30
+
+#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
+
+static UInt32 GetPosSlot1(UInt32 pos)
+{
+  UInt32 res;
+  BSR2_RET(pos, res);
+  return res;
+}
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
+
+#else
+
+#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
+#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
+static void LzmaEnc_FastPosInit(Byte *g_FastPos)
+{
+  int c = 2, slotFast;
+  g_FastPos[0] = 0;
+  g_FastPos[1] = 1;
+  
+  for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
+  {
+    UInt32 k = (1 << ((slotFast >> 1) - 1));
+    UInt32 j;
+    for (j = 0; j < k; j++, c++)
+      g_FastPos[c] = (Byte)slotFast;
+  }
+}
+
+#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
+  (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
+  res = p->g_FastPos[pos >> i] + (i * 2); }
+/*
+#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
+  p->g_FastPos[pos >> 6] + 12 : \
+  p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
+*/
+
+#define GetPosSlot1(pos) p->g_FastPos[pos]
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
+
+#endif
+
+
+#define LZMA_NUM_REPS 4
+
+typedef unsigned CState;
+
+typedef struct
+{
+  UInt32 price;
+
+  CState state;
+  int prev1IsChar;
+  int prev2;
+
+  UInt32 posPrev2;
+  UInt32 backPrev2;
+
+  UInt32 posPrev;
+  UInt32 backPrev;
+  UInt32 backs[LZMA_NUM_REPS];
+} COptimal;
+
+#define kNumOpts (1 << 12)
+
+#define kNumLenToPosStates 4
+#define kNumPosSlotBits 6
+#define kDicLogSizeMin 0
+#define kDicLogSizeMax 32
+#define kDistTableSizeMax (kDicLogSizeMax * 2)
+
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+#define kAlignMask (kAlignTableSize - 1)
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
+
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+#define LZMA_PB_MAX 4
+#define LZMA_LC_MAX 8
+#define LZMA_LP_MAX 4
+
+#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
+
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define LZMA_MATCH_LEN_MIN 2
+#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
+
+#define kNumStates 12
+
+typedef struct
+{
+  CLzmaProb choice;
+  CLzmaProb choice2;
+  CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
+  CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
+  CLzmaProb high[kLenNumHighSymbols];
+} CLenEnc;
+
+typedef struct
+{
+  CLenEnc p;
+  UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
+  UInt32 tableSize;
+  UInt32 counters[LZMA_NUM_PB_STATES_MAX];
+} CLenPriceEnc;
+
+typedef struct
+{
+  UInt32 range;
+  Byte cache;
+  UInt64 low;
+  UInt64 cacheSize;
+  Byte *buf;
+  Byte *bufLim;
+  Byte *bufBase;
+  ISeqOutStream *outStream;
+  UInt64 processed;
+  SRes res;
+} CRangeEnc;
+
+typedef struct
+{
+  CLzmaProb *litProbs;
+
+  CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+  CLzmaProb isRep[kNumStates];
+  CLzmaProb isRepG0[kNumStates];
+  CLzmaProb isRepG1[kNumStates];
+  CLzmaProb isRepG2[kNumStates];
+  CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+  CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+  CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+  CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+  
+  CLenPriceEnc lenEnc;
+  CLenPriceEnc repLenEnc;
+
+  UInt32 reps[LZMA_NUM_REPS];
+  UInt32 state;
+} CSaveState;
+
+typedef struct
+{
+  IMatchFinder matchFinder;
+  void *matchFinderObj;
+
+  #ifndef _7ZIP_ST
+  Bool mtMode;
+  CMatchFinderMt matchFinderMt;
+  #endif
+
+  CMatchFinder matchFinderBase;
+
+  #ifndef _7ZIP_ST
+  Byte pad[128];
+  #endif
+  
+  UInt32 optimumEndIndex;
+  UInt32 optimumCurrentIndex;
+
+  UInt32 longestMatchLength;
+  UInt32 numPairs;
+  UInt32 numAvail;
+  COptimal opt[kNumOpts];
+  
+  #ifndef LZMA_LOG_BSR
+  Byte g_FastPos[1 << kNumLogBits];
+  #endif
+
+  UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
+  UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
+  UInt32 numFastBytes;
+  UInt32 additionalOffset;
+  UInt32 reps[LZMA_NUM_REPS];
+  UInt32 state;
+
+  UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
+  UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
+  UInt32 alignPrices[kAlignTableSize];
+  UInt32 alignPriceCount;
+
+  UInt32 distTableSize;
+
+  unsigned lc, lp, pb;
+  unsigned lpMask, pbMask;
+
+  CLzmaProb *litProbs;
+
+  CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+  CLzmaProb isRep[kNumStates];
+  CLzmaProb isRepG0[kNumStates];
+  CLzmaProb isRepG1[kNumStates];
+  CLzmaProb isRepG2[kNumStates];
+  CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+  CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+  CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+  CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+  
+  CLenPriceEnc lenEnc;
+  CLenPriceEnc repLenEnc;
+
+  unsigned lclp;
+
+  Bool fastMode;
+  
+  CRangeEnc rc;
+
+  Bool writeEndMark;
+  UInt64 nowPos64;
+  UInt32 matchPriceCount;
+  Bool finished;
+  Bool multiThread;
+
+  SRes result;
+  UInt32 dictSize;
+  UInt32 matchFinderCycles;
+
+  int needInit;
+
+  CSaveState saveState;
+} CLzmaEnc;
+
+static void __maybe_unused LzmaEnc_SaveState(CLzmaEncHandle pp)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  CSaveState *dest = &p->saveState;
+  int i;
+  dest->lenEnc = p->lenEnc;
+  dest->repLenEnc = p->repLenEnc;
+  dest->state = p->state;
+
+  for (i = 0; i < kNumStates; i++)
+  {
+    memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
+    memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
+  }
+  for (i = 0; i < kNumLenToPosStates; i++)
+    memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
+  memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
+  memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
+  memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
+  memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
+  memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
+  memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
+  memcpy(dest->reps, p->reps, sizeof(p->reps));
+  memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
+}
+
+static void __maybe_unused LzmaEnc_RestoreState(CLzmaEncHandle pp)
+{
+  CLzmaEnc *dest = (CLzmaEnc *)pp;
+  const CSaveState *p = &dest->saveState;
+  int i;
+  dest->lenEnc = p->lenEnc;
+  dest->repLenEnc = p->repLenEnc;
+  dest->state = p->state;
+
+  for (i = 0; i < kNumStates; i++)
+  {
+    memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
+    memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
+  }
+  for (i = 0; i < kNumLenToPosStates; i++)
+    memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
+  memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
+  memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
+  memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
+  memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
+  memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
+  memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
+  memcpy(dest->reps, p->reps, sizeof(p->reps));
+  memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
+}
+
+SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  CLzmaEncProps props = *props2;
+  LzmaEncProps_Normalize(&props);
+
+  if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
+      props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
+    return SZ_ERROR_PARAM;
+  p->dictSize = props.dictSize;
+  p->matchFinderCycles = props.mc;
+  {
+    unsigned fb = props.fb;
+    if (fb < 5)
+      fb = 5;
+    if (fb > LZMA_MATCH_LEN_MAX)
+      fb = LZMA_MATCH_LEN_MAX;
+    p->numFastBytes = fb;
+  }
+  p->lc = props.lc;
+  p->lp = props.lp;
+  p->pb = props.pb;
+  p->fastMode = (props.algo == 0);
+  p->matchFinderBase.btMode = props.btMode;
+  {
+    UInt32 numHashBytes = 4;
+    if (props.btMode)
+    {
+      if (props.numHashBytes < 2)
+        numHashBytes = 2;
+      else if (props.numHashBytes < 4)
+        numHashBytes = props.numHashBytes;
+    }
+    p->matchFinderBase.numHashBytes = numHashBytes;
+  }
+
+  p->matchFinderBase.cutValue = props.mc;
+
+  p->writeEndMark = props.writeEndMark;
+
+  #ifndef _7ZIP_ST
+  /*
+  if (newMultiThread != _multiThread)
+  {
+    ReleaseMatchFinder();
+    _multiThread = newMultiThread;
+  }
+  */
+  p->multiThread = (props.numThreads > 1);
+  #endif
+
+  return SZ_OK;
+}
+
+static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4,  5,  6,   4, 5};
+static const int kMatchNextStates[kNumStates]   = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+static const int kRepNextStates[kNumStates]     = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+
+#define IsCharState(s) ((s) < 7)
+
+#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
+
+#define kInfinityPrice (1 << 30)
+
+static void RangeEnc_Construct(CRangeEnc *p)
+{
+  p->outStream = 0;
+  p->bufBase = 0;
+}
+
+#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
+
+#define RC_BUF_SIZE (1 << 16)
+static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
+{
+  if (p->bufBase == 0)
+  {
+    p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
+    if (p->bufBase == 0)
+      return 0;
+    p->bufLim = p->bufBase + RC_BUF_SIZE;
+  }
+  return 1;
+}
+
+static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->bufBase);
+  p->bufBase = 0;
+}
+
+static void RangeEnc_Init(CRangeEnc *p)
+{
+  /* Stream.Init(); */
+  p->low = 0;
+  p->range = 0xFFFFFFFF;
+  p->cacheSize = 1;
+  p->cache = 0;
+
+  p->buf = p->bufBase;
+
+  p->processed = 0;
+  p->res = SZ_OK;
+}
+
+static void RangeEnc_FlushStream(CRangeEnc *p)
+{
+  size_t num;
+  if (p->res != SZ_OK)
+    return;
+  num = p->buf - p->bufBase;
+  if (num != p->outStream->Write(p->outStream, p->bufBase, num))
+    p->res = SZ_ERROR_WRITE;
+  p->processed += num;
+  p->buf = p->bufBase;
+}
+
+static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
+{
+  if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
+  {
+    Byte temp = p->cache;
+    do
+    {
+      Byte *buf = p->buf;
+      *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
+      p->buf = buf;
+      if (buf == p->bufLim)
+        RangeEnc_FlushStream(p);
+      temp = 0xFF;
+    }
+    while (--p->cacheSize != 0);
+    p->cache = (Byte)((UInt32)p->low >> 24);
+  }
+  p->cacheSize++;
+  p->low = (UInt32)p->low << 8;
+}
+
+static void RangeEnc_FlushData(CRangeEnc *p)
+{
+  int i;
+  for (i = 0; i < 5; i++)
+    RangeEnc_ShiftLow(p);
+}
+
+static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
+{
+  do
+  {
+    p->range >>= 1;
+    p->low += p->range & (0 - ((value >> --numBits) & 1));
+    if (p->range < kTopValue)
+    {
+      p->range <<= 8;
+      RangeEnc_ShiftLow(p);
+    }
+  }
+  while (numBits != 0);
+}
+
+static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
+{
+  UInt32 ttt = *prob;
+  UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
+  if (symbol == 0)
+  {
+    p->range = newBound;
+    ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
+  }
+  else
+  {
+    p->low += newBound;
+    p->range -= newBound;
+    ttt -= ttt >> kNumMoveBits;
+  }
+  *prob = (CLzmaProb)ttt;
+  if (p->range < kTopValue)
+  {
+    p->range <<= 8;
+    RangeEnc_ShiftLow(p);
+  }
+}
+
+static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
+{
+  symbol |= 0x100;
+  do
+  {
+    RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
+    symbol <<= 1;
+  }
+  while (symbol < 0x10000);
+}
+
+static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
+{
+  UInt32 offs = 0x100;
+  symbol |= 0x100;
+  do
+  {
+    matchByte <<= 1;
+    RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
+    symbol <<= 1;
+    offs &= ~(matchByte ^ symbol);
+  }
+  while (symbol < 0x10000);
+}
+
+static void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
+{
+  UInt32 i;
+  for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
+  {
+    const int kCyclesBits = kNumBitPriceShiftBits;
+    UInt32 w = i;
+    UInt32 bitCount = 0;
+    int j;
+    for (j = 0; j < kCyclesBits; j++)
+    {
+      w = w * w;
+      bitCount <<= 1;
+      while (w >= ((UInt32)1 << 16))
+      {
+        w >>= 1;
+        bitCount++;
+      }
+    }
+    ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
+  }
+}
+
+
+#define GET_PRICE(prob, symbol) \
+  p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICEa(prob, symbol) \
+  ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  symbol |= 0x100;
+  do
+  {
+    price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
+    symbol <<= 1;
+  }
+  while (symbol < 0x10000);
+  return price;
+}
+
+static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  UInt32 offs = 0x100;
+  symbol |= 0x100;
+  do
+  {
+    matchByte <<= 1;
+    price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
+    symbol <<= 1;
+    offs &= ~(matchByte ^ symbol);
+  }
+  while (symbol < 0x10000);
+  return price;
+}
+
+
+static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+  UInt32 m = 1;
+  int i;
+  for (i = numBitLevels; i != 0;)
+  {
+    UInt32 bit;
+    i--;
+    bit = (symbol >> i) & 1;
+    RangeEnc_EncodeBit(rc, probs + m, bit);
+    m = (m << 1) | bit;
+  }
+}
+
+static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+  UInt32 m = 1;
+  int i;
+  for (i = 0; i < numBitLevels; i++)
+  {
+    UInt32 bit = symbol & 1;
+    RangeEnc_EncodeBit(rc, probs + m, bit);
+    m = (m << 1) | bit;
+    symbol >>= 1;
+  }
+}
+
+static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  symbol |= (1 << numBitLevels);
+  while (symbol != 1)
+  {
+    price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
+    symbol >>= 1;
+  }
+  return price;
+}
+
+static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  UInt32 m = 1;
+  int i;
+  for (i = numBitLevels; i != 0; i--)
+  {
+    UInt32 bit = symbol & 1;
+    symbol >>= 1;
+    price += GET_PRICEa(probs[m], bit);
+    m = (m << 1) | bit;
+  }
+  return price;
+}
+
+
+static void LenEnc_Init(CLenEnc *p)
+{
+  unsigned i;
+  p->choice = p->choice2 = kProbInitValue;
+  for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
+    p->low[i] = kProbInitValue;
+  for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
+    p->mid[i] = kProbInitValue;
+  for (i = 0; i < kLenNumHighSymbols; i++)
+    p->high[i] = kProbInitValue;
+}
+
+static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
+{
+  if (symbol < kLenNumLowSymbols)
+  {
+    RangeEnc_EncodeBit(rc, &p->choice, 0);
+    RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
+  }
+  else
+  {
+    RangeEnc_EncodeBit(rc, &p->choice, 1);
+    if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
+    {
+      RangeEnc_EncodeBit(rc, &p->choice2, 0);
+      RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
+    }
+    else
+    {
+      RangeEnc_EncodeBit(rc, &p->choice2, 1);
+      RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
+    }
+  }
+}
+
+static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
+{
+  UInt32 a0 = GET_PRICE_0a(p->choice);
+  UInt32 a1 = GET_PRICE_1a(p->choice);
+  UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
+  UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
+  UInt32 i = 0;
+  for (i = 0; i < kLenNumLowSymbols; i++)
+  {
+    if (i >= numSymbols)
+      return;
+    prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
+  }
+  for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
+  {
+    if (i >= numSymbols)
+      return;
+    prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
+  }
+  for (; i < numSymbols; i++)
+    prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
+}
+
+static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
+{
+  LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
+  p->counters[posState] = p->tableSize;
+}
+
+static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
+{
+  UInt32 posState;
+  for (posState = 0; posState < numPosStates; posState++)
+    LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
+{
+  LenEnc_Encode(&p->p, rc, symbol, posState);
+  if (updatePrice)
+    if (--p->counters[posState] == 0)
+      LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+
+
+
+static void MovePos(CLzmaEnc *p, UInt32 num)
+{
+  #ifdef SHOW_STAT
+  ttt += num;
+  printf("\n MovePos %d", num);
+  #endif
+  if (num != 0)
+  {
+    p->additionalOffset += num;
+    p->matchFinder.Skip(p->matchFinderObj, num);
+  }
+}
+
+static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
+{
+  UInt32 lenRes = 0, numPairs;
+  p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+  numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
+  #ifdef SHOW_STAT
+  printf("\n i = %d numPairs = %d    ", ttt, numPairs / 2);
+  ttt++;
+  {
+    UInt32 i;
+    for (i = 0; i < numPairs; i += 2)
+      printf("%2d %6d   | ", p->matches[i], p->matches[i + 1]);
+  }
+  #endif
+  if (numPairs > 0)
+  {
+    lenRes = p->matches[numPairs - 2];
+    if (lenRes == p->numFastBytes)
+    {
+      const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+      UInt32 distance = p->matches[numPairs - 1] + 1;
+      UInt32 numAvail = p->numAvail;
+      if (numAvail > LZMA_MATCH_LEN_MAX)
+        numAvail = LZMA_MATCH_LEN_MAX;
+      {
+        const Byte *pby2 = pby - distance;
+        for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
+      }
+    }
+  }
+  p->additionalOffset++;
+  *numDistancePairsRes = numPairs;
+  return lenRes;
+}
+
+
+#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
+#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
+#define IsShortRep(p) ((p)->backPrev == 0)
+
+static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
+{
+  return
+    GET_PRICE_0(p->isRepG0[state]) +
+    GET_PRICE_0(p->isRep0Long[state][posState]);
+}
+
+static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
+{
+  UInt32 price;
+  if (repIndex == 0)
+  {
+    price = GET_PRICE_0(p->isRepG0[state]);
+    price += GET_PRICE_1(p->isRep0Long[state][posState]);
+  }
+  else
+  {
+    price = GET_PRICE_1(p->isRepG0[state]);
+    if (repIndex == 1)
+      price += GET_PRICE_0(p->isRepG1[state]);
+    else
+    {
+      price += GET_PRICE_1(p->isRepG1[state]);
+      price += GET_PRICE(p->isRepG2[state], repIndex - 2);
+    }
+  }
+  return price;
+}
+
+static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
+{
+  return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
+    GetPureRepPrice(p, repIndex, state, posState);
+}
+
+static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
+{
+  UInt32 posMem = p->opt[cur].posPrev;
+  UInt32 backMem = p->opt[cur].backPrev;
+  p->optimumEndIndex = cur;
+  do
+  {
+    if (p->opt[cur].prev1IsChar)
+    {
+      MakeAsChar(&p->opt[posMem])
+      p->opt[posMem].posPrev = posMem - 1;
+      if (p->opt[cur].prev2)
+      {
+        p->opt[posMem - 1].prev1IsChar = False;
+        p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
+        p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
+      }
+    }
+    {
+      UInt32 posPrev = posMem;
+      UInt32 backCur = backMem;
+      
+      backMem = p->opt[posPrev].backPrev;
+      posMem = p->opt[posPrev].posPrev;
+      
+      p->opt[posPrev].backPrev = backCur;
+      p->opt[posPrev].posPrev = cur;
+      cur = posPrev;
+    }
+  }
+  while (cur != 0);
+  *backRes = p->opt[0].backPrev;
+  p->optimumCurrentIndex  = p->opt[0].posPrev;
+  return p->optimumCurrentIndex;
+}
+
+#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
+
+static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
+{
+  UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
+  UInt32 matchPrice, repMatchPrice, normalMatchPrice;
+  UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
+  UInt32 *matches;
+  const Byte *data;
+  Byte curByte, matchByte;
+  if (p->optimumEndIndex != p->optimumCurrentIndex)
+  {
+    const COptimal *opt = &p->opt[p->optimumCurrentIndex];
+    UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
+    *backRes = opt->backPrev;
+    p->optimumCurrentIndex = opt->posPrev;
+    return lenRes;
+  }
+  p->optimumCurrentIndex = p->optimumEndIndex = 0;
+  
+  if (p->additionalOffset == 0)
+    mainLen = ReadMatchDistances(p, &numPairs);
+  else
+  {
+    mainLen = p->longestMatchLength;
+    numPairs = p->numPairs;
+  }
+
+  numAvail = p->numAvail;
+  if (numAvail < 2)
+  {
+    *backRes = (UInt32)(-1);
+    return 1;
+  }
+  if (numAvail > LZMA_MATCH_LEN_MAX)
+    numAvail = LZMA_MATCH_LEN_MAX;
+
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+  repMaxIndex = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 lenTest;
+    const Byte *data2;
+    reps[i] = p->reps[i];
+    data2 = data - (reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+    {
+      repLens[i] = 0;
+      continue;
+    }
+    for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+    repLens[i] = lenTest;
+    if (lenTest > repLens[repMaxIndex])
+      repMaxIndex = i;
+  }
+  if (repLens[repMaxIndex] >= p->numFastBytes)
+  {
+    UInt32 lenRes;
+    *backRes = repMaxIndex;
+    lenRes = repLens[repMaxIndex];
+    MovePos(p, lenRes - 1);
+    return lenRes;
+  }
+
+  matches = p->matches;
+  if (mainLen >= p->numFastBytes)
+  {
+    *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+    MovePos(p, mainLen - 1);
+    return mainLen;
+  }
+  curByte = *data;
+  matchByte = *(data - (reps[0] + 1));
+
+  if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
+  {
+    *backRes = (UInt32)-1;
+    return 1;
+  }
+
+  p->opt[0].state = (CState)p->state;
+
+  posState = (position & p->pbMask);
+
+  {
+    const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+    p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
+        (!IsCharState(p->state) ?
+          LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+          LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+  }
+
+  MakeAsChar(&p->opt[1]);
+
+  matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
+  repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
+
+  if (matchByte == curByte)
+  {
+    UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
+    if (shortRepPrice < p->opt[1].price)
+    {
+      p->opt[1].price = shortRepPrice;
+      MakeAsShortRep(&p->opt[1]);
+    }
+  }
+  lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
+
+  if (lenEnd < 2)
+  {
+    *backRes = p->opt[1].backPrev;
+    return 1;
+  }
+
+  p->opt[1].posPrev = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+    p->opt[0].backs[i] = reps[i];
+
+  len = lenEnd;
+  do
+    p->opt[len--].price = kInfinityPrice;
+  while (len >= 2);
+
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 repLen = repLens[i];
+    UInt32 price;
+    if (repLen < 2)
+      continue;
+    price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
+    do
+    {
+      UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
+      COptimal *opt = &p->opt[repLen];
+      if (curAndLenPrice < opt->price)
+      {
+        opt->price = curAndLenPrice;
+        opt->posPrev = 0;
+        opt->backPrev = i;
+        opt->prev1IsChar = False;
+      }
+    }
+    while (--repLen >= 2);
+  }
+
+  normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
+
+  len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
+  if (len <= mainLen)
+  {
+    UInt32 offs = 0;
+    while (len > matches[offs])
+      offs += 2;
+    for (; ; len++)
+    {
+      COptimal *opt;
+      UInt32 distance = matches[offs + 1];
+
+      UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
+      UInt32 lenToPosState = GetLenToPosState(len);
+      if (distance < kNumFullDistances)
+        curAndLenPrice += p->distancesPrices[lenToPosState][distance];
+      else
+      {
+        UInt32 slot;
+        GetPosSlot2(distance, slot);
+        curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
+      }
+      opt = &p->opt[len];
+      if (curAndLenPrice < opt->price)
+      {
+        opt->price = curAndLenPrice;
+        opt->posPrev = 0;
+        opt->backPrev = distance + LZMA_NUM_REPS;
+        opt->prev1IsChar = False;
+      }
+      if (len == matches[offs])
+      {
+        offs += 2;
+        if (offs == numPairs)
+          break;
+      }
+    }
+  }
+
+  cur = 0;
+
+    #ifdef SHOW_STAT2
+    if (position >= 0)
+    {
+      unsigned i;
+      printf("\n pos = %4X", position);
+      for (i = cur; i <= lenEnd; i++)
+      printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
+    }
+    #endif
+
+  for (;;)
+  {
+    UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
+    UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
+    Bool nextIsChar;
+    Byte curByte, matchByte;
+    const Byte *data;
+    COptimal *curOpt;
+    COptimal *nextOpt;
+
+    cur++;
+    if (cur == lenEnd)
+      return Backward(p, backRes, cur);
+
+    newLen = ReadMatchDistances(p, &numPairs);
+    if (newLen >= p->numFastBytes)
+    {
+      p->numPairs = numPairs;
+      p->longestMatchLength = newLen;
+      return Backward(p, backRes, cur);
+    }
+    position++;
+    curOpt = &p->opt[cur];
+    posPrev = curOpt->posPrev;
+    if (curOpt->prev1IsChar)
+    {
+      posPrev--;
+      if (curOpt->prev2)
+      {
+        state = p->opt[curOpt->posPrev2].state;
+        if (curOpt->backPrev2 < LZMA_NUM_REPS)
+          state = kRepNextStates[state];
+        else
+          state = kMatchNextStates[state];
+      }
+      else
+        state = p->opt[posPrev].state;
+      state = kLiteralNextStates[state];
+    }
+    else
+      state = p->opt[posPrev].state;
+    if (posPrev == cur - 1)
+    {
+      if (IsShortRep(curOpt))
+        state = kShortRepNextStates[state];
+      else
+        state = kLiteralNextStates[state];
+    }
+    else
+    {
+      UInt32 pos;
+      const COptimal *prevOpt;
+      if (curOpt->prev1IsChar && curOpt->prev2)
+      {
+        posPrev = curOpt->posPrev2;
+        pos = curOpt->backPrev2;
+        state = kRepNextStates[state];
+      }
+      else
+      {
+        pos = curOpt->backPrev;
+        if (pos < LZMA_NUM_REPS)
+          state = kRepNextStates[state];
+        else
+          state = kMatchNextStates[state];
+      }
+      prevOpt = &p->opt[posPrev];
+      if (pos < LZMA_NUM_REPS)
+      {
+        UInt32 i;
+        reps[0] = prevOpt->backs[pos];
+        for (i = 1; i <= pos; i++)
+          reps[i] = prevOpt->backs[i - 1];
+        for (; i < LZMA_NUM_REPS; i++)
+          reps[i] = prevOpt->backs[i];
+      }
+      else
+      {
+        UInt32 i;
+        reps[0] = (pos - LZMA_NUM_REPS);
+        for (i = 1; i < LZMA_NUM_REPS; i++)
+          reps[i] = prevOpt->backs[i - 1];
+      }
+    }
+    curOpt->state = (CState)state;
+
+    curOpt->backs[0] = reps[0];
+    curOpt->backs[1] = reps[1];
+    curOpt->backs[2] = reps[2];
+    curOpt->backs[3] = reps[3];
+
+    curPrice = curOpt->price;
+    nextIsChar = False;
+    data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+    curByte = *data;
+    matchByte = *(data - (reps[0] + 1));
+
+    posState = (position & p->pbMask);
+
+    curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
+    {
+      const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+      curAnd1Price +=
+        (!IsCharState(state) ?
+          LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+          LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+    }
+
+    nextOpt = &p->opt[cur + 1];
+
+    if (curAnd1Price < nextOpt->price)
+    {
+      nextOpt->price = curAnd1Price;
+      nextOpt->posPrev = cur;
+      MakeAsChar(nextOpt);
+      nextIsChar = True;
+    }
+
+    matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
+    repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
+    
+    if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
+    {
+      UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
+      if (shortRepPrice <= nextOpt->price)
+      {
+        nextOpt->price = shortRepPrice;
+        nextOpt->posPrev = cur;
+        MakeAsShortRep(nextOpt);
+        nextIsChar = True;
+      }
+    }
+    numAvailFull = p->numAvail;
+    {
+      UInt32 temp = kNumOpts - 1 - cur;
+      if (temp < numAvailFull)
+        numAvailFull = temp;
+    }
+
+    if (numAvailFull < 2)
+      continue;
+    numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
+
+    if (!nextIsChar && matchByte != curByte) /* speed optimization */
+    {
+      /* try Literal + rep0 */
+      UInt32 temp;
+      UInt32 lenTest2;
+      const Byte *data2 = data - (reps[0] + 1);
+      UInt32 limit = p->numFastBytes + 1;
+      if (limit > numAvailFull)
+        limit = numAvailFull;
+
+      for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
+      lenTest2 = temp - 1;
+      if (lenTest2 >= 2)
+      {
+        UInt32 state2 = kLiteralNextStates[state];
+        UInt32 posStateNext = (position + 1) & p->pbMask;
+        UInt32 nextRepMatchPrice = curAnd1Price +
+            GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+            GET_PRICE_1(p->isRep[state2]);
+        /* for (; lenTest2 >= 2; lenTest2--) */
+        {
+          UInt32 curAndLenPrice;
+          COptimal *opt;
+          UInt32 offset = cur + 1 + lenTest2;
+          while (lenEnd < offset)
+            p->opt[++lenEnd].price = kInfinityPrice;
+          curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+          opt = &p->opt[offset];
+          if (curAndLenPrice < opt->price)
+          {
+            opt->price = curAndLenPrice;
+            opt->posPrev = cur + 1;
+            opt->backPrev = 0;
+            opt->prev1IsChar = True;
+            opt->prev2 = False;
+          }
+        }
+      }
+    }
+    
+    startLen = 2; /* speed optimization */
+    {
+    UInt32 repIndex;
+    for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
+    {
+      UInt32 lenTest;
+      UInt32 lenTestTemp;
+      UInt32 price;
+      const Byte *data2 = data - (reps[repIndex] + 1);
+      if (data[0] != data2[0] || data[1] != data2[1])
+        continue;
+      for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+      while (lenEnd < cur + lenTest)
+        p->opt[++lenEnd].price = kInfinityPrice;
+      lenTestTemp = lenTest;
+      price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
+      do
+      {
+        UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
+        COptimal *opt = &p->opt[cur + lenTest];
+        if (curAndLenPrice < opt->price)
+        {
+          opt->price = curAndLenPrice;
+          opt->posPrev = cur;
+          opt->backPrev = repIndex;
+          opt->prev1IsChar = False;
+        }
+      }
+      while (--lenTest >= 2);
+      lenTest = lenTestTemp;
+      
+      if (repIndex == 0)
+        startLen = lenTest + 1;
+        
+      /* if (_maxMode) */
+        {
+          UInt32 lenTest2 = lenTest + 1;
+          UInt32 limit = lenTest2 + p->numFastBytes;
+          UInt32 nextRepMatchPrice;
+          if (limit > numAvailFull)
+            limit = numAvailFull;
+          for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+          lenTest2 -= lenTest + 1;
+          if (lenTest2 >= 2)
+          {
+            UInt32 state2 = kRepNextStates[state];
+            UInt32 posStateNext = (position + lenTest) & p->pbMask;
+            UInt32 curAndLenCharPrice =
+                price + p->repLenEnc.prices[posState][lenTest - 2] +
+                GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+                LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+                    data[lenTest], data2[lenTest], p->ProbPrices);
+            state2 = kLiteralNextStates[state2];
+            posStateNext = (position + lenTest + 1) & p->pbMask;
+            nextRepMatchPrice = curAndLenCharPrice +
+                GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+                GET_PRICE_1(p->isRep[state2]);
+            
+            /* for (; lenTest2 >= 2; lenTest2--) */
+            {
+              UInt32 curAndLenPrice;
+              COptimal *opt;
+              UInt32 offset = cur + lenTest + 1 + lenTest2;
+              while (lenEnd < offset)
+                p->opt[++lenEnd].price = kInfinityPrice;
+              curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+              opt = &p->opt[offset];
+              if (curAndLenPrice < opt->price)
+              {
+                opt->price = curAndLenPrice;
+                opt->posPrev = cur + lenTest + 1;
+                opt->backPrev = 0;
+                opt->prev1IsChar = True;
+                opt->prev2 = True;
+                opt->posPrev2 = cur;
+                opt->backPrev2 = repIndex;
+              }
+            }
+          }
+        }
+    }
+    }
+    /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
+    if (newLen > numAvail)
+    {
+      newLen = numAvail;
+      for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
+      matches[numPairs] = newLen;
+      numPairs += 2;
+    }
+    if (newLen >= startLen)
+    {
+      UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
+      UInt32 offs, curBack, posSlot;
+      UInt32 lenTest;
+      while (lenEnd < cur + newLen)
+        p->opt[++lenEnd].price = kInfinityPrice;
+
+      offs = 0;
+      while (startLen > matches[offs])
+        offs += 2;
+      curBack = matches[offs + 1];
+      GetPosSlot2(curBack, posSlot);
+      for (lenTest = /*2*/ startLen; ; lenTest++)
+      {
+        UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
+        UInt32 lenToPosState = GetLenToPosState(lenTest);
+        COptimal *opt;
+        if (curBack < kNumFullDistances)
+          curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
+        else
+          curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
+        
+        opt = &p->opt[cur + lenTest];
+        if (curAndLenPrice < opt->price)
+        {
+          opt->price = curAndLenPrice;
+          opt->posPrev = cur;
+          opt->backPrev = curBack + LZMA_NUM_REPS;
+          opt->prev1IsChar = False;
+        }
+
+        if (/*_maxMode && */lenTest == matches[offs])
+        {
+          /* Try Match + Literal + Rep0 */
+          const Byte *data2 = data - (curBack + 1);
+          UInt32 lenTest2 = lenTest + 1;
+          UInt32 limit = lenTest2 + p->numFastBytes;
+          UInt32 nextRepMatchPrice;
+          if (limit > numAvailFull)
+            limit = numAvailFull;
+          for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+          lenTest2 -= lenTest + 1;
+          if (lenTest2 >= 2)
+          {
+            UInt32 state2 = kMatchNextStates[state];
+            UInt32 posStateNext = (position + lenTest) & p->pbMask;
+            UInt32 curAndLenCharPrice = curAndLenPrice +
+                GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+                LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+                    data[lenTest], data2[lenTest], p->ProbPrices);
+            state2 = kLiteralNextStates[state2];
+            posStateNext = (posStateNext + 1) & p->pbMask;
+            nextRepMatchPrice = curAndLenCharPrice +
+                GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+                GET_PRICE_1(p->isRep[state2]);
+            
+            /* for (; lenTest2 >= 2; lenTest2--) */
+            {
+              UInt32 offset = cur + lenTest + 1 + lenTest2;
+              UInt32 curAndLenPrice;
+              COptimal *opt;
+              while (lenEnd < offset)
+                p->opt[++lenEnd].price = kInfinityPrice;
+              curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+              opt = &p->opt[offset];
+              if (curAndLenPrice < opt->price)
+              {
+                opt->price = curAndLenPrice;
+                opt->posPrev = cur + lenTest + 1;
+                opt->backPrev = 0;
+                opt->prev1IsChar = True;
+                opt->prev2 = True;
+                opt->posPrev2 = cur;
+                opt->backPrev2 = curBack + LZMA_NUM_REPS;
+              }
+            }
+          }
+          offs += 2;
+          if (offs == numPairs)
+            break;
+          curBack = matches[offs + 1];
+          if (curBack >= kNumFullDistances)
+            GetPosSlot2(curBack, posSlot);
+        }
+      }
+    }
+  }
+}
+
+/* ChangePair(small, big): true when 'big' is more than ~128x 'small'.
+   Used to decide whether a longer match justifies a much larger distance. */
+#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
+
+/*
+ * Fast-mode (greedy) match selection, used when p->fastMode is set.
+ * Returns the chosen length and sets *backRes to:
+ *   (UInt32)-1              -> emit a literal (return value 1),
+ *   0..LZMA_NUM_REPS-1      -> repeat-distance match (rep index),
+ *   dist + LZMA_NUM_REPS    -> normal match at 'dist'.
+ * Advances the match finder past the consumed bytes via MovePos().
+ */
+static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
+{
+  UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
+  const Byte *data;
+  const UInt32 *matches;
+
+  /* Reuse distances that a previous call already read ahead, if any. */
+  if (p->additionalOffset == 0)
+    mainLen = ReadMatchDistances(p, &numPairs);
+  else
+  {
+    mainLen = p->longestMatchLength;
+    numPairs = p->numPairs;
+  }
+
+  numAvail = p->numAvail;
+  *backRes = (UInt32)-1;
+  if (numAvail < 2)
+    return 1;
+  if (numAvail > LZMA_MATCH_LEN_MAX)
+    numAvail = LZMA_MATCH_LEN_MAX;
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+
+  /* Scan the four most recent distances for the best repeat match. */
+  repLen = repIndex = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 len;
+    const Byte *data2 = data - (p->reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+      continue;
+    for (len = 2; len < numAvail && data[len] == data2[len]; len++);
+    if (len >= p->numFastBytes)
+    {
+      /* Long enough: take the rep match immediately. */
+      *backRes = i;
+      MovePos(p, len - 1);
+      return len;
+    }
+    if (len > repLen)
+    {
+      repIndex = i;
+      repLen = len;
+    }
+  }
+
+  matches = p->matches;
+  if (mainLen >= p->numFastBytes)
+  {
+    /* Normal match already meets the fast-bytes threshold: take it. */
+    *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+    MovePos(p, mainLen - 1);
+    return mainLen;
+  }
+
+  mainDist = 0; /* for GCC */
+  if (mainLen >= 2)
+  {
+    /* Prefer a one-byte-shorter match when its distance is much smaller. */
+    mainDist = matches[numPairs - 1];
+    while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
+    {
+      if (!ChangePair(matches[numPairs - 3], mainDist))
+        break;
+      numPairs -= 2;
+      mainLen = matches[numPairs - 2];
+      mainDist = matches[numPairs - 1];
+    }
+    /* A length-2 match at distance >= 0x80 is not worth encoding. */
+    if (mainLen == 2 && mainDist >= 0x80)
+      mainLen = 1;
+  }
+
+  /* Favor a rep match nearly as long as the normal match; the allowed
+     length deficit grows with the normal match's distance. */
+  if (repLen >= 2 && (
+        (repLen + 1 >= mainLen) ||
+        (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
+        (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
+  {
+    *backRes = repIndex;
+    MovePos(p, repLen - 1);
+    return repLen;
+  }
+  
+  if (mainLen < 2 || numAvail <= 2)
+    return 1;
+
+  /* Peek one position ahead: if the next position offers a clearly better
+     match, emit a literal now instead of committing to the current match. */
+  p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
+  if (p->longestMatchLength >= 2)
+  {
+    UInt32 newDistance = matches[p->numPairs - 1];
+    if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
+        (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
+        (p->longestMatchLength > mainLen + 1) ||
+        (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
+      return 1;
+  }
+  
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+  /* If some rep distance almost reaches the chosen length, emitting a
+     literal may enable a cheaper rep match at the next position. */
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 len, limit;
+    const Byte *data2 = data - (p->reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+      continue;
+    limit = mainLen - 1;
+    for (len = 2; len < limit && data[len] == data2[len]; len++);
+    if (len >= limit)
+      return 1;
+  }
+  *backRes = mainDist + LZMA_NUM_REPS;
+  /* One position was already consumed by the look-ahead read above. */
+  MovePos(p, mainLen - 2);
+  return mainLen;
+}
+
+/*
+ * Emit the LZMA end-of-stream marker: a minimal-length match whose
+ * encoded distance has all position-slot bits set (i.e. an impossible
+ * distance), which decoders recognize as end of data.
+ */
+static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
+{
+  UInt32 len;
+  RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+  RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+  p->state = kMatchNextStates[p->state];
+  len = LZMA_MATCH_LEN_MIN;
+  LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+  /* Maximum pos-slot, then all-ones direct bits and align bits. */
+  RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
+  RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
+  RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
+}
+
+/*
+ * Fold range-coder and match-finder error states into p->result.
+ * Once any error is recorded the encoder is marked finished.
+ */
+static SRes CheckErrors(CLzmaEnc *p)
+{
+  if (p->result != SZ_OK)
+    return p->result;
+  if (p->rc.res != SZ_OK)
+    p->result = SZ_ERROR_WRITE;
+  if (p->matchFinderBase.result != SZ_OK)
+    p->result = SZ_ERROR_READ;
+  if (p->result != SZ_OK)
+    p->finished = True;
+  return p->result;
+}
+
+/*
+ * Finish the stream at unpacked position 'nowPos': optionally write the
+ * end marker, then flush the range coder and the output stream.
+ */
+static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
+{
+  /* ReleaseMFStream(); */
+  p->finished = True;
+  if (p->writeEndMark)
+    WriteEndMarker(p, nowPos & p->pbMask);
+  RangeEnc_FlushData(&p->rc);
+  RangeEnc_FlushStream(&p->rc);
+  return CheckErrors(p);
+}
+
+/*
+ * Recompute the price table for the 4 low "align" bits of large
+ * distances and reset the refresh counter.
+ */
+static void FillAlignPrices(CLzmaEnc *p)
+{
+  UInt32 i;
+  for (i = 0; i < kAlignTableSize; i++)
+    p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
+  p->alignPriceCount = 0;
+}
+
+/*
+ * Recompute distance price tables used by GetOptimum:
+ *  - posSlotPrices: price of each position slot (plus the fixed cost of
+ *    the direct bits for slots beyond kEndPosModelIndex),
+ *  - distancesPrices: full price for small distances (< kNumFullDistances),
+ *    i.e. slot price plus the reverse-coded footer bits.
+ */
+static void FillDistancesPrices(CLzmaEnc *p)
+{
+  UInt32 tempPrices[kNumFullDistances];
+  UInt32 i, lenToPosState;
+  /* Footer-bit prices for every small distance with a modeled footer. */
+  for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
+  {
+    UInt32 posSlot = GetPosSlot1(i);
+    UInt32 footerBits = ((posSlot >> 1) - 1);
+    UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+    tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
+  }
+
+  for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
+  {
+    UInt32 posSlot;
+    const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
+    UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
+    for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
+      posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
+    /* Direct bits (above the align bits) cost a fixed price per bit. */
+    for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
+      posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
+
+    {
+      UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
+      UInt32 i;
+      for (i = 0; i < kStartPosModelIndex; i++)
+        distancesPrices[i] = posSlotPrices[i];
+      for (; i < kNumFullDistances; i++)
+        distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
+    }
+  }
+  p->matchPriceCount = 0;
+}
+
+/*
+ * One-time construction of an encoder object: set up the range coder and
+ * match finder(s), apply default properties, and build the static price
+ * and fast-position tables.  Literal probability arrays are allocated
+ * lazily (in LzmaEnc_Alloc), so they start as NULL here.
+ */
+static void LzmaEnc_Construct(CLzmaEnc *p)
+{
+  RangeEnc_Construct(&p->rc);
+  MatchFinder_Construct(&p->matchFinderBase);
+  #ifndef _7ZIP_ST
+  MatchFinderMt_Construct(&p->matchFinderMt);
+  p->matchFinderMt.MatchFinder = &p->matchFinderBase;
+  #endif
+
+  {
+    CLzmaEncProps props;
+    LzmaEncProps_Init(&props);
+    LzmaEnc_SetProps(p, &props);
+  }
+
+  #ifndef LZMA_LOG_BSR
+  LzmaEnc_FastPosInit(p->g_FastPos);
+  #endif
+
+  LzmaEnc_InitPriceTables(p->ProbPrices);
+  p->litProbs = 0;
+  p->saveState.litProbs = 0;
+}
+
+/* Allocate and construct an encoder; returns NULL on allocation failure. */
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
+{
+  void *p;
+  p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
+  if (p != 0)
+    LzmaEnc_Construct((CLzmaEnc *)p);
+  return p;
+}
+
+/* Free both literal probability arrays and reset the pointers. */
+static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->litProbs);
+  alloc->Free(alloc, p->saveState.litProbs);
+  p->litProbs = 0;
+  p->saveState.litProbs = 0;
+}
+
+/* Release everything owned by the encoder, but not the object itself. */
+void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  #ifndef _7ZIP_ST
+  MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
+  #endif
+  MatchFinder_Free(&p->matchFinderBase, allocBig);
+  LzmaEnc_FreeLits(p, alloc);
+  RangeEnc_Free(&p->rc, alloc);
+}
+
+/* Destruct and free an encoder created by LzmaEnc_Create. */
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
+  alloc->Free(alloc, p);
+}
+
+/*
+ * Encode one block of input.  Repeatedly asks GetOptimum/GetOptimumFast
+ * for the next symbol (literal, rep match, or normal match) and emits it
+ * through the range coder.  When 'useLimits' is set, stops before the
+ * packed or unpacked limits are exceeded (used by the LZMA2 path);
+ * otherwise returns to the caller roughly every 32 KiB for progress
+ * reporting.  Returns SZ_OK or the first recorded error.
+ */
+static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
+{
+  UInt32 nowPos32, startPos32;
+  if (p->needInit)
+  {
+    p->matchFinder.Init(p->matchFinderObj);
+    p->needInit = 0;
+  }
+
+  if (p->finished)
+    return p->result;
+  RINOK(CheckErrors(p));
+
+  nowPos32 = (UInt32)p->nowPos64;
+  startPos32 = nowPos32;
+
+  /* The very first byte of the stream is always coded as a literal. */
+  if (p->nowPos64 == 0)
+  {
+    UInt32 numPairs;
+    Byte curByte;
+    if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+      return Flush(p, nowPos32);
+    ReadMatchDistances(p, &numPairs);
+    RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
+    p->state = kLiteralNextStates[p->state];
+    curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
+    LitEnc_Encode(&p->rc, p->litProbs, curByte);
+    p->additionalOffset--;
+    nowPos32++;
+  }
+
+  if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
+  for (;;)
+  {
+    UInt32 pos, len, posState;
+
+    if (p->fastMode)
+      len = GetOptimumFast(p, &pos);
+    else
+      len = GetOptimum(p, nowPos32, &pos);
+
+    #ifdef SHOW_STAT2
+    printf("\n pos = %4X,   len = %d   pos = %d", nowPos32, len, pos);
+    #endif
+
+    posState = nowPos32 & p->pbMask;
+    /* len == 1 with pos == -1 means "emit a literal". */
+    if (len == 1 && pos == (UInt32)-1)
+    {
+      Byte curByte;
+      CLzmaProb *probs;
+      const Byte *data;
+
+      RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
+      data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+      curByte = *data;
+      probs = LIT_PROBS(nowPos32, *(data - 1));
+      /* After a match, the literal is coded against the matched byte. */
+      if (IsCharState(p->state))
+        LitEnc_Encode(&p->rc, probs, curByte);
+      else
+        LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
+      p->state = kLiteralNextStates[p->state];
+    }
+    else
+    {
+      RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+      if (pos < LZMA_NUM_REPS)
+      {
+        /* Repeat-distance match: encode which of the 4 reps is used. */
+        RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
+        if (pos == 0)
+        {
+          RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
+          RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
+        }
+        else
+        {
+          /* Move the used distance to the front of the rep queue. */
+          UInt32 distance = p->reps[pos];
+          RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
+          if (pos == 1)
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
+          else
+          {
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
+            if (pos == 3)
+              p->reps[3] = p->reps[2];
+            p->reps[2] = p->reps[1];
+          }
+          p->reps[1] = p->reps[0];
+          p->reps[0] = distance;
+        }
+        if (len == 1)
+          p->state = kShortRepNextStates[p->state];
+        else
+        {
+          LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+          p->state = kRepNextStates[p->state];
+        }
+      }
+      else
+      {
+        /* Normal match: length, then pos-slot + footer bits of distance. */
+        UInt32 posSlot;
+        RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+        p->state = kMatchNextStates[p->state];
+        LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+        pos -= LZMA_NUM_REPS;
+        GetPosSlot(pos, posSlot);
+        RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
+        
+        if (posSlot >= kStartPosModelIndex)
+        {
+          UInt32 footerBits = ((posSlot >> 1) - 1);
+          UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+          UInt32 posReduced = pos - base;
+
+          if (posSlot < kEndPosModelIndex)
+            RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
+          else
+          {
+            RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
+            RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
+            p->alignPriceCount++;
+          }
+        }
+        p->reps[3] = p->reps[2];
+        p->reps[2] = p->reps[1];
+        p->reps[1] = p->reps[0];
+        p->reps[0] = pos;
+        p->matchPriceCount++;
+      }
+    }
+    p->additionalOffset -= len;
+    nowPos32 += len;
+    /* Only check limits/prices when the match finder is caught up. */
+    if (p->additionalOffset == 0)
+    {
+      UInt32 processed;
+      if (!p->fastMode)
+      {
+        /* Periodically refresh the stale price tables. */
+        if (p->matchPriceCount >= (1 << 7))
+          FillDistancesPrices(p);
+        if (p->alignPriceCount >= kAlignTableSize)
+          FillAlignPrices(p);
+      }
+      if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+        break;
+      processed = nowPos32 - startPos32;
+      if (useLimits)
+      {
+        /* Leave headroom (kNumOpts) so the optimizer cannot overrun. */
+        if (processed + kNumOpts + 300 >= maxUnpackSize ||
+            RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
+          break;
+      }
+      else if (processed >= (1 << 15))
+      {
+        /* Return periodically so the caller can report progress. */
+        p->nowPos64 += nowPos32 - startPos32;
+        return CheckErrors(p);
+      }
+    }
+  }
+  p->nowPos64 += nowPos32 - startPos32;
+  return Flush(p, nowPos32);
+}
+
+/* Dictionaries larger than 16 MiB switch the match finder to a big hash. */
+#define kBigHashDicLimit ((UInt32)1 << 24)
+
+/*
+ * Allocate all encoder buffers for the current properties: the range
+ * coder buffer, the literal probability arrays (sized 0x300 << (lc+lp)),
+ * and the match finder (single- or multi-threaded).  'keepWindowSize'
+ * lets LZMA2 keep extra history in the window.  Returns SZ_ERROR_MEM on
+ * allocation failure.
+ */
+static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  UInt32 beforeSize = kNumOpts;
+  Bool btMode;
+  if (!RangeEnc_Alloc(&p->rc, alloc))
+    return SZ_ERROR_MEM;
+  btMode = (p->matchFinderBase.btMode != 0);
+  #ifndef _7ZIP_ST
+  /* The MT match finder is only used with binary-tree mode, non-fast. */
+  p->mtMode = (p->multiThread && !p->fastMode && btMode);
+  #endif
+
+  {
+    /* (Re)allocate literal probs only when lc+lp changed. */
+    unsigned lclp = p->lc + p->lp;
+    if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
+    {
+      LzmaEnc_FreeLits(p, alloc);
+      p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+      p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+      if (p->litProbs == 0 || p->saveState.litProbs == 0)
+      {
+        LzmaEnc_FreeLits(p, alloc);
+        return SZ_ERROR_MEM;
+      }
+      p->lclp = lclp;
+    }
+  }
+
+  p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
+
+  if (beforeSize + p->dictSize < keepWindowSize)
+    beforeSize = keepWindowSize - p->dictSize;
+
+  #ifndef _7ZIP_ST
+  if (p->mtMode)
+  {
+    RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
+    p->matchFinderObj = &p->matchFinderMt;
+    MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
+  }
+  else
+  #endif
+  {
+    if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
+      return SZ_ERROR_MEM;
+    p->matchFinderObj = &p->matchFinderBase;
+    MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
+  }
+  return SZ_OK;
+}
+
+/*
+ * Reset all adaptive coder state to the start-of-stream condition:
+ * state 0, zeroed rep distances, and every probability model set to
+ * kProbInitValue.  Must be called before encoding a new stream.
+ */
+void LzmaEnc_Init(CLzmaEnc *p)
+{
+  UInt32 i;
+  p->state = 0;
+  for (i = 0 ; i < LZMA_NUM_REPS; i++)
+    p->reps[i] = 0;
+
+  RangeEnc_Init(&p->rc);
+
+
+  for (i = 0; i < kNumStates; i++)
+  {
+    UInt32 j;
+    for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
+    {
+      p->isMatch[i][j] = kProbInitValue;
+      p->isRep0Long[i][j] = kProbInitValue;
+    }
+    p->isRep[i] = kProbInitValue;
+    p->isRepG0[i] = kProbInitValue;
+    p->isRepG1[i] = kProbInitValue;
+    p->isRepG2[i] = kProbInitValue;
+  }
+
+  {
+    /* 0x300 literal probs per (lc, lp) context. */
+    UInt32 num = 0x300 << (p->lp + p->lc);
+    for (i = 0; i < num; i++)
+      p->litProbs[i] = kProbInitValue;
+  }
+
+  {
+    for (i = 0; i < kNumLenToPosStates; i++)
+    {
+      CLzmaProb *probs = p->posSlotEncoder[i];
+      UInt32 j;
+      for (j = 0; j < (1 << kNumPosSlotBits); j++)
+        probs[j] = kProbInitValue;
+    }
+  }
+  {
+    for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
+      p->posEncoders[i] = kProbInitValue;
+  }
+
+  LenEnc_Init(&p->lenEnc.p);
+  LenEnc_Init(&p->repLenEnc.p);
+
+  for (i = 0; i < (1 << kNumAlignBits); i++)
+    p->posAlignEncoder[i] = kProbInitValue;
+
+  p->optimumEndIndex = 0;
+  p->optimumCurrentIndex = 0;
+  p->additionalOffset = 0;
+
+  p->pbMask = (1 << p->pb) - 1;
+  p->lpMask = (1 << p->lp) - 1;
+}
+
+/*
+ * Rebuild the price tables used by the optimal parser.  Skipped in fast
+ * mode, where prices are not consulted (except the length tables, which
+ * both modes share).
+ */
+void LzmaEnc_InitPrices(CLzmaEnc *p)
+{
+  if (!p->fastMode)
+  {
+    FillDistancesPrices(p);
+    FillAlignPrices(p);
+  }
+
+  p->lenEnc.tableSize =
+  p->repLenEnc.tableSize =
+      p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
+  LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
+  LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
+}
+
+/*
+ * Common setup path: derive distTableSize from the dictionary size,
+ * allocate buffers, and reset coder state and prices.
+ */
+static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  UInt32 i;
+  /* distTableSize = 2 * ceil(log2(dictSize)). */
+  for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
+    if (p->dictSize <= ((UInt32)1 << i))
+      break;
+  p->distTableSize = i * 2;
+
+  p->finished = False;
+  p->result = SZ_OK;
+  RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
+  LzmaEnc_Init(p);
+  LzmaEnc_InitPrices(p);
+  p->nowPos64 = 0;
+  return SZ_OK;
+}
+
+/* Prepare for stream-to-stream encoding (used by LzmaEnc_Encode). */
+static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
+    ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  p->matchFinderBase.stream = inStream;
+  p->needInit = 1;
+  p->rc.outStream = outStream;
+  return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
+}
+
+/* Prepare for LZMA2 use: input stream only; output is set per block. */
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
+    ISeqInStream *inStream, UInt32 keepWindowSize,
+    ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  p->matchFinderBase.stream = inStream;
+  p->needInit = 1;
+  return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+/* Point the match finder directly at an in-memory source buffer. */
+static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
+{
+  p->matchFinderBase.directInput = 1;
+  p->matchFinderBase.bufferBase = (Byte *)src;
+  p->matchFinderBase.directInputRem = srcLen;
+}
+
+/* Prepare for encoding from an in-memory buffer. */
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+    UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  LzmaEnc_SetInputBuf(p, src, srcLen);
+  p->needInit = 1;
+
+  return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+/* Release the MT match finder's stream; a no-op in single-thread builds. */
+static void LzmaEnc_Finish(CLzmaEncHandle pp)
+{
+  #ifndef _7ZIP_ST
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  if (p->mtMode)
+    MatchFinderMt_ReleaseStream(&p->matchFinderMt);
+  #else
+  /* Self-assignment silences the unused-parameter warning. */
+  pp = pp;
+  #endif
+}
+
+/*
+ * ISeqOutStream adapter that writes into a fixed caller-supplied buffer.
+ * 'rem' counts the remaining space; 'overflow' is latched when a write
+ * would not fit (the excess is silently truncated).
+ */
+typedef struct
+{
+  ISeqOutStream funcTable;   /* must be first: cast back from vtable ptr */
+  Byte *data;                /* next write position */
+  SizeT rem;                 /* bytes left in the buffer */
+  Bool overflow;             /* set once a write was truncated */
+} CSeqOutStreamBuf;
+
+/* ISeqOutStream.Write for CSeqOutStreamBuf; returns bytes accepted. */
+static size_t MyWrite(void *pp, const void *data, size_t size)
+{
+  CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
+  if (p->rem < size)
+  {
+    size = p->rem;
+    p->overflow = True;
+  }
+  memcpy(p->data, data, size);
+  p->rem -= size;
+  p->data += size;
+  return size;
+}
+
+
+/* Bytes still available to the match finder (includes look-ahead). */
+static UInt32 __maybe_unused LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
+{
+  const CLzmaEnc *p = (CLzmaEnc *)pp;
+  return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+}
+
+/* Pointer to the current (not yet encoded) position in the window. */
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
+{
+  const CLzmaEnc *p = (CLzmaEnc *)pp;
+  return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+}
+
+/*
+ * Encode one size-limited block into a memory buffer (LZMA2 helper).
+ * On return *destLen holds the bytes produced and *unpackSize the input
+ * bytes consumed.  No end marker is written.  Returns
+ * SZ_ERROR_OUTPUT_EOF if 'dest' was too small.
+ */
+static SRes __maybe_unused LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+    Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  UInt64 nowPos64;
+  SRes res;
+  CSeqOutStreamBuf outStream;
+
+  outStream.funcTable.Write = MyWrite;
+  outStream.data = dest;
+  outStream.rem = *destLen;
+  outStream.overflow = False;
+
+  p->writeEndMark = False;
+  p->finished = False;
+  p->result = SZ_OK;
+
+  if (reInit)
+    LzmaEnc_Init(p);
+  LzmaEnc_InitPrices(p);
+  nowPos64 = p->nowPos64;
+  RangeEnc_Init(&p->rc);
+  p->rc.outStream = &outStream.funcTable;
+
+  res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
+  
+  *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
+  *destLen -= outStream.rem;
+  if (outStream.overflow)
+    return SZ_ERROR_OUTPUT_EOF;
+
+  return res;
+}
+
+/*
+ * Main encode loop: code blocks until the input is exhausted or an
+ * error occurs, reporting progress between blocks when requested.
+ */
+static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
+{
+  SRes res = SZ_OK;
+
+  #ifndef _7ZIP_ST
+  /* Touch some stack so the MT helper threads get distinct frames
+     (upstream LZMA SDK 'alloca' workaround). */
+  Byte allocaDummy[0x300];
+  int i = 0;
+  for (i = 0; i < 16; i++)
+    allocaDummy[i] = (Byte)i;
+  #endif
+
+  for (;;)
+  {
+    res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
+    if (res != SZ_OK || p->finished != 0)
+      break;
+    if (progress != 0)
+    {
+      res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
+      if (res != SZ_OK)
+      {
+        /* Non-zero from the callback aborts the encode. */
+        res = SZ_ERROR_PROGRESS;
+        break;
+      }
+    }
+  }
+  LzmaEnc_Finish(p);
+  return res;
+}
+
+/* Public stream-to-stream entry point: prepare, then encode to the end. */
+SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
+    ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
+  return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
+}
+
+/*
+ * Serialize the 5-byte LZMA properties header: byte 0 packs (pb, lp, lc)
+ * as (pb*5 + lp)*9 + lc; bytes 1-4 hold the dictionary size rounded up
+ * to the next 2^n or 3*2^n, little-endian.  Fails with SZ_ERROR_PARAM if
+ * *size < LZMA_PROPS_SIZE; on success *size is set to LZMA_PROPS_SIZE.
+ */
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  int i;
+  UInt32 dictSize = p->dictSize;
+  if (*size < LZMA_PROPS_SIZE)
+    return SZ_ERROR_PARAM;
+  *size = LZMA_PROPS_SIZE;
+  props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
+
+  /* Round the dictionary size up to 2^n or 3*2^n for the header. */
+  for (i = 11; i <= 30; i++)
+  {
+    if (dictSize <= ((UInt32)2 << i))
+    {
+      dictSize = (2 << i);
+      break;
+    }
+    if (dictSize <= ((UInt32)3 << i))
+    {
+      dictSize = (3 << i);
+      break;
+    }
+  }
+
+  for (i = 0; i < 4; i++)
+    props[1 + i] = (Byte)(dictSize >> (8 * i));
+  return SZ_OK;
+}
+
+/*
+ * Encode an in-memory buffer into an in-memory buffer with an existing
+ * encoder handle.  *destLen is in/out: capacity on entry, bytes written
+ * on return.  Returns SZ_ERROR_OUTPUT_EOF when 'dest' was too small.
+ */
+SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+    int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  SRes res;
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+
+  CSeqOutStreamBuf outStream;
+
+  LzmaEnc_SetInputBuf(p, src, srcLen);
+
+  outStream.funcTable.Write = MyWrite;
+  outStream.data = dest;
+  outStream.rem = *destLen;
+  outStream.overflow = False;
+
+  p->writeEndMark = writeEndMark;
+
+  p->rc.outStream = &outStream.funcTable;
+  res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
+  if (res == SZ_OK)
+    res = LzmaEnc_Encode2(p, progress);
+
+  *destLen -= outStream.rem;
+  if (outStream.overflow)
+    return SZ_ERROR_OUTPUT_EOF;
+  return res;
+}
+
+/*
+ * One-shot convenience wrapper: create an encoder, apply 'props', write
+ * the encoded properties header to 'propsEncoded', compress src -> dest,
+ * and destroy the encoder.  Returns the first error encountered.
+ */
+static __maybe_unused SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+    const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+    ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
+  SRes res;
+  if (p == 0)
+    return SZ_ERROR_MEM;
+
+  res = LzmaEnc_SetProps(p, props);
+  if (res == SZ_OK)
+  {
+    res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
+    if (res == SZ_OK)
+      res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
+          writeEndMark, progress, alloc, allocBig);
+  }
+
+  LzmaEnc_Destroy(p, alloc, allocBig);
+  return res;
+}
diff --git a/lib/lzma/Makefile b/lib/lzma/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..02e799c9938131975b0e1eed562adf6e5c46e9f3
--- /dev/null
+++ b/lib/lzma/Makefile
@@ -0,0 +1,7 @@
+lzma_compress-objs := LzFind.o LzmaEnc.o
+lzma_decompress-objs := LzmaDec.o
+
+obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
+
+EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile
index e764116ea12d3869da9fbd5c42be3c71c7325506..46024769d6d5e32b84d450190d02f4c232e83712 100644
--- a/lib/lzo/Makefile
+++ b/lib/lzo/Makefile
@@ -1,5 +1,13 @@
 lzo_compress-objs := lzo1x_compress.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+lzo_decompress-objs := lzo1x_decompress_safe.o
+else
 lzo_decompress-objs := lzo1x_decompress.o
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+lzo_decompress-objs := lzo1x_decompress.o
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 0d83ea8a9605429aa5e79262bef539e51bfec456..25018bc84c5d9116b57156cc1b471a148e99113d 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -33,7 +33,13 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 		return NULL;
 	if (maxlen && len > maxlen)
 		len = maxlen;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+	/* IO Resource not supported */
+	if (!IS_ENABLED(CONFIG_NO_GENERIC_PCI_IOPORT_MAP) &&
+			(flags & IORESOURCE_IO))
+#else
 	if (flags & IORESOURCE_IO)
+#endif
 		return __pci_ioport_map(dev, start, len);
 	if (flags & IORESOURCE_MEM) {
 		if (flags & IORESOURCE_CACHEABLE)
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00ef2a0e85a11ef8202529b405b069702a8d..1bc79478a47e6628c9b2e65f5825a4a6d11e6aec 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -46,6 +46,11 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += vmpressure.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 46bf2ed5594c73a86cbbe61fec40669bcfea74a3..90d508269f37629c59ddd8dbb6ffd3a269ad6644 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -242,6 +242,8 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 /*
  * Determine the type of allocation constraint.
  */
+#if !defined(CONFIG_BCM_KF_OOM_REBOOT)
+
 #ifdef CONFIG_NUMA
 static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 				gfp_t gfp_mask, nodemask_t *nodemask,
@@ -302,6 +304,9 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 }
 #endif
 
+#endif
+#if !defined(CONFIG_BCM_KF_OOM_REBOOT) || defined(CONFIG_CGROUP_MEM_RES_CTLR)
+
 /*
  * Simple selection loop. We chose the process with the highest
  * number of 'points'. We expect the caller will lock the tasklist.
@@ -376,6 +381,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 	return chosen;
 }
 
+#endif
 /**
  * dump_tasks - dump current memory state of all system tasks
  * @mem: current's memory controller, if constrained
@@ -418,6 +424,9 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemas
 	}
 }
 
+#if !defined(CONFIG_BCM_KF_OOM_REBOOT) || defined(CONFIG_CGROUP_MEM_RES_CTLR)
+
+
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 			struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
@@ -435,6 +444,8 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 		dump_tasks(memcg, nodemask);
 }
 
+
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			     unsigned int points, unsigned long totalpages,
@@ -529,6 +540,8 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 #undef K
 
+
+
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
@@ -553,6 +566,9 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 }
 
+
+#endif
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 			      int order)
@@ -699,6 +715,13 @@ static void clear_system_oom(void)
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		int order, nodemask_t *nodemask, bool force_kill)
 {
+#if defined(CONFIG_BCM_KF_OOM_REBOOT)
+#define OOM_REBOOT_DELAY (5)
+#define OOM_REBOOT_INTERVAL (HZ*120)
+	static int oom_count=0;
+	static unsigned long oom_timestamp=0;
+	unsigned long freed = 0;
+#else
 	const nodemask_t *mpol_mask;
 	struct task_struct *p;
 	unsigned long totalpages;
@@ -706,6 +729,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	unsigned int points;
 	enum oom_constraint constraint = CONSTRAINT_NONE;
 	int killed = 0;
+#endif
 
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
@@ -722,6 +746,40 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		return;
 	}
 
+#if defined(CONFIG_BCM_KF_OOM_REBOOT)
+
+	/* For our embedded system, most of the processes are considered essential. */
+	/* Randomly killing a process is no better than a reboot, so we do not kill any process here. */
+	
+	printk(KERN_WARNING "\n\n%s triggered out of memory condition (oom killer not called): "
+		"gfp_mask=0x%x, order=%d, oom_adj=%d\n\n",
+		current->comm, gfp_mask, order, current->signal->oom_adj);
+	dump_stack();
+	show_mem(0);
+	printk("\n");
+	/*
+	 * The process that triggered the oom is not necessarily the one that
+	 * caused it.  dump_tasks shows all tasks and their memory usage.
+	 */
+	read_lock(&tasklist_lock);
+	dump_tasks(NULL, nodemask);
+	read_unlock(&tasklist_lock);
+
+	/* Reboot if OOM, but don't do it immediately - just in case this can be too sensitive */
+	if ((jiffies - oom_timestamp) > OOM_REBOOT_INTERVAL) {
+		oom_timestamp = jiffies;
+		oom_count = 0;		
+	}
+	else {
+		oom_count++;
+		if (oom_count >= OOM_REBOOT_DELAY) {
+			panic("Reboot due to persistent out of memory condition..");
+		}
+	}
+	schedule_timeout_interruptible(HZ*5);
+
+#else
+
 	/*
 	 * Check if there were limitations on the allocation (only relevant for
 	 * NUMA) that may require different handling.
@@ -763,6 +821,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (killed && !test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
+
+#endif
 }
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4a68c8f20e3ed5de6e59a7a71d52178707fd23c9..10776969a92c00b879092cdeab57e930b752ad14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -170,6 +170,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 #ifdef CONFIG_ZONE_DMA32
 	 256,
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	 1,
+#endif
 #ifdef CONFIG_HIGHMEM
 	 32,
 #endif
@@ -184,6 +187,9 @@ static char * const zone_names[MAX_NR_ZONES] = {
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	 "DMA32",
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	 "ACP",
 #endif
 	 "Normal",
 #ifdef CONFIG_HIGHMEM
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 9422129705297dd62bf4658287f31fa1a2f331cd..d8c579f83adca4298d10665f391f84fee450613e 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -36,6 +36,9 @@ static unsigned long max_pages(unsigned long min_pages)
 #endif
 #ifdef CONFIG_ZONE_DMA32
 		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+		zone_page_state(&zones[ZONE_ACP], NR_FREE_PAGES) +
 #endif
 		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 9d65a02a8799d2e1461d0efd5967fbc7ecfe33f1..f9240533ae9d29bc424fcf2c3d5a1bd69f4f5996 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2169,6 +2169,10 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 	unsigned long inodes;
 	int error = -EINVAL;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/* CVE-2013-1767 */
+	config.mpol = NULL;
+#endif
 	if (shmem_parse_options(data, &config, true))
 		return error;
 
@@ -2193,8 +2197,19 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 	sbinfo->max_inodes  = config.max_inodes;
 	sbinfo->free_inodes = config.max_inodes - inodes;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/* CVE-2013-1767 */
+	/*
+	* Preserve previous mempolicy unless mpol remount option was specified.
+	*/
+	if (config.mpol) {
+		mpol_put(sbinfo->mpol);
+		sbinfo->mpol = config.mpol; /* transfers initial ref */
+	}
+#else	
 	mpol_put(sbinfo->mpol);
 	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
+#endif
 out:
 	spin_unlock(&sbinfo->stat_lock);
 	return error;
diff --git a/mm/slab.c b/mm/slab.c
index 64eb636e69469e48e63fc037e3f38187d08cef31..288ea4358ae7e43d244f5daa3b72d73a5e8639bd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -153,10 +153,12 @@
 #endif
 
 /* Legal flag mask for kmem_cache_create(). */
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
 #if DEBUG
 # define CREATE_MASK	(SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
+			 SLAB_CACHE_ACP | \
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
@@ -164,10 +166,28 @@
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
+			 SLAB_CACHE_ACP | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
 			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
+#else
+#if DEBUG
+# define CREATE_MASK	(SLAB_RED_ZONE | \
+			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
+			 SLAB_STORE_USER | \
+			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+#else
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
+			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+#endif
+#endif
 
 /*
  * kmem_bufctl_t:
@@ -309,8 +329,11 @@ static void cache_reap(struct work_struct *unused);
  */
 static __always_inline int index_of(const size_t size)
 {
+#if (defined(CONFIG_BCM_KF_BOUNCE) && defined(CONFIG_BRCM_BOUNCE))
+#define __bad_size() printk("__bad_size %d\n", size )
+#else
 	extern void __bad_size(void);
-
+#endif
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -564,10 +587,17 @@ EXPORT_SYMBOL(malloc_sizes);
 struct cache_names {
 	char *name;
 	char *name_dma;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	char *name_acp;
+#endif
 };
 
 static struct cache_names __initdata cache_names[] = {
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_acp = "size-" #x "(ACP)" },
+#else
 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
+#endif
 #include <linux/kmalloc_sizes.h>
 	{NULL,}
 #undef CACHE
@@ -823,6 +853,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	while (size > csizep->cs_size)
 		csizep++;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if (unlikely(gfpflags & GFP_ACP))
+		return csizep->cs_acpcachep;
+#endif
 	/*
 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
 	 * has cs_{dma,}cachep==NULL. Thus no special case
@@ -1706,6 +1740,15 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL);
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+		sizes->cs_acpcachep = kmem_cache_create(
+					names->name_acp,
+					sizes->cs_size,
+					ARCH_KMALLOC_MINALIGN,
+					ARCH_KMALLOC_FLAGS|SLAB_CACHE_ACP|
+						SLAB_PANIC,
+					NULL);
 #endif
 		sizes++;
 		names++;
@@ -2600,6 +2643,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->gfpflags = 0;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if (flags & SLAB_CACHE_ACP)
+		cachep->gfpflags |= GFP_ACP;
+#endif
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
@@ -2969,6 +3016,12 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 		else
 			BUG_ON(cachep->gfpflags & GFP_DMA);
 	}
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if (flags & GFP_ACP)
+		BUG_ON(!(cachep->gfpflags & GFP_ACP));
+	else
+		BUG_ON(cachep->gfpflags & GFP_ACP);
+#endif
 }
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
diff --git a/mm/slub.c b/mm/slub.c
index 71de9b5685fa60a9d6e9a80ba906efd10bb64e5c..aab93e1bd8e9c4511dc528a678607bc09c72d0ae 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3222,6 +3222,9 @@ static struct kmem_cache *kmem_cache;
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+static struct kmem_cache *kmalloc_acp_caches[SLUB_PAGE_SHIFT];
+#endif
 
 static int __init setup_slub_min_order(char *str)
 {
@@ -3336,6 +3339,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	if (unlikely((flags & SLUB_DMA)))
 		return kmalloc_dma_caches[index];
 
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if (unlikely((flags & SLUB_ACP)))
+		return kmalloc_acp_caches[index];	/* was kmalloc_dma_caches: copy-paste bug; ACP requests must use the ACP caches */
+
 #endif
 	return kmalloc_caches[index];
 }
@@ -3850,6 +3858,19 @@ void __init kmem_cache_init(void)
 				s->objsize, SLAB_CACHE_DMA);
 		}
 	}
+#endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+
+		if (s && s->size) {
+			char *name = kasprintf(GFP_NOWAIT,
+				 "acp-kmalloc-%d", s->objsize);	/* was "dma-kmalloc-%d": clashes with the real DMA caches in sysfs */
+			BUG_ON(!name);	/* boot-time allocation failure is fatal */
+			kmalloc_acp_caches[i] = create_kmalloc_cache(name,
+				s->objsize, SLAB_CACHE_ACP);
+		}
+	}	/* closing brace was missing: the for loop was never terminated (compile error with ACP enabled) */
 #endif
 	printk(KERN_INFO
 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
@@ -4796,6 +4817,13 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(cache_dma);
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+static ssize_t cache_acp_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_ACP));
+}
+SLAB_ATTR_RO(cache_acp);
+#endif
 
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
@@ -5131,6 +5159,9 @@ static struct attribute *slab_attrs[] = {
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	&cache_acp_attr.attr,
+#endif
 #ifdef CONFIG_NUMA
 	&remote_node_defrag_ratio_attr.attr,
 #endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index be5bc0af2e76f00c3611a09137dce306de11dbc1..a40a9327e929e7da8f08b5077012bb6f4644f928 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3054,6 +3054,11 @@ static int kswapd(void *p)
 	};
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
+#if defined(CONFIG_BCM_KF_VMSCAN_OPT)
+	/* Lower the priority to prevent lock up when running low on memory */
+	set_user_nice(current, 10);
+#endif
+
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
 	if (!cpumask_empty(cpumask))
@@ -3143,6 +3148,17 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 
 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
+		
+#if defined(CONFIG_BCM_KF_VMSCAN_OPT)
+	/* Cap the order at 128KB blocks (order 5) to relax the fragmentation
+	 * standard, so kswapd does not run constantly without making much
+	 * progress in our small swapless system.  Big blocks should be
+	 * allocated with vmalloc or kmalloc at boot time on such a system. */
+	if (order > 5) {
+		order = 5;
+	}
+#endif
+
 	pgdat = zone->zone_pgdat;
 	if (pgdat->kswapd_max_order < order) {
 		pgdat->kswapd_max_order = order;
@@ -3327,7 +3343,15 @@ int zone_reclaim_mode __read_mostly;
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
  */
+
+#if defined(CONFIG_BCM_KF_VMSCAN_OPT)
+/* Start from a higher priority (lower value) for more aggressive memory
+ * scanning; see DEF_PRIORITY for the default starting point.
+ */
+#define ZONE_RECLAIM_PRIORITY 2
+#else
 #define ZONE_RECLAIM_PRIORITY 4
+#endif
 
 /*
  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 172212f6f213d8c330bf0d2f85d6e09bbd780efe..522f8877b87af0094519e8ff9bcd56611626f649 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -677,6 +677,9 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 #else
 #define TEXT_FOR_DMA32(xx)
 #endif
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define TEXT_FOR_ACP(xx) xx "_acp",
+#endif
 
 #ifdef CONFIG_HIGHMEM
 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
@@ -684,8 +687,15 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) TEXT_FOR_ACP(xx) \
+					xx "_normal", TEXT_FOR_HIGHMEM(xx) \
+					xx "_movable",
+					
+#else
 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
+#endif
 
 const char * const vmstat_text[] = {
 	/* Zoned VM counters */
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cf4a49c5623cdafa2cd60edaf69cf092c3b2509a..63ca874613579099eb8e9d6da892542ec1a2e4f4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -40,12 +40,20 @@
 #include "vlan.h"
 #include "vlanproc.h"
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 #define DRV_VERSION "1.8"
 
 /* Global VLAN variables */
 
 int vlan_net_id __read_mostly;
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+int vlan_dev_set_nfmark_to_priority(char *, int);
+#endif
+
 const char vlan_fullname[] = "802.1Q VLAN Support";
 const char vlan_version[] = DRV_VERSION;
 
@@ -112,6 +120,114 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	dev_put(real_dev);
 }
 
+#if defined(CONFIG_BCM_KF_VLAN)
+struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+{ /* Raw (non-blog) device counters stored directly in struct net_device. */
+	return &(dev->stats);
+}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+static inline BlogStats_t *vlan_dev_get_bstats(struct net_device *dev)
+{ /* blog-layer byte/packet counters kept in the VLAN private area */
+	return &(vlan_dev_priv(dev)->bstats);
+}
+static inline struct net_device_stats *vlan_dev_get_cstats(struct net_device *dev)
+{ /* cumulative stats snapshot handed back to .ndo_get_stats callers */
+	return &(vlan_dev_priv(dev)->cstats);
+}
+
+struct net_device_stats * vlan_dev_collect_stats(struct net_device * dev_p)
+{ /* .ndo_get_stats handler: merge the software stats with the blog (accelerated-
+     path) counters into the cumulative snapshot and return it. */
+	BlogStats_t bStats;
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return (struct net_device_stats *)NULL;
+
+	dStats_p = vlan_dev_get_stats(dev_p);
+	cStats_p = vlan_dev_get_cstats(dev_p);
+	bStats_p = vlan_dev_get_bstats(dev_p);
+
+	memset(&bStats, 0, sizeof(BlogStats_t));
+
+	blog_lock();
+	blog_notify(FETCH_NETIF_STATS, (void*)dev_p, /* fetch without clearing */
+				(uint32_t)&bStats, BLOG_PARAM2_NO_CLEAR); /* NOTE(review): pointer passed through uint32_t — assumes a 32-bit kernel; confirm */
+	blog_unlock();
+
+	memcpy( cStats_p, dStats_p, sizeof(struct net_device_stats) );
+	cStats_p->rx_packets += ( bStats.rx_packets + bStats_p->rx_packets );
+	cStats_p->tx_packets += ( bStats.tx_packets + bStats_p->tx_packets );
+
+	/* Zero the byte counts if the blog packet counts are non-zero but the
+		octet counts are 0 (byte counters unavailable from the accelerator). */
+	if ( ((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+		  ((bStats.rx_packets + bStats_p->rx_packets) > 0) )
+	{
+		cStats_p->rx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->rx_bytes   += ( bStats.rx_bytes   + bStats_p->rx_bytes );
+	}
+
+	if ( ((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+		  ((bStats.tx_packets + bStats_p->tx_packets) > 0) )
+	{
+		cStats_p->tx_bytes = 0;
+	}
+	else
+	{
+		cStats_p->tx_bytes   += ( bStats.tx_bytes   + bStats_p->tx_bytes );
+	}
+	cStats_p->multicast  += ( bStats.multicast  + bStats_p->multicast );
+
+	return cStats_p;
+}
+
+void vlan_dev_update_stats(struct net_device * dev_p, BlogStats_t *blogStats_p)
+{ /* dev->put_stats hook: accumulate blog-layer deltas into this VLAN
+     device's BlogStats_t. No-op for a NULL device. */
+	BlogStats_t * bStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+	bStats_p = vlan_dev_get_bstats(dev_p);
+
+	bStats_p->rx_packets += blogStats_p->rx_packets;
+	bStats_p->tx_packets += blogStats_p->tx_packets;
+	bStats_p->rx_bytes   += blogStats_p->rx_bytes;
+	bStats_p->tx_bytes   += blogStats_p->tx_bytes;
+	bStats_p->multicast  += blogStats_p->multicast;
+	return;
+}
+
+void vlan_dev_clear_stats(struct net_device * dev_p)
+{ /* dev->clr_stats hook: zero the device, cumulative and blog counters,
+     and tell the blog layer to clear its own accelerated-path stats. */
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+
+	dStats_p = vlan_dev_get_stats(dev_p);
+	cStats_p = vlan_dev_get_cstats(dev_p); 
+	bStats_p = vlan_dev_get_bstats(dev_p);
+
+	blog_lock();
+	blog_notify(FETCH_NETIF_STATS, (void*)dev_p, 0, BLOG_PARAM2_DO_CLEAR); /* fetch-and-clear in the blog layer */
+	blog_unlock();
+
+	memset(bStats_p, 0, sizeof(BlogStats_t));
+	memset(dStats_p, 0, sizeof(struct net_device_stats));
+	memset(cStats_p, 0, sizeof(struct net_device_stats));
+
+	return;
+}
+#endif /* CONFIG_BCM_KF_BLOG && CONFIG_BLOG */
+#endif /* CONFIG_BCM_KF_VLAN */
+
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 {
 	const char *name = real_dev->name;
@@ -237,6 +353,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	if (new_dev == NULL)
 		return -ENOBUFS;
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    /* If the real device is a hardware switch port, mark the VLAN device as one too */
+    new_dev->priv_flags |= real_dev->priv_flags;
+#endif
+
 	dev_net_set(new_dev, net);
 	/* need 4 bytes for extra VLAN header info,
 	 * hope the underlying device can handle it.
@@ -544,6 +665,13 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 						   args.vlan_qos);
 		break;
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	case SET_VLAN_NFMARK_TO_PRIORITY_CMD:
+		err = vlan_dev_set_nfmark_to_priority(args.device1,
+						   args.u.nfmark_to_priority);
+		break;
+#endif  
+
 	case SET_VLAN_FLAG_CMD:
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index a4886d94c40c453cf0032cc24a207dacb438c054..4fe94ad459027666ebe8f3a856edf74229483fe2 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -73,6 +73,15 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll				*netpoll;
 #endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	BlogStats_t bstats; /* stats when the blog promiscuous layer has consumed packets */
+	struct net_device_stats cstats; /* Cumulative device stats (rx-bytes, tx-pkts, etc...) */
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    int nfmark_to_priority;
+#endif
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9757c193c86bc66a02fc09500702ec99d45e9e77..02d1c6ab851ca74e5379c3e5ac5ca04f7075fd47 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -35,6 +35,14 @@
 #include <linux/if_vlan.h>
 #include <linux/netpoll.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+struct net_device_stats *vlan_dev_get_stats(struct net_device *dev);
+extern struct net_device_stats * vlan_dev_collect_stats(struct net_device * dev_p);
+extern void vlan_dev_update_stats(struct net_device * dev_p, BlogStats_t *blogStats_p);
+extern void vlan_dev_clear_stats(struct net_device * dev_p);
+#endif
+
 /*
  *	Rebuild the Ethernet MAC header. This is called after an ARP
  *	(or in future other address resolution) has completed on this
@@ -359,6 +367,36 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+int vlan_dev_set_nfmark_to_priority(char *dev_name, int nfmark_to_priority)
+{
+	/* Resolve the device by name; dev_get_by_name() takes a reference. */
+	struct net_device *dev = dev_get_by_name(&init_net, dev_name);
+
+	if (dev) {
+		if (dev->priv_flags & IFF_802_1Q_VLAN) {
+			if (nfmark_to_priority >= -1 && nfmark_to_priority <= 29) {
+				vlan_dev_priv(dev)->nfmark_to_priority = nfmark_to_priority;
+				dev_put(dev);	/* release dev_get_by_name() reference */
+				return 0;
+			} else {
+				printk(KERN_ERR "invalid nfmark_to_priority\n");
+			}
+		} else {
+			printk(KERN_ERR
+				"%s: %s is not a vlan device, priv_flags: %hX.\n",
+				__FUNCTION__, dev->name, dev->priv_flags);
+		}
+	} else {
+		printk(KERN_ERR "%s: Could not find device: %s\n",
+			__FUNCTION__, dev_name);
+	}
+	if (dev)	/* error paths only; dev may be NULL here (was an unconditional dev_put -> NULL deref) */
+		dev_put(dev);
+	return -EINVAL;
+}
+#endif
+
 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
@@ -748,6 +786,9 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
 #endif
 	.ndo_fix_features	= vlan_dev_fix_features,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	.ndo_get_stats = vlan_dev_collect_stats,
+#endif
 };
 
 void vlan_setup(struct net_device *dev)
@@ -762,5 +803,9 @@ void vlan_setup(struct net_device *dev)
 	dev->destructor		= free_netdev;
 	dev->ethtool_ops	= &vlan_ethtool_ops;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	dev->put_stats = vlan_dev_update_stats;
+	dev->clr_stats = vlan_dev_clear_stats;
+#endif
 	memset(dev->broadcast, 0, ETH_ALEN);
 }
diff --git a/net/Kconfig b/net/Kconfig
index e07272d0bb2deddc70c287c8ff82563d08054798..0fac55a13be70d4e84532d0c2c6c8d31996134c3 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -72,6 +72,64 @@ config INET
 
 	  Short answer: say Y.
 
+config BLOG
+	bool "Enable Network Buffer Logging"
+	depends on BCM_KF_BLOG
+	---help---
+	  Debug logging of protocol header information of a network packet
+	  buffer as it traverses the Linux networking stack.
+
+	  Say N unless you wish to debug kernel networking stack.
+
+config BLOG_IPV6
+	bool "Enable Network Buffer Logging of IPv6 packets"
+	depends on BCM_KF_BLOG
+	---help---
+	  Debug logging of IPv6 protocol header information of a network packet
+	  buffer as it traverses the Linux networking stack.
+
+	  Say N unless you wish to debug IPv6 kernel networking stack.
+
+config BLOG_MCAST
+	bool "Enable Network Buffer Logging support for Multicast packets"
+	depends on BCM_KF_BLOG
+	---help---
+	  Debug logging of Multicast packet replication in Linux networking stack.
+
+	  Say N unless you wish to debug Multicast in networking stack.
+
+config BLOG_GRE
+	bool "Enable GRE support"
+	depends on BCM_KF_BLOG
+	---help---
+	  Debug logging of GRE protocol header information of a network packet
+	  buffer as it traverses the Linux networking stack.
+
+	  Say N unless you wish to debug GRE in networking stack.
+
+config BLOG_FEATURE
+	bool "Enable Per Packet Modification support for packet flow"
+	depends on BCM_KF_BLOG
+	---help---
+	  Enhance the flow cache to be able to modify the packets on the fly.
+
+config BRCM_DPI
+	bool "Enable Deep Packet Inspection"
+	depends on BCM_KF_DPI
+	---help---
+	  Deep packet inspection support
+
+	  Say N unless you wish to peek deeper.
+
+config BLOG_L2TP
+	bool "Enable L2TP support"
+	depends on BCM_KF_BLOG
+	---help---
+	  Debug logging of L2TP protocol header information of a network packet
+	  buffer as it traverses the Linux networking stack.
+
+	  Say N unless you wish to debug l2TP in networking stack.
+
 if INET
 source "net/ipv4/Kconfig"
 source "net/ipv6/Kconfig"
@@ -79,6 +137,22 @@ source "net/netlabel/Kconfig"
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+		none
+
+config NET_ACTIVITY_STATS
+	bool "Network activity statistics tracking"
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+	help
+	 Network activity statistics are useful for tracking wireless
+	 modem activity on 2G, 3G, 4G wireless networks. Counts number of
+	 transmissions and groups them in specified time buckets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -210,6 +284,9 @@ source "net/lapb/Kconfig"
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
+if BCM_KF_MHI
+source "net/mhi/Kconfig"
+endif
 source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
@@ -338,5 +415,4 @@ source "net/caif/Kconfig"
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
 
-
 endif   # if NET
diff --git a/net/Makefile b/net/Makefile
index ad432fa4d9341fc9f5e7442ba6504d878cc820c5..a49faca91a9352274b57f2e018bbf44040119753 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,9 @@ obj-$(CONFIG_L2TP)		+= l2tp/
 obj-$(CONFIG_DECNET)		+= decnet/
 obj-$(CONFIG_ECONET)		+= econet/
 obj-$(CONFIG_PHONET)		+= phonet/
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MHI)
+obj-$(CONFIG_MHI)		+= mhi/
+endif # BCM_KF
 ifneq ($(CONFIG_VLAN_8021Q),)
 obj-y				+= 8021q/
 endif
@@ -70,3 +73,8 @@ obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
 obj-$(CONFIG_NFC)		+= nfc/
 obj-$(CONFIG_OPENVSWITCH)	+= openvswitch/
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_NET_ACTIVITY_STATS)		+= activity_stats.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 6dee7bf648a9af5a8da4bfd2b46082b71c197aa4..c323aa19e7524e5b503ddc15f4143f33c5582a31 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,6 +33,30 @@ config BRIDGE
 
 	  If unsure, say N.
 
+config BR_IGMP_SNOOP
+	tristate "Bridge IGMPv2/3 Snooping"
+	depends on BCM_KF_IGMP
+	---help---
+	If you say Y here, it will enable IGMP snooping, which optimizes
+	multicast forwarding.
+
+config BR_IGMP_SNOOP_SWITCH_PATCH
+	tristate "Software patch for Switch IGMP Packet flooding"
+	depends on BCM_KF_IGMP
+	---help---
+	IGMP snooping does not work properly if more than one LAN client is
+	connected to the LAN side and these hosts want to join the same IGMP
+	group. This problem is due to the ROBO switch 5325/5398 flooding IGMP
+	report packets on all LAN ports and to IGMP report suppression. If you
+	say Y here, it will enable a software patch to avoid this problem.
+
+config BR_MLD_SNOOP
+	tristate "Bridge MLDv1/2 Snooping"
+	depends on BCM_KF_MLD
+	---help---
+	If you say Y here, it will enable IPv6 MLD snooping, which optimizes
+	multicast forwarding.
+
 config BRIDGE_IGMP_SNOOPING
 	bool "IGMP/MLD snooping"
 	depends on BRIDGE
@@ -46,3 +70,10 @@ config BRIDGE_IGMP_SNOOPING
 	  Say N to exclude this support and reduce the binary size.
 
 	  If unsure, say Y.
+
+config BCM_VLAN_AGGREGATION
+	bool "vlan aggregation"
+	depends on BCM_KF_VLAN_AGGREGATION
+	---help---
+	If you say Y here, it will enable vlan aggregation
+	  
\ No newline at end of file
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index d0359ea8ee7901472adee37ff9f22e62d51ce585..2880e050350024bc16faa6212e2b3351353a51a9 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -2,16 +2,53 @@
 # Makefile for the IEEE 802.1d ethernet bridging layer.
 #
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_WLAN)
+ifneq ($(strip $(CONFIG_BCM_WLAN)),)
+EXTRA_CFLAGS    += -DDSLCPE -DBCMDRIVER -DPKTC
+EXTRA_CFLAGS    += -I$(BRCMDRIVERS_DIR)/broadcom/net/wl/bcm9$(BRCM_CHIP)/include
+EXTRA_CFLAGS    += -I$(BRCMDRIVERS_DIR)/broadcom/net/wl/bcm9$(BRCM_CHIP)/main/src/include
+endif
+endif #BCM_KF #defined(CONFIG_BCM_KF_WLAN)
+
 obj-$(CONFIG_BRIDGE) += bridge.o
 
 bridge-y	:= br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
 			br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
 			br_stp_if.o br_stp_timer.o br_netlink.o
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_BLOG)
+bridge-y += br_flows.o
+endif # BCM_KF #defined(CONFIG_BCM_KF_BLOG)
+
 bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
 
+ifdef BCM_KF #defined(CONFIG_BCM_KF_IGMP)
+bridge-y += br_igmp.o
+bridge-$(CONFIG_BR_MLD_SNOOP) += br_mld.o
+endif # BCM_KF #defined(CONFIG_BCM_KF_IGMP)
+
 bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
 
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_IGMP)
+ifeq ($(strip $(CONFIG_BR_IGMP_SNOOP)),y)
+bridge-$(CONFIG_BR_IGMP_SNOOP) += br_netlink_mcpd.o br_mcast.o
+else
+ifeq ($(strip $(CONFIG_BR_MLD_SNOOP)),y)
+bridge-$(CONFIG_BR_MLD_SNOOP) += br_netlink_mcpd.o br_mcast.o
+endif
+endif
+endif # BCM_KF #defined(CONFIG_BCM_KF_IGMP)
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_NETFILTER)
+bridge-y += br_notifier.o
+endif # BCM_KF #defined(CONFIG_BCM_KF_NETFILTER)
+
+ifdef BCM_KF #defined(CONFIG_BCM_KF_RUNNER)
+ifdef CONFIG_BCM_RDPA_BRIDGE
+bridge-y += br_fp.o
+endif # CONFIG_BCM_RDPA_BRIDGE
+endif # BCM_KF #defined(CONFIG_BCM_KF_RUNNER)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index ba780cc8e515d2dcd6424d627c286ec1ceb86a47..b7d82e21d3a4f9670e41db488f29258964c721fc 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -21,6 +21,15 @@
 #include <net/stp.h>
 
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) && (defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD))
+#include "br_mcast.h"
+#endif
 
 static const struct stp_proto br_stp_proto = {
 	.rcv	= br_stp_rcv,
@@ -62,10 +71,30 @@ static int __init br_init(void)
 
 	brioctl_set(br_ioctl_deviceless_stub);
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	err = br_igmp_snooping_init();
+    if(err)
+        goto err_out4;
+#endif
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	err = br_mld_snooping_init();
+    if(err)
+        goto err_out4;
+#endif
+
+
 #if IS_ENABLED(CONFIG_ATM_LANE)
 	br_fdb_test_addr_hook = br_fdb_test_addr;
 #endif
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) \
+    && (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP) || defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)) \
+    && (defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE)))
+    
+    blogRuleVlanNotifyHook = br_mcast_vlan_notify_for_blog_update;
+#endif
+
 	return 0;
 err_out4:
 	unregister_netdevice_notifier(&br_device_notifier);
@@ -75,6 +104,12 @@ static int __init br_init(void)
 	unregister_pernet_subsys(&br_net_ops);
 err_out1:
 	br_fdb_fini();
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+    br_igmp_snooping_fini();
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    br_mld_snooping_fini();
+#endif
 err_out:
 	stp_proto_unregister(&br_stp_proto);
 	return err;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba829de84423e24cf05a1ccb1e73009669281bd9..0cffbc3469185b96663e3ef2d9e5ac7daf0dd64e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -21,6 +21,83 @@
 
 #include <asm/uaccess.h>
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_WL) 
+#include "linux/bcm_skb_defines.h"
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#if defined(CONFIG_BCM_KF_WL)
+#if defined(PKTC)
+#include <linux_osl_dslcpe_pktc.h>
+extern uint32_t (*wl_pktc_req_hook)(int req_id, uint32_t param0, uint32_t param1, uint32_t param2);
+#endif /* PKTC */
+#endif
+#endif
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
+{
+	//struct net_bridge *br = netdev_priv(dev);
+	//return &br->statistics;
+	return &dev->stats;
+	
+}
+
+static BlogStats_t * br_dev_get_bstats(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return &br->bstats;
+}
+
+static struct net_device_stats *br_dev_get_cstats(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return &br->cstats;
+}
+
+
+static void *br_dev_get_stats_p(struct net_device * dev_p,char type) {
+
+	switch (type) {
+		case 'd':
+			return br_dev_get_stats(dev_p);
+		case 'c':
+			return  br_dev_get_cstats(dev_p);
+		case 'b':
+			return  br_dev_get_bstats(dev_p);
+	}
+	return NULL;
+}
+                                
+static void br_dev_update_stats(struct net_device * dev_p, 
+                                BlogStats_t * blogStats_p)
+{
+	BlogStats_t * bStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+
+	bStats_p = br_dev_get_bstats(dev_p);
+
+	bStats_p->rx_packets += blogStats_p->rx_packets;
+	bStats_p->tx_packets += blogStats_p->tx_packets;
+	bStats_p->rx_bytes   += blogStats_p->rx_bytes;
+	bStats_p->tx_bytes   += blogStats_p->tx_bytes;
+	bStats_p->multicast  += blogStats_p->multicast;
+
+	return;
+}
+
+#endif /* CONFIG_BLOG */
 
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -30,6 +107,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
 	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP))
+	struct iphdr *pipmcast = NULL;
+	struct igmphdr *pigmp = NULL;
+#endif
+#if (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+	struct ipv6hdr *pipv6mcast = NULL;
+	struct icmp6hdr *picmpv6 = NULL;
+#endif
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
@@ -38,6 +123,28 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif
 
+#if defined(CONFIG_BCM_KF_EXTSTATS) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(IF_DEVICE, blog_ptr(skb), (void*)dev, DIR_TX, skb->len);
+	blog_unlock();
+
+	/* Gather general TX statistics */
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	/* Gather packet specific packet data using pkt_type calculations from the ethernet driver */
+	switch (skb->pkt_type) {
+	case PACKET_BROADCAST:
+		dev->stats.tx_broadcast_packets++;
+		break;
+
+	case PACKET_MULTICAST:
+		dev->stats.tx_multicast_packets++;
+		dev->stats.tx_multicast_bytes += skb->len;
+		break;
+	}
+#endif
+
 	u64_stats_update_begin(&brstats->syncp);
 	brstats->tx_packets++;
 	brstats->tx_bytes += skb->len;
@@ -49,6 +156,28 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_pull(skb, ETH_HLEN);
 
 	rcu_read_lock();
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	br_mld_get_ip_icmp_hdrs(skb, &pipv6mcast, &picmpv6, NULL);
+	if (pipv6mcast != NULL) {
+		if (br_mld_mc_forward(br, skb, 0, 1)) {
+			/* skb consumed so exit */
+			goto out;
+		}
+	}
+	else
+#endif
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, NULL);
+	if ( pipmcast != NULL )
+	{
+		if (br_igmp_mc_forward(br, skb, 0, 1)) {
+			/* skb consumed so exit */
+			goto out;
+		}
+	}
+#endif
+
 	if (is_broadcast_ether_addr(dest))
 		br_flood_deliver(br, skb);
 	else if (is_multicast_ether_addr(dest)) {
@@ -67,7 +196,44 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 		else
 			br_flood_deliver(br, skb);
 	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		blog_lock();
+		blog_link(BRIDGEFDB, blog_ptr(skb), (void*)dst, BLOG_PARAM1_DSTFDB, 0);
+		blog_unlock();
+#if defined(CONFIG_BCM_KF_WL)
+#if defined(PKTC)
+		if (wl_pktc_req_hook && (dst->dst != NULL) &&
+			(BLOG_GET_PHYTYPE(dst->dst->dev->path.hw_port_type) == BLOG_WLANPHY) && 
+			wl_pktc_req_hook(GET_PKTC_TX_MODE, 0, 0, 0))
+		{
+			struct net_device *dst_dev_p = dst->dst->dev;
+			uint32_t chainIdx = wl_pktc_req_hook(UPDATE_BRC_HOT, (uint32_t)&(dst->addr.addr[0]), (uint32_t)dst_dev_p, 0);
+			if (chainIdx != INVALID_CHAIN_IDX)
+			{
+				// Update chainIdx in blog
+				if (skb->blog_p != NULL)
+				{
+					skb->blog_p->wfd.nic_ucast.is_tx_hw_acc_en = 1;
+					skb->blog_p->wfd.nic_ucast.is_wfd = 1;
+					skb->blog_p->wfd.nic_ucast.is_chain = 1;
+					skb->blog_p->wfd.nic_ucast.wfd_idx = ((chainIdx & WFD_IDX_UINT16_BIT_MASK) >> WFD_IDX_UINT16_BIT_POS);
+					skb->blog_p->wfd.nic_ucast.chain_idx = chainIdx;
+					//printk("%s: Added ChainEntryIdx 0x%x Dev %s blogSrcAddr 0x%x blogDstAddr 0x%x DstMac %x:%x:%x:%x:%x:%x "
+					//       "wfd_q %d wl_metadata %d wl 0x%x\n", __FUNCTION__,
+					//        chainIdx, dst->dst->dev->name, skb->blog_p->rx.tuple.saddr, skb->blog_p->rx.tuple.daddr,
+					//        dst->addr.addr[0], dst->addr.addr[1], dst->addr.addr[2], dst->addr.addr[3], dst->addr.addr[4],
+					//        dst->addr.addr[5], skb->blog_p->wfd_queue, skb->blog_p->wl_metadata, skb->blog_p->wl);
+				}
+			}
+		}
+#endif
+#endif
+		br_deliver(dst->dst, skb);
+	}        
+#else
 		br_deliver(dst->dst, skb);
+#endif
 	else
 		br_flood_deliver(br, skb);
 
@@ -115,6 +281,7 @@ static int br_dev_stop(struct net_device *dev)
 	return 0;
 }
 
+#if !(defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG))
 static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
 						struct rtnl_link_stats64 *stats)
 {
@@ -143,6 +310,7 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
 
 	return stats;
 }
+#endif
 
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -304,7 +472,11 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_stop		 = br_dev_stop,
 	.ndo_init		 = br_dev_init,
 	.ndo_start_xmit		 = br_dev_xmit,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	.ndo_get_stats		 = net_dev_collect_stats,
+#else
 	.ndo_get_stats64	 = br_get_stats64,
+#endif
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
@@ -338,6 +510,12 @@ void br_dev_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	dev->put_stats = br_dev_update_stats;
+	dev->get_stats_pointer = br_dev_get_stats_p;
+	dev->clr_stats = net_dev_clear_stats;
+#endif
+
 	dev->netdev_ops = &br_netdev_ops;
 	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
@@ -370,8 +548,28 @@ void br_dev_setup(struct net_device *dev)
 	br->bridge_hello_time = br->hello_time = 2 * HZ;
 	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
 	br->ageing_time = 300 * HZ;
+#if defined(CONFIG_BCM_KF_BRIDGE_COUNTERS)
+	br->mac_entry_discard_counter = 0;
+#endif
 
 	br_netfilter_rtable_init(br);
 	br_stp_timer_init(br);
 	br_multicast_init(br);
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	br->num_fdb_entries = 0;
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	br->max_br_fdb_entries = BR_MAX_FDB_ENTRIES;
+	br->used_br_fdb_entries = 0;
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	br_igmp_snooping_br_init(br);
+#endif
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	br_mld_snooping_br_init(br);
+#endif
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c844d508cbe549788e2219b4dd9ab1383149..91ab9a5b9ff8525620e482873e06b8807ce9aed0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -24,6 +24,32 @@
 #include <linux/atomic.h>
 #include <asm/unaligned.h>
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include "br_igmp.h"
+#include <linux/blog.h>
+#endif
+#if defined(CONFIG_BCM_KF_LOG)
+#include <linux/bcm_log.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+#include "br_fp.h"
+#include "br_fp_hooks.h"
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_WL)
+#include <linux/module.h>
+int (*fdb_check_expired_wl_hook)(unsigned char *addr) = NULL;
+int (*fdb_check_expired_dhd_hook)(unsigned char *addr) = NULL;
+#endif
+
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+void br_loopback_detected(struct net_bridge_port *p);
+#endif
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
@@ -51,18 +77,31 @@ void br_fdb_fini(void)
 	kmem_cache_destroy(br_fdb_cache);
 }
 
-
 /* if topology_changing then use forward_delay (default 15 sec)
  * otherwise keep longer (default 5 minutes)
  */
 static inline unsigned long hold_time(const struct net_bridge *br)
 {
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	/* Seems one timer constant in bridge code can serve several different purposes. As we use forward_delay=0,
+	if the code is left unchanged, every entry in fdb will expire immediately after a topology change and every packet
+	will flood the local ports for a period of bridge_max_age. This will result in low throughput after boot up. 
+	So we decouple this timer from forward_delay. */
+	return br->topology_change ? (15*HZ) : br->ageing_time;
+#else
 	return br->topology_change ? br->forward_delay : br->ageing_time;
+#endif
 }
 
 static inline int has_expired(const struct net_bridge *br,
 				  const struct net_bridge_fdb_entry *fdb)
 {
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	if (fdb->fdb_key != BLOG_KEY_NONE)
+		blog_query(QUERY_BRIDGEFDB, (void*)fdb, fdb->fdb_key, 0, 0);
+	blog_unlock();
+#endif
 	return !fdb->is_static &&
 		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
@@ -81,10 +120,72 @@ static void fdb_rcu_free(struct rcu_head *head)
 	kmem_cache_free(br_fdb_cache, ent);
 }
 
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+static int fdb_limit_port_check(struct net_bridge *br, struct net_bridge_port *port)
+{
+	if (port->max_port_fdb_entries != 0) {
+		/* Check per port max limit */
+		if ((port->num_port_fdb_entries+1) > port->max_port_fdb_entries)
+			return -1;
+	}
+	return 0;    
+}
+
+static int fdb_limit_check(struct net_bridge *br, struct net_bridge_port *port)
+{
+	if (br->max_br_fdb_entries != 0) {
+		/* Check per br limit */
+		if ((br->used_br_fdb_entries+1) > br->max_br_fdb_entries)
+			return -1;
+	}
+
+	return fdb_limit_port_check(br, port);
+}
+
+static void fdb_limit_update(struct net_bridge *br, struct net_bridge_port *port, int isAdd)
+{
+	if (isAdd) {
+		port->num_port_fdb_entries++;
+		if (port->num_port_fdb_entries > port->min_port_fdb_entries)
+			br->used_br_fdb_entries++;
+	}
+	else {
+		BUG_ON(!port->num_port_fdb_entries);
+		port->num_port_fdb_entries--;
+		if (port->num_port_fdb_entries >= port->min_port_fdb_entries)
+			br->used_br_fdb_entries--;
+	}        	
+}
+#endif
+
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	br->num_fdb_entries--;
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	if (f->is_local == 0) {
+		fdb_limit_update(br, f->dst, 0);
+	}
+#endif
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+	if (!f->is_local) /* Do not remove local MAC to the Runner  */
+		br_fp_hook(BR_FP_FDB_REMOVE, f, NULL);
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
 	hlist_del_rcu(&f->hlist);
 	fdb_notify(br, f, RTM_DELNEIGH);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	if (f->fdb_key != BLOG_KEY_NONE)
+		blog_notify(DESTROY_BRIDGEFDB, (void*)f, f->fdb_key, 0);
+	blog_unlock();
+#endif
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
 
@@ -92,7 +193,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
 	struct net_bridge *br = p->br;
 	int i;
-
+	
 	spin_lock_bh(&br->hash_lock);
 
 	/* Search all chains since old address/hash is unknown */
@@ -106,7 +207,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 				/* maybe another port has same hw addr? */
 				struct net_bridge_port *op;
 				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
+					if (op != p && 
 					    !compare_ether_addr(op->dev->dev_addr,
 								f->addr.addr)) {
 						f->dst = op;
@@ -155,9 +256,36 @@ void br_fdb_cleanup(unsigned long _data)
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			blog_lock();
+			if (f->fdb_key != BLOG_KEY_NONE)
+				blog_query(QUERY_BRIDGEFDB, (void*)f, f->fdb_key, 0, 0);
+			blog_unlock();
+#endif
 			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
+#if defined(CONFIG_BCM_KF_RUNNER) || defined(CONFIG_BCM_KF_WL)
+			{
+				int flag = 0;
+
+#if (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)) && (defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE))
+				br_fp_hook(BR_FP_FDB_CHECK_AGE, f, &flag);
+#endif /* CONFIG_BCM_RDPA && CONFIG_BCM_RDPA_BRIDGE && CONFIG_BCM_RDPA_BRIDGE_MODULE */
+				if (flag
+#if defined(CONFIG_BCM_KF_WL)
+				    || (fdb_check_expired_wl_hook && (fdb_check_expired_wl_hook(f->addr.addr) == 0))
+				    || (fdb_check_expired_dhd_hook && (fdb_check_expired_dhd_hook(f->addr.addr) == 0))
+#endif
+				    )
+				{
+					f->updated = jiffies;
+				}
+				else
+					fdb_delete(br, f);
+			}
+#else
 				fdb_delete(br, f);
+#endif
 			else if (time_before(this_timer, next_timer))
 				next_timer = this_timer;
 		}
@@ -178,7 +306,23 @@ void br_fdb_flush(struct net_bridge *br)
 		struct hlist_node *h, *n;
 		hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
 			if (!f->is_static)
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)) && (defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE))
+			{
+				int flag = 0;
+
+				br_fp_hook(BR_FP_FDB_CHECK_AGE, f, &flag);
+				if (flag) {
+					f->updated = jiffies;
+				}
+				else
+				{
+					fdb_delete(br, f);
+				}
+			}
+#else /* CONFIG_BCM_KF_RUNNER && CONFIG_BCM_RUNNER && (CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE) */
 				fdb_delete(br, f);
+#endif /* CONFIG_BCM_KF_RUNNER && CONFIG_BCM_RUNNER && (CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE) */
+
 		}
 	}
 	spin_unlock_bh(&br->hash_lock);
@@ -196,15 +340,27 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 	spin_lock_bh(&br->hash_lock);
 	for (i = 0; i < BR_HASH_SIZE; i++) {
 		struct hlist_node *h, *g;
-
+		
 		hlist_for_each_safe(h, g, &br->hash[i]) {
 			struct net_bridge_fdb_entry *f
 				= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst != p)
+			if (f->dst != p) 
 				continue;
 
+#if !defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
 			if (f->is_static && !do_all)
 				continue;
+#else
+			/* do_all - 0: only delete dynamic entries
+						1: delete all entries
+						2: only delete static entries */
+
+			if (f->is_static && (do_all == 0))
+				continue;
+			else if (!f->is_static && (do_all == 2))
+				continue;            
+#endif
+			
 			/*
 			 * if multiple ports all have the same device address
 			 * then when one port is deleted, assign
@@ -213,7 +369,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 			if (f->is_local) {
 				struct net_bridge_port *op;
 				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
+					if (op != p && 
 					    !compare_ether_addr(op->dev->dev_addr,
 								f->addr.addr)) {
 						f->dst = op;
@@ -229,6 +385,86 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 	spin_unlock_bh(&br->hash_lock);
 }
 
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+int br_get_fdb_limit(struct net_bridge *br, 
+						const struct net_bridge_port *p,
+						int is_min)
+{
+	if((br == NULL) && (p == NULL))
+		return -EINVAL;
+
+	if(br != NULL) {
+		if(is_min) 
+			return br->used_br_fdb_entries;
+		else
+			return br->max_br_fdb_entries;
+	}
+	else { 
+		if(is_min)
+			return p->min_port_fdb_entries;
+		else
+			return p->max_port_fdb_entries;
+	}
+}
+/* Set FDB limit
+lmtType 0: Bridge limit
+			1: Port limit
+*/
+int br_set_fdb_limit(struct net_bridge *br, 
+						struct net_bridge_port *p,
+						int lmt_type,
+						int is_min,
+						int fdb_limit)
+{
+	int new_used_fdb;
+	
+	if((br == NULL) || ((p == NULL) && lmt_type))
+		return -EINVAL;
+
+	if(fdb_limit == 0) {
+		/* Disable limit */
+		if(lmt_type == 0) {
+			br->max_br_fdb_entries = 0;
+		}
+		else if(is_min) {
+			if (p->num_port_fdb_entries < p->min_port_fdb_entries) {
+				new_used_fdb = br->used_br_fdb_entries - p->min_port_fdb_entries;
+				new_used_fdb += p->num_port_fdb_entries;
+				br->used_br_fdb_entries = new_used_fdb;
+			}
+			p->min_port_fdb_entries = 0;
+		}
+		else {
+			p->max_port_fdb_entries = 0;
+		}            
+	}
+	else {        
+		if(lmt_type == 0) {
+			if(br->used_br_fdb_entries > fdb_limit) 
+				return -EINVAL;
+			br->max_br_fdb_entries = fdb_limit;
+		}
+		else if(is_min) {
+			new_used_fdb = max(p->num_port_fdb_entries, p->min_port_fdb_entries);
+			new_used_fdb = br->used_br_fdb_entries - new_used_fdb;
+			new_used_fdb += max(p->num_port_fdb_entries, fdb_limit);
+			if ( (br->max_br_fdb_entries != 0) &&
+				(new_used_fdb > br->max_br_fdb_entries) )
+				return -EINVAL;
+				
+			p->min_port_fdb_entries = fdb_limit;   
+			br->used_br_fdb_entries = new_used_fdb;
+		}
+		else {
+			if(p->num_port_fdb_entries > fdb_limit)
+				return -EINVAL;
+			p->max_port_fdb_entries = fdb_limit;
+		}
+	}
+	return 0;
+}
+#endif
+
 /* No locking or refcounting, assumes caller has rcu_read_lock */
 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 					  const unsigned char *addr)
@@ -272,7 +508,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 #endif /* CONFIG_ATM_LANE */
 
 /*
- * Fill buffer with forwarding table records in
+ * Fill buffer with forwarding table records in 
  * the API format.
  */
 int br_fdb_fillbuf(struct net_bridge *br, void *buf,
@@ -291,7 +527,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 			if (num >= maxnum)
 				goto out;
 
-			if (has_expired(br, f))
+			if (has_expired(br, f)) 
 				continue;
 
 			/* ignore pseudo entry for local MAC address */
@@ -313,6 +549,9 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 			fe->is_local = f->is_local;
 			if (!f->is_static)
 				fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			fe->vid = f->vid;
+#endif
 			++fe;
 			++num;
 		}
@@ -338,7 +577,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
 }
 
 static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
-						 const unsigned char *addr)
+						    const unsigned char *addr)
 {
 	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
@@ -350,24 +589,127 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
 	return NULL;
 }
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, 
+					       struct hlist_head *head,
+					       struct net_bridge_port *source,
+					       const unsigned char *addr,
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+					       unsigned int vid,
+#endif
+					       int is_local,
+					       int is_static)
+#else
 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 					       struct net_bridge_port *source,
 					       const unsigned char *addr)
+#endif /* CONFIG_BCM_KF_BRIDGE_STATIC_FDB */
 {
 	struct net_bridge_fdb_entry *fdb;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	if (br->num_fdb_entries >= BR_MAX_FDB_ENTRIES)
+		return NULL;
+
+	/* some users want to always flood. */
+	if (hold_time(br) == 0 && !is_static)
+		return NULL;
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	if (!is_local && (fdb_limit_check(br, source) != 0))
+		return NULL;
+#endif
+
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
+#if !defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
 		fdb->is_local = 0;
 		fdb->is_static = 0;
+#else
+		fdb->is_static = is_static;
+		fdb->is_local = is_local;
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		fdb->fdb_key = BLOG_KEY_NONE;
+#endif
+#if defined (CONFIG_BCM_KF_NETFILTER)
+		br->num_fdb_entries++;
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+		if (!is_local) {
+			fdb_limit_update(br, source, 1);
+		}
+#endif
 		fdb->updated = fdb->used = jiffies;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+		fdb->vid = vid;
+#endif
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+		if (!is_local) /* Do not add local MAC to the Runner  */
+			br_fp_hook(BR_FP_FDB_ADD, fdb, NULL);
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 		hlist_add_head_rcu(&fdb->hlist, head);
 	}
 	return fdb;
 }
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+/* called with RTNL */
+static int fdb_adddel_static(struct net_bridge *br,
+                             struct net_bridge_port *source,
+                             const unsigned char *addr, 
+                             int addEntry)
+{
+	struct hlist_head *head;
+	struct net_bridge_fdb_entry *fdb;
+
+	if (!is_valid_ether_addr(addr))
+		return -EINVAL;
+
+	head = &br->hash[br_mac_hash(addr)];
+	fdb = fdb_find(head, addr);
+	if (fdb)
+	{
+		/* if the entry exists and it is not static then we will delete it
+		   and then add it back as static. If we are not adding an entry
+		   then just delete it */
+		if ( (0 == addEntry) || (0 == fdb->is_static) )
+		{
+			fdb_delete(br, fdb);
+		}
+		else
+		{ /* add same static mac, do nothing */
+			return 0;
+		}
+	}
+   
+	if ( 1 == addEntry )
+	{
+		struct net_bridge_fdb_entry * fdb;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+		fdb = fdb_create(br, head, source, addr, VLAN_N_VID, 0, 1);
+#else
+		fdb = fdb_create(br, head, source, addr, 0, 1);
+#endif
+		if (!fdb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+#endif
+
+
+
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr)
 {
@@ -381,16 +723,24 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 	if (fdb) {
 		/* it is okay to have multiple ports with same
 		 * address, just use the first one.
-		 */
+				 */
 		if (fdb->is_local)
-			return 0;
+					return 0;
 		br_warn(br, "adding interface %s with same address "
-		       "as a received packet\n",
-		       source->dev->name);
+				       "as a received packet\n",
+				       source->dev->name);
 		fdb_delete(br, fdb);
-	}
+			}
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	fdb = fdb_create(br, head, source, addr, VLAN_N_VID, 1, 1);
+#else
+	fdb = fdb_create(br, head, source, addr, 1, 1);
+#endif
+#else
 	fdb = fdb_create(head, source, addr);
+#endif
 	if (!fdb)
 		return -ENOMEM;
 
@@ -410,9 +760,13 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 	spin_unlock_bh(&br->hash_lock);
 	return ret;
 }
-
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+		   const unsigned char *addr, const unsigned int vid)
+#else
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr)
+#endif
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
 	struct net_bridge_fdb_entry *fdb;
@@ -432,17 +786,105 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		if (unlikely(fdb->is_local)) {
 			if (net_ratelimit())
 				br_warn(br, "received packet on %s with "
-					"own address as source address\n",
-					source->dev->name);
-		} else {
+				       " own address as source address\n",
+				       source->dev->name);
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+			else {
+				// something's gone wrong here -- we're likely in a loop.
+				// block _outgoing_ port if stp is enabled. Count on stp to 
+				// unblock it later.
+				spin_lock_bh(&br->lock);
+				if (br->stp_enabled != BR_NO_STP) {
+					if (source->state != BR_STATE_DISABLED && source->state != BR_STATE_BLOCKING) {
+						BUG_ON(source == NULL || source->dev == NULL);
+						printk("Disabling port %s due to possible loop\n", 
+						        source->dev->name);
+						br_loopback_detected(source);
+					}
+				}
+				spin_unlock_bh(&br->lock);
+			}
+#endif
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+		} else if ( likely (fdb->is_static == 0)  ) {
+#else
+                } else {
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+			struct net_bridge_port *fdb_dst = fdb->dst;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			unsigned int fdb_vid = fdb->vid;
+#endif /* CONFIG_BCM_KF_VLAN_AGGREGATION && CONFIG_BCM_VLAN_AGGREGATION */
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined (CONFIG_BCM_KF_NETFILTER) || (defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT))
+			/* In case of MAC move - let ethernet driver clear switch ARL */
+			if (fdb->dst && fdb->dst->port_no != source->port_no) {
+#if defined (CONFIG_BCM_KF_NETFILTER)
+				bcmFun_t *ethswClearArlFun;
+#endif
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+				/* Check can be learned on new port */
+				if (fdb_limit_port_check(br, source) != 0)
+					return;
+				/* Modify both of old and new port counter */
+				fdb_limit_update(br, fdb->dst, 0);
+				fdb_limit_update(br, source, 1);
+#endif /* CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT && CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT */
+#if defined (CONFIG_BCM_KF_NETFILTER)
+				/* Get the switch clear ARL function pointer */
+				ethswClearArlFun =  bcmFun_get(BCM_FUN_IN_ENET_CLEAR_ARL_ENTRY);
+				if ( ethswClearArlFun ) {
+					ethswClearArlFun((void*)addr);
+				}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+				blog_lock();
+				/* Also flush the associated entries in accelerators */
+				if (fdb->fdb_key != BLOG_KEY_NONE)
+					blog_notify(DESTROY_BRIDGEFDB, (void*)fdb, fdb->fdb_key, 0);
+				blog_unlock();
+#endif
+#endif
+			}
+#endif /* CONFIG_BCM_KF_NETFILTER || (CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT && CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT */
 			/* fastpath: update of existing entry */
 			fdb->dst = source;
 			fdb->updated = jiffies;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			fdb->vid = vid;
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			/*  Do not update FastPath if the source still == dst and vid is same */
+			if (fdb_dst != source || fdb_vid != vid)
+				br_fp_hook(BR_FP_FDB_MODIFY, fdb, NULL);
+#else
+			/*  Do not update FastPath if the source still == dst */
+			if (fdb_dst != source)
+				br_fp_hook(BR_FP_FDB_MODIFY, fdb, NULL);
+#endif /* CONFIG_BCM_KF_VLAN_AGGREGATION && CONFIG_BCM_VLAN_AGGREGATION */
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 		}
 	} else {
 		spin_lock(&br->hash_lock);
 		if (likely(!fdb_find(head, addr))) {
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			fdb = fdb_create(br, head, source, addr, vid, 0, 0);
+#else
+			fdb = fdb_create(br, head, source, addr, 0, 0);
+#endif
+#else
 			fdb = fdb_create(head, source, addr);
+#endif
 			if (fdb)
 				fdb_notify(br, fdb, RTM_NEWNEIGH);
 		}
@@ -453,6 +895,33 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	}
 }
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+extern void br_fdb_refresh( struct net_bridge_fdb_entry *fdb );
+void br_fdb_refresh( struct net_bridge_fdb_entry *fdb )
+{
+	fdb->updated = jiffies;
+	return;
+}
+#endif
+
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+int br_fdb_adddel_static(struct net_bridge *br, struct net_bridge_port *source,
+                         const unsigned char *addr, int bInsert)
+{
+	int ret = 0;
+
+	spin_lock_bh(&br->hash_lock);
+
+	ret = fdb_adddel_static(br, source, addr, bInsert);
+
+	spin_unlock_bh(&br->hash_lock);
+   
+	return ret;
+}
+#endif
+
+
 static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
 {
 	if (fdb->is_local)
@@ -491,6 +960,12 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 
 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
 	ci.ndm_confirmed = 0;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	if (fdb->fdb_key != BLOG_KEY_NONE)
+		blog_query(QUERY_BRIDGEFDB, (void*)fdb, fdb->fdb_key, 0, 0);
+	blog_unlock();
+#endif
 	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
 	ci.ndm_refcnt	 = 0;
 	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
@@ -588,7 +1063,15 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+		fdb = fdb_create(br, head, source, addr, VLAN_N_VID, 0, 0);
+#else
+		fdb = fdb_create(br, head, source, addr, 0, 0);
+#endif
+#else
 		fdb = fdb_create(head, source, addr);
+#endif
 		if (!fdb)
 			return -ENOMEM;
 		fdb_notify(br, fdb, RTM_NEWNEIGH);
@@ -598,11 +1081,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 	}
 
 	if (fdb_to_nud(fdb) != state) {
-		if (state & NUD_PERMANENT)
-			fdb->is_local = fdb->is_static = 1;
+	if (state & NUD_PERMANENT)
+		fdb->is_local = fdb->is_static = 1;
 		else if (state & NUD_NOARP) {
 			fdb->is_local = 0;
-			fdb->is_static = 1;
+		fdb->is_static = 1;
 		} else
 			fdb->is_local = fdb->is_static = 0;
 
@@ -666,12 +1149,16 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
 	if (ndm->ndm_flags & NTF_USE) {
 		rcu_read_lock();
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+		br_fdb_update(p->br, p, addr, VLAN_N_VID);
+#else
 		br_fdb_update(p->br, p, addr);
+#endif
 		rcu_read_unlock();
 	} else {
-		spin_lock_bh(&p->br->hash_lock);
-		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
-		spin_unlock_bh(&p->br->hash_lock);
+	spin_lock_bh(&p->br->hash_lock);
+	err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+	spin_unlock_bh(&p->br->hash_lock);
 	}
 
 	return err;
@@ -739,3 +1226,40 @@ int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
 	return err;
 }
+
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+int br_fdb_get_vid(const unsigned char *addr)
+{
+	struct net_bridge *br = NULL;
+	struct hlist_node *h;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_device *br_dev;
+	int addr_hash = br_mac_hash(addr);
+	int vid = -1;
+
+	rcu_read_lock();
+
+	for_each_netdev(&init_net, br_dev){
+		if (br_dev->priv_flags & IFF_EBRIDGE) {
+			br = netdev_priv(br_dev);
+			hlist_for_each_entry_rcu(fdb, h, &br->hash[addr_hash], hlist) {
+				if (!compare_ether_addr(fdb->addr.addr, addr)) {
+					if (unlikely(!has_expired(br, fdb)))
+						vid = (int)fdb->vid;
+					break;
+				}
+			}
+		}          
+	}
+	
+	rcu_read_unlock();
+	return vid;
+}
+EXPORT_SYMBOL(br_fdb_get_vid);
+#endif //defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+
+
+#if defined(CONFIG_BCM_KF_WL)
+EXPORT_SYMBOL(fdb_check_expired_wl_hook);
+EXPORT_SYMBOL(fdb_check_expired_dhd_hook);
+#endif
diff --git a/net/bridge/br_flows.c b/net/bridge/br_flows.c
new file mode 100644
index 0000000000000000000000000000000000000000..edf5dddb91050ff8d5db0d708ea8cdd6b6fdd3e8
--- /dev/null
+++ b/net/bridge/br_flows.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2011 Broadcom Corporation
+ *
+ * <:label-BRCM:2012:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed
+ * to you under the terms of the GNU General Public License version 2
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give
+ *    you permission to link this software with independent modules, and
+ *    to copy and distribute the resulting executable under terms of your
+ *    choice, provided that you also meet, for each linked independent
+ *    module, the terms and conditions of the license of that module.
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications
+ *    of the software.
+ * 
+ * Not withstanding the above, under no circumstances may you combine
+ * this software in any way with any other Broadcom software provided
+ * under a license other than the GPL, without Broadcom's express prior
+ * written consent.
+ * 
+ * :>
+ */
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <asm/atomic.h>
+#include <linux/ip.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <linux/if_vlan.h>
+#include <linux/blog.h>
+#include <linux/blog_rule.h>
+#include <linux/rtnetlink.h>
+#include "br_private.h"
+
+int testid = 0;
+
+static void free_ruleid_list(struct br_blog_rule_id *id_p);
+static int init_blog_header(struct net_device *dev_p, BlogHeader_t *bh_p);
+static struct br_blog_rule_id * activate_blog_rules(Blog_t *blog_p);
+static struct br_flow_path * deactivate_blog_rules(struct br_flow_path *path_p,
+                                                   struct net_device *rxVlanDev_p);
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   void free_ruleid_list(struct br_blog_rule_id *id_p)
+ * Description:
+ *   Free a blog rule id list.
+ * Parameters:
+ *   id_p (input): pointer to the blog rule id list.
+ *------------------------------------------------------------------------------
+ */
+void free_ruleid_list(struct br_blog_rule_id *id_p)
+{
+   struct br_blog_rule_id *nextid_p;
+   
+   while (id_p != NULL)
+   {
+      nextid_p = id_p->next_p;
+      
+//      printk(KERN_NOTICE "%s free blog rule id 0x%x\n",__FUNCTION__, id_p->id);
+      kfree(id_p);
+      id_p = nextid_p;
+   }
+   
+   return;
+   
+}  /* free_ruleid_list() */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   int init_blog_header(struct net_device *dev_p, BlogHeader_t *bh_p)
+ * Description:
+ *   Initialize the blog header data structure of a blog for
+ *   the given device (dev_p).
+ * Parameters:
+ *   dev_p  (input): pointer to net device.
+ *   bh_p   (input): pointer to the blog header data structure.
+ * Returns:
+ *   0:  succeeded.
+ *   -1: failed.
+ *------------------------------------------------------------------------------
+ */
+int init_blog_header(struct net_device *dev_p, BlogHeader_t *bh_p)
+{
+   int ret = 0;
+   
+   /* find the root device */
+   while (1)
+   {
+      if (netdev_path_is_root(dev_p))
+         break;
+      dev_p = netdev_path_next_dev(dev_p);
+   }
+      
+	bh_p->dev_p = dev_p;
+   
+   bh_p->info.phyHdr =
+      netdev_path_get_hw_port_type(dev_p) & BLOG_PHYHDR_MASK;
+   
+   switch (bh_p->info.phyHdrType)
+   {
+      case BLOG_ENETPHY:
+         bh_p->info.channel = netdev_path_get_hw_port(dev_p);
+         bh_p->info.bmap.BCM_SWC = 1;
+      break;
+      
+      case BLOG_XTMPHY:
+         bh_p->info.channel = netdev_path_get_hw_port(dev_p);
+         bh_p->info.bmap.BCM_XPHY = 1;
+      break;
+      
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_GPON) || defined(CONFIG_BCM_EPON)
+      case BLOG_GPONPHY:
+      case BLOG_EPONPHY:            
+         bh_p->info.channel = netdev_path_get_hw_port(dev_p);
+      break;
+#endif /* CONFIG_BCM_GPON || CONFIG_BCM_EPON */
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
+      
+      default:
+		   printk(KERN_WARNING "%s phyHdrType %d is not supported\n",
+                __FUNCTION__, bh_p->info.phyHdrType);
+         ret = -1;
+      break;
+   }
+
+   return ret;
+   
+}  /* init_blog_header() */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   struct br_blog_rule_id * activate_blog_rules(Blog_t *blog_p)
+ * Description:
+ *   Activate blog rules of a layer2 flow blog.
+ * Parameters:
+ *   blog_p (input): pointer to the layer2 flow blog.
+ * Returns:
+ *   The list of activated blog rule ids.
+ *------------------------------------------------------------------------------
+ */
+struct br_blog_rule_id * activate_blog_rules(Blog_t *blog_p)
+{
+   Blog_t                 *new_blog_p;
+   blogRule_t             *rule_p      = NULL;
+   blogRule_t             *n_rule_p    = NULL;
+   blogRuleFilter_t       *rule_filter = NULL;
+   struct br_blog_rule_id *ruleId_p    = NULL;
+   struct br_blog_rule_id *id_p        = NULL;
+   uint32_t               vid          = 0;
+   uint32_t               key;
+
+   if (!blog_p || !blog_p->blogRule_p)
+      return NULL;
+
+	new_blog_p = blog_get();
+	if (new_blog_p == BLOG_NULL)
+   {
+		printk(KERN_WARNING "%s new_blog_p allocation failed\n",__FUNCTION__);
+		return NULL;
+	}
+
+   /* get a copy of blog_p */
+   blog_copy(new_blog_p, blog_p);
+
+   /* activate blog rules one at a time */
+   for (rule_p = blog_p->blogRule_p; rule_p; rule_p = rule_p->next_p)
+   {
+      /* allocate a rule id node */
+	   id_p = kmalloc(sizeof(struct br_blog_rule_id), GFP_KERNEL);
+      if (id_p == NULL)
+      {
+		   printk(KERN_WARNING "%s ruleid_p allocation failed\n",__FUNCTION__);
+         break;
+      }
+
+      /* save pointer to the next blog rule */      
+      n_rule_p = rule_p->next_p;
+      
+      /* terminate the current blog rule node */
+      rule_p->next_p = NULL;
+
+      /* assign the blog rule to the new blog */
+      new_blog_p->blogRule_p = rule_p;
+
+      /* update vlan tag info of the new blog based on the blog rule */
+      rule_filter = &(((blogRule_t *)new_blog_p->blogRule_p)->filter);
+      new_blog_p->vtag_num = rule_filter->nbrOfVlanTags;
+      vid = ((rule_filter->vlan[0].value.h_vlan_TCI &
+              rule_filter->vlan[0].mask.h_vlan_TCI) & 0xFFF);
+      new_blog_p->vid  = vid ? vid : 0xFFFF; 
+      vid = ((rule_filter->vlan[1].value.h_vlan_TCI &
+              rule_filter->vlan[1].mask.h_vlan_TCI) & 0xFFF);
+      new_blog_p->vid |= vid ? (vid << 16) : 0xFFFF0000;
+
+      /* activate the new blog */
+      key = blog_activate(new_blog_p, BlogTraffic_Layer2_Flow, BlogClient_fap);
+      if (key == BLOG_KEY_INVALID)
+      {
+#if 0
+         /* Some flows can be rejected. use these prints only for debugging! */
+         printk(KERN_WARNING "%s blog_activate failed!\n",__FUNCTION__);
+         blog_rule_dump(rule_p);
+#endif
+         kfree(id_p);
+      }
+      else
+      {
+         /* save the blog rule activation key */
+         id_p->id     = key;  //++testid;
+         id_p->next_p = ruleId_p;
+         ruleId_p     = id_p;
+         
+//         printk(KERN_NOTICE "%s blog_activate succeeded. id=0x%x\n",__FUNCTION__, key);
+      }
+
+      /* restore pointer to the next blog rule */      
+      rule_p->next_p = n_rule_p;
+   }
+
+   /* free the new blog */   
+   blog_put(new_blog_p);
+   
+   return ruleId_p;
+   
+} /* activate_blog_rules() */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   struct br_flow_path * deactivate_blog_rules(struct br_flow_path *path_p,
+ *                                               struct net_device *rxVlanDev_p)
+ * Description:
+ *   Deactivate blog rules associated with a layer2 flow path.
+ *   Note that activated blog rule ids were saved in the flow path list
+ *   in the tx vlan device bridge port data structure.
+ * Parameters:
+ *   path_p (input): pointer to the flow path list of the tx bridge port.
+ *   rxVlanDev_p (input): the rx vlan device.
+ * Returns:
+ *   pointer to the flow path if found.
+ *   NULL if flow path not found.
+ *------------------------------------------------------------------------------
+ */
+struct br_flow_path * deactivate_blog_rules(struct br_flow_path *path_p,
+                                            struct net_device *rxVlanDev_p)
+{
+   struct br_blog_rule_id *id_p;
+   
+   while (path_p != NULL)
+   {
+      if (rxVlanDev_p == NULL || rxVlanDev_p == path_p->rxDev_p)
+      {
+         /* found the existing flow path. Deactivate all the old blog rules. */
+         id_p = path_p->blogRuleId_p;
+         
+         while (id_p != NULL)
+         {
+            /* deactivate blog rule */
+//            printk(KERN_NOTICE "%s deactivate blog rule id 0x%x\n",__FUNCTION__, id_p->id);
+            blog_deactivate(id_p->id, BlogTraffic_Layer2_Flow, BlogClient_fap);
+            id_p = id_p->next_p;
+         }
+         
+         free_ruleid_list(path_p->blogRuleId_p);
+         path_p->blogRuleId_p = NULL;
+         if (rxVlanDev_p != NULL)
+            break;
+      }
+      path_p = path_p->next_p;
+   }
+  
+   return path_p;
+   
+}  /* deactivate_blog_rules() */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   int br_flow_blog_rules(struct net_bridge *br,
+ *                          struct net_device *rxVlanDev_p,
+ *                          struct net_device *txVlanDev_p)
+ * Description:
+ *   Generate and activate blog rules for a layer2 flow path going
+ *   from the rx vlan device to the tx vlan device of a bridge.
+ * Parameters:
+ *   br (input): the bridge that the rx and tx vlan devices are members of.
+ *   rxVlanDev_p (input): rx vlan device 
+ *   txVlanDev_p (input): tx vlan device 
+ * Returns:
+ *   0:  succeeded
+ *   -1 or -EINVAL: failed
+ *------------------------------------------------------------------------------
+ */
+int br_flow_blog_rules(struct net_bridge *br,
+                       struct net_device *rxVlanDev_p,
+                       struct net_device *txVlanDev_p)
+{
+   Blog_t                 *blog_p      = BLOG_NULL;
+   struct br_blog_rule_id *newRuleId_p = NULL;
+   struct br_flow_path    *path_p      = NULL;
+   struct net_bridge_port *port_p      = NULL;
+   int ret = 0;
+
+   if (rxVlanDev_p == NULL || txVlanDev_p == NULL)
+	{
+   	printk(KERN_WARNING "%s rx or tx VLAN device not specified\n",__FUNCTION__);
+      return -EINVAL;
+   }
+   
+   port_p = br_port_get_rcu(rxVlanDev_p);
+	if (port_p == NULL || port_p->br != br)
+   {
+      printk(KERN_WARNING "%s rx VLAN device is not a bridge member\n",__FUNCTION__);
+		return -EINVAL;
+   }
+   
+   port_p = br_port_get_rcu(txVlanDev_p);
+	if (port_p == NULL || port_p->br != br)
+   {
+      printk(KERN_WARNING "%s tx VLAN device is not a bridge member\n",__FUNCTION__);
+		return -EINVAL;
+   }
+   
+   if (!(rxVlanDev_p->priv_flags & IFF_BCM_VLAN))
+   {
+      printk(KERN_WARNING "%s %s is NOT a VLAN device\n",__FUNCTION__, rxVlanDev_p->name);
+      return -EINVAL;
+   }
+
+   if (!(txVlanDev_p->priv_flags & IFF_BCM_VLAN))
+   {
+      printk(KERN_WARNING "%s %s is NOT a VLAN device\n",__FUNCTION__, txVlanDev_p->name);
+      return -EINVAL;
+   }
+   
+   /* allocate blog */
+   blog_p = blog_get();
+   if (blog_p == BLOG_NULL) 
+   {
+		printk(KERN_WARNING "%s blog_p allocation failed\n",__FUNCTION__);
+      return -1;
+   }
+
+   /* initialize the blog header for the rx vlan device */
+   if (init_blog_header(rxVlanDev_p, &(blog_p->rx)) != 0)
+   {
+		printk(KERN_WARNING "%s init_blog_header for rxVlanDev_p failed\n",__FUNCTION__);
+      blog_put(blog_p);
+      return -1;
+   }
+   
+   /* initialize the blog header for the tx vlan device */
+   if (init_blog_header(txVlanDev_p, &(blog_p->tx)) != 0)
+   {
+		printk(KERN_WARNING "%s init_blog_header for txVlanDev_p failed\n",__FUNCTION__);
+      blog_put(blog_p);
+      return -1;
+   }
+
+   blog_p->mark = blog_p->priority = 0;
+
+   //????   
+//   blog_p->key.l1_tuple.phy     = blog_p->rx.info.phyHdr;
+//   blog_p->key.l1_tuple.channel = blog_p->rx.info.channel;
+//   blog_p->key.protocol         = BLOG_IPPROTO_UDP;
+
+   blog_p->blogRule_p = NULL;
+
+   /* add vlan blog rules, if any vlan interfaces were found */
+   if (blogRuleVlanHook) 
+   {
+      if (blogRuleVlanHook(blog_p, rxVlanDev_p, txVlanDev_p) < 0)
+      {
+         printk(KERN_WARNING "%s Error while processing VLAN blog rules\n",__FUNCTION__);
+         blog_rule_free_list(blog_p);
+         blog_put(blog_p);
+         return -1;
+      }
+   }
+
+   /* activate new blog rules for flow path rxVlanDev -> txVlanDev */
+   newRuleId_p = activate_blog_rules(blog_p);
+
+   /* blog rule and blog are no longer needed. free them. */
+   blog_rule_free_list(blog_p);
+   blog_put(blog_p);
+
+   /* deactivate the old blog rules of the same flow path.
+    * old blog rule ids were saved in the flow path list
+    * in the tx bridge port data structure.
+    */
+   port_p = br_port_get_rcu(txVlanDev_p);
+   
+   path_p = deactivate_blog_rules(port_p->flowPath_p, rxVlanDev_p);
+   if (path_p == NULL)
+   {
+      /* did not find the old blog rule id list for flow path
+       * rxVlanDev -> txVlanDev. Allocate a flow path for the
+       * newly activated blog rule id list.
+       */
+      path_p = kmalloc(sizeof(struct br_flow_path), GFP_KERNEL);
+      if (path_p == NULL)
+      {
+         printk(KERN_WARNING "%s kmalloc failed for new flow path\n",__FUNCTION__);
+         free_ruleid_list(newRuleId_p);
+         return -1;
+      }
+      
+      path_p->rxDev_p    = rxVlanDev_p;
+      path_p->next_p     = port_p->flowPath_p;
+      port_p->flowPath_p = path_p;
+   }
+   
+   /* save the newly activated blog rule id list */
+   path_p->blogRuleId_p = newRuleId_p;
+
+   return ret;
+    
+}  /* br_flow_blog_rules() */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function:
+ *   int br_flow_path_delete(struct net_bridge *br,
+ *                           struct net_device *rxVlanDev_p,
+ *                           struct net_device *txVlanDev_p)
+ * Description:
+ *   Deactivate blog rules for a layer2 flow path going
+ *   from the rx vlan device (rxVlanDev_p is not NULL) or
+ *   from any rx vlan devices (rxVlanDev_p is NULL)
+ *   to the tx vlan device of a bridge.
+ * Parameters:
+ *   br (input): the bridge that the rx and tx vlan devices are members of.
+ *   rxVlanDev_p (input): rx vlan device 
+ *   txVlanDev_p (input): tx vlan device 
+ * Returns:
+ *   0:  succeeded
+ *   -EINVAL: failed
+ *------------------------------------------------------------------------------
+ */
+int br_flow_path_delete(struct net_bridge *br,
+                        struct net_device *rxVlanDev_p,
+                        struct net_device *txVlanDev_p)
+{
+	struct net_bridge_port *port_p;
+   struct br_flow_path    *prevPath_p = NULL;
+   struct br_flow_path    *path_p     = NULL;
+   
+   if (rxVlanDev_p != NULL)
+   {
+       port_p = br_port_get_rcu(rxVlanDev_p);
+	   if (port_p == NULL || port_p->br != br)
+      {
+         printk(KERN_WARNING "%s rx VLAN device is not a bridge member\n",__FUNCTION__);
+		   return -EINVAL;
+      }
+   
+      if (!(rxVlanDev_p->priv_flags & IFF_BCM_VLAN))
+      {
+         printk(KERN_WARNING "%s %s is NOT a VLAN device\n",__FUNCTION__, rxVlanDev_p->name);
+         return -EINVAL;
+      }
+   }
+   
+   if (txVlanDev_p == NULL)
+	{
+      printk(KERN_WARNING "%s tx VLAN device not specified\n",__FUNCTION__);
+      return -EINVAL;
+   }
+   
+    port_p = br_port_get_rcu(txVlanDev_p);
+	if (port_p == NULL || port_p->br != br)
+   {
+      printk(KERN_WARNING "%s tx VLAN device is not a bridge member\n",__FUNCTION__);
+		return -EINVAL;
+   }
+   
+   if (!(txVlanDev_p->priv_flags & IFF_BCM_VLAN))
+   {
+      printk(KERN_WARNING "%s %s is NOT a VLAN device\n",__FUNCTION__, txVlanDev_p->name);
+      return -EINVAL;
+   }
+   
+   /* deactivate all the blog rules of the flow path.
+    * old blog rule ids were saved in the flow path list
+    * in the tx bridge port data structure.
+    */
+   port_p = br_port_get_rcu(txVlanDev_p);
+   
+   deactivate_blog_rules(port_p->flowPath_p, rxVlanDev_p);
+   
+   /* now, clean up flow paths that do not have any blog rule */
+   path_p = port_p->flowPath_p;
+   while (path_p != NULL)
+   {
+      if (path_p->blogRuleId_p == NULL)
+      {
+         if (path_p == port_p->flowPath_p)
+         {
+            port_p->flowPath_p = path_p->next_p;
+            kfree(path_p);
+            path_p = port_p->flowPath_p;
+         }
+         else
+         {
+            prevPath_p->next_p = path_p->next_p;
+            kfree(path_p);
+            path_p = prevPath_p->next_p;
+         }
+      }
+      else
+      {
+         prevPath_p = path_p;
+         path_p = path_p->next_p; 
+      }
+   }
+   
+   return 0;
+      
+}  /* br_flow_path_delete() */
+
+#else
+
+#include <linux/netdevice.h>
+#include "br_private.h"
+
+int br_flow_blog_rules(struct net_bridge *br,
+                       struct net_device *rxVlanDev_p,
+                       struct net_device *txVlanDev_p)
+{
+   return -1;
+}  /* br_flow_blog_rules() */
+
+int br_flow_path_delete(struct net_bridge *br,
+                        struct net_device *rxVlanDev_p,
+                        struct net_device *txVlanDev_p)
+{
+   return -1;
+}  /* br_flow_path_delete() */
+
+#endif /* CONFIG_BCM_KF_BLOG && CONFIG_BLOG */
diff --git a/net/bridge/br_flows.h b/net/bridge/br_flows.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7680e560b9c99f201f4b16b7ece0ddac3bfd6f4
--- /dev/null
+++ b/net/bridge/br_flows.h
@@ -0,0 +1,46 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef _BR_FLOWS_H
+#define _BR_FLOWS_H
+
+#if defined(CONFIG_BCM_KF_BLOG)
+
+extern int br_flow_blog_rules(struct net_bridge *br,
+                              struct net_device *rxVlanDev_p,
+                              struct net_device *txVlanDev_p);
+                       
+extern int br_flow_path_delete(struct net_bridge *br,
+                               struct net_device *rxVlanDev_p,
+                               struct net_device *txVlanDev_p);
+                        
+#endif
+
+#endif /* _BR_FLOWS_H */
+
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a2098e3de500d4ab34c3d3b14435eddb5089b354..e68adbec061f0c6b919e203b8b42304e5c63b78a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -19,7 +19,35 @@
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/netfilter_bridge.h>
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+#include <linux/export.h>
+#endif
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_IGMP)
+#include <linux/ip.h>
+#include <linux/igmp.h>
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_IP)
+#endif
+
+#if defined(CONFIG_BCM_KF_WL)
+static __inline__ int shouldBypassStp (const struct sk_buff *skb, int state) {
+	if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST)
+		return 0;
+	if (state == BR_STATE_DISABLED)
+		return 0;
+	return ( (skb->protocol == htons(0x888e) /* ETHER_TYPE_802_1X */) || 
+	         (skb->protocol == htons(0x88c7) /* ETHER_TYPE_802_1X_PREAUTH */) ||
+	         (skb->protocol == htons(0x886c) /* ETHER_TYPE_BRCM */ ) );
+}
+#endif
+
+
 
 static int deliver_clone(const struct net_bridge_port *prev,
 			 struct sk_buff *skb,
@@ -27,13 +55,168 @@ static int deliver_clone(const struct net_bridge_port *prev,
 					       struct sk_buff *skb));
 
 /* Don't forward packets to originating port or forwarding diasabled */
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+static inline int should_deliver(const struct net_bridge_port *p,
+				 const struct sk_buff *skb, int state)
+#else
 static inline int should_deliver(const struct net_bridge_port *p,
 				 const struct sk_buff *skb)
+#endif
 {
+#if defined(CONFIG_BCM_KF_IGMP)
+	struct iphdr *pipmcast = NULL;
+	struct igmphdr *pigmp = NULL;
+#endif
+
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/*
+	 * Do not forward any packets received from one WAN interface 
+	 * to another in multiple PVC case
+	 */
+	if( (skb->dev->priv_flags & p->dev->priv_flags) & IFF_WANDEV )
+	{
+		return 0;
+	}
+
+	if ((skb->dev->priv_flags & IFF_WANDEV) == 0 &&
+	    (p->dev->priv_flags   & IFF_WANDEV) == 0)
+	{
+		struct net_device *sdev = skb->dev;
+		struct net_device *ddev = p->dev;
+
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+		/* From LAN to LAN */
+		/* Do not forward any packets to virtual interfaces on the same
+		 * real interface of the originating virtual interface.
+		 */
+		while (!netdev_path_is_root(sdev))
+		{
+			sdev = netdev_path_next_dev(sdev);
+		}
+
+		while (!netdev_path_is_root(ddev))
+		{
+			ddev = netdev_path_next_dev(ddev);
+		}
+#endif
+
+		if (sdev == ddev)
+		{
+			return 0;
+		}
+
+		if (skb->pkt_type == PACKET_BROADCAST) 
+		{
+#if defined(CONFIG_BCM_KF_ENET_SWITCH)
+			if (sdev->priv_flags & IFF_HW_SWITCH & ddev->priv_flags)
+			{
+				/* both source and destination are IFF_HW_SWITCH 
+				   if they are also on the same switch, reject the packet */
+				if (!((sdev->priv_flags & IFF_EXT_SWITCH) ^ (ddev->priv_flags & IFF_EXT_SWITCH)))
+				{
+					return 0;
+				}
+			}
+#endif /* CONFIG_BCM_KF_ENET_SWITCH */
+		}
+	}
+#endif /* CONFIG_BCM_KF_WANDEV */
+
+#if defined(CONFIG_BCM_KF_IGMP)
+	/*
+	 * CPE is querying for LAN-2-LAN multicast.  These query messages 
+	 * should not go on WAN interfaces.
+	 * Also don't allow leaking of IGMPv2 report messages among LAN ports
+	 */
+	br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, NULL);
+	if ( pigmp != NULL )
+	{
+#if defined(CONFIG_BCM_KF_WANDEV)
+		if((p->dev->priv_flags & IFF_WANDEV))
+		{
+			if (pigmp->type == IGMP_HOST_MEMBERSHIP_QUERY) 
+			{
+				return 0;
+			}
+		}
+#endif
+#if defined(CONFIG_BR_IGMP_SNOOP)
+		else
+		{
+			if ((p->br->igmp_snooping) && (pigmp->type != IGMP_HOST_MEMBERSHIP_QUERY)) 
+			{
+				return 0;
+			}
+		}
+#endif
+	}
+#endif
+
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE)) 
+#if defined(CONFIG_BCM_KF_WL)
 	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
-		p->state == BR_STATE_FORWARDING);
+	           ((state == BR_STATE_FORWARDING) || shouldBypassStp(skb, state)));
+#else
+	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+	       state == BR_STATE_FORWARDING);
+
+#endif /* CONFIG_BCM_KF_WL */
+#elif defined(CONFIG_BCM_KF_WL)
+	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+	           ((p->state == BR_STATE_FORWARDING) || shouldBypassStp(skb, p->state)));
+#else
+	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+	        p->state == BR_STATE_FORWARDING);
+#endif
 }
 
+
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+typedef struct net_device *(* br_fb_process_hook_t)(struct sk_buff *skb_p, uint16_t h_proto, struct net_device *txDev);
+static br_fb_process_hook_t __rcu br_fb_process_hook;
+
+void br_fb_bind(br_fb_process_hook_t brFbProcessHook)
+{
+   if ( NULL == brFbProcessHook ) {
+      printk("br_fb_bind: invalid FB process hook\n");
+   }
+   printk("br_fb_bind: FB process hook bound to %p\n", brFbProcessHook );
+   RCU_INIT_POINTER(br_fb_process_hook, brFbProcessHook);
+}
+
+static const struct net_bridge_port *br_fb_process(const struct net_bridge_port *to, struct sk_buff *skb)
+{
+	br_fb_process_hook_t fbProcessHook;
+	struct net_device *newDev;
+	int state = to->state;
+	const struct net_bridge_port *txPrt = to;
+
+	if ( NULL == txPrt ) {
+		return NULL;
+	}
+
+	fbProcessHook = rcu_dereference(br_fb_process_hook);
+	if ( fbProcessHook ) {
+		newDev = fbProcessHook(skb, TYPE_ETH, txPrt->dev);
+		if ( newDev ) {
+			state = BR_STATE_FORWARDING;
+			txPrt = br_port_get_rcu(newDev);
+			if ( NULL == txPrt ) {
+				txPrt = to;
+			}
+		}
+	}
+
+	if (should_deliver(txPrt, skb, state)) {
+		return txPrt;
+	}
+	else {
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(br_fb_bind);
+#endif
+
 static inline unsigned packet_length(const struct sk_buff *skb)
 {
 	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
@@ -99,7 +282,12 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+	to = br_fb_process(to, skb);
+	if ( to ) {
+#else
 	if (to && should_deliver(to, skb)) {
+#endif
 		__br_deliver(to, skb);
 		return;
 	}
@@ -110,7 +298,12 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
 {
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+	to = br_fb_process(to, skb);
+	if ( to ) {
+#else
 	if (should_deliver(to, skb)) {
+#endif   
 		if (skb0)
 			deliver_clone(to, skb, __br_forward);
 		else
@@ -147,7 +340,11 @@ static struct net_bridge_port *maybe_deliver(
 {
 	int err;
 
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+	if (!should_deliver(p, skb, p->state))
+#else
 	if (!should_deliver(p, skb))
+#endif
 		return prev;
 
 	if (!prev)
@@ -170,6 +367,12 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
 	struct net_bridge_port *p;
 	struct net_bridge_port *prev;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	Blog_t * blog_p = blog_ptr(skb);
+
+	if (blog_p && !blog_p->rx.multicast)
+		blog_skip(skb);
+#endif
 	prev = NULL;
 
 	list_for_each_entry_rcu(p, &br->port_list, list) {
@@ -181,11 +384,18 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
 	if (!prev)
 		goto out;
 
-	if (skb0)
-		deliver_clone(prev, skb, __packet_hook);
-	else
-		__packet_hook(prev, skb);
-	return;
+    if (skb0)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    {
+        blog_clone(skb, blog_ptr(skb0));
+#endif
+        deliver_clone(prev, skb, __packet_hook);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    }
+#endif
+    else
+        __packet_hook(prev, skb);
+    return;
 
 out:
 	if (!skb0)
diff --git a/net/bridge/br_fp.c b/net/bridge/br_fp.c
new file mode 100644
index 0000000000000000000000000000000000000000..a15c328f8a25214e657de929df847c5fdff2e829
--- /dev/null
+++ b/net/bridge/br_fp.c
@@ -0,0 +1,34 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "br_private.h"
+#include "br_fp.h"
+
+struct br_fp_data *fp_hooks;
+
+void br_fp_set_callbacks(struct br_fp_data *fpdata)
+{       
+    fp_hooks = fpdata;
+}
+
+void br_fp_clear_callbacks(void)
+{
+    fp_hooks = NULL;
+}
+
+EXPORT_SYMBOL(br_fp_set_callbacks);
+EXPORT_SYMBOL(br_fp_clear_callbacks);
+
diff --git a/net/bridge/br_fp.h b/net/bridge/br_fp.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fcde9a0a14327178669f3260ac01795551415d3
--- /dev/null
+++ b/net/bridge/br_fp.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef BR_FP_H
+#define BR_FP_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#define BR_FP_FDB_ADD 1
+#define BR_FP_FDB_REMOVE 2
+#define BR_FP_FDB_MODIFY 3
+#define BR_FP_FDB_CHECK_AGE 4
+#define BR_FP_PORT_ADD 5
+#define BR_FP_PORT_REMOVE 6
+
+struct br_fp_data
+{
+    int (*rdpa_hook)(int cmd, void *in, void *out);
+    void *rdpa_priv;
+};
+
+/* interface routine */
+void br_fp_set_callbacks(struct br_fp_data *fpdata);
+void br_fp_clear_callbacks(void);
+ 
+#endif /* BR_FP_H */
diff --git a/net/bridge/br_fp_hooks.h b/net/bridge/br_fp_hooks.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef8039e5f648f1e90f288aaf3f4634d89e3ae5ce
--- /dev/null
+++ b/net/bridge/br_fp_hooks.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef BR_FP_HOOKS_H
+#define BR_FP_HOOKS_H
+
+#include <linux/device.h>
+#include "br_private.h"
+
+extern struct br_fp_data *fp_hooks; 
+
+#undef BR_FP_DEBUG_SET
+#ifdef BR_FP_DEBUG_SET
+#define BR_FP_DEBUG_LEVEL 4
+#define BR_FP_START_DEBUG(n) do { if (n<BR_FP_DEBUG_LEVEL)
+#define BR_FP_END_DEBUG      } while (0)
+#define BR_FP_DEBUG(n, args...)			\
+	BR_FP_START_DEBUG(n)			\
+		printk(KERN_INFO args);		\
+	BR_FP_END_DEBUG
+#else
+#define BR_FP_DEBUG(n, args...)
+#endif
+
+static inline int br_fp_hook(int cmd, void *in, void *out)
+{
+    if (!fp_hooks)
+        return 0;
+    return fp_hooks->rdpa_hook(cmd, in, out);
+}
+
+#endif /* BR_FP_HOOKS_H */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e1144e1617be38814ebb2fb497755cb10b646559..55c88ba4229efe6b9af7b9835f0174c974f8a8ec 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,20 @@
 
 #include "br_private.h"
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#include <linux/module.h>
+#endif
+#if defined(CONFIG_BCM_KF_IP)
+#include "br_flows.h"
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE))
+#include "br_fp.h"
+#include "br_fp_hooks.h"
+#endif
 /*
  * Determine initial path cost based on speed.
  * using recommendations from 802.1d standard
@@ -141,6 +155,9 @@ static void del_nbp(struct net_bridge_port *p)
 
 	br_fdb_delete_by_port(br, p, 1);
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+	br_mcast_handle_netdevice_events(p->dev, NETDEV_CHANGE);
+#endif
 	list_del_rcu(&p->list);
 
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
@@ -170,6 +187,15 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 		del_nbp(p);
 	}
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	br_igmp_mc_fdb_cleanup(br);
+	br_igmp_snooping_br_fini(br);
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	br_mld_mc_fdb_cleanup(br);
+	br_mld_snooping_br_fini(br);
+#endif
+
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
@@ -222,9 +248,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	p->flags = 0;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	p->min_port_fdb_entries = 0;
+	p->max_port_fdb_entries = 0;
+	p->num_port_fdb_entries = 0;
+#endif
+
 	br_stp_port_timer_init(p);
 	br_multicast_add_port(p);
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+	br_stp_notify_state_port(p);
+#endif
 	return p;
 }
 
@@ -314,6 +350,10 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
 						     p->dev->features, mask);
 	}
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	features |= NETIF_F_EXTSTATS;
+#endif
+
 	return features;
 }
 
@@ -393,6 +433,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (changed_addr)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
 
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE))
+		br_fp_hook(BR_FP_PORT_ADD, p, NULL);
+#endif
+
 	dev_set_mtu(br->dev, br_min_mtu(br));
 
 	if (br_fdb_insert(br, p, dev->dev_addr))
@@ -427,6 +471,29 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 	if (!p || p->br != br)
 		return -EINVAL;
 
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	/* Disable min limit per port in advance */
+	(void)br_set_fdb_limit(br, p, 1, 1, 0);
+#endif
+
+#if defined(CONFIG_BCM_KF_IP)
+	/* delete all flow paths tx through this port (dev) from ANY rx port */
+	if (dev->priv_flags & IFF_BCM_VLAN)
+	{
+		br_flow_path_delete(br, NULL, dev);
+
+		/* delete all flow paths tx through other ports from this rx port (dev) */
+		list_for_each_entry(p, &br->port_list, list) {
+			if (p->dev && (p->dev != dev) && ((p->dev)->priv_flags & IFF_BCM_VLAN))
+				br_flow_path_delete(br, dev, p->dev);
+		}
+	}
+	p = br_port_get_rtnl(dev);
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE))
+	br_fp_hook(BR_FP_PORT_REMOVE, p, NULL);
+#endif
+
 	del_nbp(p);
 
 	spin_lock_bh(&br->lock);
@@ -437,7 +504,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
 
 	netdev_update_features(br->dev);
-
 	return 0;
 }
 
diff --git a/net/bridge/br_igmp.c b/net/bridge/br_igmp.c
new file mode 100644
index 0000000000000000000000000000000000000000..b7381200a9950d9380646c3bb3cc460007ede26b
--- /dev/null
+++ b/net/bridge/br_igmp.c
@@ -0,0 +1,1332 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#if defined(CONFIG_BCM_KF_IGMP)
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <asm/atomic.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/export.h>
+#include <linux/igmp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include "br_private.h"
+#include "br_igmp.h"
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/if_vlan.h>
+#include <linux/blog.h>
+#include <linux/blog_rule.h>
+#endif
+#include "br_mcast.h"
+
+#define MCPD_NETLINK_SKB_TIMEOUT_MS 2000
+
+void br_igmp_get_ip_igmp_hdrs( const struct sk_buff *pskb, struct iphdr **ppipmcast, struct igmphdr **ppigmp, int *lanppp)
+{
+	struct iphdr *pip = NULL;
+	struct igmphdr *pigmp = NULL;
+	struct pppoe_hdr *pppoe = NULL;
+	const unsigned char *dest = eth_hdr(pskb)->h_dest;
+
+	if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_IP) )
+	{
+		pip = (struct iphdr *)skb_network_header(pskb);
+	}
+	else if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_PPP_SES) )
+	{
+		pppoe = (struct pppoe_hdr *)skb_network_header(pskb);
+		if ( pppoe->tag[0].tag_type == htons(PPP_IP))
+		{
+			pip = (struct iphdr *)(skb_network_header(pskb) + PPPOE_SES_HLEN);
+		}
+	}
+	else if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_8021Q) )
+	{
+		if ( vlan_eth_hdr(pskb)->h_vlan_encapsulated_proto == htons(ETH_P_IP) )
+		{
+			pip = (struct iphdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr));
+		}
+		else if ( vlan_eth_hdr(pskb)->h_vlan_encapsulated_proto == htons(ETH_P_PPP_SES) )
+		{
+			struct pppoe_hdr *pppoe = (struct pppoe_hdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr));
+			if ( pppoe->tag[0].tag_type == PPP_IP)
+			{
+				pip = (struct iphdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr) + PPPOE_SES_HLEN);
+			}
+		}
+	}
+
+	*ppipmcast = NULL;
+	*ppigmp = NULL;
+	if ( pip != NULL )
+	{
+		if ( pppoe != NULL )
+		{
+			/* MAC will be unicast so check IP */
+			if ((pip->daddr & htonl(0xF0000000)) == htonl(0xE0000000))
+			{
+				if ( pip->protocol == IPPROTO_IGMP ) {
+					pigmp = (struct igmphdr *)((char *)pip + (pip->ihl << 2));
+				}
+				*ppipmcast = pip;
+				*ppigmp = pigmp;
+				if ( lanppp != NULL )
+				{
+					*lanppp = 1;
+				}
+			}
+		}
+		else
+		{
+			if ( is_multicast_ether_addr(dest) && !is_broadcast_ether_addr(dest) )
+			{
+				if ( pip->protocol == IPPROTO_IGMP ) {
+					pigmp = (struct igmphdr *)((char *)pip + (pip->ihl << 2));
+				}
+				*ppipmcast = pip;
+				*ppigmp = pigmp;
+				if ( lanppp != NULL )
+				{
+					*lanppp = 0;
+				}
+			}
+		}
+	}
+	return;
+}
+
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+
+static struct kmem_cache *br_igmp_mc_fdb_cache __read_mostly;
+static struct kmem_cache *br_igmp_mc_rep_cache __read_mostly;
+static u32 br_igmp_mc_fdb_salt __read_mostly;
+static struct proc_dir_entry *br_igmp_entry = NULL;
+
+extern int mcpd_process_skb(struct net_bridge *br, struct sk_buff *skb,
+                            unsigned short protocol);
+extern void mcpd_nl_process_timer_check ( void );
+extern unsigned long mcpd_nl_get_next_timer_expiry ( int *found );
+
+static struct in_addr ip_upnp_addr      = {htonl(0xEFFFFFFA)}; /* UPnP / SSDP */
+static struct in_addr ip_ntfy_srvr_addr = {htonl(0xE000FF87)}; /* Notification Server */
+
+static inline int br_igmp_mc_fdb_hash(const u32 grp)
+{
+	return jhash_1word(grp, br_igmp_mc_fdb_salt) & (BR_IGMP_HASH_SIZE - 1);
+}
+
+int br_igmp_control_filter(const unsigned char *dest, __be32 dest_ip)
+{
+    if(((dest) && is_broadcast_ether_addr(dest)) ||
+       ((dest_ip & htonl(0xFFFFFF00)) == htonl(0xE0000000)) ||
+       (dest_ip == ip_upnp_addr.s_addr) || /* UPnp/SSDP */
+       (dest_ip == ip_ntfy_srvr_addr.s_addr))   /* Notification srvr */
+    {
+        return 0;
+    }
+    else
+    {
+        return 1;
+    }
+} /* br_igmp_control_filter */
+
+/* This function requires that br->mcl_lock is already held */
+void br_igmp_mc_fdb_del_entry(struct net_bridge *br, 
+                              struct net_bridge_mc_fdb_entry *igmp_fdb,
+                              struct in_addr *rep,
+                              unsigned char *repMac)
+{
+	struct net_bridge_mc_rep_entry *rep_entry = NULL;
+	struct net_bridge_mc_rep_entry *rep_entry_n = NULL;
+
+	list_for_each_entry_safe(rep_entry, 
+	                         rep_entry_n, &igmp_fdb->rep_list, list) 
+	{
+		if (((NULL == rep) && (NULL == repMac)) ||
+		    (rep && (rep_entry->rep.s_addr == rep->s_addr)) ||
+		    (repMac && (0 == memcmp(rep_entry->repMac, repMac, ETH_ALEN))))
+		{
+			if ( br->igmp_snooping )
+			{
+				mcpd_nl_send_igmp_purge_entry(igmp_fdb, rep_entry);
+			}
+			list_del(&rep_entry->list);
+			kmem_cache_free(br_igmp_mc_rep_cache, rep_entry);
+			if (rep || repMac)
+			{
+				break;
+			}
+		}
+	}
+	if(list_empty(&igmp_fdb->rep_list)) 
+	{
+		hlist_del(&igmp_fdb->hlist);
+#if defined(CONFIG_BLOG) 
+		br_mcast_blog_release(BR_MCAST_PROTO_IGMP, (void *)igmp_fdb);
+#endif
+		kmem_cache_free(br_igmp_mc_fdb_cache, igmp_fdb);
+}
+
+	return;
+}
+
+void br_igmp_process_timer_check ( struct net_bridge *br )
+{
+  int pendingIndex = 0;
+
+  for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+    if (( br->igmp_delayed_skb[pendingIndex].skb != NULL ) && 
+        ( time_before (br->igmp_delayed_skb[pendingIndex].expiryTime, jiffies) ) ){
+      kfree_skb(br->igmp_delayed_skb[pendingIndex].skb);
+      br->igmp_delayed_skb[pendingIndex].skb = NULL;
+      br->igmp_delayed_skb[pendingIndex].expiryTime = 0;
+    }
+  }    
+}
+
+
+unsigned long br_igmp_get_next_timer_expiry ( struct net_bridge *br, int *found )
+{
+  int pendingIndex = 0;
+  unsigned long earliestTimeout = jiffies + (MCPD_NETLINK_SKB_TIMEOUT_MS*2);
+
+  for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+    if (( br->igmp_delayed_skb[pendingIndex].skb != NULL) && 
+        ( time_after (earliestTimeout, br->igmp_delayed_skb[pendingIndex].expiryTime)) ){
+      earliestTimeout = br->igmp_delayed_skb[pendingIndex].expiryTime;
+      *found = 1;
+    }
+  }    
+
+  return earliestTimeout;
+}
+
+
+void br_igmp_set_timer( struct net_bridge *br )
+{
+	struct net_bridge_mc_fdb_entry *mcast_group;
+	int                             i;
+	/* the largest timeout is BR_IGMP_MEMBERSHIP_TIMEOUT */
+	unsigned long                   tstamp = jiffies + (BR_IGMP_MEMBERSHIP_TIMEOUT*HZ*2);
+	unsigned int                    found = 0;
+	unsigned long                   pendTimeout = br_igmp_get_next_timer_expiry( br, &found );
+
+	if (( br->igmp_snooping == 0 ) && ( found == 0 ))
+	{
+		del_timer(&br->igmp_timer);
+		return;
+	}
+
+	if (found) 
+	{
+		if (time_after(tstamp, pendTimeout) ) {
+			tstamp = pendTimeout;
+		}
+	}
+
+	if ( br->igmp_snooping != 0 ) 
+	{
+		for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+		{
+			struct hlist_node *h_group;
+			hlist_for_each_entry(mcast_group, h_group, &br->mc_hash[i], hlist) 
+			{
+				struct net_bridge_mc_rep_entry *reporter_group;
+				list_for_each_entry(reporter_group, &mcast_group->rep_list, list)
+				{
+					if ( time_after(tstamp, reporter_group->tstamp) )
+					{
+						tstamp = reporter_group->tstamp;
+						found  = 1;
+					}
+				}
+			}
+		}
+	}
+  
+	if ( 0 == found )
+	{
+		del_timer(&br->igmp_timer);
+	}
+	else
+	{
+		mod_timer(&br->igmp_timer, (tstamp + TIMER_CHECK_TIMEOUT));
+	}
+
+}
+
+
+static void br_igmp_query_timeout(struct net_bridge *br)
+{
+	struct net_bridge_mc_fdb_entry *mcast_group;
+	int i;
+    
+	for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h_group, *n_group;
+		hlist_for_each_entry_safe(mcast_group, h_group, n_group, &br->mc_hash[i], hlist) 
+		{
+			struct net_bridge_mc_rep_entry *reporter_group, *n_reporter;
+			list_for_each_entry_safe(reporter_group, n_reporter, &mcast_group->rep_list, list)
+			{
+				if (time_after_eq(jiffies, reporter_group->tstamp)) 
+				{
+					br_igmp_mc_fdb_del_entry(br, mcast_group, &reporter_group->rep, NULL);
+				}
+			}
+		}
+	}
+
+}
+
+static void br_igmp_timeout (unsigned long ptr)
+{
+	struct net_bridge *br = (struct net_bridge *) ptr;
+
+	spin_lock_bh(&br->mcl_lock);
+
+	br_igmp_query_timeout(br);
+	br_igmp_process_timer_check(br);
+
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+}
+
+static struct net_bridge_mc_rep_entry * 
+br_igmp_rep_find(const struct net_bridge_mc_fdb_entry *mc_fdb,
+                 const struct in_addr *rep,
+                 unsigned char *repMac)
+{
+	struct net_bridge_mc_rep_entry *rep_entry;
+
+	list_for_each_entry(rep_entry, &mc_fdb->rep_list, list)
+	{
+		if ((rep && (rep_entry->rep.s_addr == rep->s_addr)) ||
+		    (repMac && (0 == memcmp(rep_entry->repMac, repMac, ETH_ALEN))))
+		{
+			return rep_entry;
+		}
+	}
+
+	return NULL;
+}
+
+/* In the case where a reporter has changed ports, this function
+   will remove all records pointing to the old port */
+void br_igmp_wipe_reporter_for_port (struct net_bridge *br,
+                                     struct in_addr *rep, 
+                                     u16 oldPort)
+{
+    int hashIndex = 0;
+    struct hlist_node *h = NULL;
+    struct hlist_node *n = NULL;
+    struct hlist_head *head = NULL;
+    struct net_bridge_mc_fdb_entry *mc_fdb;
+
+    spin_lock_bh(&br->mcl_lock);
+    for ( ; hashIndex < BR_IGMP_HASH_SIZE ; hashIndex++)
+    {
+        head = &br->mc_hash[hashIndex];
+        hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist)
+        {
+            if ((mc_fdb->dst->port_no == oldPort) &&
+                (br_igmp_rep_find(mc_fdb, rep, NULL) != NULL))
+            {
+                /* The reporter we're looking for has been found
+                   in a record pointing to its old port */
+                br_igmp_mc_fdb_del_entry(br, mc_fdb, rep, NULL);
+            }
+        }
+    }
+    br_igmp_set_timer(br);
+    spin_unlock_bh(&br->mcl_lock);
+}
+
+/* will remove all records for reporter with MAC equal to repMac */
+void br_igmp_wipe_reporter_by_mac (struct net_bridge *br,
+                                   unsigned char *repMac)
+{
+    int hashIndex = 0;
+    struct hlist_node *h = NULL;
+    struct hlist_node *n = NULL;
+    struct hlist_head *head = NULL;
+    struct net_bridge_mc_fdb_entry *mc_fdb;
+
+    spin_lock_bh(&br->mcl_lock);
+    for ( ; hashIndex < BR_IGMP_HASH_SIZE ; hashIndex++)
+    {
+        head = &br->mc_hash[hashIndex];
+        hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist)
+        {
+            if ((br_igmp_rep_find(mc_fdb, NULL, repMac) != NULL))
+            {
+                br_igmp_mc_fdb_del_entry(br, mc_fdb, NULL, repMac);
+            }
+        }
+    }
+    br_igmp_set_timer(br);
+    spin_unlock_bh(&br->mcl_lock);
+}
+
+/* this is called during addition of a snooping entry and requires that 
+   mcl_lock is already held */
+static int br_mc_fdb_update(struct net_bridge *br, 
+                            struct net_bridge_port *prt, 
+                            struct in_addr *rxGrp,
+                            struct in_addr *txGrp,
+                            struct in_addr *rep,
+                            unsigned char *repMac,
+                            int mode, 
+                            struct in_addr *src,
+                            struct net_device *from_dev,
+                            uint32_t info)
+{
+	struct net_bridge_mc_fdb_entry *dst;
+	struct net_bridge_mc_rep_entry *rep_entry = NULL;
+	int ret = 0;
+	int filt_mode;
+	struct hlist_head *head;
+	struct hlist_node *h;
+
+	if(mode == SNOOP_IN_ADD)
+		filt_mode = MCAST_INCLUDE;
+	else
+		filt_mode = MCAST_EXCLUDE;
+
+	head = &br->mc_hash[br_igmp_mc_fdb_hash(txGrp->s_addr)];
+	hlist_for_each_entry(dst, h, head, hlist) {
+		if ((dst->txGrp.s_addr == txGrp->s_addr) && (dst->rxGrp.s_addr == rxGrp->s_addr))
+		{
+			if((src->s_addr == dst->src_entry.src.s_addr) &&
+			   (filt_mode == dst->src_entry.filt_mode) && 
+			   (dst->from_dev == from_dev) &&
+			   (dst->dst == prt) &&
+			   (dst->info == info))
+			{
+				/* found entry - update TS */
+				struct net_bridge_mc_rep_entry *reporter = br_igmp_rep_find(dst, rep, NULL);
+				if(reporter == NULL)
+				{
+					rep_entry = kmem_cache_alloc(br_igmp_mc_rep_cache, GFP_ATOMIC);
+					if(rep_entry)
+					{
+						rep_entry->rep.s_addr = rep->s_addr;
+						rep_entry->tstamp = jiffies + BR_IGMP_MEMBERSHIP_TIMEOUT*HZ;
+						memcpy(rep_entry->repMac, repMac, ETH_ALEN);
+						list_add_tail(&rep_entry->list, &dst->rep_list);
+						br_igmp_set_timer(br);
+					}
+				}
+				else
+				{
+					reporter->tstamp = jiffies + BR_IGMP_MEMBERSHIP_TIMEOUT*HZ;
+					br_igmp_set_timer(br);
+				}
+				ret = 1;
+			}
+		}
+	}
+
+	return ret;
+}
+
+int br_igmp_process_if_change(struct net_bridge *br, struct net_device *ndev)
+{
+	struct net_bridge_mc_fdb_entry *dst;
+	int i;
+
+	spin_lock_bh(&br->mcl_lock);
+	for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(dst, h, n, &br->mc_hash[i], hlist) 
+		{
+			if ((NULL == ndev) ||
+			    (dst->dst->dev == ndev) ||
+			    (dst->from_dev == ndev))
+			{
+				br_igmp_mc_fdb_del_entry(br, dst, NULL, NULL);
+			}
+		}
+	}
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+
+	return 0;
+}
+
+int br_igmp_mc_fdb_add(struct net_device *from_dev,
+                       int wan_ops,
+                       struct net_bridge *br, 
+                       struct net_bridge_port *prt, 
+                       struct in_addr *rxGrp, 
+                       struct in_addr *txGrp, 
+                       struct in_addr *rep,
+                       unsigned char *repMac,
+                       int mode, 
+                       uint16_t tci, 
+                       struct in_addr *src,
+                       int lanppp,
+                       int excludePort,
+                       char enRtpSeqCheck,
+                       uint32_t info)
+{
+	struct net_bridge_mc_fdb_entry *mc_fdb = NULL;
+	struct net_bridge_mc_rep_entry *rep_entry = NULL;
+	struct hlist_head *head = NULL;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	int ret = 1;
+#endif
+
+	if(!br || !prt || !rxGrp || !txGrp || !rep || !from_dev)
+		return 0;
+
+	if( !br_igmp_control_filter(NULL, rxGrp->s_addr) || !br_igmp_control_filter(NULL, txGrp->s_addr) )
+		return 0;
+
+	if(!netdev_path_is_leaf(from_dev))
+		return 0;
+
+	if((SNOOP_IN_ADD != mode) && (SNOOP_EX_ADD != mode))
+		return 0;
+
+	mc_fdb = kmem_cache_alloc(br_igmp_mc_fdb_cache, GFP_ATOMIC);
+	if ( !mc_fdb )
+	{
+		return -ENOMEM;
+	}
+	rep_entry = kmem_cache_alloc(br_igmp_mc_rep_cache, GFP_ATOMIC);
+	if ( !rep_entry )
+	{
+		kmem_cache_free(br_igmp_mc_fdb_cache, mc_fdb);
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&br->mcl_lock);
+	if (br_mc_fdb_update(br, prt, rxGrp, txGrp, rep, repMac, mode, src, from_dev, info))
+	{
+		kmem_cache_free(br_igmp_mc_fdb_cache, mc_fdb);
+		kmem_cache_free(br_igmp_mc_rep_cache, rep_entry);
+		spin_unlock_bh(&br->mcl_lock);
+		return 0;
+	}
+
+	mc_fdb->txGrp.s_addr = txGrp->s_addr;
+	mc_fdb->rxGrp.s_addr = rxGrp->s_addr;
+	memcpy(&mc_fdb->src_entry, src, sizeof(struct in_addr));
+	mc_fdb->src_entry.filt_mode = (mode == SNOOP_IN_ADD) ? MCAST_INCLUDE : MCAST_EXCLUDE;
+	mc_fdb->dst = prt;
+	mc_fdb->lan_tci = tci;
+	mc_fdb->wan_tci = 0;
+	mc_fdb->num_tags = 0;
+	mc_fdb->from_dev = from_dev;
+	mc_fdb->type = wan_ops;
+	mc_fdb->excludePort = excludePort;
+	mc_fdb->enRtpSeqCheck = enRtpSeqCheck;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	mc_fdb->root = 1;
+	mc_fdb->blog_idx = BLOG_KEY_INVALID;
+#endif
+	mc_fdb->info = info;
+	mc_fdb->lanppp = lanppp;
+	INIT_LIST_HEAD(&mc_fdb->rep_list);
+	rep_entry->rep.s_addr = rep->s_addr;
+	rep_entry->tstamp = jiffies + BR_IGMP_MEMBERSHIP_TIMEOUT * HZ;
+	memcpy(rep_entry->repMac, repMac, ETH_ALEN);
+	list_add_tail(&rep_entry->list, &mc_fdb->rep_list);
+
+	head = &br->mc_hash[br_igmp_mc_fdb_hash(txGrp->s_addr)];
+	hlist_add_head(&mc_fdb->hlist, head);
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	ret = br_mcast_blog_process(br, (void*)mc_fdb, BR_MCAST_PROTO_IGMP);
+	if(ret < 0)
+	{
+		hlist_del(&mc_fdb->hlist);
+		kmem_cache_free(br_igmp_mc_fdb_cache, mc_fdb);
+		kmem_cache_free(br_igmp_mc_rep_cache, rep_entry);
+		spin_unlock_bh(&br->mcl_lock);
+		return ret;
+	}
+#endif
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+
+	return 1;
+}
+EXPORT_SYMBOL(br_igmp_mc_fdb_add);
+
+void br_igmp_mc_fdb_cleanup(struct net_bridge *br)
+{
+	struct net_bridge_mc_fdb_entry *dst;
+	int i;
+    
+	spin_lock_bh(&br->mcl_lock);
+	for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(dst, h, n, &br->mc_hash[i], hlist) 
+		{
+			br_igmp_mc_fdb_del_entry(br, dst, NULL, NULL);
+		}
+	}
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+}
+
+int br_igmp_mc_fdb_remove(struct net_device *from_dev,
+                          struct net_bridge *br, 
+                          struct net_bridge_port *prt, 
+                          struct in_addr *rxGrp, 
+                          struct in_addr *txGrp, 
+                          struct in_addr *rep, 
+                          int mode, 
+                          struct in_addr *src,
+                          uint32_t info)
+{
+	struct net_bridge_mc_fdb_entry *mc_fdb;
+	int filt_mode;
+	struct hlist_head *head = NULL;
+	struct hlist_node *h, *n;
+
+	//printk("--- remove mc entry ---\n");
+	
+	if(!br || !prt || !txGrp || !rxGrp || !rep || !from_dev)
+		return 0;
+
+	if(!br_igmp_control_filter(NULL, txGrp->s_addr))
+		return 0;
+
+	if(!br_igmp_control_filter(NULL, rxGrp->s_addr))
+		return 0;
+
+	if(!netdev_path_is_leaf(from_dev))
+		return 0;
+
+	if((SNOOP_IN_CLEAR != mode) && (SNOOP_EX_CLEAR != mode))
+		return 0;
+
+	if(mode == SNOOP_IN_CLEAR)
+		filt_mode = MCAST_INCLUDE;
+	else
+		filt_mode = MCAST_EXCLUDE;
+
+	spin_lock_bh(&br->mcl_lock);
+	head = &br->mc_hash[br_igmp_mc_fdb_hash(txGrp->s_addr)];
+	hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist)
+	{
+		if ((mc_fdb->rxGrp.s_addr == rxGrp->s_addr) && 
+		    (mc_fdb->txGrp.s_addr == txGrp->s_addr) && 
+		    (filt_mode == mc_fdb->src_entry.filt_mode) && 
+		    (mc_fdb->src_entry.src.s_addr == src->s_addr) &&
+		    (mc_fdb->from_dev == from_dev) &&
+		    (mc_fdb->dst == prt) &&
+		    (mc_fdb->info == info))
+		{
+			br_igmp_mc_fdb_del_entry(br, mc_fdb, rep, NULL);
+		}
+	}
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+	
+	return 0;
+}
+EXPORT_SYMBOL(br_igmp_mc_fdb_remove);
+
+int br_igmp_mc_forward(struct net_bridge *br, 
+                       struct sk_buff *skb, 
+                       int forward,
+                       int is_routed)
+{
+	struct net_bridge_mc_fdb_entry *dst;
+	int status = 0;
+	struct sk_buff *skb2;
+	struct net_bridge_port *p, *p_n;
+	struct iphdr *pipmcast = NULL;
+	struct igmphdr *pigmp = NULL;
+	const unsigned char *dest = eth_hdr(skb)->h_dest;
+	struct hlist_head *head = NULL;
+	struct hlist_node *h;
+	int lanppp;
+
+	br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, &lanppp);
+	if ( pipmcast == NULL ) {
+		return status;
+	}
+
+	if ((pigmp != NULL) &&
+	    (br->igmp_snooping || is_multicast_switching_mode_host_control()))
+	{
+		/* for bridged WAN service, do not pass any IGMP packets
+		   coming from the WAN port to mcpd. Queries can be passed 
+		   through for forwarding, other types should be dropped */
+		if (skb->dev)
+		{
+#if defined(CONFIG_BCM_KF_WANDEV)
+			if ( skb->dev->priv_flags & IFF_WANDEV )
+			{
+				if ( pigmp->type != IGMP_HOST_MEMBERSHIP_QUERY )
+				{
+					kfree_skb(skb);
+					status = 1;
+				}
+			}
+			else
+#endif
+			{
+				spin_lock_bh(&br->mcl_lock);
+				rcu_read_lock();
+				if(br_port_get_rcu(skb->dev))
+				{ 
+					status = mcpd_process_skb(br, skb, ETH_P_IP);
+					if (status == 1) 
+					{
+						int placedPending = 0;
+						int pendingIndex = 0;
+						for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) 
+						{
+							if ( br->igmp_delayed_skb[pendingIndex].skb == NULL ) 
+							{
+								br->igmp_delayed_skb[pendingIndex].skb = skb;
+								br->igmp_delayed_skb[pendingIndex].expiryTime = jiffies + MCPD_NETLINK_SKB_TIMEOUT_MS;
+								placedPending = 1;
+								br_igmp_set_timer( br );
+								break;
+							}
+						}
+						if (!placedPending)
+						{
+							printk ("Delayed admission failed due to lack of memory\n");
+						}
+					/* If a pending slot was not found, forward it anyway?? TBD */
+					}        
+				}
+				rcu_read_unlock();
+				spin_unlock_bh(&br->mcl_lock);
+			}
+		}
+		return status;
+	}
+
+	if (br_igmp_control_filter(dest, pipmcast->daddr) == 0) {
+		return status;
+	}
+
+	/* snooping could be disabled and still have manual entries */
+
+	/* drop traffic by default when snooping is enabled
+	   in blocking mode */
+	if (br->igmp_snooping == SNOOPING_BLOCKING_MODE)
+	{
+		status = 1;
+	}
+
+	spin_lock_bh(&br->mcl_lock);
+	head = &br->mc_hash[br_igmp_mc_fdb_hash(pipmcast->daddr)];
+	hlist_for_each_entry(dst, h, head, hlist) {
+		if (dst->txGrp.s_addr != pipmcast->daddr) {
+			continue;
+		}
+		/* if this packet has already been sent to the port referenced 
+		   by the forwarding entry then continue */
+		if (1 == dst->dst->dirty) {
+			continue;
+		}
+
+		/* routed packet will have bridge as dev - cannot match to mc_fdb */
+		if ( is_routed ) {
+			if ( dst->type != MCPD_IF_TYPE_ROUTED ) {
+				continue;
+			}
+		}
+		else {
+			if ( dst->type != MCPD_IF_TYPE_BRIDGED ) {
+				continue;
+			}
+#if defined(CONFIG_BCM_KF_WANDEV)
+			if (skb->dev->priv_flags & IFF_WANDEV) {
+				/* match exactly if skb device is a WAN device - otherwise continue */
+				if (dst->from_dev != skb->dev) {
+					continue;
+				}
+			}
+			else {
+				/* if this is not an L2L mc_fdb entry continue */
+				if (dst->from_dev != br->dev) {
+					continue;            
+				}
+			}
+#endif
+		}
+
+		if((dst->src_entry.filt_mode == MCAST_INCLUDE) && 
+		   (pipmcast->saddr == dst->src_entry.src.s_addr)) {
+			/* If this is the excluded port, drop it now */
+			if (dst->excludePort != -1) {
+				if ( pipmcast->protocol == IPPROTO_UDP ) {
+					struct udphdr *headerUdp = (struct udphdr *)(pipmcast + 1);
+					if (headerUdp->dest == dst->excludePort ) {
+						kfree_skb(skb);
+						spin_unlock_bh(&br->mcl_lock);
+						return 1;
+					}
+				}
+			}
+
+			if((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+			{
+				spin_unlock_bh(&br->mcl_lock);
+				return 0;
+			}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			blog_clone(skb, blog_ptr(skb2));
+#endif
+			if(forward) {
+				br_forward(dst->dst, skb2, NULL);
+			}
+			else {
+				br_deliver(dst->dst, skb2);
+			}
+			dst->dst->dirty = 1;
+			status = 1;
+		}
+		else if(dst->src_entry.filt_mode == MCAST_EXCLUDE) {
+			if((0 == dst->src_entry.src.s_addr) ||
+			   (pipmcast->saddr != dst->src_entry.src.s_addr)) {
+				/* If this is the excluded port, drop it now */
+				if (dst->excludePort != -1) {
+					if ( pipmcast->protocol == IPPROTO_UDP ) {
+						struct udphdr *headerUdp = (struct udphdr *)(pipmcast + 1);
+						if (headerUdp->dest == dst->excludePort ) {
+							kfree_skb(skb);
+							spin_unlock_bh(&br->mcl_lock);
+							return 1;
+						}
+					}
+				}
+
+				if((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+				{
+					spin_unlock_bh(&br->mcl_lock);
+					return 0;
+				}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+				blog_clone(skb, blog_ptr(skb2));
+#endif
+				if(forward) {
+					br_forward(dst->dst, skb2, NULL);
+				}
+				else {
+					br_deliver(dst->dst, skb2);
+				}
+				dst->dst->dirty = 1;
+				status = 1;
+			}
+			else if(pipmcast->saddr == dst->src_entry.src.s_addr) {
+				status = 1;
+			}
+		}
+	}
+
+	if (status) {
+		list_for_each_entry_safe(p, p_n, &br->port_list, list) {
+			p->dirty = 0;
+		}
+	}
+	spin_unlock_bh(&br->mcl_lock);
+
+	if(status) {
+		kfree_skb(skb);
+	}
+
+	return status;
+}
+
+void br_igmp_process_admission (t_MCPD_ADMISSION* admit)
+{
+    int pendingIndex = 0;
+    struct net_bridge *br = (struct net_bridge *)admit->bridgePointer;
+    struct sk_buff *skb = (struct sk_buff *)admit->skbPointer;
+
+    if ((br == NULL) || (skb == NULL)) {
+      printk ("Error %p %p\n", br, skb);
+    }
+
+    spin_lock_bh(&br->mcl_lock);
+
+    for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+      /*printk("Comparing [%d] = %p\n", pendingIndex, br->igmp_delayed_skb[pendingIndex].skb);*/
+      if ( br->igmp_delayed_skb[pendingIndex].skb == skb ) {
+        break;
+      }
+    }
+
+    if (pendingIndex == MCPD_MAX_DELAYED_SKB_COUNT) {
+      /* Did not find that skb, may have timed out */
+      printk("br_igmp_process_admission no match\n");
+      spin_unlock_bh(&br->mcl_lock);
+      return;
+    }
+
+    if (admit->admitted == MCPD_PACKET_ADMITTED_YES) {
+      /* send the packet on */
+      rcu_read_lock();
+      br_flood_forward(br, br->igmp_delayed_skb[pendingIndex].skb, NULL);
+      rcu_read_unlock();
+    }
+    else {
+      /* packet was not admitted, free it up */
+      kfree_skb(br->igmp_delayed_skb[pendingIndex].skb);
+    }
+    br->igmp_delayed_skb[pendingIndex].skb = NULL;
+
+    spin_unlock_bh(&br->mcl_lock);
+}
+
+void br_igmp_wipe_pending_skbs( void )
+{
+  struct net_device *dev;
+
+  rcu_read_lock();
+  for_each_netdev_rcu(&init_net, dev) {
+    if (dev->priv_flags & IFF_EBRIDGE) {
+      int pendingIndex = 0;
+      struct net_bridge* br = netdev_priv(dev);
+      spin_lock_bh(&br->mcl_lock);
+      for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+        if ( br->igmp_delayed_skb[pendingIndex].skb != NULL) {
+          kfree_skb(br->igmp_delayed_skb[pendingIndex].skb);
+          br->igmp_delayed_skb[pendingIndex].skb = NULL;
+          br->igmp_delayed_skb[pendingIndex].expiryTime = 0;
+        }
+      }
+      spin_unlock_bh(&br->mcl_lock);
+    }
+  }
+  rcu_read_unlock();
+}
+
+void br_igmp_process_device_removal(struct net_device* dev)
+{
+  if (NULL == dev) {
+    return;
+  }
+
+  if (dev->priv_flags & IFF_EBRIDGE) {
+    /* the removed device is a bridge, clear all pending */
+    int pendingIndex = 0;
+    struct net_bridge* br = netdev_priv(dev);
+    spin_lock_bh(&br->mcl_lock);
+    for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+      if (br->igmp_delayed_skb[pendingIndex].skb != NULL) {
+        kfree_skb(br->igmp_delayed_skb[pendingIndex].skb);
+        br->igmp_delayed_skb[pendingIndex].skb = NULL;
+        br->igmp_delayed_skb[pendingIndex].expiryTime = 0;
+      }
+    }  
+    spin_unlock_bh(&br->mcl_lock);
+  }
+  else {
+    /* this is a non bridge device.  We must clear it from all bridges. */
+    struct net_device *maybeBridge = NULL;
+
+    rcu_read_lock();
+    for_each_netdev_rcu(&init_net, maybeBridge) {
+      if (maybeBridge->priv_flags & IFF_EBRIDGE) {
+        int pendingIndex = 0;
+        struct net_bridge* br = netdev_priv(maybeBridge);
+        spin_lock_bh(&br->mcl_lock);
+        for ( ; pendingIndex < MCPD_MAX_DELAYED_SKB_COUNT; pendingIndex ++) {
+          if ((br->igmp_delayed_skb[pendingIndex].skb != NULL) && ( br->igmp_delayed_skb[pendingIndex].skb->dev == dev )){
+            kfree_skb(br->igmp_delayed_skb[pendingIndex].skb);
+            br->igmp_delayed_skb[pendingIndex].skb = NULL;
+            br->igmp_delayed_skb[pendingIndex].expiryTime = 0;
+          }
+        }  
+        spin_unlock_bh(&br->mcl_lock);
+      }
+    }
+    rcu_read_unlock();
+  }
+}
+
+
+int br_igmp_mc_fdb_update_bydev( struct net_bridge *br,
+                                 struct net_device *dev,
+                                 unsigned int       flushAll)
+{
+	struct net_bridge_mc_fdb_entry *mc_fdb;
+	int i;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	int ret;
+#endif
+
+	if(!br || !dev)
+		return 0;
+
+	if(!netdev_path_is_leaf(dev))
+		return 0;
+
+	spin_lock_bh(&br->mcl_lock);
+	for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(mc_fdb, h, n, &br->mc_hash[i], hlist) 
+		{
+			if ((mc_fdb->dst->dev == dev) ||
+			    (mc_fdb->from_dev == dev))
+			{
+				/* do not remove the root entry */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+				if ((0 == mc_fdb->root) || (1 == flushAll))
+				{
+					br_igmp_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+				}
+				else
+				{
+					br_mcast_blog_release(BR_MCAST_PROTO_IGMP, (void *)mc_fdb);
+					mc_fdb->blog_idx = BLOG_KEY_INVALID;
+				}
+#else
+				if (1 == flushAll)
+				{
+					br_igmp_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+				}
+#endif
+			}
+		}
+	}
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	if (0 == flushAll)
+	{
+		for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+		{
+			struct hlist_node *h, *n;
+			hlist_for_each_entry_safe(mc_fdb, h, n, &br->mc_hash[i], hlist) 
+			{ 
+				if ( (1 == mc_fdb->root) && 
+				     ((mc_fdb->dst->dev == dev) ||
+				      (mc_fdb->from_dev == dev)) )
+				{
+					mc_fdb->wan_tci  = 0;
+					mc_fdb->num_tags = 0; 
+					ret = br_mcast_blog_process(br, (void*)mc_fdb, BR_MCAST_PROTO_IGMP);
+					if(ret < 0)
+					{
+						/* br_mcast_blog_process may return -1 if there are no blog rules
+						 * which may be a valid scenario, in which case we delete the
+						 * multicast entry.
+						 */
+						br_igmp_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+						//printk(KERN_DEBUG "%s: Failed to create the blog\n", __FUNCTION__);
+					}
+				}
+			}
+		}
+	}
+#endif   
+	br_igmp_set_timer(br);
+	spin_unlock_bh(&br->mcl_lock);
+
+	return 0;
+}
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+/* This is a support function for vlan/blog processing that requires that 
+   br->mcl_lock is already held */
+struct net_bridge_mc_fdb_entry *br_igmp_mc_fdb_copy(
+                       struct net_bridge *br, 
+                       const struct net_bridge_mc_fdb_entry *igmp_fdb)
+{
+	struct net_bridge_mc_fdb_entry *new_igmp_fdb = NULL;
+	struct net_bridge_mc_rep_entry *rep_entry = NULL;
+	struct net_bridge_mc_rep_entry *rep_entry_n = NULL;
+	int success = 1;
+	struct hlist_head *head = NULL;
+
+	new_igmp_fdb = kmem_cache_alloc(br_igmp_mc_fdb_cache, GFP_ATOMIC);
+	if (new_igmp_fdb)
+	{
+		memcpy(new_igmp_fdb, igmp_fdb, sizeof(struct net_bridge_mc_fdb_entry));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		new_igmp_fdb->blog_idx = BLOG_KEY_INVALID;
+#endif
+		new_igmp_fdb->root = 0;
+		INIT_LIST_HEAD(&new_igmp_fdb->rep_list);
+
+		list_for_each_entry(rep_entry, &igmp_fdb->rep_list, list) {
+			rep_entry_n = kmem_cache_alloc(br_igmp_mc_rep_cache, GFP_ATOMIC);
+			if(rep_entry_n)
+			{
+				memcpy(rep_entry_n, 
+				       rep_entry, 
+				       sizeof(struct net_bridge_mc_rep_entry));
+				list_add_tail(&rep_entry_n->list, &new_igmp_fdb->rep_list);
+			}
+			else 
+			{
+				success = 0;
+				break;
+			}
+		}
+
+		if(success)
+		{
+			head = &br->mc_hash[br_igmp_mc_fdb_hash(igmp_fdb->txGrp.s_addr)];
+			hlist_add_head(&new_igmp_fdb->hlist, head);
+		}
+		else
+		{
+			list_for_each_entry_safe(rep_entry, 
+			                         rep_entry_n, &new_igmp_fdb->rep_list, list) {
+				list_del(&rep_entry->list);
+				kmem_cache_free(br_igmp_mc_rep_cache, rep_entry);
+			}
+			kmem_cache_free(br_igmp_mc_fdb_cache, new_igmp_fdb);
+			new_igmp_fdb = NULL;
+		}
+	}
+
+	return new_igmp_fdb;
+} /* br_igmp_mc_fdb_copy */
+#endif
+
+static void *snoop_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct net_device *dev;
+	loff_t offs = 0;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		if((dev->priv_flags & IFF_EBRIDGE) && (*pos == offs)) {
+			return dev;
+		}
+	}
+	++offs;
+	return NULL;
+}
+
+static void *snoop_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct net_device *dev = v;
+
+	++*pos;
+	for(dev = next_net_device_rcu(dev); dev; dev = next_net_device_rcu(dev)) {
+		if(dev->priv_flags & IFF_EBRIDGE) {
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static void snoop_display_entry(struct seq_file *seq,
+                                struct net_bridge *br,
+                                struct net_bridge_mc_fdb_entry *dst)
+{
+	struct net_bridge_mc_rep_entry *rep_entry;
+	int                             first;
+	int                             tstamp;
+	unsigned char                  *txAddressP = (unsigned char *)&(dst->txGrp.s_addr);
+	unsigned char                  *rxAddressP = (unsigned char *)&(dst->rxGrp.s_addr);
+	unsigned char                  *srcAddressP = (unsigned char *)&(dst->src_entry.src.s_addr);
+
+	seq_printf(seq, "%-6s %-6s %-7s %02d    0x%04x   0x%04x%04x", 
+	           br->dev->name, 
+	           dst->dst->dev->name, 
+	           dst->from_dev->name, 
+	           dst->num_tags,
+	           ntohs(dst->lan_tci),
+	           ((dst->wan_tci >> 16) & 0xFFFF),
+	           (dst->wan_tci & 0xFFFF));
+
+	seq_printf(seq, " %03u.%03u.%03u.%03u", txAddressP[0],txAddressP[1],txAddressP[2],txAddressP[3]);
+
+	seq_printf(seq, " %-4s %03u.%03u.%03u.%03u %03u.%03u.%03u.%03u", 
+	           (dst->src_entry.filt_mode == MCAST_EXCLUDE) ? 
+	           "EX" : "IN",  
+	           rxAddressP[0],rxAddressP[1],rxAddressP[2],rxAddressP[3], 
+	           srcAddressP[0],srcAddressP[1],srcAddressP[2],srcAddressP[3] );
+
+	first = 1;
+	list_for_each_entry(rep_entry, &dst->rep_list, list)
+	{ 
+		unsigned char *repAddressP = (unsigned char *)&(rep_entry->rep.s_addr);
+
+		if ( 0 == br->igmp_snooping )
+		{
+			tstamp = 0;
+		}
+		else
+		{
+			tstamp = (int)(rep_entry->tstamp - jiffies) / HZ;
+		}
+
+		if(first)
+		{
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			seq_printf(seq, " %03u.%03u.%03u.%03u %-7d 0x%08x %d\n", 
+			           repAddressP[0],repAddressP[1],repAddressP[2],repAddressP[3],
+			           tstamp, dst->blog_idx, dst->excludePort);
+#else
+			seq_printf(seq, " %03u.%03u.%03u.%03u %-7d %d\n",
+			           repAddressP[0],repAddressP[1],repAddressP[2],repAddressP[3],
+			           tstamp, dst->excludePort);
+#endif
+			first = 0;
+		}
+		else
+		{
+			seq_printf(seq, "%100s %03u.%03u.%03u.%03u %-7d\n", " ", 
+			           repAddressP[0],repAddressP[1],repAddressP[2],repAddressP[3],
+			           tstamp);
+		}
+	}
+}
+
+static int snoop_seq_show(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = v;
+	struct net_bridge *br = netdev_priv(dev);
+	int i;
+
+	seq_printf(seq, "igmp snooping %d  lan2lan-snooping %d/%d, rate-limit %dpps, priority %d\n",
+	           br->igmp_snooping, 
+	           br->igmp_lan2lan_mc_enable,
+	           br_mcast_get_lan2lan_snooping(BR_MCAST_PROTO_IGMP, br),
+	           br->igmp_rate_limit,
+	           br_mcast_get_pri_queue());
+	seq_printf(seq, "bridge device src-dev #tags lan-tci  wan-tci");
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	seq_printf(seq, "    group           mode RxGroup         source          reporter        timeout Index      ExcludPt\n");
+#else
+	seq_printf(seq, "    group           mode RxGroup         source          reporter        timeout ExcludPt\n");
+#endif
+
+	for (i = 0; i < BR_IGMP_HASH_SIZE; i++) 
+	{
+		struct net_bridge_mc_fdb_entry *entry;
+		struct hlist_node *pos;
+		hlist_for_each_entry(entry, pos, &br->mc_hash[i], hlist) 
+		{
+			snoop_display_entry(seq, br, entry);
+		}
+	}
+
+	return 0;
+}
+
+static void snoop_seq_stop(struct seq_file *seq, void *v)
+{
+	rcu_read_unlock();
+}
+
+static struct seq_operations snoop_seq_ops = {
+	.start = snoop_seq_start,
+	.next  = snoop_seq_next,
+	.stop  = snoop_seq_stop,
+	.show  = snoop_seq_show,
+};
+
+static int snoop_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &snoop_seq_ops);
+}
+
+static struct file_operations br_igmp_snoop_proc_fops = {
+	.owner = THIS_MODULE,
+	.open  = snoop_seq_open,
+	.read  = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+void br_igmp_snooping_br_init( struct net_bridge *br )
+{
+	spin_lock_init(&br->mcl_lock);
+	br->igmp_lan2lan_mc_enable = BR_MC_LAN2LAN_STATUS_DEFAULT;
+	setup_timer(&br->igmp_timer, br_igmp_timeout, (unsigned long)br);   
+}
+
+void br_igmp_snooping_br_fini( struct net_bridge *br )
+{
+	del_timer_sync(&br->igmp_timer);
+}
+
+int __init br_igmp_snooping_init(void)
+{
+	br_igmp_entry = proc_create("igmp_snooping", 0, init_net.proc_net,
+			   &br_igmp_snoop_proc_fops);
+
+	if(!br_igmp_entry) {
+		printk("error while creating igmp_snooping proc\n");
+		return -ENOMEM;
+	}
+
+	br_igmp_mc_fdb_cache = kmem_cache_create("bridge_igmp_mc_fdb_cache",
+	                                         sizeof(struct net_bridge_mc_fdb_entry),
+	                                         0,
+	                                         SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_igmp_mc_fdb_cache)
+		return -ENOMEM;
+
+	br_igmp_mc_rep_cache = kmem_cache_create("bridge_igmp_mc_rep_cache",
+	                                         sizeof(struct net_bridge_mc_rep_entry),
+	                                         0,
+	                                         SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_igmp_mc_rep_cache)
+	{
+		kmem_cache_destroy(br_igmp_mc_fdb_cache);
+		return -ENOMEM;
+	}
+
+	get_random_bytes(&br_igmp_mc_fdb_salt, sizeof(br_igmp_mc_fdb_salt));
+
+	return 0;
+}
+
+void br_igmp_snooping_fini(void)
+{
+	kmem_cache_destroy(br_igmp_mc_fdb_cache);
+	kmem_cache_destroy(br_igmp_mc_rep_cache);
+
+	return;
+}
+#endif /* CONFIG_BR_IGMP_SNOOP */
+#endif /* CONFIG_BCM_KF_IGMP */
diff --git a/net/bridge/br_igmp.h b/net/bridge/br_igmp.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3682c88eb9ab7b46fc110a1d90e12914852e7b5
--- /dev/null
+++ b/net/bridge/br_igmp.h
@@ -0,0 +1,172 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#ifndef _BR_IGMP_H
+#define _BR_IGMP_H
+
+#if defined(CONFIG_BCM_KF_IGMP)
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/igmp.h>
+#include <linux/in.h>
+#include "br_private.h"
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+#include "br_mcast.h"
+
+#if defined(CONFIG_BR_IGMP_SNOOP)
+
+#define TIMER_CHECK_TIMEOUT (2*HZ)
+#define BR_IGMP_MEMBERSHIP_TIMEOUT 260 /* RFC3376 */
+
+struct net_bridge_mc_src_entry
+{
+	struct in_addr   src;
+	unsigned long    tstamp;
+	int              filt_mode;
+};
+
+struct net_bridge_mc_rep_entry
+{
+	struct in_addr      rep;
+	unsigned char       repMac[6];
+	unsigned long       tstamp;
+	struct list_head    list;
+};
+
+struct net_bridge_mc_fdb_entry
+{
+   struct hlist_node               hlist;
+   struct net_bridge_port         *dst;
+   struct in_addr                  rxGrp;
+   struct in_addr                  txGrp;
+   struct list_head                rep_list;
+   struct net_bridge_mc_src_entry  src_entry;
+   uint16_t                        lan_tci; /* vlan id */
+   uint32_t                        wan_tci; /* vlan id */
+   int                             num_tags;
+   char                            type;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+   uint32_t                        blog_idx;
+   char                            root;
+#endif
+   uint32_t                        info;
+   int                             lanppp;
+   int                             excludePort;
+   char                            enRtpSeqCheck;  
+   struct net_device              *from_dev;
+};
+
+enum mcpd_packet_admitted
+{
+  MCPD_PACKET_ADMITTED_NO      = 0,
+  MCPD_PACKET_ADMITTED_YES     = 1,
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+  MCPD_MLD_PACKET_ADMITTED_NO  = 2,
+  MCPD_MLD_PACKET_ADMITTED_YES = 3,
+#endif
+};
+
+typedef struct
+{
+   int                       bridgePointer;
+   int                       skbPointer;
+   enum mcpd_packet_admitted admitted;
+} t_MCPD_ADMISSION;
+
+int br_igmp_control_filter(const unsigned char *dest, __be32 dest_ip);
+
+void mcpd_nl_send_igmp_purge_entry(struct net_bridge_mc_fdb_entry *igmp_entry,
+                                   struct net_bridge_mc_rep_entry *rep_entry);
+
+int br_igmp_blog_rule_update(struct net_bridge_mc_fdb_entry *mc_fdb, int wan_ops);
+
+int br_igmp_mc_forward(struct net_bridge *br, 
+                       struct sk_buff *skb, 
+                       int forward,
+                       int is_routed);
+void br_igmp_delbr_cleanup(struct net_bridge *br);
+
+int br_igmp_mc_fdb_add(struct net_device *from_dev,
+                       int wan_ops,
+                       struct net_bridge *br, 
+                       struct net_bridge_port *prt, 
+                       struct in_addr *rxGrp, 
+                       struct in_addr *txGrp, 
+                       struct in_addr *rep,
+                       unsigned char *repMac,
+                       int mode, 
+                       uint16_t tci, 
+                       struct in_addr *src,
+                       int lanppp,
+                       int excludePort,
+                       char enRtpSeqCheck,
+                       uint32_t info);
+
+void br_igmp_mc_fdb_cleanup(struct net_bridge *br);
+int br_igmp_mc_fdb_remove(struct net_device *from_dev,
+                          struct net_bridge *br, 
+                          struct net_bridge_port *prt, 
+                          struct in_addr *rxGrp, 
+                          struct in_addr *txGrp, 
+                          struct in_addr *rep, 
+                          int mode, 
+                          struct in_addr *src,
+                          uint32_t info);
+int br_igmp_mc_fdb_update_bydev( struct net_bridge *br,
+                                 struct net_device *dev,
+                                 unsigned int       flushAll);
+int __init br_igmp_snooping_init(void);
+void br_igmp_snooping_fini(void);
+void br_igmp_set_snooping(int val);
+void br_igmp_handle_netdevice_events(struct net_device *ndev, unsigned long event);
+void br_igmp_wipe_reporter_for_port (struct net_bridge *br,
+                                     struct in_addr *rep, 
+                                     u16 oldPort);
+void br_igmp_wipe_reporter_by_mac (struct net_bridge *br,
+                                   unsigned char *repMac);
+int br_igmp_process_if_change(struct net_bridge *br, struct net_device *ndev);
+struct net_bridge_mc_fdb_entry *br_igmp_mc_fdb_copy(struct net_bridge *br, 
+                                     const struct net_bridge_mc_fdb_entry *igmp_fdb);
+void br_igmp_mc_fdb_del_entry(struct net_bridge *br, 
+                              struct net_bridge_mc_fdb_entry *igmp_fdb,
+                              struct in_addr *rep,
+                              unsigned char *repMac);
+void br_igmp_set_timer( struct net_bridge *br );
+void br_igmp_process_timer_check ( struct net_bridge *br );
+void br_igmp_process_admission (t_MCPD_ADMISSION* admit);
+void br_igmp_wipe_pending_skbs( void );
+void br_igmp_process_device_removal(struct net_device* dev);
+#endif /* CONFIG_BR_IGMP_SNOOP */
+
+void br_igmp_get_ip_igmp_hdrs( const struct sk_buff *pskb, struct iphdr **ppipmcast, struct igmphdr **ppigmp, int *lanppp);
+#endif /* CONFIG_BCM_KF_IGMP */
+
+#endif /* _BR_IGMP_H */
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5a31731be4d066f168579552981bd3e28b2f0ac4..750c1a83b15b8244f52538290b69546872577ea3 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -18,6 +18,33 @@
 #include <linux/netfilter_bridge.h>
 #include <linux/export.h>
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_IP)
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/igmp.h>
+#include <linux/ip.h>
+#include <linux/ktime.h>
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+#if defined(CONFIG_BCM_KF_IGMP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+#if defined(CONFIG_BCM_KF_WL)
+#if defined(PKTC)
+#include <linux_osl_dslcpe_pktc.h>
+#include "linux/bcm_skb_defines.h"
+
+uint32_t (*wl_pktc_req_hook)(int req_id, uint32_t param0, uint32_t param1, uint32_t param2) = NULL;
+EXPORT_SYMBOL(wl_pktc_req_hook);
+uint32_t (*dhd_pktc_req_hook)(int req_id, uint32_t param0, uint32_t param1, uint32_t param2) = NULL;
+EXPORT_SYMBOL(dhd_pktc_req_hook);
+#endif /* PKTC */
+#endif
 
 /* Bridge group multicast address 802.1d (pg 51). */
 const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
@@ -32,6 +59,16 @@ static int br_pass_frame_up(struct sk_buff *skb)
 	struct net_bridge *br = netdev_priv(brdev);
 	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(IF_DEVICE, blog_ptr(skb), (void*)br->dev, DIR_RX, skb->len);
+	blog_unlock();
+
+	/* Gather general RX statistics */
+	brdev->stats.rx_packets++;
+	brdev->stats.rx_bytes += skb->len;
+#endif
+
 	u64_stats_update_begin(&brstats->syncp);
 	brstats->rx_packets++;
 	brstats->rx_bytes += skb->len;
@@ -53,19 +90,124 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
 	struct sk_buff *skb2;
-
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+	struct iphdr *pipmcast = NULL;
+	struct igmphdr *pigmp = NULL;
+#endif
+#if (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+	struct ipv6hdr *pipv6mcast = NULL;
+	struct icmp6hdr *picmpv6 = NULL;
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	__u16 vid = VLAN_N_VID;
+#endif
 	if (!p || p->state == BR_STATE_DISABLED)
 		goto drop;
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+	br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, NULL);
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+	if (pigmp != NULL) {
+#if defined(CONFIG_BCM_GPON_MODULE)
+		/* drop IGMP v1 report packets */
+		if (pigmp->type == IGMP_HOST_MEMBERSHIP_REPORT)
+			goto drop;
+
+		/* drop IGMP v1 query packets */
+		if ((pigmp->type == IGMP_HOST_MEMBERSHIP_QUERY) &&
+		    (pigmp->code == 0))
+			goto drop;
+
+		/* drop IGMP leave packets for group 0.0.0.0 */
+		if ((pigmp->type == IGMP_HOST_LEAVE_MESSAGE) &&
+		    (0 == pigmp->group))
+			goto drop;
+#endif
+		/* rate limit IGMP */
+		br = p->br;
+		if (br->igmp_rate_limit) {
+			ktime_t curTime;
+			u64 diffUs;
+			unsigned int usPerPacket;
+			unsigned int temp32;
+			unsigned int burstLimit;
+
+			/* add tokens to the bucket - compute in microseconds */
+			curTime = ktime_get();
+			usPerPacket = (1000000 / br->igmp_rate_limit);
+			diffUs = ktime_to_us(ktime_sub(curTime,
+						br->igmp_rate_last_packet));
+			diffUs += br->igmp_rate_rem_time;
+
+			/* allow 25% burst */
+			burstLimit = br->igmp_rate_limit >> 2;
+			if (0 == burstLimit)
+				burstLimit = 1;
+
+			if (diffUs > 1000000) {
+				br->igmp_rate_bucket = burstLimit;
+				br->igmp_rate_rem_time = 0;
+			} else {
+				temp32 = (unsigned int)diffUs / usPerPacket;
+				br->igmp_rate_bucket += temp32;
+				if (temp32)
+					br->igmp_rate_rem_time = diffUs - (temp32 * usPerPacket);
+			}
+
+			if (br->igmp_rate_bucket > burstLimit) {
+				br->igmp_rate_bucket = burstLimit;
+				br->igmp_rate_rem_time = 0;
+			}
+
+			/* if bucket is empty drop the packet */
+			if (0 == br->igmp_rate_bucket)
+				goto drop;
+
+			br->igmp_rate_bucket--;
+			br->igmp_rate_last_packet.tv64 = curTime.tv64;
+		}
+	}
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	if (skb->vlan_count)
+ 		vid = (skb->vlan_header[0] >> 16) & VLAN_VID_MASK;
+	else
+#endif /* CONFIG_BCM_VLAN) */
+	/* 
+	*  dev.c/__netif_receive_skb(): if proto == ETH_P_8021Q
+	*  call vlan_untag() to remove tag and save vid in skb->vlan_tci
+	*/
+	if (vlan_tx_tag_present(skb))
+		vid = skb->vlan_tci & VLAN_VID_MASK;
+	else if ( vlan_eth_hdr(skb)->h_vlan_proto == htons(ETH_P_8021Q) )
+		vid = ntohs(vlan_eth_hdr(skb)->h_vlan_TCI) & VLAN_VID_MASK;
+#endif
+
 	/* insert into forwarding database after filtering to avoid spoofing */
 	br = p->br;
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+#else
 	br_fdb_update(br, p, eth_hdr(skb)->h_source);
+#endif
 
 	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
 	    br_multicast_rcv(br, p, skb))
 		goto drop;
 
+#if defined(CONFIG_BCM_KF_WL)
+	if ((p->state == BR_STATE_LEARNING) &&
+	    (skb->protocol != htons(0x886c) /*ETHER_TYPE_BRCM*/) &&
+	    (skb->protocol != htons(0x888e) /*ETHER_TYPE_802_1X*/) &&
+	    (skb->protocol != htons(0x88c7) /*ETHER_TYPE_802_1X_PREAUTH*/))
+#else
 	if (p->state == BR_STATE_LEARNING)
+#endif
 		goto drop;
 
 	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
@@ -78,8 +220,31 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
 	dst = NULL;
 
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	br_mld_get_ip_icmp_hdrs(skb, &pipv6mcast, &picmpv6, NULL);
+	if (pipv6mcast != NULL) {
+		if (br_mld_mc_forward(br, skb, 1, 0))
+			/* packet processed by mld snooping - no further processing required */
+			goto out;
+	} else
+#endif
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	if (pipmcast != NULL) {
+		if (br_igmp_mc_forward(br, skb, 1, 0))
+			/* packet processed by igmp snooping - no further processing required */
+			goto out;
+	}
+#endif
+
 	if (is_broadcast_ether_addr(dest))
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	{
+		br->dev->stats.rx_broadcast_packets++;
+#endif
 		skb2 = skb;
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+	}
+#endif
 	else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
@@ -94,17 +259,138 @@ int br_handle_frame_finish(struct sk_buff *skb)
 			skb2 = skb;
 
 		br->dev->stats.multicast++;
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+		br->dev->stats.rx_multicast_bytes += skb2->len;
+#endif
+#if !(defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG))
 	} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
 		skb2 = skb;
 		/* Do not forward the packet since it's local. */
 		skb = NULL;
 	}
-
+#else
+	} else {
+		struct net_bridge_fdb_entry *src;
+
+		dst = __br_fdb_get(br, dest);
+		src = __br_fdb_get(br, eth_hdr(skb)->h_source);
+		blog_lock();
+		blog_link(BRIDGEFDB, blog_ptr(skb), (void*)src, BLOG_PARAM1_SRCFDB, 0);
+		blog_link(BRIDGEFDB, blog_ptr(skb), (void*)dst, BLOG_PARAM1_DSTFDB, 0);
+		blog_unlock();
+
+#if defined(PKTC)
+		/* wlan pktc */
+		if ((dst != NULL) && (dst->dst != NULL) && (!dst->is_local)) {
+#if defined(CONFIG_BCM_KF_WL)
+			u8 from_wl_to_switch=0, from_switch_to_wl=0;
+			struct net_device *root_dst_dev_p = dst->dst->dev;
+			BlogPhy_t srcPhyType, dstPhyType;
+			uint32_t chainIdx;
+			uint32_t pktc_tx_enabled = wl_pktc_req_hook ? 
+						wl_pktc_req_hook(GET_PKTC_TX_MODE, 0, 0, 0) : 0;
+
+			src = __br_fdb_get(br, eth_hdr(skb)->h_source);
+			if (unlikely(src == NULL))
+				goto next;
+
+			srcPhyType = BLOG_GET_PHYTYPE(src->dst->dev->path.hw_port_type);
+			dstPhyType = BLOG_GET_PHYTYPE(dst->dst->dev->path.hw_port_type);
+
+
+			if ((srcPhyType == BLOG_WLANPHY) &&
+			    (dstPhyType == BLOG_ENETPHY)) {
+				from_wl_to_switch = 1;
+				while (!netdev_path_is_root(root_dst_dev_p) &&
+				       (root_dst_dev_p->priv_flags & IFF_BCM_VLAN)) {
+					root_dst_dev_p = netdev_path_next_dev(root_dst_dev_p);
+				}
+			} else if ((srcPhyType == BLOG_ENETPHY || srcPhyType == BLOG_XTMPHY ||
+ 				srcPhyType == BLOG_EPONPHY || srcPhyType == BLOG_GPONPHY) &&
+ 				(dstPhyType == BLOG_WLANPHY) &&
+ 				pktc_tx_enabled)
+  			{ 
+				from_switch_to_wl = 1;
+   			}
+
+#if defined(CONFIG_BCM_KF_WANDEV)
+			if ((from_wl_to_switch || from_switch_to_wl) &&
+			    !(dst->dst->dev->priv_flags & IFF_WANDEV) &&
+			    netdev_path_is_root(root_dst_dev_p)) {
+			/* Also check for non-WAN cases.
+			 * For the Rx direction, VLAN cases are allowed as long 
+			 * as the packets are untagged.
+			 *
+			 * Tagged packets are not forwarded through the chaining 
+			 * path by WLAN driver. Tagged packets go through the
+			 * flowcache path.
+			 * see wlc_sendup_chain() function for reference.
+			 *
+			 * For the Tx direction, there are no VLAN interfaces 
+			 * created on wl device when LAN_VLAN flag is enabled 
+			 * in the build.
+			 *
+			 * The netdev_path_is_root() check makes sure that we 
+			 * are always transmitting to a root device */
+			 
+			    /* Update chaining table for DHD on the wl to switch direction only */
+				if (from_wl_to_switch && (dhd_pktc_req_hook != NULL)) {
+					dhd_pktc_req_hook(UPDATE_BRC_HOT,
+								     (uint32_t)&(dst->addr.addr[0]),
+								     (uint32_t)root_dst_dev_p, 0);
+				}
+			 
+			 	/* Update chaining table for WL (NIC driver) */
+				chainIdx = wl_pktc_req_hook ? 
+								wl_pktc_req_hook(UPDATE_BRC_HOT,
+								     (uint32_t)&(dst->addr.addr[0]),
+								     (uint32_t)root_dst_dev_p, 0) : INVALID_CHAIN_IDX;
+				if (chainIdx != INVALID_CHAIN_IDX) {
+					/* Update chainIdx in blog
+					 * chainEntry->tx_dev will always be NOT 
+					 * NULL as we just added that above */
+					if (skb->blog_p != NULL) {
+						if (from_switch_to_wl)
+						{
+							skb->blog_p->wfd.nic_ucast.is_tx_hw_acc_en = 1;
+							skb->blog_p->wfd.nic_ucast.is_wfd = 1;
+							skb->blog_p->wfd.nic_ucast.is_chain = 1;
+							skb->blog_p->wfd.nic_ucast.wfd_idx = ((chainIdx & WFD_IDX_UINT16_BIT_MASK) >> WFD_IDX_UINT16_BIT_POS);
+							skb->blog_p->wfd.nic_ucast.chain_idx = chainIdx;
+						}
+#if 0
+						printk("Added ChainEntryIdx 0x%x Dev %s blogSrcAddr 0x%x blogDstAddr 0x%x DstMac %x:%x:%x:%x:%x:%x "
+						       "wfd_q %d wl_metadata %d wl 0x%x\n",
+						       chainIdx, dst->dst->dev->name, skb->blog_p->rx.tuple.saddr, skb->blog_p->rx.tuple.daddr,
+						       dst->addr.addr[0], dst->addr.addr[1], dst->addr.addr[2], dst->addr.addr[3], dst->addr.addr[4],
+						       dst->addr.addr[5], skb->blog_p->wfd_queue, skb->blog_p->wl_metadata, skb->blog_p->wl);
+#endif
+					}
+				}
+			}
+#endif /* CONFIG_BCM_KF_WANDEV */
+#endif
+		}
+next:
+#endif /* PKTC */
+		if ((dst != NULL) && dst->is_local) {
+			skb2 = skb;
+			/* Do not forward the packet since it's local. */
+			skb = NULL;
+		}
+	}
+#endif
 	if (skb) {
 		if (dst) {
 			dst->used = jiffies;
 			br_forward(dst->dst, skb, skb2);
 		} else
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+			if (BR_STATE_BLOCKING == p->state)
+				/* prevent flooding unknown unicast from blocked port */
+				goto drop;
+			else
+#endif
 			br_flood_forward(br, skb, skb2);
 	}
 
@@ -123,7 +409,11 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	br_fdb_update(p->br, p, eth_hdr(skb)->h_source, VLAN_N_VID);
+#else
 	br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+#endif
 	return 0;	 /* process further */
 }
 
@@ -204,8 +494,51 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 	}
 
 forward:
+#if defined(CONFIG_BCM_KF_IEEE1905) && defined(CONFIG_BCM_IEEE1905)
+	/* allow broute to forward packets to the stack in any STP state */
+	rhook = rcu_dereference(br_should_route_hook);
+	if (rhook) {
+		if ((*rhook)(skb)) {
+			*pskb = skb;
+			if ((skb->protocol == htons(0x893a)) ||
+			    (skb->protocol == htons(0x8912)) ||
+			    (skb->protocol == htons(0x88e1)))
+				br_handle_local_finish(skb);
+
+			return RX_HANDLER_PASS;
+		} else if (skb->protocol == htons(0x893a) &&
+			   (skb->pkt_type == PACKET_MULTICAST))
+			/* do not bridge multicast 1905 packets when 1905 is compiled */
+			goto drop;
+
+		dest = eth_hdr(skb)->h_dest;
+	}
+#endif
+
+#if defined(CONFIG_BCM_KF_WL)
+	if (( (skb->protocol == htons(0x886c) /*ETHER_TYPE_BRCM*/) ||
+           (skb->protocol == htons(0x888e) /*ETHER_TYPE_802_1X*/) ||
+           (skb->protocol == htons(0x88c7) /*ETHER_TYPE_802_1X_PREAUTH*/) ) &&
+	    (p->state != BR_STATE_FORWARDING) &&
+	    (p->state != BR_STATE_DISABLED)) {
+		/* force to forward brcm_type event packet */
+		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+			br_handle_frame_finish);
+		return RX_HANDLER_CONSUMED;
+	}
+#endif
+
 	switch (p->state) {
+#if defined(CONFIG_BCM_KF_FBOND) && (defined(CONFIG_BCM_FBOND) || defined(CONFIG_BCM_FBOND_MODULE))
+	case BR_STATE_BLOCKING:
+		/* if this is unicast let it through even if the port is blocked 
+		   it will be dropped later if a destination is not found to
+                   prevent flooding unicast from a blocked port */
+		if (is_multicast_ether_addr(dest))
+			goto drop;
+#endif
 	case BR_STATE_FORWARDING:
+#if !defined(CONFIG_BCM_KF_IEEE1905) || !defined(CONFIG_BCM_IEEE1905)
 		rhook = rcu_dereference(br_should_route_hook);
 		if (rhook) {
 			if ((*rhook)(skb)) {
@@ -214,6 +547,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 			}
 			dest = eth_hdr(skb)->h_dest;
 		}
+#endif
 		/* fall through */
 	case BR_STATE_LEARNING:
 		if (!compare_ether_addr(p->br->dev->dev_addr, dest))
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 7222fe1d5460203ad5d1edff1ee5331d49fdb036..d0efdda6ce5e4a5bdd5ba91709edab99d6439eb6 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -20,6 +20,21 @@
 #include <net/net_namespace.h>
 #include <asm/uaccess.h>
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/module.h> 
+#include "br_flows.h"
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/bcm_log.h>
+#endif
+
 
 /* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
@@ -82,6 +97,371 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
 	return num;
 }
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB) || defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+/* called with RTNL */
+static int add_fdb_entries(struct net_bridge *br, void __user *userbuf,
+			   unsigned long maxnum, int ifindex, int isStatic)
+{
+	struct net_device *dev;
+	unsigned char     *pMacAddr = NULL;
+	unsigned char     *pMac = NULL;
+	int                size;
+	int                i;
+	int                ret = 0;
+	struct net_bridge_port *port;
+
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+
+	dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	if (!dev)
+	{
+		return -EINVAL;
+	}
+
+	port = br_port_get_rtnl(dev);
+	if (!port)
+	{
+		dev_put(dev);
+		return -EINVAL;
+	}
+
+	size     = maxnum * ETH_ALEN;
+	pMacAddr = kmalloc(size, GFP_KERNEL);
+	if (!pMacAddr)
+	{
+		dev_put(dev);
+		return -ENOMEM;
+	}
+
+	copy_from_user(pMacAddr, userbuf, size);
+
+	pMac = pMacAddr;
+	for ( i = 0; i < maxnum; i++ )
+	{
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+		if(isStatic)
+		{   
+			ret = br_fdb_adddel_static(br, port, (const unsigned char *)pMac, 1);
+		}
+		else
+#endif
+		{        
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+			br_fdb_update(br, port, (const unsigned char *)pMac, VLAN_N_VID);
+#else
+			br_fdb_update(br, port, (const unsigned char *)pMac);
+#endif
+			ret = 0;
+		}
+		pMac += ETH_ALEN;
+	}
+
+	kfree(pMacAddr);
+
+	dev_put(dev);
+   
+	return ret;
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+/* called with RTNL */
+static int del_dyn_fdb_entries(struct net_bridge *br, int ifindex)
+{
+	struct net_device *dev;
+	struct net_bridge_port *port;
+
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+
+	dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	if (!dev)
+	{
+		return -EINVAL;
+	}
+
+	port = br_port_get_rtnl(dev);
+	if (!port)
+	{
+		dev_put(dev);
+		return -EINVAL;
+	}
+
+	br_fdb_delete_by_port(br, port, 0);
+	dev_put(dev);
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+/* called with RTNL */
+static int block_stp_loop(struct net_bridge *br, int ifindex)
+{
+    struct net_device *dev;
+    struct net_bridge_port *port;
+
+    dev = dev_get_by_index(dev_net(br->dev), ifindex);
+    if (!dev)
+    {  
+        return -EINVAL;
+    }
+    
+    port = br_port_get_rtnl(dev);
+    if (!port)
+    {
+        dev_put(dev);
+        return -EINVAL;
+    }
+    port->is_bpdu_blocked = !(port->is_bpdu_blocked);
+    dev_put(dev);
+    printk("%s: port bpdus are %s\n", dev->name, port->is_bpdu_blocked ? "BLOCKED":"unblocked");
+    return 0;
+}
+
+static int mark_dedicated_stp(struct net_bridge *br, int ifindex, int isDedicated)
+{
+    struct net_device *dev;
+    struct net_bridge_port *port;
+    
+    dev = dev_get_by_index(dev_net(br->dev), ifindex);
+    if (!dev)
+    {  
+        return -EINVAL;
+    }
+
+    port = br_port_get_rtnl(dev);
+    
+    if (!port)
+    {
+        dev_put(dev);
+        return -EINVAL;
+    }
+    
+    port->is_dedicated_stp_port = !!isDedicated;
+    dev_put(dev);
+    printk("[%s.%d] stp port %s is %s\n", __func__, __LINE__, dev->name,
+           port->is_dedicated_stp_port ? "dedicated":"undedicated");
+    return 0;
+    
+}
+
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+/* called with RTNL */
+static int delete_fdb_entries(struct net_bridge *br, void __user *userbuf,
+			unsigned long maxnum, int ifindex)
+{
+	struct net_device *dev;
+	unsigned char     *pMacAddr = NULL;
+	unsigned char     *pMac = NULL;
+	int                size;
+	int                i;
+	int                ret = 0;
+	struct net_bridge_port *port;
+
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+
+	dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	if (!dev)
+	{
+		return -EINVAL;
+	}
+
+	port = br_port_get_rtnl(dev);
+	if (!port)
+	{
+		dev_put(dev);
+		return -EINVAL;
+	}
+
+	size     = maxnum * ETH_ALEN;
+	pMacAddr = kmalloc(size, GFP_KERNEL);
+	if (!pMacAddr)
+	{
+		dev_put(dev);
+		return -ENOMEM;
+	}
+
+	copy_from_user(pMacAddr, userbuf, size);
+
+	pMac = pMacAddr;
+	for ( i = 0; i < maxnum; i++ )
+	{
+		ret = br_fdb_adddel_static(br, port, (const unsigned char *)pMac, 0);
+		pMac += ETH_ALEN;
+	}
+
+	kfree(pMacAddr);
+
+	dev_put(dev);
+
+	return ret;
+}
+
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+/* called with RTNL */
+static int add_fdb_dynamic_entries(struct net_bridge *br, void __user *userbuf,
+			   unsigned long maxnum, int ifindex)
+{
+	return add_fdb_entries(br, userbuf, maxnum, ifindex, 0);
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+/* called with RTNL */
+static int add_fdb_static_entries(struct net_bridge *br, void __user *userbuf,
+			   unsigned long maxnum, int ifindex)
+{
+	return add_fdb_entries(br, userbuf, maxnum, ifindex, 1);
+}
+
+/* called with RTNL */
+static int del_static_fdb_entries(struct net_bridge *br, int ifindex)
+{
+	struct net_device *dev;
+	struct net_bridge_port *port;
+
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+
+	dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	if (!dev)
+	{
+		return -EINVAL;
+	}
+
+	port = br_port_get_rtnl(dev);
+	if (!port)
+	{
+		dev_put(dev);
+		return -EINVAL;
+	}
+
+	br_fdb_delete_by_port(br, port, 2);
+	dev_put(dev);
+	return 0;
+}
+
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+static int get_dev_and_port(struct net_bridge *br, int ifindex,
+								struct net_device **dev, struct net_bridge_port **port)
+{
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+
+	*dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	if (!*dev)
+	{
+		return -EINVAL;
+	}
+
+	if (port != NULL)
+	{    		
+		*port = br_port_get_rtnl(*dev);
+		if (!*port)
+		{
+			dev_put(*dev);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* called with RTNL */
+/* Get FDB limit
+ *    lmtType 0: Bridge limit
+ *                1: Port limit
+ */
+static int get_fdb_mac_limit(struct net_bridge *br, int lmt_type,
+								 int ifindex, int is_min)
+{
+	struct net_device *dev = NULL;
+	struct net_bridge_port *port = NULL;
+	int fdb_limit = 0;
+	
+	if (lmt_type == 1)
+	{    
+		if (get_dev_and_port(br, ifindex, &dev, &port) != 0)
+		{
+			return -EINVAL;
+		}
+		fdb_limit = br_get_fdb_limit(NULL, port, is_min);           
+	}
+	else
+	{
+		fdb_limit = br_get_fdb_limit(br, NULL, is_min);   
+	}
+
+
+	if(dev)
+		dev_put(dev);
+	return fdb_limit;
+}
+
+static int set_fdb_mac_limit(struct net_bridge *br, int fdb_limit,
+								int lmt_type, int ifindex, int is_min)
+{
+	struct net_device *dev = NULL;
+	struct net_bridge_port *port = NULL;
+	int ret = 0;
+	
+	if (lmt_type == 1)
+	{    
+		if (get_dev_and_port(br, ifindex, &dev, &port) != 0)
+		{
+			return -EINVAL;
+		}		
+	}
+	ret = br_set_fdb_limit(br, port, lmt_type, is_min, fdb_limit);
+	if(dev)
+		dev_put(dev);
+	return ret;
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static int set_flows(struct net_bridge *br, int rxifindex, int txifindex)
+{
+	struct net_device *rxdev, *txdev;
+	int                ret = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	rxdev = dev_get_by_index(dev_net(br->dev), rxifindex);
+	if (rxdev == NULL)
+		return -EINVAL;
+
+	txdev = dev_get_by_index(dev_net(br->dev), txifindex);
+	if (txdev == NULL)
+		return -EINVAL;
+
+   br_flow_blog_rules(br, rxdev, txdev);
+
+   dev_put(rxdev);
+   dev_put(txdev);
+
+	return ret;
+}
+#endif
+
 /* called with RTNL */
 static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 {
@@ -100,6 +480,11 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 	else
 		ret = br_del_if(br, dev);
 
+#if defined(CONFIG_BCM_KF_BRIDGE_PORT_ISOLATION)
+	rcu_read_lock();
+	br_dev_notify_if_change(&br->dev->name[0]);
+	rcu_read_unlock();
+#endif
 	return ret;
 }
 
@@ -111,7 +496,11 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct net_bridge *br = netdev_priv(dev);
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	unsigned long args[5];
+#else
 	unsigned long args[4];
+#endif
 
 	if (copy_from_user(args, rq->ifr_data, sizeof(args)))
 		return -EFAULT;
@@ -288,12 +677,88 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 	case BRCTL_GET_FDB_ENTRIES:
 		return get_fdb_entries(br, (void __user *)args[1],
-				       args[2], args[3]);
+					   args[2], args[3]);
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+	case BRCTL_ADD_FDB_ENTRIES:
+		return add_fdb_static_entries(br, (void __user *)args[1],
+					   args[2], args[3]);
+
+	case BRCTL_DEL_FDB_ENTRIES:
+		return delete_fdb_entries(br, (void __user *)args[1],
+					   args[2], args[3]);
+
+	case BRCTL_DEL_STATIC_FDB_ENTRIES:
+		return del_static_fdb_entries(br, args[1]);
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_DYNAMIC_FDB)
+	case BRCTL_DEL_DYN_FDB_ENTRIES:
+		return del_dyn_fdb_entries(br, args[1]);
+
+	case BRCTL_ADD_FDB_DYNAMIC_ENTRIES:
+		return add_fdb_dynamic_entries(br, (void __user *)args[1],
+					   args[2], args[3]); 
+#endif
+
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+	case BRCTL_MARK_DEDICATED_STP:
+		return mark_dedicated_stp(br, args[1], args[2]);
+
+	case BRCTL_BLOCK_STP:
+		return block_stp_loop(br, args[1]);
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	case BRCTL_SET_FLOWS:
+		return set_flows(br, args[1], args[2]);
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	case BRCTL_GET_BR_FDB_LIMIT:
+	{
+		int fdb_limit;
+		
+		fdb_limit = get_fdb_mac_limit(br, args[2], args[3], args[4]);
+		if(fdb_limit < 0)
+			fdb_limit = 0;
+		if (copy_to_user((void __user *)args[1], &fdb_limit, sizeof(fdb_limit)))
+			return -EFAULT;
+		return 0;
+	}
+
+	case BRCTL_SET_BR_FDB_LIMIT:
+		return set_fdb_mac_limit(br, args[1], args[2], args[3], args[4]);        
+#endif
 	}
 
 	return -EOPNOTSUPP;
 }
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)) || defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+static int getDevice(struct net_device **dev, unsigned long arg1)
+{
+	char buf[IFNAMSIZ];
+
+	if (!capable(CAP_NET_ADMIN))
+	{
+		return -EPERM;
+	}
+	if (copy_from_user(buf, (void __user *) arg1, IFNAMSIZ))
+	{
+		return -EFAULT;
+	}
+	buf[IFNAMSIZ-1] = 0;
+	*dev = dev_get_by_name(&init_net, buf);
+	if (*dev == NULL)
+	{
+		return  -ENXIO; 	/* Could not find device */
+	}
+
+	return 0;
+}
+#endif
+
 static int old_deviceless(struct net *net, void __user *uarg)
 {
 	unsigned long args[3];
@@ -343,6 +808,159 @@ static int old_deviceless(struct net *net, void __user *uarg)
 
 		return br_del_bridge(net, buf);
 	}
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	case BRCTL_ENABLE_SNOOPING:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+		
+		br = netdev_priv(dev);
+		br->igmp_snooping = args[2];
+		dev_put(dev);
+
+		return 0;
+	}
+
+	case BRCTL_ENABLE_IGMP_LAN2LAN_MC:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+		
+		br = netdev_priv(dev);
+		br->igmp_lan2lan_mc_enable= args[2];
+
+		dev_put(dev);
+
+		return 0;
+	}
+
+	case BRCTL_GET_IGMP_LAN_TO_LAN_MCAST_ENABLED:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int enable;
+	  
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+
+		br = netdev_priv(dev);
+		enable = br_mcast_get_lan2lan_snooping (BR_MCAST_PROTO_IGMP, br);
+		if (copy_to_user((void __user *)args[2], &enable, sizeof(int)))
+		{
+			dev_put(dev);
+			return -EFAULT;
+		}
+		dev_put(dev);
+		return 0;
+	}
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	case BRCTL_MLD_ENABLE_SNOOPING:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+		
+		br = netdev_priv(dev);
+		br->mld_snooping = args[2];
+		if(br->mld_snooping==SNOOPING_DISABLED_MODE) 
+			br_mcast_wl_flush(br) ;
+		
+		dev_put(dev);
+
+		return 0;
+	}
+
+	case BRCTL_ENABLE_MLD_LAN2LAN_MC:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+		
+		br = netdev_priv(dev);
+		br->mld_lan2lan_mc_enable= args[2];
+		
+		dev_put(dev);
+
+		return 0;
+
+	}
+
+	case BRCTL_GET_MLD_LAN_TO_LAN_MCAST_ENABLED:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int enable;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+
+		br = netdev_priv(dev);
+		enable = br_mcast_get_lan2lan_snooping (BR_MCAST_PROTO_MLD, br);
+		if (copy_to_user((void __user *)args[2], &enable, sizeof(int)))
+		{
+			dev_put(dev);
+			return -EFAULT;
+		}
+		dev_put(dev);
+		return 0;
+	}   
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+	case BRCTL_ENABLE_IGMP_RATE_LIMIT:
+	{
+		struct net_device *dev = NULL;
+		struct net_bridge *br;
+		int error = getDevice(&dev, args[1]);
+		if (error)
+		{
+			return error;
+		}
+
+		br = netdev_priv(dev);
+
+		if (args[2] > 500)
+		{
+			dev_put(dev);
+			return  -EINVAL; 	/* Could not find device */
+		}
+
+		br = netdev_priv(dev);
+		br->igmp_rate_limit       = args[2];
+		br->igmp_rate_last_packet = ktime_set(0,0);
+		br->igmp_rate_bucket      = 0;
+		br->igmp_rate_rem_time    = 0;
+
+		dev_put(dev);
+
+		return 0;
+	}
+#endif
+
 	}
 
 	return -EOPNOTSUPP;
diff --git a/net/bridge/br_mcast.c b/net/bridge/br_mcast.c
new file mode 100644
index 0000000000000000000000000000000000000000..22540676ecb8709b33780edf202023b1faf9e8ad
--- /dev/null
+++ b/net/bridge/br_mcast.c
@@ -0,0 +1,1205 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+* 
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <asm/atomic.h>
+#include <linux/ip.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/if_vlan.h>
+#include <linux/blog.h>
+#include <linux/blog_rule.h>
+#include <linux/bcm_dslcpe_wlan_info.h>
+#endif
+#include <linux/bcm_skb_defines.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include "br_private.h"
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+static t_MCAST_CFG multiConfig = { -1,  /* mcastPriQueue */
+                                    0   /* thereIsAnUplink */
+                                 };
+
+void br_mcast_set_pri_queue(int val)
+{
+   multiConfig.mcastPriQueue = val;
+}
+
+int br_mcast_get_pri_queue(void)
+{
+   return multiConfig.mcastPriQueue;
+}
+
+void br_mcast_set_skb_mark_queue(struct sk_buff *skb)
+{
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+   struct iphdr        *pipmcast = NULL;
+   struct igmphdr      *pigmp = NULL;
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+   struct ipv6hdr      *pipv6mcast = NULL;
+   struct icmp6hdr     *picmpv6 = NULL;
+#endif
+   int                  isMulticast = 0;
+   const unsigned char *dest = eth_hdr(skb)->h_dest;
+
+   if ( multiConfig.mcastPriQueue == -1 )
+   {
+      return;
+   }
+
+   if (unlikely(skb->pkt_type == PACKET_LOOPBACK) || 
+       unlikely(skb->pkt_type == PACKET_HOST))
+   {
+      return;
+   }
+
+   if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+   {
+      return;
+   }
+
+   /* look for L2 mcast packet */
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+   /* do not need to differentiate IPv4 from IPv6 */
+   if ( is_multicast_ether_addr(dest) && !is_broadcast_ether_addr(dest) )
+   {
+      isMulticast = 1;
+   }
+#endif
+
+   /* check for IP mcast header */
+   if ( 0 == isMulticast )
+   {
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+      br_mld_get_ip_icmp_hdrs(skb, &pipv6mcast, &picmpv6, NULL);
+      if (pipv6mcast != NULL)
+      {
+         isMulticast = 1;
+      }
+      else
+#endif
+      {
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+         br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, NULL);
+         if ( pipmcast != NULL )
+         {
+            isMulticast = 1;
+         }
+#endif
+      }
+   }
+
+   if ( isMulticast )
+   {
+      skb->mark = SKBMARK_SET_Q(skb->mark, multiConfig.mcastPriQueue);
+   }
+}
+
+void br_mcast_set_uplink_exists(int uplinkExists)
+{
+   multiConfig.thereIsAnUplink = uplinkExists;
+}
+
+int br_mcast_get_lan2lan_snooping(t_BR_MCAST_PROTO_TYPE proto, struct net_bridge *br)
+{
+   if (!multiConfig.thereIsAnUplink)
+      {
+      return BR_MC_LAN2LAN_STATUS_ENABLE;
+      }
+#if defined(CONFIG_BR_MLD_SNOOP)
+   if ( BR_MCAST_PROTO_MLD == proto )
+      {
+         return br->mld_lan2lan_mc_enable;
+      }
+   else
+#endif
+   {
+         return br->igmp_lan2lan_mc_enable;
+      }
+   }
+
+static void br_mcast_mc_fdb_update_bydev(t_BR_MCAST_PROTO_TYPE proto, 
+                                         struct net_bridge    *br,
+                                         struct net_device    *dev,
+                                         unsigned int          flushAll)
+{
+    if(!br || !dev)
+        return;
+
+    switch ( proto ) {
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+        case BR_MCAST_PROTO_IGMP:
+            br_igmp_mc_fdb_update_bydev(br, dev, flushAll);
+            br_igmp_process_device_removal (dev);
+            break;
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+        case BR_MCAST_PROTO_MLD:
+            br_mld_mc_fdb_update_bydev(br, dev, flushAll);
+            //br_mld_process_device_removal (dev);
+            break;
+#endif
+        default:
+            break;
+    }
+    return;
+}
+
+void br_mcast_handle_netdevice_events(struct net_device *ndev, unsigned long event)
+{
+    struct net_bridge *br = NULL;
+    struct net_device *brDev = NULL;
+
+    switch (event) {
+        case NETDEV_DOWN:
+        case NETDEV_GOING_DOWN:
+        case NETDEV_CHANGE:
+            rcu_read_lock();
+            for_each_netdev_rcu(&init_net, brDev) {
+                br = netdev_priv(brDev);
+                if(brDev->priv_flags & IFF_EBRIDGE)
+                {
+                    /* snooping entries could be present even if snooping is
+                       disabled, update existing entries */
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+                    br_mcast_mc_fdb_update_bydev(BR_MCAST_PROTO_IGMP, br, ndev, 1);
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+                    br_mcast_mc_fdb_update_bydev(BR_MCAST_PROTO_MLD, br, ndev, 1);
+#endif
+                }
+            }
+            rcu_read_unlock();
+            break;
+    }
+
+    return;
+}
+
+int br_mcast_get_rep_info(struct net_device *repDev, unsigned char *repMac, uint32_t *info)
+{ 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) && defined(CONFIG_BCM_KF_WL)
+   wlan_client_info_t wlInfo = { 0 };
+   int                rc;
+   uint32_t           phyType = netdev_path_get_hw_port_type(repDev);
+#endif
+
+   *info = 0;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) && defined(CONFIG_BCM_KF_WL)
+   if( (BLOG_WLANPHY == BLOG_GET_PHYTYPE(phyType)) &&
+       (repDev->wlan_client_get_info != NULL) )
+   {
+      rc = repDev->wlan_client_get_info(repDev, repMac, multiConfig.mcastPriQueue, &wlInfo);
+      if ( rc != 0 )
+      {
+          return -1;
+      }
+      *info = wlInfo.wl;
+   }
+#endif
+
+   return 0;
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+inline void br_mcast_ipv4_to_eth(unsigned long ipv4_addr,
+                                       unsigned char *mac_addr_p)
+{
+    unsigned char *ipv4_addr8_p = (unsigned char *)(&ipv4_addr);
+
+    mac_addr_p[0] = 0x01;
+    mac_addr_p[1] = 0x00;
+    mac_addr_p[2] = 0x5E;
+    mac_addr_p[3] = ipv4_addr8_p[1] & 0x7F;
+    mac_addr_p[4] = ipv4_addr8_p[2];
+    mac_addr_p[5] = ipv4_addr8_p[3];
+}
+
+inline void br_mcast_ipv6_to_eth(unsigned char *ipv6_addr,
+                                       unsigned char *mac_addr_p)
+{
+    mac_addr_p[0] = 0x33;
+    mac_addr_p[1] = 0x33;
+    mac_addr_p[2] = ipv6_addr[12];
+    mac_addr_p[3] = ipv6_addr[13];
+    mac_addr_p[4] = ipv6_addr[14];
+    mac_addr_p[5] = ipv6_addr[15];
+}
+
+void br_mcast_blog_release(t_BR_MCAST_PROTO_TYPE proto, void *mc_fdb)
+{
+	Blog_t *blog_p = BLOG_NULL;
+	uint32_t blog_idx = BLOG_KEY_INVALID;
+	BlogTraffic_t traffic;
+
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	if(proto == BR_MCAST_PROTO_IGMP)
+	{
+		blog_idx =  ((struct net_bridge_mc_fdb_entry *)mc_fdb)->blog_idx;
+		((struct net_bridge_mc_fdb_entry *)mc_fdb)->blog_idx = 0;
+		traffic = BlogTraffic_IPV4_MCAST;
+	}
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	if(proto == BR_MCAST_PROTO_MLD)
+	{
+		blog_idx =  ((struct net_br_mld_mc_fdb_entry *)mc_fdb)->blog_idx;
+		((struct net_br_mld_mc_fdb_entry *)mc_fdb)->blog_idx = 0;
+		traffic = BlogTraffic_IPV6_MCAST;
+	}
+#endif
+
+	if(BLOG_KEY_INVALID == blog_idx)
+		return;
+
+	blog_p = blog_deactivate(blog_idx, traffic, BlogClient_fcache);
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	/* When Runner is used, we always will use the Runner's IPTV table,
+	 * and won't use software (flow-cache) acceleration */
+		hw_accelerator_client_get();
+#else /* CONFIG_BCM_KF_RUNNER && (CONFIG_BCM_RDPA) || (CONFIG_BCM_RDPA_MODULE) */
+		sw_accelerator_client_get();
+#endif /* CONFIG_BCM_KF_RUNNER && (CONFIG_BCM_RDPA) || (CONFIG_BCM_RDPA_MODULE) */
+	if ( blog_p )
+	{
+		blog_rule_free_list(blog_p);
+		blog_put(blog_p);
+	}
+
+	return;
+}
+
+static blogRuleAction_t *br_mcast_blog_find_command(blogRule_t *blogRule_p,
+                                                    blogRuleCommand_t blogRuleCommand,
+                                                    uint32_t *cmdIndex_p)
+{
+    blogRuleAction_t *action_p;
+    int i;
+
+    for(i=*cmdIndex_p; i<blogRule_p->actionCount; ++i)
+    {
+        action_p = &blogRule_p->action[i];
+        if(action_p->cmd == blogRuleCommand)
+        {
+            *cmdIndex_p = i;
+            return action_p;
+        }
+    }
+
+    return NULL;
+}
+
+static void br_mcast_blog_process_wan(blogRule_t *rule_p,
+                                     void *mc_fdb,
+                                     t_BR_MCAST_PROTO_TYPE proto,
+                                     struct net_device **wan_dev_pp,
+                                     struct net_device **wan_vlan_dev_pp)
+{
+	blogRuleAction_t ruleAction;
+	struct net_device *dev_p = NULL;
+	struct net_bridge_mc_fdb_entry *igmp_fdb = NULL;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	struct net_br_mld_mc_fdb_entry *mld_fdb = NULL;
+#endif 
+	uint8_t                *dev_addr = NULL;
+	uint32_t phyType;
+	char wan_ops;
+	uint32_t index = 0;
+
+	if(!mc_fdb)
+		return;
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	if(BR_MCAST_PROTO_MLD == proto)
+	{
+		mld_fdb  = (struct net_br_mld_mc_fdb_entry *)mc_fdb;
+		dev_p    = mld_fdb->from_dev;
+		dev_addr = mld_fdb->dst->dev->dev_addr;
+		wan_ops  = mld_fdb->type;
+	}
+	else
+#endif
+	{
+		igmp_fdb = (struct net_bridge_mc_fdb_entry *)mc_fdb;
+		dev_p    = igmp_fdb->from_dev;
+		dev_addr = igmp_fdb->dst->dev->dev_addr;
+		wan_ops  = igmp_fdb->type;
+	}
+
+	while(1)
+	{
+		if(netdev_path_is_root(dev_p))
+		{
+			*wan_dev_pp = dev_p;
+			break;
+		}
+
+		if(dev_p->priv_flags & IFF_PPP)
+		{
+			rule_p->filter.hasPppoeHeader = 1;
+			memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+			ruleAction.cmd = BLOG_RULE_CMD_POP_PPPOE_HDR;
+			blog_rule_add_action(rule_p, &ruleAction);
+
+			if ( NULL == br_mcast_blog_find_command(rule_p, BLOG_RULE_CMD_SET_MAC_DA, &index) )
+			{
+				memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+				ruleAction.cmd = BLOG_RULE_CMD_SET_MAC_DA;
+				if(igmp_fdb)
+					br_mcast_ipv4_to_eth(igmp_fdb->txGrp.s_addr, ruleAction.macAddr);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+				else
+					br_mcast_ipv6_to_eth(mld_fdb->grp.s6_addr, ruleAction.macAddr);
+#endif
+				blog_rule_add_action(rule_p, &ruleAction);
+			}
+		}
+		else if(*wan_vlan_dev_pp == NULL &&
+		        dev_p->priv_flags & IFF_BCM_VLAN)
+		{
+			*wan_vlan_dev_pp = dev_p;
+		}
+		dev_p = netdev_path_next_dev(dev_p);
+	}
+
+	/* For IPoA */
+	phyType = netdev_path_get_hw_port_type(*wan_dev_pp);
+	phyType = BLOG_GET_HW_ACT(phyType);
+	if((phyType == VC_MUX_IPOA) || (phyType == LLC_SNAP_ROUTE_IP))
+	{
+		if ( NULL == br_mcast_blog_find_command(rule_p, BLOG_RULE_CMD_SET_MAC_DA, &index) )
+		{
+			memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+			ruleAction.cmd = BLOG_RULE_CMD_SET_MAC_DA;
+			if(igmp_fdb)
+				br_mcast_ipv4_to_eth(igmp_fdb->txGrp.s_addr, ruleAction.macAddr);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+			else
+				br_mcast_ipv6_to_eth(mld_fdb->grp.s6_addr, ruleAction.macAddr);
+#endif
+			blog_rule_add_action(rule_p, &ruleAction);
+		}
+	}
+
+	if(wan_ops == MCPD_IF_TYPE_ROUTED)
+	{
+		memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+		ruleAction.cmd = BLOG_RULE_CMD_SET_MAC_SA;
+		memcpy(ruleAction.macAddr, dev_addr, ETH_ALEN);
+		blog_rule_add_action(rule_p, &ruleAction);
+
+		memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+		ruleAction.cmd = BLOG_RULE_CMD_DECR_TTL;
+		blog_rule_add_action(rule_p, &ruleAction);
+	}
+}
+
+/*
+ * br_mcast_blog_process_lan() - locate the LAN-side devices for a
+ * multicast forwarding entry.
+ *
+ * Starting from the destination port's net_device, walks the netdev
+ * path until the root (physical) device is reached.  On return:
+ *   *lan_dev_pp      - the root LAN device
+ *   *lan_vlan_dev_pp - the first BCM VLAN interface found on the path,
+ *                      if any (left untouched otherwise)
+ *
+ * If the root device is a WLAN port (CONFIG_BCM_KF_WL) and the entry's
+ * reporter info indicates non-WFD with TX HW acceleration enabled, a
+ * SET_STA_MAC_ADDRESS action carrying the single reporter's MAC is
+ * appended to rule_p.
+ */
+static void br_mcast_blog_process_lan(blogRule_t *rule_p,
+                                      void *mc_fdb,
+                                      t_BR_MCAST_PROTO_TYPE proto,
+                                      struct net_device **lan_dev_pp,
+                                      struct net_device **lan_vlan_dev_pp)
+{
+    struct net_device *dev_p = NULL;
+    struct net_bridge_mc_fdb_entry *igmp_fdb = NULL;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    struct net_br_mld_mc_fdb_entry *mld_fdb = NULL;
+#endif
+    /* NOTE(review): only referenced under CONFIG_BCM_KF_WL; may trigger an
+       unused-variable warning in other configs */
+    int phyType;
+
+    if(!mc_fdb)
+        return;
+
+    /* mc_fdb is an IGMP or an MLD entry depending on proto; start the
+       path walk at the destination port's device */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    if (BR_MCAST_PROTO_MLD == proto )
+    {
+        mld_fdb = (struct net_br_mld_mc_fdb_entry *)mc_fdb;
+        dev_p = mld_fdb->dst->dev;
+    }
+    else
+#endif
+    {
+        igmp_fdb = (struct net_bridge_mc_fdb_entry *)mc_fdb;
+        dev_p = igmp_fdb->dst->dev;
+    }
+
+    /* descend to the root device, remembering the first BCM VLAN
+       interface encountered on the way */
+    while(1)
+    {
+        if(netdev_path_is_root(dev_p))
+        {
+            *lan_dev_pp = dev_p;
+            break;
+        }
+
+        if(*lan_vlan_dev_pp == NULL &&
+           dev_p->priv_flags & IFF_BCM_VLAN)
+        {
+            *lan_vlan_dev_pp = dev_p;
+        }
+
+        dev_p = netdev_path_next_dev(dev_p);
+    }
+
+#if defined(CONFIG_BCM_KF_WL)
+    phyType = netdev_path_get_hw_port_type(dev_p);
+    if ( BLOG_GET_PHYTYPE(phyType) == BLOG_WLANPHY )
+    {
+        BlogRnr_t *blogRnr = NULL;
+        if ( igmp_fdb )
+        {
+            blogRnr = (BlogRnr_t *)&igmp_fdb->info;
+        }
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+        else
+        {
+            blogRnr = (BlogRnr_t *)&mld_fdb->info;
+        }
+#endif
+        /* only needed for non-WFD WLAN with TX hardware acceleration */
+        if ( blogRnr && (0 == blogRnr->is_wfd) && (1 == blogRnr->is_tx_hw_acc_en))
+        {
+            blogRuleAction_t ruleAction;
+            memset(&ruleAction, 0, sizeof(blogRuleAction_t));
+            ruleAction.cmd = BLOG_RULE_CMD_SET_STA_MAC_ADDRESS;
+            if(igmp_fdb)
+            {
+                /* can only be one reporter for this forwarding entry */
+                struct net_bridge_mc_rep_entry *rep;
+
+                rep = list_first_entry(&igmp_fdb->rep_list,
+                                       struct net_bridge_mc_rep_entry,
+                                       list);
+                memcpy(ruleAction.macAddr, rep->repMac, ETH_ALEN);
+                blog_rule_add_action(rule_p, &ruleAction);
+            }
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+            else
+            {
+                /* can only be one reporter for this forwarding entry */
+                struct net_br_mld_mc_rep_entry *rep;
+
+                rep = list_first_entry(&mld_fdb->rep_list,
+                                       struct net_br_mld_mc_rep_entry,
+                                       list);
+                memcpy(ruleAction.macAddr, rep->repMac, ETH_ALEN);
+                blog_rule_add_action(rule_p, &ruleAction);
+            }
+#endif
+        }
+    }
+#endif
+}
+
+/*
+ * br_mcast_vlan_notify_for_blog_update() - refresh multicast snooping
+ * state on every bridge after a VLAN configuration change on ndev.
+ *
+ * TX-direction notifications for WAN devices are ignored
+ * (CONFIG_BCM_KF_WANDEV).  Walks all netdevs in init_net under RCU and,
+ * for each bridge, re-evaluates existing IGMP/MLD entries that
+ * reference ndev.
+ */
+void br_mcast_vlan_notify_for_blog_update(struct net_device *ndev,
+                                          blogRuleVlanNotifyDirection_t direction,
+                                          uint32_t nbrOfTags)
+{
+	struct net_bridge *br = NULL;
+	struct net_device *dev = NULL;
+
+#if defined(CONFIG_BCM_KF_WANDEV)
+	if((ndev->priv_flags & IFF_WANDEV) && (direction == BLOG_RULE_VLAN_NOTIFY_DIR_TX))
+		return;
+#endif
+
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		/* netdev_priv() is only meaningful for bridge devices; br is
+		   only dereferenced inside the IFF_EBRIDGE branch below */
+		br = netdev_priv(dev);
+		if(dev->priv_flags & IFF_EBRIDGE)
+		{
+			/* snooping entries could be present even if snooping is
+			   disabled, update existing entries */
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+			br_mcast_mc_fdb_update_bydev(BR_MCAST_PROTO_IGMP, br, ndev, 0);
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+			br_mcast_mc_fdb_update_bydev(BR_MCAST_PROTO_MLD, br, ndev, 0);
+#endif
+		}
+	}
+	rcu_read_unlock();
+
+	return;
+}
+
+/*
+ * br_mcast_mc_fdb_copy() - protocol dispatcher that duplicates a
+ * multicast forwarding entry via the IGMP- or MLD-specific copy helper.
+ *
+ * Returns the new entry, or NULL on NULL input / unknown protocol /
+ * helper failure.
+ */
+static void *br_mcast_mc_fdb_copy(t_BR_MCAST_PROTO_TYPE proto,
+                                  struct net_bridge *br, 
+                                  const void *mc_fdb)
+{
+    if(!mc_fdb)
+        return NULL;
+
+    if(BR_MCAST_PROTO_IGMP == proto)
+        return br_igmp_mc_fdb_copy(br, (struct net_bridge_mc_fdb_entry *)mc_fdb);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    else if(BR_MCAST_PROTO_MLD == proto)
+        return br_mld_mc_fdb_copy(br, (struct net_br_mld_mc_fdb_entry *)mc_fdb);
+#endif
+
+    return NULL;
+}
+
+/*
+ * br_mcast_mc_fdb_del_entry() - protocol dispatcher that deletes a
+ * multicast forwarding entry via the IGMP- or MLD-specific helper.
+ * Silently ignores NULL entries and unknown protocols.
+ */
+static void br_mcast_mc_fdb_del_entry(t_BR_MCAST_PROTO_TYPE proto, 
+                                      struct net_bridge *br, 
+                                      void *mc_fdb)
+{
+    if(!mc_fdb)
+        return;
+
+    if(BR_MCAST_PROTO_IGMP == proto)
+        br_igmp_mc_fdb_del_entry(br, (struct net_bridge_mc_fdb_entry *)mc_fdb, NULL, NULL);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    else if(BR_MCAST_PROTO_MLD == proto)
+        br_mld_mc_fdb_del_entry(br, (struct net_br_mld_mc_fdb_entry *)mc_fdb, NULL, NULL);
+#endif
+
+    return;
+} /* br_mcast_mc_fdb_del_entry */
+
+/*
+ * br_mcast_blog_link_devices() - link every virtual device on the RX and
+ * TX device paths (and, for routed WAN traffic, the bridge device) to
+ * blog_p so per-device multicast statistics are maintained.
+ *
+ * 'delta' is the byte adjustment applied to each device's stats: header
+ * lengths (PPPoE/VLAN) are accumulated while replaying the RX path from
+ * the root outwards and subtracted again while walking the TX path.
+ */
+static void br_mcast_blog_link_devices(Blog_t *blog_p, struct net_device *rxDev, 
+                                       struct net_device *txDev, int wanType )
+{
+    struct net_device *dev_p;
+    uint32_t delta;
+    struct net_device *rxPath[MAX_VIRT_DEV];
+    struct net_bridge_port *port;
+    int rxPathIdx = 0;
+    int i;
+
+    /* save rx path required for reverse path traversal for delta calc */
+    memset(&rxPath[0], 0, (MAX_VIRT_DEV * sizeof(struct net_device *)));
+    dev_p = rxDev;
+    while(1)
+    {
+        if(netdev_path_is_root(dev_p))
+        {
+            break;
+        }
+        rxPath[rxPathIdx] = dev_p;
+        rxPathIdx++;
+        dev_p = netdev_path_next_dev(dev_p);
+    }
+
+    /* omit Ethernet header from virtual dev RX stats */
+    delta = BLOG_ETH_HDR_LEN;
+
+    /* walk the saved RX path from the innermost (closest to root) device
+       outwards, growing delta by each encapsulation's header length */
+    for(i = (MAX_VIRT_DEV-1); i >= 0; i--)
+    {
+        if(NULL == rxPath[i])
+        {
+            continue;
+        }
+
+        if ( rxPath[i]->priv_flags & IFF_PPP )
+        {
+            delta += BLOG_PPPOE_HDR_LEN;
+        }
+
+        if ( rxPath[i]->priv_flags & IFF_802_1Q_VLAN )
+        {
+            delta += BLOG_VLAN_HDR_LEN;
+        }
+
+        if ( (rxPath[i]->priv_flags & IFF_BCM_VLAN) && 
+             (blog_p->vtag_num > 0) )
+        {
+            delta += BLOG_VLAN_HDR_LEN;
+        }
+
+        blog_lock();
+        blog_link(IF_DEVICE_MCAST, blog_p, rxPath[i], DIR_RX, delta);
+        blog_unlock();
+        /* NOTE(review): dev_p already sits at the root here and its value
+           is unused until reassigned below -- this advance looks like a
+           leftover from a previous loop structure; confirm and remove */
+        dev_p = netdev_path_next_dev(dev_p);
+    }
+
+    /* include Ethernet header in virtual TX stats */
+    delta -= BLOG_ETH_HDR_LEN;
+
+    if ( (wanType == MCPD_IF_TYPE_ROUTED) && br_port_get_rcu(txDev) )
+    {
+       /* routed packets will come through br_dev_xmit, link bridge
+          device with blog */
+        port = br_port_get_rcu(txDev);
+        blog_lock();
+        blog_link(IF_DEVICE_MCAST, blog_p, port->br->dev, DIR_TX, delta );
+        blog_unlock();
+    }
+
+    /* walk the TX path towards the root, shrinking delta for each VLAN
+       encapsulation shed along the way */
+    dev_p = txDev;
+    while(1)
+    {
+        if(netdev_path_is_root(dev_p))
+        {
+            break;
+        }
+
+        if ( dev_p->priv_flags & IFF_802_1Q_VLAN )
+        {
+            delta -= BLOG_VLAN_HDR_LEN;
+        }
+
+        if ( dev_p->priv_flags & IFF_BCM_VLAN )
+        {
+            delta -= BLOG_VLAN_HDR_LEN;
+        }
+
+        blog_lock();
+        blog_link(IF_DEVICE_MCAST, blog_p, dev_p, DIR_TX, delta);
+        blog_unlock();
+        dev_p = netdev_path_next_dev(dev_p);
+    }
+}
+
+
+/*
+ * br_mcast_vlan_process() - turn a blog carrying a chain of VLAN blog
+ * rules into one activated flow-cache flow per rule.
+ *
+ * For each rule attached to blog_p:
+ *  - rules whose IPv4 protocol / IPv6 next-header filter does not match
+ *    the blog's key protocol are freed and skipped;
+ *  - the first surviving rule reuses mc_fdb directly, every later rule
+ *    gets its own copy of the fdb entry and of the blog;
+ *  - VLAN tag count and TCI values from the rule filter are folded into
+ *    the new blog, devices are linked, and the blog is activated.
+ *
+ * Consumes blog_p: it (and any rules still attached after an error) is
+ * always freed before returning.  Returns the number of flows that were
+ * successfully activated (0 on bad arguments or total failure).
+ */
+static int br_mcast_vlan_process(struct net_bridge     *br,
+                           void                  *mc_fdb,
+                           t_BR_MCAST_PROTO_TYPE  proto,
+                           Blog_t                *blog_p)
+{
+    Blog_t           *new_blog_p;
+    void             *new_mc_fdb = NULL;
+    blogRule_t       *rule_p = NULL;
+    int               firstRule = 1;
+    uint32_t          vid = 0;
+    blogRuleFilter_t *rule_filter = NULL;
+    BlogTraffic_t     traffic;
+    int               activates = 0;
+    void             *rxDev;
+    void             *txDev;
+    int               wanType;
+    uint32_t          blog_key;
+
+    if(!mc_fdb || !blog_p || !br)
+        return 0;
+
+    if(!((BR_MCAST_PROTO_IGMP == proto) || (BR_MCAST_PROTO_MLD == proto)))
+        return 0;
+
+    firstRule = 1;
+    rule_p = (blogRule_t *)blog_p->blogRule_p;
+    while( rule_p )
+    {
+        blogRuleFilter_t *filter_p;
+
+        filter_p = &rule_p->filter;
+        /* if there is a rule that specifies a protocol filter that does not match
+           blog key protocol skip it */
+        if(blog_rule_filterInUse(filter_p->ipv4.mask.ip_proto))
+        {
+            if(filter_p->ipv4.mask.ip_proto & BLOG_RULE_IP_PROTO_MASK)
+            {
+                /* NOTE(review): shadows the function's 'proto' parameter */
+                uint8_t proto;
+
+                proto = filter_p->ipv4.value.ip_proto >> BLOG_RULE_IP_PROTO_SHIFT;
+                if (proto != blog_p->key.protocol)
+                {
+                    /* skip this rule */
+                    blog_p->blogRule_p = rule_p->next_p;
+                    blog_rule_free(rule_p);
+                    rule_p = blog_p->blogRule_p;
+                    continue;
+                }
+            }
+        }
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+        if(blog_rule_filterInUse(filter_p->ipv6.mask.nxtHdr))
+        {
+            if(filter_p->ipv6.mask.nxtHdr & BLOG_RULE_IP6_NXT_HDR_MASK)
+            {
+                uint8_t nxtHdr;
+
+                nxtHdr = filter_p->ipv6.value.nxtHdr >> BLOG_RULE_IP6_NXT_HDR_SHIFT;
+                if (nxtHdr != blog_p->key.protocol)
+                {
+                    /* skip this rule */
+                    blog_p->blogRule_p = rule_p->next_p;
+                    blog_rule_free(rule_p);
+                    rule_p = blog_p->blogRule_p;
+                    continue;
+                }
+            }
+        }
+#endif
+
+        /* create new fdb entry unless this is the first rule. For the
+           first rule use the fdb entry that was passed in */
+        if ( 1 == firstRule )
+        {
+            firstRule  = 0;
+            new_mc_fdb = mc_fdb;
+        }
+        else
+        {
+            new_mc_fdb = br_mcast_mc_fdb_copy(proto, br , mc_fdb);
+            if(!new_mc_fdb)
+            {
+                printk(KERN_WARNING "%s new_mc_fdb allocation failed\n",__FUNCTION__);
+                break;
+            }
+        }
+
+        /* get a new blog and copy original blog */
+        new_blog_p = blog_get();
+        if (new_blog_p == BLOG_NULL) 
+        {
+            /* only free the fdb copy we made; the caller's entry stays */
+            if (new_mc_fdb != mc_fdb) 
+            {
+                br_mcast_mc_fdb_del_entry(proto, br, new_mc_fdb);
+            }
+            break;
+        }
+        blog_copy(new_blog_p, blog_p);
+
+        /* pop the rule off the original blog now that a new fdb and blog have been
+           allocated. This is to ensure that all rules are freed in case of error */
+        blog_p->blogRule_p = rule_p->next_p;
+        rule_p->next_p = NULL;
+            new_blog_p->blogRule_p = rule_p;
+
+        /* derive VLAN tag count and per-tag VIDs from the rule's filter;
+           a VID of 0 is encoded as 0xFFFF (wildcard) */
+        rule_filter = &(((blogRule_t *)new_blog_p->blogRule_p)->filter);
+        new_blog_p->vtag_num = rule_filter->nbrOfVlanTags;
+        vid = ((rule_filter->vlan[0].value.h_vlan_TCI &
+                rule_filter->vlan[0].mask.h_vlan_TCI) & 0xFFF);
+        new_blog_p->vid = vid ? vid : 0xFFFF; 
+        vid = ((rule_filter->vlan[1].value.h_vlan_TCI &
+                rule_filter->vlan[1].mask.h_vlan_TCI) & 0xFFF);
+        new_blog_p->vid |= vid ? (vid << 16) : 0xFFFF0000;
+
+        blog_lock();
+        blog_link(MCAST_FDB, new_blog_p, (void *)new_mc_fdb, 0, 0);
+        blog_unlock();
+
+        /* mirror the blog's VLAN info into the fdb entry and collect the
+           devices needed for device linking below */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+        if(BR_MCAST_PROTO_MLD == proto)
+        {
+            traffic = BlogTraffic_IPV6_MCAST;
+            ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->wan_tci = new_blog_p->vid;
+            ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->num_tags = new_blog_p->vtag_num;
+            rxDev   = ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->from_dev;
+            txDev   = ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->dst->dev;
+            wanType = ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->type;
+        }
+        else//IGMP
+#endif
+        {
+            traffic = BlogTraffic_IPV4_MCAST;
+            ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->wan_tci = new_blog_p->vid;
+            ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->num_tags = new_blog_p->vtag_num;
+            rxDev   = ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->from_dev;
+            txDev   = ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->dst->dev;
+            wanType = ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->type;
+        }
+
+        br_mcast_blog_link_devices(new_blog_p, rxDev, txDev, wanType);
+
+        blog_key = blog_activate(new_blog_p, traffic, BlogClient_fcache);
+        if ( blog_key == BLOG_KEY_INVALID )
+        {
+            /* activation failed: undo this iteration's allocations */
+            blog_rule_free_list(new_blog_p);
+            blog_put(new_blog_p);
+            if ( new_mc_fdb != mc_fdb )
+            {
+               br_mcast_mc_fdb_del_entry(proto, br, new_mc_fdb);
+            }
+        }
+        else
+        {
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+            if(BR_MCAST_PROTO_MLD == proto)
+            {
+                ((struct net_br_mld_mc_fdb_entry *)new_mc_fdb)->blog_idx = blog_key;
+            }
+            else
+#endif
+            {
+                ((struct net_bridge_mc_fdb_entry *)new_mc_fdb)->blog_idx = blog_key;
+            }
+#endif /* (CONFIG_BCM_RDPA) || (CONFIG_BCM_RDPA_MODULE) */
+#endif /* CONFIG_BCM_KF_RUNNER */
+            activates++;
+        }
+
+        /* advance to the next rule */
+        rule_p = blog_p->blogRule_p;
+    }
+
+    /* Free blog. The blog will only have rules if there was an error */
+    blog_rule_free_list(blog_p);
+    blog_put(blog_p);
+
+    return activates;
+} /* br_mcast_vlan_process */
+
+
+/*
+ * br_mcast_blog_process() - build a blog (with an initial blog rule) for
+ * a multicast forwarding entry and hand it to br_mcast_vlan_process()
+ * for per-VLAN-rule activation.
+ *
+ * Populates the blog's RX side from the WAN (or bridge, for LAN2LAN)
+ * device and its TX side from the LAN device, copies the IPv4/IPv6
+ * tuple out of the fdb entry, then optionally expands the rule chain
+ * via the external blogRuleVlanHook.
+ *
+ * Returns 0 when at least one flow was activated, -1 otherwise.
+ */
+int br_mcast_blog_process(struct net_bridge *br,
+                            void            *mc_fdb,
+                            t_BR_MCAST_PROTO_TYPE proto)
+{
+	Blog_t *blog_p = BLOG_NULL;
+	blogRule_t *rule_p = NULL;
+	struct net_device *wan_vlan_dev_p = NULL;
+	struct net_device *lan_vlan_dev_p = NULL;
+	struct net_device *wan_dev_p = NULL;
+	struct net_device *lan_dev_p = NULL;
+	struct net_bridge_mc_fdb_entry *igmp_fdb = NULL;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	struct net_br_mld_mc_fdb_entry *mld_fdb = NULL;
+#endif
+	struct net_device *from_dev = NULL;
+	uint32_t phyType;
+	int numActivates;
+
+	if(!mc_fdb)
+		return -1;
+
+	/* mc_fdb is an IGMP or an MLD entry depending on proto */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	if(BR_MCAST_PROTO_MLD == proto)
+	{
+		mld_fdb = (struct net_br_mld_mc_fdb_entry *)mc_fdb;
+		from_dev = mld_fdb->from_dev;
+	}
+	else
+#endif
+	{
+		igmp_fdb = (struct net_bridge_mc_fdb_entry *)mc_fdb;
+		from_dev = igmp_fdb->from_dev;
+	}
+
+	/* allocate blog */
+	blog_p = blog_get();
+	if(blog_p == BLOG_NULL) {
+		printk(KERN_WARNING "%s blog_p allocation failed\n",__FUNCTION__);
+		return -1;
+	}
+
+	/* allocate blog rule */
+	rule_p = blog_rule_alloc();
+	if(rule_p == NULL)
+	{
+		printk(KERN_WARNING "%s blog_rule allocation failed\n",__FUNCTION__);
+		blog_put(blog_p);
+		return -1;
+	}
+
+	blog_rule_init(rule_p);
+	blog_p->blogRule_p = (void *)rule_p;
+
+	/* find LAN devices */
+	br_mcast_blog_process_lan(rule_p, mc_fdb, proto, &lan_dev_p, &lan_vlan_dev_p);
+
+	/* for LAN2LAN don't do anything */
+	if(br->dev == from_dev) 
+	{
+		/* traffic enters on the bridge itself: no physical WAN header */
+		blog_p->rx.info.phyHdr = 0;
+		blog_p->rx.info.channel = 0xFF; /* for lan2lan mcast */
+		blog_p->rx.info.bmap.BCM_SWC = 1;
+		wan_dev_p = from_dev;
+	}
+	else
+	{
+		/* find WAN devices */
+		br_mcast_blog_process_wan(rule_p, mc_fdb, proto,
+		                          &wan_dev_p, &wan_vlan_dev_p);
+      
+		phyType = netdev_path_get_hw_port_type(wan_dev_p);
+		blog_p->rx.info.phyHdrType = BLOG_GET_PHYTYPE(phyType);
+		blog_p->rx.info.phyHdrLen = BLOG_GET_PHYLEN(phyType);
+		phyType = BLOG_GET_HW_ACT(phyType);
+
+		if(blog_p->rx.info.phyHdrType == BLOG_GPONPHY)
+		{
+			unsigned int hw_subport_mcast_idx;
+
+			hw_subport_mcast_idx = netdev_path_get_hw_subport_mcast_idx(wan_dev_p);
+
+			if(hw_subport_mcast_idx < CONFIG_BCM_MAX_GEM_PORTS)
+			{
+				blog_p->rx.info.channel = hw_subport_mcast_idx;
+			}
+			else
+			{
+				/* Not a GPON Multicast WAN device */
+				blog_rule_free_list(blog_p);
+				blog_put(blog_p);
+				return -1;
+			}
+		}
+		else /* Ethernet or DSL WAN device */
+		{
+			blog_p->rx.info.channel = netdev_path_get_hw_port(wan_dev_p);
+		}
+
+		/* XTM encapsulations that carry no Ethernet header on the wire
+		   need one inserted on the LAN side */
+		if( (blog_p->rx.info.phyHdrType == BLOG_XTMPHY) &&
+		    ((phyType == VC_MUX_PPPOA) ||
+		     (phyType == VC_MUX_IPOA) ||
+		     (phyType == LLC_SNAP_ROUTE_IP) ||
+		     (phyType == LLC_ENCAPS_PPP)) )
+		{
+			blog_p->insert_eth = 1;
+		}
+
+		/* PPPoA encapsulations additionally shed the PPP header */
+		if( (blog_p->rx.info.phyHdrType == BLOG_XTMPHY) &&
+		    ((phyType == VC_MUX_PPPOA) ||
+		     (phyType == LLC_ENCAPS_PPP)) )
+		{
+			blog_p->pop_pppoa = 1;
+		}
+
+		if(blog_p->rx.info.phyHdrType == BLOG_ENETPHY)
+		{
+			blog_p->rx.info.bmap.BCM_SWC = 1;
+		}
+		else
+		{
+			blog_p->rx.info.bmap.BCM_XPHY = 1;
+		}
+	}
+
+	/* NOTE(review): this guard uses CONFIG_MIPS_BRCM while every other
+	   MLD block in this file uses CONFIG_BCM_KF_MLD && CONFIG_BR_MLD_SNOOP
+	   -- confirm this difference is intentional */
+#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BR_MLD_SNOOP)
+	if ((mld_fdb) && (mld_fdb->lanppp)) {
+		blog_p->has_pppoe = 1;
+	}
+	else
+#endif
+	if ((igmp_fdb) && (igmp_fdb->lanppp)) {
+		blog_p->has_pppoe = 1;
+	}
+
+	blog_p->tx.info.bmap.BCM_SWC = 1;
+
+	blog_p->key.l1_tuple.phy = blog_p->rx.info.phyHdr;
+	blog_p->key.l1_tuple.channel = blog_p->rx.info.channel;
+	blog_p->key.protocol = BLOG_IPPROTO_UDP;
+
+	phyType = netdev_path_get_hw_port_type(lan_dev_p);
+	blog_p->tx.info.phyHdrType = BLOG_GET_PHYTYPE(phyType);
+	blog_p->tx.info.phyHdrLen = BLOG_GET_PHYLEN(phyType);
+	blog_p->tx.info.channel = netdev_path_get_hw_port(lan_dev_p);
+
+	/* copy the multicast tuple (source/group addresses etc.) from the
+	   fdb entry into the blog */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	if(BR_MCAST_PROTO_MLD == proto)
+	{
+		BCM_IN6_ASSIGN_ADDR(&blog_p->tupleV6.saddr, &mld_fdb->src_entry.src);
+		BCM_IN6_ASSIGN_ADDR(&blog_p->tupleV6.daddr, &mld_fdb->grp);
+		blog_p->rx.info.bmap.PLD_IPv6 = 1;
+		blog_p->tx.info.bmap.PLD_IPv6 = 1;
+		blog_p->wl = mld_fdb->info;
+	}
+	else
+#endif
+	{
+		blog_p->rx.tuple.saddr = igmp_fdb->src_entry.src.s_addr;
+		blog_p->rx.tuple.daddr = igmp_fdb->rxGrp.s_addr;
+		blog_p->tx.tuple.saddr = igmp_fdb->src_entry.src.s_addr;
+		blog_p->tx.tuple.daddr = igmp_fdb->txGrp.s_addr;
+		blog_p->rx.tuple.port.dest = 0;
+		if (igmp_fdb->excludePort != -1) {
+			blog_p->rx.tuple.port.dest = igmp_fdb->excludePort;
+		}
+
+		blog_p->rtp_seq_chk = igmp_fdb->enRtpSeqCheck;
+		blog_p->rx.info.bmap.PLD_IPv4 = 1;
+		blog_p->tx.info.bmap.PLD_IPv4 = 1;
+		blog_p->wl = igmp_fdb->info;
+	}
+
+	blog_p->rx.dev_p = wan_dev_p;
+	blog_p->rx.multicast = 1;
+	blog_p->tx.dev_p = lan_dev_p;
+
+	if ( multiConfig.mcastPriQueue != -1 )
+	{
+		blog_p->mark = SKBMARK_SET_Q(blog_p->mark, multiConfig.mcastPriQueue);
+	}
+
+	/* add vlan blog rules, if any vlan interfaces were found */
+	if(blogRuleVlanHook && (wan_vlan_dev_p || lan_vlan_dev_p)) {
+		if(blogRuleVlanHook(blog_p, wan_vlan_dev_p, lan_vlan_dev_p) < 0) {
+			printk(KERN_WARNING "Error while processing VLAN blog rules\n");
+			blog_rule_free_list(blog_p);
+			blog_put(blog_p);
+			return -1;
+		}
+	}
+
+	/* blog must have at least one rule */
+	if (NULL == blog_p->blogRule_p)
+	{
+		/* blogRule_p == NULL may be valid if there are no 
+		   VLAN rules and the default behavior for either interface is DROP */
+//		printk(KERN_WARNING "Error while processing VLAN blog rules\n")
+		blog_put(blog_p);
+		return -1;
+	}
+
+	/* br_mcast_vlan_process() consumes blog_p (frees it on all paths) */
+	numActivates = br_mcast_vlan_process(br, mc_fdb, proto, blog_p);
+	if ( 0 == numActivates )
+	{
+		return - 1;
+	}
+
+	return 0;
+} /* br_mcast_blog_process */
+
+#if defined(CONFIG_BCM_KF_WL)
+/*
+ * wlan_client_disconnect_notifier() - called by the WLAN driver when a
+ * station disconnects; wipes that reporter's IGMP (and, when built in,
+ * MLD) state from the bridge the device is attached to.
+ *
+ * Always returns 0; silently ignores NULL arguments or a device that is
+ * not a bridge port.
+ */
+int wlan_client_disconnect_notifier(struct net_device *dev, char *mac)
+{
+	if(dev && mac)
+	{
+		struct net_bridge_port *p;
+		struct net_bridge *br;
+
+		p = br_port_get_rcu(dev);
+		if ( p )
+		{
+			br = p->br;
+#if 0
+			printk("%s:%d %02x%02x%02x%02x%02x%02x client disconnected fromm net:%s",
+			       __FUNCTION__, __LINE__, 
+			        mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], 
+			       dev->name );
+#endif
+			br_igmp_wipe_reporter_by_mac(br, mac);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+			br_mld_wipe_reporter_by_mac(br, mac);
+#endif
+		}
+	}
+	return 0;
+
+}
+EXPORT_SYMBOL(wlan_client_disconnect_notifier);
+#endif
+
+#endif /* CONFIG_BLOG */
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+/*
+ * br_get_device_by_index() - resolve a bridge name and port number to
+ * the port's net_device.
+ *
+ * Returns NULL if the name does not resolve or is not a bridge, or if
+ * the port does not exist.
+ *
+ * NOTE(review): the returned pointer is handed out after dev_put() and
+ * rcu_read_unlock(), so no reference is held on it -- callers must not
+ * assume it remains valid; confirm the intended lifetime rules.
+ */
+struct net_device *br_get_device_by_index(char *brname,char index) {
+	struct net_bridge *br = NULL;
+	struct net_bridge_port *br_port = NULL;
+	struct net_device *dev = dev_get_by_name(&init_net,brname); 
+	struct net_device *prtDev = NULL;
+
+	if(!dev)
+		return NULL;
+
+	if (0 == (dev->priv_flags & IFF_EBRIDGE))
+	{
+		printk("%s: invalid bridge name specified %s\n", 
+		         __FUNCTION__, brname);
+		dev_put(dev);
+		return NULL;
+	}
+	br = netdev_priv(dev);
+
+	rcu_read_lock();
+	br_port = br_get_port(br, index);
+	if ( br_port )
+	{
+		prtDev = br_port->dev;
+	}
+	rcu_read_unlock();
+	dev_put(dev);
+	return prtDev;
+}
+
+/* notifier chain used to publish MLD snooping events (add/del/flush)
+   to interested drivers; raw notifiers provide no locking of their own,
+   so registration and chain calls rely on the callers' serialization */
+static RAW_NOTIFIER_HEAD(mcast_snooping_chain);
+
+/* register a callback for MLD snooping events */
+int register_mcast_snooping_notifier(struct notifier_block *nb) {
+	return raw_notifier_chain_register(&mcast_snooping_chain,nb);
+}
+
+/* remove a previously registered snooping-event callback */
+int unregister_mcast_snooping_notifier(struct notifier_block *nb) {
+	return raw_notifier_chain_unregister(&mcast_snooping_chain,nb);
+}
+
+/*
+ * mcast_snooping_call_chain() - deliver an MLD snooping event (v is a
+ * t_MCPD_MLD_SNOOP_ENTRY) to the notifier chain, but only when the
+ * affected bridge port is a WLAN ("wl*") device.
+ *
+ * Returns the notifier chain result, or 1 when the event is not for a
+ * WLAN port (or the port could not be resolved).
+ */
+int mcast_snooping_call_chain(unsigned long val,void *v) 
+{
+	t_MCPD_MLD_SNOOP_ENTRY *snoopEntry=(t_MCPD_MLD_SNOOP_ENTRY *)v;
+	struct net_device *device=br_get_device_by_index(snoopEntry->br_name,snoopEntry->port_no);
+	/* br_get_device_by_index() returns NULL for an unknown bridge name
+	   or port number; guard against dereferencing it */
+	if(device && !strncmp(device->name,"wl",2))
+		return raw_notifier_call_chain(&mcast_snooping_chain,val,v);
+	else 
+		return 1;
+}
+
+
+/*
+ * br_mcast_wl_flush() - emit a SNOOPING_FLUSH_ENTRY_ALL event for every
+ * WLAN ("wl*") port of the given bridge, clearing all snooping state
+ * held by the registered listeners for those ports.
+ */
+void br_mcast_wl_flush(struct net_bridge *br) {
+	t_MCPD_MLD_SNOOP_ENTRY snoopEntry;
+	struct net_bridge_port *p;
+
+	rcu_read_lock();   
+	list_for_each_entry_rcu(p, &br->port_list, list) {
+		if(!strncmp(p->dev->name,"wl",2)){
+			snoopEntry.port_no= p->port_no;
+			memcpy(snoopEntry.br_name,br->dev->name,IFNAMSIZ);
+			mcast_snooping_call_chain(SNOOPING_FLUSH_ENTRY_ALL,(void *)&snoopEntry);
+		}
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * br_mld_wl_del_entry() - emit a SNOOPING_FLUSH_ENTRY event for a single
+ * MLD forwarding entry whose destination port is a WLAN ("wl*") device;
+ * no-op for NULL entries or non-WLAN ports.
+ */
+void br_mld_wl_del_entry(struct net_bridge *br,struct net_br_mld_mc_fdb_entry *dst) {
+	if(dst && (!strncmp(dst->dst->dev->name,"wl",2))) { 
+		t_MCPD_MLD_SNOOP_ENTRY snoopEntry;
+		snoopEntry.port_no=dst->dst->port_no;
+		memcpy(snoopEntry.br_name,br->dev->name,IFNAMSIZ);
+		memcpy(&snoopEntry.grp,&dst->grp,sizeof(struct in6_addr));
+		mcast_snooping_call_chain(SNOOPING_FLUSH_ENTRY,(void *)&snoopEntry);
+	} 
+
+}
+EXPORT_SYMBOL(unregister_mcast_snooping_notifier);
+EXPORT_SYMBOL(register_mcast_snooping_notifier);
+EXPORT_SYMBOL(br_get_device_by_index);
+#endif /* defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP) */
+
+#endif /* defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD) */
diff --git a/net/bridge/br_mcast.h b/net/bridge/br_mcast.h
new file mode 100644
index 0000000000000000000000000000000000000000..e13292598f24eb0d3b72e308480e7461d4a6d37c
--- /dev/null
+++ b/net/bridge/br_mcast.h
@@ -0,0 +1,146 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef _BR_MCAST_H
+#define _BR_MCAST_H
+
+/* Shared declarations for bridge multicast (IGMP/MLD) snooping support;
+   only compiled in when at least one snooping flavour is configured. */
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/igmp.h>
+#include <linux/in.h>
+#include "br_private.h"
+#include <linux/if_vlan.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#include <linux/blog_rule.h>
+#endif
+
+/* WAN interface forwarding type as reported by mcpd */
+#define MCPD_IF_TYPE_UNKWN      0
+#define MCPD_IF_TYPE_BRIDGED    1
+#define MCPD_IF_TYPE_ROUTED     2
+
+/* multicast snooping protocol selector used by the common br_mcast_*
+   dispatchers */
+typedef enum br_mcast_proto_type {
+    BR_MCAST_PROTO_NONE,
+    BR_MCAST_PROTO_IGMP,
+    BR_MCAST_PROTO_MLD
+} t_BR_MCAST_PROTO_TYPE;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+/* release blog state attached to a forwarding entry */
+void br_mcast_blog_release(t_BR_MCAST_PROTO_TYPE proto, void *mc_fdb);
+/* refresh snooping entries after a VLAN change on ndev */
+void br_mcast_vlan_notify_for_blog_update(struct net_device *ndev,
+                                   blogRuleVlanNotifyDirection_t direction,
+                                   uint32_t nbrOfTags);
+/* build and activate blog flow(s) for a forwarding entry;
+   returns 0 on success, -1 on failure */
+int br_mcast_blog_process(struct net_bridge *br,
+                            void *mc_fdb,
+                            t_BR_MCAST_PROTO_TYPE proto);
+#endif
+int br_mcast_get_rep_info(struct net_device *repDev, unsigned char *repMac, uint32_t *info);
+void br_mcast_handle_netdevice_events(struct net_device *ndev, unsigned long event);
+
+/* proxy operating modes */
+#define PROXY_DISABLED_MODE 0
+#define PROXY_ENABLED_MODE 1
+
+/* snooping operating modes */
+#define SNOOPING_DISABLED_MODE 0
+#define SNOOPING_ENABLED_MODE 1
+#define SNOOPING_BLOCKING_MODE 2
+
+/* LAN-to-LAN snooping configuration */
+typedef enum br_mcast_l2l_snoop_mode {
+    BR_MCAST_L2L_SNOOP_DISABLED,
+    BR_MCAST_L2L_SNOOP_ENABLED,
+    BR_MCAST_L2L_SNOOP_ENABLED_LAN
+} t_BR_MCAST_L2L_SNOOP_MODE;
+
+#define MCPD_MAX_IFS            10
+/* per-WAN-interface info supplied by mcpd */
+typedef struct mcpd_wan_info
+{
+	char                      if_name[IFNAMSIZ];
+	int                       if_ops;
+} t_MCPD_WAN_INFO;
+
+/* IGMP snooping entry as exchanged with mcpd (userspace) */
+typedef struct mcpd_igmp_snoop_entry
+{
+	char                      br_name[IFNAMSIZ];
+	/* Internal, ignore endianness */
+	unsigned short            port_no;
+	unsigned int              mode;
+	unsigned int              code;
+	unsigned short            tci;/* vlan id */
+	t_MCPD_WAN_INFO           wan_info[MCPD_MAX_IFS];
+	int                       lanppp;
+	int                       excludePort;  
+	char                      enRtpSeqCheck;
+	/* Standard, use big endian */
+	struct                    in_addr rxGrp;
+	struct                    in_addr txGrp;
+	struct                    in_addr src;
+	struct                    in_addr rep;
+	unsigned char             repMac[6];
+} t_MCPD_IGMP_SNOOP_ENTRY;
+
+/* global multicast configuration knobs */
+typedef struct mcastCfg {
+	int          mcastPriQueue;
+	int          thereIsAnUplink;
+} t_MCAST_CFG;
+
+void br_mcast_set_pri_queue(int val);
+int  br_mcast_get_pri_queue(void);
+void br_mcast_set_skb_mark_queue(struct sk_buff *skb);
+void br_mcast_set_uplink_exists(int uplinkExists);
+int  br_mcast_get_lan2lan_snooping(t_BR_MCAST_PROTO_TYPE proto, struct net_bridge *br);
+
+#if defined(CONFIG_BR_MLD_SNOOP) && defined(CONFIG_BCM_KF_MLD)
+/* MLD snooping notifier-chain event codes */
+#define SNOOPING_ADD_ENTRY 0
+#define SNOOPING_DEL_ENTRY 1
+#define SNOOPING_FLUSH_ENTRY 2
+#define SNOOPING_FLUSH_ENTRY_ALL 3
+int mcast_snooping_call_chain(unsigned long val,void *v);
+void br_mcast_wl_flush(struct net_bridge *br) ;
+
+/* MLD snooping entry as exchanged with mcpd (userspace) */
+typedef struct mcpd_mld_snoop_entry
+{
+	char                      br_name[IFNAMSIZ];
+	/* Internal, ignore endianness */
+	unsigned short            port_no;
+	unsigned int              mode;
+	unsigned int              code;
+	unsigned short            tci;
+	t_MCPD_WAN_INFO           wan_info[MCPD_MAX_IFS];
+	int                       lanppp;
+	/* External, use big endian */
+	struct                    in6_addr grp;
+ 	struct                    in6_addr src;
+	struct                    in6_addr rep;
+	unsigned char             repMac[6];
+} t_MCPD_MLD_SNOOP_ENTRY;
+#endif
+
+#endif /* (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)) */
+
+#endif /* _BR_MCAST_H */
diff --git a/net/bridge/br_mld.c b/net/bridge/br_mld.c
new file mode 100644
index 0000000000000000000000000000000000000000..788e48b2fdc058ba488969aab84879505bfbab2e
--- /dev/null
+++ b/net/bridge/br_mld.c
@@ -0,0 +1,1140 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <asm/atomic.h>
+#include <linux/ip.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/module.h>
+#include "br_private.h"
+#include "br_mld.h"
+#include <linux/if_vlan.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#include <linux/blog_rule.h>
+#endif
+#include "br_mcast.h"
+
+void br_mld_get_ip_icmp_hdrs(const struct sk_buff *pskb, struct ipv6hdr **ppipv6mcast, struct icmp6hdr **ppicmpv6, int *lanppp)
+{
+	struct ipv6hdr *pipv6 = NULL;
+	struct icmp6hdr *picmp = NULL;
+	struct pppoe_hdr *pppoe = NULL;
+	const unsigned char *dest = eth_hdr(pskb)->h_dest;
+
+	if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_IPV6) )
+	{
+		pipv6 = (struct ipv6hdr *)skb_network_header(pskb);
+	}
+	else if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_PPP_SES) )
+	{
+		pppoe = (struct pppoe_hdr *)skb_network_header(pskb);
+		if ( pppoe->tag[0].tag_type == htons(PPP_IPV6))
+		{
+			pipv6 = (struct ipv6hdr *)(skb_network_header(pskb) + PPPOE_SES_HLEN);
+		}
+	}
+	else if ( vlan_eth_hdr(pskb)->h_vlan_proto == htons(ETH_P_8021Q) )
+	{
+		if ( vlan_eth_hdr(pskb)->h_vlan_encapsulated_proto == htons(ETH_P_IPV6) )
+		{
+			pipv6 = (struct ipv6hdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr));
+		}
+		else if ( vlan_eth_hdr(pskb)->h_vlan_encapsulated_proto == htons(ETH_P_PPP_SES) )
+		{
+			pppoe = (struct pppoe_hdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr));
+			if ( pppoe->tag[0].tag_type == htons(PPP_IPV6))
+			{
+				pipv6 = (struct ipv6hdr *)(skb_network_header(pskb) + sizeof(struct vlan_hdr) + PPPOE_SES_HLEN);
+			}
+		}
+	}
+
+	*ppipv6mcast = NULL;
+	*ppicmpv6 = NULL;
+	if ( pipv6 != NULL )
+	{
+		if ( pppoe != NULL )
+		{
+			/* MAC will be unicast so check IP */
+			if (pipv6 && (pipv6->daddr.s6_addr[0] == 0xFF))
+			{
+				u8 *nextHdr = (u8 *)((u8*)pipv6 + sizeof(struct ipv6hdr));
+				if ( (pipv6->nexthdr == IPPROTO_HOPOPTS) &&
+				     (*nextHdr == IPPROTO_ICMPV6) )
+				{
+					/* skip past hop by hop hdr */
+					picmp =  (struct icmp6hdr *)(nextHdr + 8);
+				}
+				*ppipv6mcast = pipv6;
+				*ppicmpv6 = picmp;
+				if ( lanppp != NULL )
+				{
+					*lanppp = 1;
+				}
+			}
+		}
+		else
+		{
+			if ((BR_MLD_MULTICAST_MAC_PREFIX == dest[0]) && 
+			    (BR_MLD_MULTICAST_MAC_PREFIX == dest[1]))
+			{
+				u8 *nextHdr = (u8 *)((u8*)pipv6 + sizeof(struct ipv6hdr));
+				if ( (pipv6->nexthdr == IPPROTO_HOPOPTS) &&
+				     (*nextHdr == IPPROTO_ICMPV6) )
+				{
+					/* skip past hop by hop hdr */
+					picmp =  (struct icmp6hdr *)(nextHdr + 8);
+				}
+
+				*ppipv6mcast = pipv6;
+				*ppicmpv6 = picmp;
+				if ( lanppp != NULL )
+				{
+					*lanppp = 0;
+				}
+			}
+		}
+	}
+	return;
+}
+
+int br_mld_snooping_enabled(struct net_device *dev) {
+	struct net_bridge *br;
+	struct net_bridge_port *port;
+
+	port = br_port_get_rcu(dev);
+	if (port) {
+		br = port->br;
+		if (br->mld_snooping == SNOOPING_DISABLED_MODE)
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+static struct kmem_cache *br_mld_mc_fdb_cache __read_mostly;
+static struct kmem_cache *br_mld_mc_rep_cache __read_mostly;
+static u32 br_mld_mc_fdb_salt __read_mostly;
+static struct proc_dir_entry *br_mld_entry = NULL;
+
+extern int mcpd_process_skb(struct net_bridge *br, struct sk_buff *skb,
+                            unsigned short protocol);
+
+static struct in6_addr all_dhcp_srvr_addr = { .in6_u.u6_addr32 = {0xFF050000, 
+                                                                  0x00000000, 
+                                                                  0x00000000, 
+                                                                  0x00010003 } };
+
+static inline int br_mld_mc_fdb_hash(const struct in6_addr *grp)
+{
+	return jhash_1word((grp->s6_addr32[0] | grp->s6_addr32[3]), 
+                                   br_mld_mc_fdb_salt) & (BR_MLD_HASH_SIZE - 1);
+}
+
+static int br_mld_control_filter(const unsigned char *dest, const struct in6_addr *ipv6)
+{
+    /* ignore any packets that are not multicast
+       ignore scope0, node and link local addresses
+       ignore IPv6 all DHCP servers address */
+    if(((dest) && is_broadcast_ether_addr(dest)) || 
+       (!BCM_IN6_IS_ADDR_MULTICAST(ipv6)) ||
+       (BCM_IN6_IS_ADDR_MC_SCOPE0(ipv6)) ||
+       (BCM_IN6_IS_ADDR_MC_NODELOCAL(ipv6)) ||
+       (BCM_IN6_IS_ADDR_MC_LINKLOCAL(ipv6)) ||
+       (0 == memcmp(ipv6, &all_dhcp_srvr_addr, sizeof(struct in6_addr))))
+        return 0;
+    else
+        return 1;
+}
+
+/* This function requires that br->mld_mcl_lock is already held */
+void br_mld_mc_fdb_del_entry(struct net_bridge *br, 
+                             struct net_br_mld_mc_fdb_entry *mld_fdb,
+                             struct in6_addr *rep,
+                             unsigned char *repMac)
+{
+	struct net_br_mld_mc_rep_entry *rep_entry = NULL;
+	struct net_br_mld_mc_rep_entry *rep_entry_n = NULL;
+
+	list_for_each_entry_safe(rep_entry, 
+	                         rep_entry_n, &mld_fdb->rep_list, list) 
+	{
+		if(((NULL == rep) && (NULL == repMac)) ||
+		   (rep && BCM_IN6_ARE_ADDR_EQUAL(&rep_entry->rep, rep)) ||
+		   (repMac && (0 == memcmp(rep_entry->repMac, repMac, ETH_ALEN))))
+		{
+			list_del(&rep_entry->list);
+			kmem_cache_free(br_mld_mc_rep_cache, rep_entry);
+			if (rep || repMac)
+			{
+				break;
+			}
+		}
+	}
+	if(list_empty(&mld_fdb->rep_list)) 
+	{
+		hlist_del(&mld_fdb->hlist);
+		br_mld_wl_del_entry(br, mld_fdb);
+#if defined(CONFIG_BLOG) 
+		br_mcast_blog_release(BR_MCAST_PROTO_MLD, (void *)mld_fdb);
+#endif
+		kmem_cache_free(br_mld_mc_fdb_cache, mld_fdb);
+	}
+
+	return;
+}
+
+void br_mld_set_timer( struct net_bridge *br )
+{
+	struct net_br_mld_mc_fdb_entry *mcast_group;
+	int                             i;
+	unsigned long                   tstamp;
+	unsigned int                    found;
+
+	if ( br->mld_snooping == 0 )
+	{
+		del_timer(&br->mld_timer);
+		return;
+	}
+
+	/* the largest timeout is BR_MLD_MEMBERSHIP_TIMEOUT */
+	tstamp = jiffies + (BR_MLD_MEMBERSHIP_TIMEOUT*HZ*2);
+	found = 0;
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h_group;
+		hlist_for_each_entry(mcast_group, h_group, &br->mld_mc_hash[i], hlist) 
+		{
+			struct net_br_mld_mc_rep_entry *reporter;
+			list_for_each_entry(reporter, &mcast_group->rep_list, list)
+			{
+				if ( time_after(tstamp, reporter->tstamp) )
+				{
+					tstamp = reporter->tstamp;
+					found  = 1;
+				}
+			}
+		}
+	}
+
+	if ( 0 == found )
+	{
+		del_timer(&br->mld_timer);
+	}
+	else
+	{
+		mod_timer(&br->mld_timer, (tstamp + TIMER_CHECK_TIMEOUT));
+	}
+}
+
+
+static void br_mld_query_timeout(unsigned long ptr)
+{
+	struct net_br_mld_mc_fdb_entry *mcast_group;
+	struct net_bridge *br;
+	int i;
+    
+	br = (struct net_bridge *) ptr;
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h_group, *n_group;
+		hlist_for_each_entry_safe(mcast_group, h_group, n_group, &br->mld_mc_hash[i], hlist) 
+		{
+			struct net_br_mld_mc_rep_entry *reporter, *n_reporter;
+			list_for_each_entry_safe(reporter, n_reporter, &mcast_group->rep_list, list)
+			{
+				if (time_after_eq(jiffies, reporter->tstamp))
+				{
+					br_mld_mc_fdb_del_entry(br, mcast_group, &reporter->rep, NULL);
+				}
+			}
+		}
+	}
+
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+}
+
+static struct net_br_mld_mc_rep_entry *
+br_mld_rep_find (const struct net_br_mld_mc_fdb_entry *mc_fdb,
+                 const struct in6_addr *rep,
+                 unsigned char *repMac)
+{
+	struct net_br_mld_mc_rep_entry *rep_entry;
+
+	list_for_each_entry(rep_entry, &mc_fdb->rep_list, list)
+	{
+		if((rep && BCM_IN6_ARE_ADDR_EQUAL(&rep_entry->rep, rep)) ||
+		   (repMac && (0 == memcmp(rep_entry->repMac, repMac, ETH_ALEN))))
+		{
+			return rep_entry;
+		}
+	}
+
+	return NULL;
+}
+
+/* In the case where a reporter has changed ports, this function
+   will remove all records pointing to the old port */
+void br_mld_wipe_reporter_for_port (struct net_bridge *br,
+                                    struct in6_addr *rep, 
+                                    u16 oldPort)
+{
+    int hashIndex = 0;
+    struct hlist_node *h = NULL;
+    struct hlist_node *n = NULL;
+    struct hlist_head *head = NULL;
+    struct net_br_mld_mc_fdb_entry *mc_fdb;
+
+    spin_lock_bh(&br->mld_mcl_lock);
+    for ( ; hashIndex < BR_MLD_HASH_SIZE ; hashIndex++)
+    {
+        head = &br->mld_mc_hash[hashIndex];
+        hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist)
+        {
+            if ((br_mld_rep_find(mc_fdb, rep, NULL)) &&
+                (mc_fdb->dst->port_no == oldPort))
+            {
+                /* The reporter we're looking for has been found
+                   in a record pointing to its old port */
+                br_mld_mc_fdb_del_entry (br, mc_fdb, rep, NULL);
+            }
+        }
+    }
+    br_mld_set_timer(br);
+    spin_unlock_bh(&br->mld_mcl_lock);
+}
+
+/* will remove all records for reporter with MAC equal to repMac */
+void br_mld_wipe_reporter_by_mac (struct net_bridge *br,
+                                  unsigned char *repMac)
+{
+    int hashIndex = 0;
+    struct hlist_node *h = NULL;
+    struct hlist_node *n = NULL;
+    struct hlist_head *head = NULL;
+    struct net_br_mld_mc_fdb_entry *mc_fdb;
+
+    spin_lock_bh(&br->mld_mcl_lock);
+    for ( ; hashIndex < BR_MLD_HASH_SIZE ; hashIndex++)
+    {
+        head = &br->mld_mc_hash[hashIndex];
+        hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist)
+        {
+            if (br_mld_rep_find(mc_fdb, NULL, repMac))
+            {
+                br_mld_mc_fdb_del_entry (br, mc_fdb, NULL, repMac);
+            }
+        }
+    }
+    br_mld_set_timer(br);
+    spin_unlock_bh(&br->mld_mcl_lock);
+}
+
+/* this is called during addition of a snooping entry and requires that 
+   mld_mcl_lock is already held */
+static int br_mld_mc_fdb_update(struct net_bridge *br, 
+                                struct net_bridge_port *prt, 
+                                struct in6_addr *grp, 
+                                struct in6_addr *rep,
+                                unsigned char *repMac,
+                                int mode, 
+                                struct in6_addr *src,
+                                struct net_device *from_dev,
+                                uint32_t info)
+{
+	struct net_br_mld_mc_fdb_entry *dst;
+	struct net_br_mld_mc_rep_entry *rep_entry = NULL;
+	int ret = 0;
+	int filt_mode;
+	struct hlist_head *head;
+	struct hlist_node *h;
+
+	if(mode == SNOOP_IN_ADD)
+		filt_mode = MCAST_INCLUDE;
+	else
+		filt_mode = MCAST_EXCLUDE;
+    
+	head = &br->mld_mc_hash[br_mld_mc_fdb_hash(grp)];
+	hlist_for_each_entry(dst, h, head, hlist) {
+		if (BCM_IN6_ARE_ADDR_EQUAL(&dst->grp, grp))
+		{
+			if((BCM_IN6_ARE_ADDR_EQUAL(src, &dst->src_entry.src)) &&
+			   (filt_mode == dst->src_entry.filt_mode) && 
+			   (dst->from_dev == from_dev) &&
+			   (dst->dst == prt) &&
+			   (dst->info == info))
+			{
+				/* found entry - update TS */
+				struct net_br_mld_mc_rep_entry *reporter = br_mld_rep_find(dst, rep, NULL);
+				if(reporter == NULL)
+				{
+					rep_entry = kmem_cache_alloc(br_mld_mc_rep_cache, GFP_ATOMIC);
+					if(rep_entry)
+					{
+						BCM_IN6_ASSIGN_ADDR(&rep_entry->rep, rep);
+						rep_entry->tstamp = jiffies + BR_MLD_MEMBERSHIP_TIMEOUT*HZ;
+						memcpy(rep_entry->repMac, repMac, ETH_ALEN);
+						list_add_tail(&rep_entry->list, &dst->rep_list);
+						br_mld_set_timer(br);
+					}
+				}
+				else 
+				{
+					reporter->tstamp = jiffies + BR_MLD_MEMBERSHIP_TIMEOUT*HZ;
+					br_mld_set_timer(br);
+				}
+				ret = 1;
+			}
+		}
+	}
+	return ret;
+}
+
+int br_mld_process_if_change(struct net_bridge *br, struct net_device *ndev)
+{
+	struct net_br_mld_mc_fdb_entry *dst;
+	int i;
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(dst, h, n, &br->mld_mc_hash[i], hlist) 
+		{
+			if ((NULL == ndev) ||
+			    (dst->dst->dev == ndev) ||
+			    (dst->from_dev == ndev))
+			{
+				br_mld_mc_fdb_del_entry(br, dst, NULL, NULL);
+			}
+		}
+	}
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+
+	return 0;
+}
+
+int br_mld_mc_fdb_add(struct net_device *from_dev,
+                        int wan_ops,
+                        struct net_bridge *br, 
+                        struct net_bridge_port *prt, 
+                        struct in6_addr *grp, 
+                        struct in6_addr *rep,
+                        unsigned char *repMac,
+                        int mode, 
+                        uint16_t tci, 
+                        struct in6_addr *src,
+                        int lanppp,
+                        uint32_t info)
+{
+	struct net_br_mld_mc_fdb_entry *mc_fdb;
+	struct net_br_mld_mc_rep_entry *rep_entry = NULL;
+	struct hlist_head *head = NULL;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	int ret = 1;
+#endif
+
+	if(!br || !prt || !grp|| !rep || !from_dev)
+		return 0;
+
+	if(!(br_mld_control_filter(NULL, grp) ||
+	   BCM_IN6_IS_ADDR_L2_MCAST(grp)))
+		return 0;
+
+	if(!netdev_path_is_leaf(from_dev))
+		return 0;
+
+	if((SNOOP_IN_ADD != mode) && (SNOOP_EX_ADD != mode))
+		return 0;
+
+	mc_fdb = kmem_cache_alloc(br_mld_mc_fdb_cache, GFP_ATOMIC);
+	if (!mc_fdb)
+	{
+		return -ENOMEM;
+	}
+	rep_entry = kmem_cache_alloc(br_mld_mc_rep_cache, GFP_ATOMIC);
+	if ( !rep_entry )
+	{
+		kmem_cache_free(br_mld_mc_fdb_cache, mc_fdb);
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	if (br_mld_mc_fdb_update(br, prt, grp, rep, repMac, mode, src, from_dev, info))
+	{
+		kmem_cache_free(br_mld_mc_fdb_cache, mc_fdb);
+		kmem_cache_free(br_mld_mc_rep_cache, rep_entry);
+		spin_unlock_bh(&br->mld_mcl_lock);
+		return 0;
+	}
+   
+	BCM_IN6_ASSIGN_ADDR(&mc_fdb->grp, grp);
+	BCM_IN6_ASSIGN_ADDR(&mc_fdb->src_entry, src);
+	mc_fdb->src_entry.filt_mode = (mode == SNOOP_IN_ADD) ? MCAST_INCLUDE : MCAST_EXCLUDE;
+	mc_fdb->dst = prt;
+	mc_fdb->lan_tci = tci;
+	mc_fdb->wan_tci = 0;
+	mc_fdb->num_tags = 0;
+	mc_fdb->from_dev = from_dev;
+	mc_fdb->type = wan_ops;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	mc_fdb->root = 1;
+	mc_fdb->blog_idx = BLOG_KEY_INVALID;
+#endif
+	mc_fdb->info = info;
+	mc_fdb->lanppp = lanppp;
+	INIT_LIST_HEAD(&mc_fdb->rep_list);
+	BCM_IN6_ASSIGN_ADDR(&rep_entry->rep, rep);
+	rep_entry->tstamp = jiffies + (BR_MLD_MEMBERSHIP_TIMEOUT*HZ);
+	memcpy(rep_entry->repMac, repMac, ETH_ALEN);
+	list_add_tail(&rep_entry->list, &mc_fdb->rep_list);
+
+	head = &br->mld_mc_hash[br_mld_mc_fdb_hash(grp)];
+	hlist_add_head(&mc_fdb->hlist, head);
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	ret = br_mcast_blog_process(br, (void *)mc_fdb, BR_MCAST_PROTO_MLD);
+	if(ret < 0)
+	{
+		hlist_del(&mc_fdb->hlist);
+		kmem_cache_free(br_mld_mc_fdb_cache, mc_fdb);
+		kmem_cache_free(br_mld_mc_rep_cache, rep_entry);
+		spin_unlock_bh(&br->mld_mcl_lock);
+		return ret;
+	}
+#endif
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+
+	return 1;
+}
+EXPORT_SYMBOL(br_mld_mc_fdb_add);
+
+void br_mld_mc_fdb_cleanup(struct net_bridge *br)
+{
+	struct net_br_mld_mc_fdb_entry *dst;
+	int i;
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(dst, h, n, &br->mld_mc_hash[i], hlist) 
+		{
+			br_mld_mc_fdb_del_entry(br, dst, NULL, NULL);
+		}
+	}
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+}
+
+int br_mld_mc_fdb_remove(struct net_device *from_dev,
+                         struct net_bridge *br, 
+                         struct net_bridge_port *prt, 
+                         struct in6_addr *grp, 
+                         struct in6_addr *rep, 
+                         int mode, 
+                         struct in6_addr *src,
+                         uint32_t info)
+{
+	struct net_br_mld_mc_fdb_entry *mc_fdb;
+	int filt_mode;
+	struct hlist_head *head = NULL;
+	struct hlist_node *h, *n;
+    
+	if(!br || !prt || !grp|| !rep || !from_dev)
+		return 0;
+
+	if(!(br_mld_control_filter(NULL, grp) ||
+	   BCM_IN6_IS_ADDR_L2_MCAST(grp)))
+		return 0;
+
+	if(!netdev_path_is_leaf(from_dev))
+		return 0;
+
+	if((SNOOP_IN_CLEAR != mode) && (SNOOP_EX_CLEAR != mode))
+		return 0;
+
+	if(mode == SNOOP_IN_CLEAR)
+		filt_mode = MCAST_INCLUDE;
+	else
+		filt_mode = MCAST_EXCLUDE;
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	head = &br->mld_mc_hash[br_mld_mc_fdb_hash(grp)];
+	hlist_for_each_entry_safe(mc_fdb, h, n, head, hlist) 
+	{
+		if ((BCM_IN6_ARE_ADDR_EQUAL(&mc_fdb->grp, grp)) && 
+		    (filt_mode == mc_fdb->src_entry.filt_mode) &&
+		    (BCM_IN6_ARE_ADDR_EQUAL(&mc_fdb->src_entry.src, src)) &&
+		    (mc_fdb->from_dev == from_dev) &&
+		    (mc_fdb->dst == prt) &&
+		    (mc_fdb->info == info))
+		{
+			br_mld_mc_fdb_del_entry(br, mc_fdb, rep, NULL);
+		}
+	}
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(br_mld_mc_fdb_remove);
+
+int br_mld_mc_forward(struct net_bridge *br, 
+                      struct sk_buff *skb, 
+                      int forward, 
+                      int is_routed)
+{
+	struct net_br_mld_mc_fdb_entry *dst;
+	int status = 0;
+	struct sk_buff *skb2;
+	struct net_bridge_port *p, *p_n;
+	const unsigned char *dest = eth_hdr(skb)->h_dest;
+	struct hlist_head *head = NULL;
+	struct hlist_node *h;
+	struct ipv6hdr *pipv6mcast = NULL;
+	struct icmp6hdr *picmpv6 = NULL;
+	int lanppp;
+
+	br_mld_get_ip_icmp_hdrs(skb, &pipv6mcast, &picmpv6, &lanppp);
+	if ( pipv6mcast == NULL )
+	{
+		return status;
+	}
+   
+	if ( picmpv6 != NULL )
+	{
+		if((picmpv6->icmp6_type == ICMPV6_MGM_REPORT) ||
+			(picmpv6->icmp6_type == ICMPV6_MGM_REDUCTION) || 
+			(picmpv6->icmp6_type == ICMPV6_MLD2_REPORT)) 
+		{
+			rcu_read_lock();
+			if(skb->dev && (br_port_get_rcu(skb->dev)) &&
+				(br->mld_snooping ||
+				is_multicast_switching_mode_host_control()))
+			{
+				/* for bridged WAN service, do not pass any MLD packets
+				   coming from the WAN port to mcpd */
+#if defined(CONFIG_BCM_KF_WANDEV)
+				if ( skb->dev->priv_flags & IFF_WANDEV )
+				{
+					kfree_skb(skb);
+					status = 1;
+				}
+				else
+#endif
+				{
+				   mcpd_process_skb(br, skb, ETH_P_IPV6);
+				}
+			}
+			rcu_read_unlock();
+			return status;
+		}
+	}
+
+	/* snooping could be disabled and still have entries */
+
+	/* drop traffic by default when snooping is enabled
+	   in blocking mode */
+	if ((br->mld_snooping == SNOOPING_BLOCKING_MODE) &&
+	     br_mld_control_filter(dest, &pipv6mcast->daddr))
+	{
+		status = 1;
+	}
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	head = &br->mld_mc_hash[br_mld_mc_fdb_hash(&pipv6mcast->daddr)];
+	hlist_for_each_entry(dst, h, head, hlist) {
+		if (!BCM_IN6_ARE_ADDR_EQUAL(&dst->grp, &pipv6mcast->daddr)) {
+			continue;
+		}
+		/* if this packet has already been sent to the port referenced by the forwarding
+		   entry continue */
+		if (1 == dst->dst->dirty) {
+			continue;
+		}
+
+		/* routed packet will have bridge as from dev - cannot match to mc_fdb */
+		if ( is_routed ) {
+			if ( dst->type != MCPD_IF_TYPE_ROUTED ) {
+				continue;
+			}
+		}
+		else {
+			if ( dst->type != MCPD_IF_TYPE_BRIDGED ) {
+				continue;
+			}
+#if defined(CONFIG_BCM_KF_WANDEV)
+			if (skb->dev->priv_flags & IFF_WANDEV) {
+				/* match exactly if skb device is a WAN device - otherwise continue */
+				if (dst->from_dev != skb->dev)
+					continue;
+			}
+			else 
+#endif
+			{
+				/* if this is not an L2L mc_fdb entry continue */
+				if (dst->from_dev != br->dev)
+					continue;            
+			}
+		}
+		if((dst->src_entry.filt_mode == MCAST_INCLUDE) && 
+		   (BCM_IN6_ARE_ADDR_EQUAL(&pipv6mcast->saddr, &dst->src_entry.src))) {
+			if((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+			{
+				spin_unlock_bh(&br->mld_mcl_lock);
+				return 0;
+			} 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			blog_clone(skb, blog_ptr(skb2));
+#endif
+			if(forward) {
+				br_forward(dst->dst, skb2, NULL);
+			}
+			else {
+				br_deliver(dst->dst, skb2);
+			}
+			dst->dst->dirty = 1;
+			status = 1;
+		}
+		else if(dst->src_entry.filt_mode == MCAST_EXCLUDE) {
+			if((0 == dst->src_entry.src.s6_addr[0]) ||
+			   (!BCM_IN6_ARE_ADDR_EQUAL(&pipv6mcast->saddr, &dst->src_entry.src))) {
+				if((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+				{
+					spin_unlock_bh(&br->mld_mcl_lock);
+					return 0;
+				}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+				blog_clone(skb, blog_ptr(skb2));
+#endif
+				if(forward) {
+					br_forward(dst->dst, skb2, NULL);
+				}
+				else {
+					br_deliver(dst->dst, skb2);
+				}
+				dst->dst->dirty = 1;
+				status = 1;
+			}
+			else if( BCM_IN6_ARE_ADDR_EQUAL(&pipv6mcast->saddr, &dst->src_entry.src)) {
+				status = 1;
+			}
+		}
+	}
+
+	if (status) {
+		list_for_each_entry_safe(p, p_n, &br->port_list, list) {
+			p->dirty = 0;
+		}
+	}
+	spin_unlock_bh(&br->mld_mcl_lock);
+
+	if(status)
+		kfree_skb(skb);
+
+	return status;
+}
+
+int br_mld_mc_fdb_update_bydev( struct net_bridge *br,
+                                struct net_device *dev,
+                                unsigned int       flushAll)
+{
+	struct net_br_mld_mc_fdb_entry *mc_fdb;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	int ret;
+#endif
+	int i;
+
+	if(!br || !dev)
+		return 0;
+
+	if(!netdev_path_is_leaf(dev))
+		return 0;
+
+	spin_lock_bh(&br->mld_mcl_lock);
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct hlist_node *h, *n;
+		hlist_for_each_entry_safe(mc_fdb, h, n, &br->mld_mc_hash[i], hlist) 
+		{
+			if ((mc_fdb->dst->dev == dev) ||
+			    (mc_fdb->from_dev == dev))
+			{
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+				/* do not remove the root entry */
+				if ((0 == mc_fdb->root) || (1 == flushAll))
+				{
+					br_mld_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+				}
+				else
+				{
+					br_mcast_blog_release(BR_MCAST_PROTO_MLD, (void *)mc_fdb);
+					mc_fdb->blog_idx = BLOG_KEY_INVALID;
+				}
+#else
+				if (1 == flushAll)
+				{
+					br_mld_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+				}
+#endif
+			}
+		}
+	}
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	if ( 0 == flushAll )
+	{
+		for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+		{
+			struct hlist_node *h, *n;
+			hlist_for_each_entry_safe(mc_fdb, h, n, &br->mld_mc_hash[i], hlist) 
+			{ 
+				if ( (1 == mc_fdb->root) &&
+				     ((mc_fdb->dst->dev == dev) ||
+				      (mc_fdb->from_dev == dev)) )
+				{
+					mc_fdb->wan_tci  = 0;
+					mc_fdb->num_tags = 0;
+					ret = br_mcast_blog_process(br, (void*)mc_fdb, BR_MCAST_PROTO_MLD);
+					if(ret < 0)
+					{
+						/* br_mcast_blog_process may return -1 if there are no blog rules
+						 * which may be a valid scenario, in which case we delete the
+						 * multicast entry.
+						 */
+						br_mld_mc_fdb_del_entry(br, mc_fdb, NULL, NULL);
+						//printk(KERN_WARNING "%s: Failed to create the blog\n", __FUNCTION__);
+					}
+				}
+			}
+		}
+	}
+#endif   
+	br_mld_set_timer(br);
+	spin_unlock_bh(&br->mld_mcl_lock);
+
+	return 0;
+}
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+/* This is a support function for vlan/blog processing that requires that 
+   br->mld_mcl_lock is already held */
+struct net_br_mld_mc_fdb_entry *br_mld_mc_fdb_copy(struct net_bridge *br, 
+                                     const struct net_br_mld_mc_fdb_entry *mld_fdb)
+{
+	struct net_br_mld_mc_fdb_entry *new_mld_fdb = NULL;
+	struct net_br_mld_mc_rep_entry *rep_entry = NULL;
+	struct net_br_mld_mc_rep_entry *rep_entry_n = NULL;
+	int success = 1;
+	struct hlist_head *head = NULL;
+
+	new_mld_fdb = kmem_cache_alloc(br_mld_mc_fdb_cache, GFP_ATOMIC);
+	if (new_mld_fdb)
+	{
+		memcpy(new_mld_fdb, mld_fdb, sizeof(struct net_br_mld_mc_fdb_entry));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		new_mld_fdb->blog_idx = BLOG_KEY_INVALID;
+#endif
+		new_mld_fdb->root = 0;
+		INIT_LIST_HEAD(&new_mld_fdb->rep_list);
+
+		list_for_each_entry(rep_entry, &mld_fdb->rep_list, list) {
+			rep_entry_n = kmem_cache_alloc(br_mld_mc_rep_cache, GFP_ATOMIC);
+			if(rep_entry_n)
+			{
+				memcpy(rep_entry_n, 
+				       rep_entry, 
+				       sizeof(struct net_br_mld_mc_rep_entry));
+				list_add_tail(&rep_entry_n->list, &new_mld_fdb->rep_list);
+			}
+			else 
+			{
+				success = 0;
+				break;
+			}
+		}
+
+		if(success)
+		{
+			head = &br->mld_mc_hash[br_mld_mc_fdb_hash(&mld_fdb->grp)];
+			hlist_add_head(&new_mld_fdb->hlist, head);
+		}
+		else
+		{
+			list_for_each_entry_safe(rep_entry, 
+			                         rep_entry_n, &new_mld_fdb->rep_list, list) {
+				list_del(&rep_entry->list);
+				kmem_cache_free(br_mld_mc_rep_cache, rep_entry);
+			}
+			kmem_cache_free(br_mld_mc_fdb_cache, new_mld_fdb);
+			new_mld_fdb = NULL;
+		}
+	}
+
+	return new_mld_fdb;
+} /* br_mld_mc_fdb_copy */
+#endif
+
+static void *snoop_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct net_device *dev;
+	loff_t offs = 0;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		if ((dev->priv_flags & IFF_EBRIDGE) && (*pos == offs++)) {
+			return dev;
+		}
+	}
+
+	return NULL;
+}
+
+static void *snoop_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct net_device *dev = v;
+
+	++*pos;
+	for(dev = next_net_device_rcu(dev); dev; dev = next_net_device_rcu(dev)) {
+		if(dev->priv_flags & IFF_EBRIDGE) {
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static void snoop_display_entry(struct seq_file *seq,
+                                struct net_bridge *br,
+                                struct net_br_mld_mc_fdb_entry *dst)
+{
+	struct net_br_mld_mc_rep_entry *rep_entry;
+	int                             first;
+	int                             tstamp;
+
+	seq_printf(seq, "%-6s %-6s %-7s %02d    0x%04x   0x%04x%04x", 
+	           br->dev->name, 
+	           dst->dst->dev->name, 
+	           dst->from_dev->name, 
+	           dst->num_tags,
+	           ntohs(dst->lan_tci),
+	           ((dst->wan_tci >> 16) & 0xFFFF),
+	           (dst->wan_tci & 0xFFFF));
+
+	seq_printf(seq, " %08x:%08x:%08x:%08x",
+	           htonl(dst->grp.s6_addr32[0]),
+	           htonl(dst->grp.s6_addr32[1]),
+	           htonl(dst->grp.s6_addr32[2]),
+	           htonl(dst->grp.s6_addr32[3]));
+
+	seq_printf(seq, " %-4s %08x:%08x:%08x:%08x", 
+	           (dst->src_entry.filt_mode == MCAST_EXCLUDE) ? 
+	            "EX" : "IN",
+	           htonl(dst->src_entry.src.s6_addr32[0]), 
+	           htonl(dst->src_entry.src.s6_addr32[1]), 
+	           htonl(dst->src_entry.src.s6_addr32[2]), 
+	           htonl(dst->src_entry.src.s6_addr32[3]));
+
+	first = 1;
+	list_for_each_entry(rep_entry, &dst->rep_list, list)
+	{ 
+
+		if ( 0 == br->mld_snooping )
+		{
+			tstamp = 0;
+		}
+		else
+		{
+			tstamp = (int)(rep_entry->tstamp - jiffies) / HZ;
+		}
+
+		if(first)
+		{
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			seq_printf(seq, " %-7d %08x:%08x:%08x:%08x 0x%08x\n", 
+			           tstamp,
+			           htonl(rep_entry->rep.s6_addr32[0]),
+			           htonl(rep_entry->rep.s6_addr32[1]),
+			           htonl(rep_entry->rep.s6_addr32[2]),
+			           htonl(rep_entry->rep.s6_addr32[3]), dst->blog_idx);
+#else
+			seq_printf(seq, " %-7d %08x:%08x:%08x:%08x\n", 
+			           tstamp,
+			           htonl(rep_entry->rep.s6_addr32[0]),
+			           htonl(rep_entry->rep.s6_addr32[1]),
+			           htonl(rep_entry->rep.s6_addr32[2]),
+			           htonl(rep_entry->rep.s6_addr32[3]));
+#endif
+			first = 0;
+		}
+		else 
+		{
+			seq_printf(seq, "%124s %-7d %08x:%08x:%08x:%08x\n", " ", 
+			           tstamp,
+			           htonl(rep_entry->rep.s6_addr32[0]),
+			           htonl(rep_entry->rep.s6_addr32[1]),
+			           htonl(rep_entry->rep.s6_addr32[2]),
+			           htonl(rep_entry->rep.s6_addr32[3]));
+		}
+	}
+}
+
+static int snoop_seq_show(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = v;
+	struct net_bridge *br = netdev_priv(dev);
+	int                i;
+
+	seq_printf(seq, "mld snooping %d  lan2lan-snooping %d/%d, priority %d\n",
+	           br->mld_snooping,
+	           br->mld_lan2lan_mc_enable,
+	           br_mcast_get_lan2lan_snooping(BR_MCAST_PROTO_MLD, br),
+	           br_mcast_get_pri_queue());
+	seq_printf(seq, "bridge device src-dev #tags lan-tci  wan-tci");
+	seq_printf(seq, "    group                               mode source");
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	seq_printf(seq, "                              timeout reporter");
+	seq_printf(seq, "                            Index\n");
+#else
+	seq_printf(seq, "                              timeout reporter\n");
+#endif
+
+	for (i = 0; i < BR_MLD_HASH_SIZE; i++) 
+	{
+		struct net_br_mld_mc_fdb_entry *entry;
+		struct hlist_node *pos;
+		hlist_for_each_entry(entry, pos, &br->mld_mc_hash[i], hlist) 
+		{
+			snoop_display_entry(seq, br, entry);
+		}
+	}
+
+	return 0;
+}
+
+static void snoop_seq_stop(struct seq_file *seq, void *v)
+{
+	rcu_read_unlock();
+}
+
+static struct seq_operations snoop_seq_ops = {
+	.start = snoop_seq_start,
+	.next  = snoop_seq_next,
+	.stop  = snoop_seq_stop,
+	.show  = snoop_seq_show,
+};
+
+static int snoop_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &snoop_seq_ops);
+}
+
+static struct file_operations br_mld_snoop_proc_fops = {
+	.owner = THIS_MODULE,
+	.open  = snoop_seq_open,
+	.read  = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+void br_mld_snooping_br_init( struct net_bridge *br )
+{
+	spin_lock_init(&br->mld_mcl_lock);
+	br->mld_lan2lan_mc_enable = BR_MC_LAN2LAN_STATUS_DEFAULT;
+	setup_timer(&br->mld_timer, br_mld_query_timeout, (unsigned long)br);  
+}
+
+void br_mld_snooping_br_fini( struct net_bridge *br )
+{
+	del_timer_sync(&br->mld_timer);
+}
+
+int __init br_mld_snooping_init(void)
+{
+	br_mld_entry = proc_create("mld_snooping", 0, init_net.proc_net,
+			   &br_mld_snoop_proc_fops);
+
+	if(!br_mld_entry) {
+		printk("error while creating mld_snooping proc\n");
+		return -ENOMEM;
+	}
+
+	br_mld_mc_fdb_cache = kmem_cache_create("bridge_mld_mc_fdb_cache",
+	                                        sizeof(struct net_br_mld_mc_fdb_entry),
+	                                        0,
+	                                        SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_mld_mc_fdb_cache)
+		return -ENOMEM;
+
+	br_mld_mc_rep_cache = kmem_cache_create("br_mld_mc_rep_cache",
+	                                        sizeof(struct net_br_mld_mc_rep_entry),
+	                                        0,
+	                                        SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_mld_mc_rep_cache)
+	{
+		kmem_cache_destroy(br_mld_mc_fdb_cache);
+		return -ENOMEM;
+	}
+
+	get_random_bytes(&br_mld_mc_fdb_salt, sizeof(br_mld_mc_fdb_salt));
+
+	return 0;
+}
+
+void br_mld_snooping_fini(void)
+{
+	kmem_cache_destroy(br_mld_mc_fdb_cache);
+	kmem_cache_destroy(br_mld_mc_rep_cache);
+
+	return;
+}
+
+EXPORT_SYMBOL(br_mld_control_filter);
+EXPORT_SYMBOL(br_mld_snooping_enabled);
+#endif /* defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP) */
diff --git a/net/bridge/br_mld.h b/net/bridge/br_mld.h
new file mode 100644
index 0000000000000000000000000000000000000000..73f69db5c29101232a1c012fe41e3a5ea55b4be6
--- /dev/null
+++ b/net/bridge/br_mld.h
@@ -0,0 +1,197 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#ifndef _BR_MLD_H
+#define _BR_MLD_H
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/igmp.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include "br_private.h"
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#include "br_mcast.h"
+#endif
+
+/* extra snooping mode value beyond the MLDv2 INCLUDE/EXCLUDE modes */
+#define SNOOPING_BLOCKING_MODE 2
+
+/* period of the per-bridge mld_timer scan */
+#define TIMER_CHECK_TIMEOUT (2*HZ)
+/* seconds before an idle membership entry expires */
+#define BR_MLD_MEMBERSHIP_TIMEOUT 260 /* RFC3810 */
+
+/* first octet of an IPv6 multicast MAC (33:33:xx:xx:xx:xx) */
+#define BR_MLD_MULTICAST_MAC_PREFIX 0x33
+
+/* Compare two IPv6 addresses (given as pointers) word by word. */
+#define BCM_IN6_ARE_ADDR_EQUAL(a,b)                                       \
+       ((((__const uint32_t *) (a))[0] == ((__const uint32_t *) (b))[0])  \
+	 && (((__const uint32_t *) (a))[1] == ((__const uint32_t *) (b))[1])  \
+	 && (((__const uint32_t *) (a))[2] == ((__const uint32_t *) (b))[2])  \
+	 && (((__const uint32_t *) (a))[3] == ((__const uint32_t *) (b))[3])) 
+
+/* Copy IPv6 address b into a, word by word. */
+#define BCM_IN6_ASSIGN_ADDR(a,b)                                  \
+    do {                                                          \
+        ((uint32_t *) (a))[0] = ((__const uint32_t *) (b))[0];    \
+        ((uint32_t *) (a))[1] = ((__const uint32_t *) (b))[1];    \
+        ((uint32_t *) (a))[2] = ((__const uint32_t *) (b))[2];    \
+        ((uint32_t *) (a))[3] = ((__const uint32_t *) (b))[3];    \
+    } while(0)
+
+/* IPv6 multicast: first address byte is 0xff. */
+#define BCM_IN6_IS_ADDR_MULTICAST(a) (((__const uint8_t *) (a))[0] == 0xff)
+#define BCM_IN6_MULTICAST(x)   (BCM_IN6_IS_ADDR_MULTICAST(x))
+/* The following test the 4-bit multicast scope field (low nibble of
+ * byte 1): 0x1 node-local, 0x2 link-local, 0x5 site-local,
+ * 0x8 org-local, 0xe global, 0x0 reserved scope 0. */
+#define BCM_IN6_IS_ADDR_MC_NODELOCAL(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a)					      \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0x1))
+
+#define BCM_IN6_IS_ADDR_MC_LINKLOCAL(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a)					      \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0x2))
+
+#define BCM_IN6_IS_ADDR_MC_SITELOCAL(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a)					      \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0x5))
+
+#define BCM_IN6_IS_ADDR_MC_ORGLOCAL(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a)					      \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0x8))
+
+#define BCM_IN6_IS_ADDR_MC_GLOBAL(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a) \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0xe))
+
+#define BCM_IN6_IS_ADDR_MC_SCOPE0(a) \
+	(BCM_IN6_IS_ADDR_MULTICAST(a)					      \
+	 && ((((__const uint8_t *) (a))[1] & 0xf) == 0x0))
+
+/* Identify IPV6 L2 multicast by checking whether the most 12 bytes are 0 */
+#define BCM_IN6_IS_ADDR_L2_MCAST(a)         \
+    !((((__const uint32_t *) (a))[0])       \
+        || (((__const uint32_t *) (a))[1])  \
+        || (((__const uint32_t *) (a))[2]))
+
+/* One (source address, filter mode) record attached to a group entry. */
+struct net_br_mld_mc_src_entry
+{
+	struct in6_addr   src;
+	unsigned long     tstamp;
+	int               filt_mode;
+};
+
+/* One reporter (listening host) of a multicast group; linked into a
+ * group entry's rep_list. */
+struct net_br_mld_mc_rep_entry
+{
+	struct in6_addr     rep;
+	unsigned char       repMac[6];
+	unsigned long       tstamp;
+	struct list_head    list;
+};
+
+/* MLD snooping forwarding-database entry: one (group, destination
+ * port) pair with its reporters, source filter and VLAN bookkeeping. */
+struct net_br_mld_mc_fdb_entry
+{
+	struct hlist_node              hlist;
+	struct net_bridge_port        *dst;
+	struct in6_addr                grp;
+	struct list_head               rep_list;
+	struct net_br_mld_mc_src_entry src_entry;
+	uint16_t                       lan_tci; /* vlan id */
+	uint32_t                       wan_tci; /* vlan id */
+	int                            num_tags;
+	char                           type;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	uint32_t                       blog_idx;
+	char                           root;
+#endif
+	uint32_t                       info; 
+	int                            lanppp;
+	struct net_device             *from_dev;
+};
+
+/* blog (flow-cache) rule maintenance for an fdb entry */
+int br_mld_blog_rule_update(struct net_br_mld_mc_fdb_entry *mc_fdb, int wan_ops);
+
+/* data-path hook: forward skb according to the snooping fdb */
+int br_mld_mc_forward(struct net_bridge *br, 
+                      struct sk_buff *skb, 
+                      int forward,
+                      int is_routed);
+
+/* fdb add/remove/cleanup, driven by mcpd via netlink */
+int br_mld_mc_fdb_add(struct net_device *from_dev,
+                      int wan_ops,
+                      struct net_bridge *br, 
+                      struct net_bridge_port *prt, 
+                      struct in6_addr *grp, 
+                      struct in6_addr *rep,
+                      unsigned char *repMac,
+                      int mode, 
+                      uint16_t tci, 
+                      struct in6_addr *src,
+                      int lanppp,
+                      uint32_t info);
+
+void br_mld_mc_fdb_cleanup(struct net_bridge *br);
+
+int br_mld_mc_fdb_remove(struct net_device *from_dev,
+                         struct net_bridge *br, 
+                         struct net_bridge_port *prt, 
+                         struct in6_addr *grp, 
+                         struct in6_addr *rep, 
+                         int mode, 
+                         struct in6_addr *src,
+                         uint32_t info);
+
+/* flush (or selectively drop) entries referencing a given device */
+int br_mld_mc_fdb_update_bydev( struct net_bridge *br,
+                                struct net_device *dev,
+                                unsigned int       flushAll);
+
+/* per-port snooping enable/disable (ioctl helpers) */
+int br_mld_set_port_snooping(struct net_bridge_port *p,  void __user * userbuf);
+
+int br_mld_clear_port_snooping(struct net_bridge_port *p,  void __user * userbuf);
+
+/* drop all state learned from a reporter (by old port or by MAC) */
+void br_mld_wipe_reporter_for_port (struct net_bridge *br,
+                                    struct in6_addr *rep, 
+                                    u16 oldPort);
+
+void br_mld_wipe_reporter_by_mac (struct net_bridge *br,
+                                  unsigned char *repMac);
+
+int br_mld_process_if_change(struct net_bridge *br, struct net_device *ndev);
+
+struct net_br_mld_mc_fdb_entry *br_mld_mc_fdb_copy(struct net_bridge *br, 
+                                     const struct net_br_mld_mc_fdb_entry *mld_fdb);
+void br_mld_mc_fdb_del_entry(struct net_bridge *br, 
+                             struct net_br_mld_mc_fdb_entry *mld_fdb,
+                             struct in6_addr *rep,
+                             unsigned char *repMac);
+/* module init/teardown (proc entry + slab caches) */
+int __init br_mld_snooping_init(void);
+void br_mld_snooping_fini(void);
+
+void br_mld_wl_del_entry(struct net_bridge *br,struct net_br_mld_mc_fdb_entry *dst);
+
+/* locate the IPv6 and ICMPv6 headers in skb, handling PPPoE framing */
+void br_mld_get_ip_icmp_hdrs( const struct sk_buff *pskb, struct ipv6hdr **ppipv6mcast, struct icmp6hdr **ppicmpv6, int *lanppp);
+
+#endif /* defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP) */
+
+#endif /* _BR_MLD_H */
+
diff --git a/net/bridge/br_netlink_mcpd.c b/net/bridge/br_netlink_mcpd.c
new file mode 100644
index 0000000000000000000000000000000000000000..2d57ce13f1e58a657952a317aa610cc4fc0a8e41
--- /dev/null
+++ b/net/bridge/br_netlink_mcpd.c
@@ -0,0 +1,1205 @@
+/*
+*    Copyright (c) 2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#if (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)) || (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP))
+
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
+#include <net/sock.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include "br_igmp.h"
+#include "br_mld.h"
+
+/* netlink socket to the mcpd user-space daemon */
+static struct sock *nl_sk = NULL;
+/* netlink pid of the currently active snooping application (0 = none) */
+static int mcpd_pid = 0;
+/* nonzero when mcpd decides packet admission for IGMP / MLD */
+static int mcpdControlsIgmpAdmission = 0;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+static int mcpdControlsMldAdmission = 0;
+#endif
+
+/* snoop entry 'mode' values carried in snooping messages */
+#define MCPD_SNOOP_IN_ADD    1
+#define MCPD_SNOOP_IN_CLEAR  2
+#define MCPD_SNOOP_EX_ADD    3
+#define MCPD_SNOOP_EX_CLEAR  4
+
+#define MAX_MULTICAST_APPS_REGISTERED 2
+#define MULTICAST_APP_INVALID_ENTRY -1
+
+/* one registered pid slot per SNOOP_REGISTRATION_TYPE */
+static int snoop_registered_pid[MAX_MULTICAST_APPS_REGISTERED];
+
+typedef enum
+{
+    SNOOP_REGISTRATION_TYPE_DEFAULT,
+    SNOOP_REGISTRATION_TYPE_PRIMARY
+}SNOOP_REGISTRATION_TYPE;
+
+/* header preceding every kernel<->mcpd netlink payload */
+typedef struct mcpd_msg_hdr 
+{
+    __u16 type;
+    __u16 len;
+} t_MCPD_MSG_HDR;
+
+typedef enum mcpd_msgtype 
+{
+    MCDP_MSG_BASE = 0,
+    MCPD_MSG_REGISTER, /* usr - > krnl -> usr */
+    MCPD_MSG_UNREGISTER, /* usr - > krnl -> usr */
+    MCPD_MSG_IGMP_PKT, /* krnl -> usr */
+    MCPD_MSG_IGMP_SNOOP_ENTRY,
+    MCPD_MSG_MLD_PKT, /* krnl -> usr */
+    MCPD_MSG_MLD_SNOOP_ENTRY,
+    MCPD_MSG_IGMP_PURGE_ENTRY,
+    MCPD_MSG_MLD_PURGE_ENTRY,
+    MCPD_MSG_IF_CHANGE,
+    MCPD_MSG_MC_FDB_CLEANUP, /* clean up for MIB RESET */
+    MCPD_MSG_SET_PRI_QUEUE,
+    MCPD_MSG_UPLINK_INDICATION,
+    MCPD_MSG_IGMP_PURGE_REPORTER,
+    MCPD_MSG_MLD_PURGE_REPORTER,
+    MCPD_MSG_CONTROLS_ADMISSION,
+    MCPD_MSG_ADMISSION_RESULT,
+    MCPD_MSG_MAX
+} t_MCPD_MSGTYPES;
+
+typedef enum mcpd_ret_codes 
+{
+    MCPD_SUCCESS = 0,
+    MCPD_GEN_ERR = 1,
+    MCPD_RET_MEMERR = 2,
+    MCPD_RET_ACCEPT = 3,
+    MCPD_RET_DROP   = 4
+} t_MCPD_RET_CODE;
+
+typedef enum mcpd_proto_type
+{
+    MCPD_PROTO_IGMP = 0,
+    MCPD_PROTO_MLD  = 1,
+    MCPD_PROTO_MAX  = 2,
+} t_MCPD_PROTO_TYPE;
+
+/* payload of MCPD_MSG_REGISTER / MCPD_MSG_UNREGISTER replies */
+typedef struct mcpd_register 
+{
+    int code;
+    SNOOP_REGISTRATION_TYPE registration_type;
+} t_MCPD_REGISTER;
+
+/* payload of MCPD_MSG_IF_CHANGE notifications */
+typedef struct mcpd_if_change
+{
+   char              ifName[IFNAMSIZ];
+   t_MCPD_PROTO_TYPE proto;
+} t_MCPD_IF_CHANGE;
+
+/* metadata sent to mcpd alongside a forwarded IGMP/MLD packet */
+typedef struct mcpd_pkt_info
+{
+    char                      br_name[IFNAMSIZ];
+    char                      port_name[IFNAMSIZ];
+    unsigned short            port_no;
+    int                       if_index;
+    int                       data_len;
+    unsigned char             repMac[6];
+    unsigned short            tci;/* vlan id */
+    int                       lanppp;
+    int                       packetIndex; /* kernel's skb pointer */
+    int                       bridgeIndex; /* kernel's bridge pointer */
+} t_MCPD_PKT_INFO;
+
+typedef struct mcpd_igmp_purge_entry
+{
+    struct in_addr            grp;
+    struct in_addr            src;
+    struct in_addr            rep;
+    t_MCPD_PKT_INFO           pkt_info;
+} t_MCPD_IGMP_PURGE_ENTRY;
+
+typedef struct mcpd_igmp_purge_reporter 
+{
+   char                      br_name[IFNAMSIZ];
+   char                      port_no;
+   struct                    in_addr grp;   
+} t_MCPD_IGMP_PURGE_REPORTER;
+
+#ifdef CONFIG_BR_MLD_SNOOP
+typedef struct mcpd_mld_purge_reporter 
+{
+   char                      br_name[IFNAMSIZ];
+   char                      port_no;
+   struct                    in6_addr grp;
+} t_MCPD_MLD_PURGE_REPORTER;
+#endif /* CONFIG_BR_MLD_SNOOP */
+
+/* default priority-queue hook, installed elsewhere */
+extern void (*bcm_mcast_def_pri_queue_hook)(struct sk_buff *);
+
+/* Debug helper: hex-dump len bytes of buf to the console, 16 bytes per
+ * row.  The body is compiled out (#if 0), so calls are cheap no-ops in
+ * production builds. */
+static void mcpd_dump_buf(char *buf, int len)
+{
+#if 0
+    int i;
+    printk("========================KRNEL BPEELA START===================================\n");
+    for(i =0; i < len; i++) 
+    {
+     printk("%02x", (unsigned char)buf[i]);
+     if(!((i+1)%2))
+         printk(" ");
+     if(!((i+1)%16))
+       printk("\n");
+    }
+    printk("\n");
+    printk("=======================KRNL BPEELA END====================================\n");
+#endif
+}
+
+/* called with rcu read lock */
+/*
+ * Forward an IGMP (ETH_P_IP) or MLD (ETH_P_IPV6) packet received on a
+ * bridge port to the registered mcpd daemon over netlink, together
+ * with a t_MCPD_PKT_INFO describing the ingress port.
+ *
+ * return values:  0 not handled - caller continues normal processing
+ *                 1 handled     - admission control is active, the skb
+ *                                 is parked pending mcpd's verdict
+ */
+int mcpd_process_skb(struct net_bridge *br, struct sk_buff *skb, unsigned short protocol)
+{
+    struct nlmsghdr *nlh;
+    char                   *ptr = NULL;
+    struct sk_buff         *new_skb;
+    t_MCPD_PKT_INFO        *pkt_info;
+    int                     buf_size;
+    char                   *br_name = br->dev->name;
+    int                     if_index = br->dev->ifindex;
+    struct net_bridge_port *port;
+    int                     port_no;
+    int                     len;
+    u8                     *pData = NULL;
+    short                   type;
+    struct iphdr           *pipmcast = NULL;
+    struct igmphdr         *pigmp = NULL;
+    int                     lanppp;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    struct ipv6hdr         *pipv6mcast = NULL;
+    struct icmp6hdr        *picmpv6 = NULL;
+#endif    
+    int                     placePending = 0;
+
+    /* no daemon registered - nothing to report to */
+    if(!mcpd_pid)
+        return 0;
+
+    /* locate the protocol headers; pData stays NULL unless the skb
+     * really carries an IGMP/ICMPv6 message */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    if ( protocol == ETH_P_IPV6 ) {
+        br_mld_get_ip_icmp_hdrs(skb, &pipv6mcast, &picmpv6, &lanppp);
+        if ( picmpv6 != NULL ) {
+             pData = (u8 *)pipv6mcast;
+             len   = skb->len - (pData - skb->data);
+             type  = MCPD_MSG_MLD_PKT;
+        }
+    }
+    else
+#endif
+    if (protocol == ETH_P_IP)
+    {
+        br_igmp_get_ip_igmp_hdrs(skb, &pipmcast, &pigmp, &lanppp);
+        if ( pigmp != NULL ) {
+             pData = (u8 *)pipmcast;
+             len   = skb->len - (pData - skb->data);
+             type  = MCPD_MSG_IGMP_PKT;
+        }
+    }
+
+    if ( pData == NULL )
+    {
+        return 0;
+    }
+
+    port = br_port_get_rcu(skb->dev);
+    if (!port)
+        /* device is not (or no longer) a bridge port */
+        return 0;
+    port_no = port->port_no;
+
+    buf_size = len + sizeof(t_MCPD_MSG_HDR) + sizeof(t_MCPD_PKT_INFO);
+    new_skb  = alloc_skb(NLMSG_SPACE(buf_size), GFP_ATOMIC);
+
+    if(!new_skb) {
+        printk("br_netlink_mcpd.c:%d %s() errr no mem\n", __LINE__, __FUNCTION__);
+        return 0;
+    }
+
+    /* netlink header, then t_MCPD_MSG_HDR, then t_MCPD_PKT_INFO, then
+     * the raw IP(v6)+IGMP/ICMPv6 payload */
+    nlh = (struct nlmsghdr *)new_skb->data;
+    ptr = NLMSG_DATA(nlh);
+    nlh->nlmsg_len = NLMSG_SPACE(buf_size);
+    nlh->nlmsg_pid = 0;
+    nlh->nlmsg_flags = 0;
+    skb_put(new_skb, NLMSG_SPACE(buf_size));
+    ((t_MCPD_MSG_HDR *)ptr)->type = type;
+    ((t_MCPD_MSG_HDR *)ptr)->len = sizeof(t_MCPD_PKT_INFO);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    pkt_info = (t_MCPD_PKT_INFO *)ptr;
+
+    memcpy(pkt_info->br_name, br_name, IFNAMSIZ);
+    memcpy(pkt_info->port_name, skb->dev->name, IFNAMSIZ);
+    memcpy(pkt_info->repMac, skb_mac_header(skb)+ ETH_ALEN, ETH_ALEN);
+    pkt_info->port_no = port_no;
+    pkt_info->if_index = if_index;
+    pkt_info->data_len = len;
+    pkt_info->tci = 0; /* should be made big endian, but it's zero anyway */
+    pkt_info->lanppp = lanppp;
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    if(skb->vlan_count)
+        pkt_info->tci = htonl(skb->vlan_header[0] >> 16);
+#endif /* CONFIG_BCM_VLAN) */
+    ptr += sizeof(t_MCPD_PKT_INFO);
+
+    /* when mcpd controls admission, hand it the kernel pointers so the
+     * verdict can be matched back to the parked skb.
+     * NOTE(review): pointers are truncated into ints - only safe on
+     * 32-bit targets; confirm before reusing on 64-bit. */
+    pkt_info->packetIndex = 0;
+    pkt_info->bridgeIndex = 0;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    if ( protocol == ETH_P_IPV6 ) {
+      if (mcpdControlsMldAdmission) {
+        pkt_info->bridgeIndex = (int)br;
+        pkt_info->packetIndex = (int)skb;
+        placePending = 1;
+      }
+    }
+    else
+#endif
+    {
+      if (mcpdControlsIgmpAdmission) {
+        pkt_info->bridgeIndex = (int)br;
+        pkt_info->packetIndex = (int)skb;
+        placePending = 1;
+      }
+    }
+    memcpy(ptr, pData, len);
+
+    NETLINK_CB(new_skb).dst_group = 0;
+    NETLINK_CB(new_skb).pid = mcpd_pid;
+    mcpd_dump_buf((char *)nlh, 128);
+
+    netlink_unicast(nl_sk, new_skb, mcpd_pid, MSG_DONTWAIT);
+
+    return placePending;
+} /* mcpd_process_skb */
+EXPORT_SYMBOL(mcpd_process_skb);
+
+/*
+ * MCPD_MSG_CONTROLS_ADMISSION handler.  The payload is two bytes after
+ * the message header: byte 0 enables/disables mcpd admission control
+ * for IGMP, byte 1 (when MLD snooping is built in) does the same for
+ * MLD.  Values other than 0/1 are ignored.  When the IGMP setting
+ * changes, any skbs parked awaiting a verdict are wiped.
+ * (The original also tracked an unused 'changed' flag; removed.)
+ */
+static void mcpd_nl_process_set_admission_control(struct sk_buff* skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    char *ptr = NLMSG_DATA(nlh);
+
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    // IGMP ADMISSION
+    if ((*ptr == 0) || (*ptr == 1)) {
+      if (mcpdControlsIgmpAdmission != *ptr) {
+        mcpdControlsIgmpAdmission = *ptr;
+        br_igmp_wipe_pending_skbs();
+      }
+    }
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+    ptr++;
+    // MLD ADMISSION
+    if ((*ptr == 0) || (*ptr == 1)) {
+      if (mcpdControlsMldAdmission != *ptr) {
+        mcpdControlsMldAdmission = *ptr;
+        /* TBD  Insert MLD wipe code */
+      }
+    }
+#endif
+}
+
+/*
+ * MCPD_MSG_ADMISSION_RESULT handler: mcpd's accept/drop verdict for a
+ * packet parked by mcpd_process_skb().  The t_MCPD_ADMISSION payload
+ * immediately follows the message header.
+ */
+static void mcpd_nl_process_admission_result (struct sk_buff *skb)
+{
+    struct nlmsghdr*   nlh          = (struct nlmsghdr *)skb->data;
+    char* ptr = NLMSG_DATA(nlh);
+    /* BUG FIX: original used sizeof(sizeof(t_MCPD_MSG_HDR)) - i.e.
+     * sizeof(size_t) - as the header size; correct only by accident
+     * on 32-bit builds. */
+    t_MCPD_ADMISSION*  admit        = (t_MCPD_ADMISSION*)( ptr + sizeof(t_MCPD_MSG_HDR));
+
+    if ((admit->admitted == MCPD_PACKET_ADMITTED_NO) || (admit->admitted == MCPD_PACKET_ADMITTED_YES)) {
+      br_igmp_process_admission (admit);
+    }
+    else {
+      // TBD br_mld_process_admission (admit);
+    }
+}
+
+/* True when the PRIMARY-registered snooping application is the one
+ * currently receiving kernel notifications, i.e. the host application
+ * is in control of multicast switching. */
+int is_multicast_switching_mode_host_control(void)
+{
+    return mcpd_pid == snoop_registered_pid[SNOOP_REGISTRATION_TYPE_PRIMARY];
+}
+
+/*
+ * MCPD_MSG_UNREGISTER handler: send an MCPD_SUCCESS unregister ack to
+ * the currently active application (mcpd_pid), then fall back to the
+ * default-registered pid (0 if none) as the active destination.
+ * The incoming skb's payload is not inspected (the original read its
+ * nlmsghdr into unused locals; removed).
+ */
+static void mcpd_nl_process_unregistration(struct sk_buff *skb)
+{
+    struct sk_buff *new_skb = NULL;
+    char *new_ptr = NULL;
+    struct nlmsghdr *new_nlh = NULL;
+    int buf_size;
+
+    buf_size = NLMSG_SPACE((sizeof(t_MCPD_MSG_HDR) + sizeof(t_MCPD_REGISTER)));
+
+    new_skb = alloc_skb(buf_size, GFP_ATOMIC);
+
+    if(!new_skb) {
+        printk("br_netlink_mcpd.c:%d %s() errr no mem\n", __LINE__, __FUNCTION__);
+        return;
+    }
+
+    /* build the MCPD_SUCCESS reply */
+    new_nlh = (struct nlmsghdr *)new_skb->data;
+    new_ptr = NLMSG_DATA(new_nlh);
+    new_nlh->nlmsg_len = buf_size;
+    new_nlh->nlmsg_pid = 0;
+    new_nlh->nlmsg_flags = 0;
+    skb_put(new_skb, buf_size);
+    ((t_MCPD_MSG_HDR *)new_ptr)->type = MCPD_MSG_UNREGISTER;
+    ((t_MCPD_MSG_HDR *)new_ptr)->len = sizeof(t_MCPD_REGISTER);
+    new_ptr += sizeof(t_MCPD_MSG_HDR);
+    ((t_MCPD_REGISTER *)new_ptr)->code = MCPD_SUCCESS;
+
+    NETLINK_CB(new_skb).dst_group = 0;
+    NETLINK_CB(new_skb).pid = mcpd_pid;
+
+    netlink_unicast(nl_sk, new_skb, mcpd_pid, MSG_DONTWAIT);
+
+    /* hand notifications back to the default registrant, if any */
+    mcpd_pid = snoop_registered_pid[SNOOP_REGISTRATION_TYPE_DEFAULT];
+
+    return;
+} /* mcpd_nl_process_unregistration */
+
+/*
+ * MCPD_MSG_REGISTER handler: record the sender's netlink pid under the
+ * requested registration type (default or primary), notify a
+ * previously active application that it has been superseded, then ack
+ * the new registrant with MCPD_SUCCESS and make it the active pid.
+ *
+ * Fixes vs. original: the redundant `if (new_skb)` after the NULL
+ * early-return is gone, and new_skb is no longer leaked when the
+ * secondary allocation fails; the dummy skb passed to the
+ * unregistration helper is now freed after use.
+ */
+static void mcpd_nl_process_registration(struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    char *ptr  = NULL;
+    struct sk_buff *new_skb = NULL;
+    char *new_ptr = NULL;
+    struct nlmsghdr *new_nlh = NULL;
+    int buf_size;
+    int registration_type;
+
+    buf_size = NLMSG_SPACE((sizeof(t_MCPD_MSG_HDR) + sizeof(t_MCPD_REGISTER)));
+
+    new_skb = alloc_skb(buf_size, GFP_ATOMIC);
+
+    if(!new_skb) {
+        printk("br_netlink_mcpd.c:%d %s() errr no mem\n", __LINE__, __FUNCTION__);
+        return;
+    }
+
+    ptr = NLMSG_DATA(nlh);
+
+    /* build the MCPD_SUCCESS ack */
+    new_nlh = (struct nlmsghdr *)new_skb->data;
+    new_ptr = NLMSG_DATA(new_nlh);
+    new_nlh->nlmsg_len = buf_size;
+    new_nlh->nlmsg_pid = 0;
+    new_nlh->nlmsg_flags = 0;
+    skb_put(new_skb, buf_size);
+    ((t_MCPD_MSG_HDR *)new_ptr)->type = MCPD_MSG_REGISTER;
+    ((t_MCPD_MSG_HDR *)new_ptr)->len = sizeof(t_MCPD_REGISTER);
+    new_ptr += sizeof(t_MCPD_MSG_HDR);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+    ((t_MCPD_REGISTER *)new_ptr)->code = MCPD_SUCCESS;
+
+    registration_type = ((t_MCPD_REGISTER *)ptr)->registration_type;
+    if (snoop_registered_pid[registration_type] != nlh->nlmsg_pid) {
+        snoop_registered_pid[registration_type] = nlh->nlmsg_pid;
+        printk("br_netlink_mcpd.c: Setting registration type %d pid to %d\n", registration_type, snoop_registered_pid[registration_type]);
+    }
+
+    /* a different application was active: tell it that it has been
+     * replaced before switching mcpd_pid over */
+    if ((mcpd_pid) && (mcpd_pid != snoop_registered_pid[registration_type]))
+    {
+        struct sk_buff *new_skb2 = alloc_skb(buf_size, GFP_ATOMIC);
+        if(!new_skb2) {
+            printk("br_netlink_mcpd.c:%d %s() error no mem\n", __LINE__, __FUNCTION__);
+            kfree_skb(new_skb); /* was leaked on this path */
+            return;
+        }
+
+        /* NOTE(review): new_skb2 is only a dummy argument - its payload
+         * is never read by the callee; confirm intent. */
+        mcpd_nl_process_unregistration(new_skb2);
+        kfree_skb(new_skb2); /* callee does not take ownership */
+    }
+
+    NETLINK_CB(new_skb).dst_group = 0;
+
+    mcpd_pid = snoop_registered_pid[registration_type];
+    NETLINK_CB(new_skb).pid = mcpd_pid;
+
+    netlink_unicast(nl_sk, new_skb, mcpd_pid, MSG_DONTWAIT);
+
+    return;
+} /* mcpd_nl_process_registration */
+
+
+/* Return nonzero when from_dev (matched by interface name) is
+ * currently a port of bridge br. */
+static int mcpd_is_br_port(struct net_bridge *br,struct net_device *from_dev)
+{
+    struct net_bridge_port *port;
+    int found = 0;
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(port, &br->port_list, list) {
+        if (port->dev && !memcmp(port->dev->name, from_dev->name, IFNAMSIZ)) {
+            found = 1;
+            break;
+        }
+    }
+    rcu_read_unlock();
+
+    return found;
+} /* mcpd_is_br_port */
+
+/*
+ * MCPD_MSG_IGMP_SNOOP_ENTRY handler: add or remove IGMP snooping fdb
+ * entries on behalf of mcpd.  One entry is processed per WAN interface
+ * listed in the message (the wan_info list ends at the first zero
+ * if_ops), plus an optional LAN-2-LAN entry keyed on the bridge device
+ * itself.  Device refcounts are taken via dev_get_by_name() and the
+ * bridge port is looked up under rcu_read_lock for each pass.
+ */
+static void mcpd_nl_process_igmp_snoop_entry(struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    struct net_device *dev = NULL;
+    struct net_bridge *br = NULL;
+    struct net_bridge_port *prt;
+    t_MCPD_IGMP_SNOOP_ENTRY *snoop_entry;
+    unsigned char *ptr;
+    struct net_device *from_dev= NULL;
+    int idx = 0;
+    uint32_t info = 0;
+
+    /* payload follows the message header */
+    ptr = NLMSG_DATA(nlh);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    snoop_entry = (t_MCPD_IGMP_SNOOP_ENTRY *)ptr;
+
+    /* resolve and validate the target bridge (must be an up bridge) */
+    dev = dev_get_by_name(&init_net, snoop_entry->br_name);
+    if(NULL == dev)
+        return;
+
+    if ((0 == (dev->priv_flags & IFF_EBRIDGE)) ||
+        (0 == (dev->flags & IFF_UP)))
+    {
+        printk("%s: invalid bridge %s for snooping entry\n", 
+               __FUNCTION__, snoop_entry->br_name);
+        dev_put(dev);
+        return;
+    }
+    br = netdev_priv(dev);
+
+    /* one fdb operation per listed WAN interface */
+    for(idx = 0; idx < MCPD_MAX_IFS; idx++)
+    {
+        if(snoop_entry->wan_info[idx].if_ops)
+        {
+            from_dev = dev_get_by_name(&init_net, 
+                                       snoop_entry->wan_info[idx].if_name);
+            if (NULL == from_dev)
+               continue;
+
+            rcu_read_lock();
+            prt = br_get_port(br, snoop_entry->port_no);
+            if ( NULL == prt )
+            {
+               printk("%s: port %d could not be found in br %s\n", 
+                      __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+               rcu_read_unlock();
+               dev_put(from_dev);
+               dev_put(dev);
+               return;
+            }
+
+            br_mcast_get_rep_info(prt->dev, snoop_entry->repMac, &info);
+
+            if((snoop_entry->mode == MCPD_SNOOP_IN_CLEAR) ||
+               (snoop_entry->mode == MCPD_SNOOP_EX_CLEAR)) 
+            {
+                /* CLEAR modes remove the matching entry */
+                br_igmp_mc_fdb_remove(from_dev,
+                                      br, 
+                                      prt, 
+                                      &snoop_entry->rxGrp, 
+                                      &snoop_entry->txGrp, 
+                                      &snoop_entry->rep,
+                                      snoop_entry->mode, 
+                                      &snoop_entry->src,
+                                      info);
+            }
+            else
+            {
+                /* ADD modes: for bridged WAN ops the source device must
+                 * itself be a port of this bridge */
+                if((snoop_entry->wan_info[idx].if_ops == MCPD_IF_TYPE_BRIDGED) && 
+                   (!mcpd_is_br_port(br, from_dev)))
+                {
+                   rcu_read_unlock();
+                   dev_put(from_dev);
+                   continue;
+                }
+
+                if (0 == (prt->dev->flags & IFF_UP)) {
+                   printk("%s: port %d is not up %s\n", 
+                          __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+                   rcu_read_unlock();
+                   dev_put(dev);
+                   dev_put(from_dev);
+                   return;
+                }
+    
+                if (0 == (from_dev->flags & IFF_UP)) {
+                   printk("%s: source device %s is not up\n", 
+                          __FUNCTION__, from_dev->name);
+                   rcu_read_unlock();
+                   dev_put(from_dev);
+                   continue;
+                }
+
+                br_igmp_mc_fdb_add(from_dev,
+                                   snoop_entry->wan_info[idx].if_ops,
+                                   br, 
+                                   prt, 
+                                   &snoop_entry->rxGrp,
+                                   &snoop_entry->txGrp,
+                                   &snoop_entry->rep,
+                                   snoop_entry->repMac,
+                                   snoop_entry->mode, 
+                                   snoop_entry->tci,
+                                   &snoop_entry->src,
+                                   snoop_entry->lanppp,
+                                   snoop_entry->excludePort,
+                                   snoop_entry->enRtpSeqCheck,
+                                   info);
+            }
+            rcu_read_unlock();
+            dev_put(from_dev);
+        }
+        else
+        {
+            /* wan_info list is terminated by the first zero if_ops */
+            break;
+        }
+    }
+
+    /* if LAN-2-LAN snooping enabled make an entry                         *
+     * unless multicast DNAT is being used (txGrp and rxGrp are different) */
+    if (br_mcast_get_lan2lan_snooping(BR_MCAST_PROTO_IGMP, br) &&
+        (snoop_entry->rxGrp.s_addr == snoop_entry->txGrp.s_addr) ) 
+    {
+        rcu_read_lock();
+        prt = br_get_port(br, snoop_entry->port_no);
+        if ( NULL == prt )
+        {
+           printk("%s: port %d could not be found in br %s\n", 
+                  __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+           rcu_read_unlock();
+           dev_put(dev);
+           return;
+        }
+
+        if((snoop_entry->mode == MCPD_SNOOP_IN_CLEAR) ||
+            (snoop_entry->mode == MCPD_SNOOP_EX_CLEAR)) 
+        {
+            br_igmp_mc_fdb_remove(dev,
+                                  br, 
+                                  prt, 
+                                  &snoop_entry->txGrp, 
+                                  &snoop_entry->txGrp, 
+                                  &snoop_entry->rep, 
+                                  snoop_entry->mode, 
+                                  &snoop_entry->src,
+                                  0);
+        }
+        else
+        {
+            if (0 == (prt->dev->flags & IFF_UP)) {
+               printk("%s: port %d is not up %s\n", 
+                      __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+               rcu_read_unlock();
+               dev_put(dev);
+               return;
+            }
+
+            /* LAN-2-LAN entry: bridge device acts as the source device */
+            br_igmp_mc_fdb_add(dev,
+                               MCPD_IF_TYPE_BRIDGED,
+                               br, 
+                               prt, 
+                               &snoop_entry->txGrp, 
+                               &snoop_entry->txGrp, 
+                               &snoop_entry->rep,
+                               snoop_entry->repMac,
+                               snoop_entry->mode, 
+                               snoop_entry->tci,
+                               &snoop_entry->src,
+                               snoop_entry->lanppp,
+                               -1,
+                               0,
+                               0);
+        }
+        rcu_read_unlock();
+    }
+    dev_put(dev);
+
+    return;
+} /* mcpd_nl_process_igmp_snoop_entry*/
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+static void mcpd_nl_process_mld_snoop_entry(struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    struct net_device *dev = NULL;
+    struct net_bridge *br = NULL;
+    struct net_bridge_port *prt;
+    t_MCPD_MLD_SNOOP_ENTRY *snoop_entry;
+    unsigned char *ptr;
+    struct net_device *from_dev= NULL;
+    int idx = 0;
+    uint32_t info = 0;
+
+    ptr = NLMSG_DATA(nlh);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    snoop_entry = (t_MCPD_MLD_SNOOP_ENTRY *)ptr;
+
+    dev = dev_get_by_name(&init_net, snoop_entry->br_name);
+    if(NULL == dev)
+        return;
+
+    if ((0 == (dev->priv_flags & IFF_EBRIDGE)) ||
+        (0 == (dev->flags & IFF_UP)))
+    {
+        printk("%s: invalid bridge %s for snooping entry\n", 
+               __FUNCTION__, snoop_entry->br_name);
+        dev_put(dev);
+        return;
+    }
+    br = netdev_priv(dev);
+
+    for(idx = 0; idx < MCPD_MAX_IFS; idx++)
+    {
+        if(snoop_entry->wan_info[idx].if_ops)
+        {
+            from_dev = dev_get_by_name(&init_net, 
+                                       snoop_entry->wan_info[idx].if_name);
+            if(NULL == from_dev)
+               continue;
+
+            rcu_read_lock();
+            prt = br_get_port(br, snoop_entry->port_no);
+            if ( NULL == prt )
+            {
+               printk("%s: port %d could not be found in br %s\n", 
+                      __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+               rcu_read_unlock();
+               dev_put(from_dev);
+               dev_put(dev);
+               return;
+            }
+
+            br_mcast_get_rep_info(prt->dev, snoop_entry->repMac, &info);
+
+            if((snoop_entry->mode == MCPD_SNOOP_IN_CLEAR) ||
+                (snoop_entry->mode == MCPD_SNOOP_EX_CLEAR)) 
+            {
+                mcast_snooping_call_chain(SNOOPING_DEL_ENTRY,snoop_entry);
+                br_mld_mc_fdb_remove(from_dev,
+                                    br, 
+                                    prt, 
+                                    &snoop_entry->grp,
+                                    &snoop_entry->rep, 
+                                    snoop_entry->mode, 
+                                    &snoop_entry->src,
+                                    info);
+            }
+            else
+            {
+                if((snoop_entry->wan_info[idx].if_ops == MCPD_IF_TYPE_BRIDGED) && 
+                   (!mcpd_is_br_port(br, from_dev)))
+                {
+                   rcu_read_unlock();
+                   dev_put(from_dev);
+                   continue;
+                }
+
+                if (0 == (prt->dev->flags & IFF_UP)) {
+                   printk("%s: port %d is not up %s\n", 
+                          __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+                   rcu_read_unlock();
+                   dev_put(from_dev);
+                   dev_put(dev);
+                   return;
+                }
+    
+                if (0 == (from_dev->flags & IFF_UP)) {
+                   printk("%s: source device %s is not up\n", 
+                          __FUNCTION__, from_dev->name);
+                   rcu_read_unlock();
+                   dev_put(from_dev);
+                   continue;
+                }
+
+                br_mld_mc_fdb_add(from_dev,
+                                snoop_entry->wan_info[idx].if_ops,
+                                br, 
+                                prt, 
+                                &snoop_entry->grp, 
+                                &snoop_entry->rep,
+                                snoop_entry->repMac,
+                                snoop_entry->mode, 
+                                snoop_entry->tci, 
+                                &snoop_entry->src,
+                                snoop_entry->lanppp,
+                                info);
+                mcast_snooping_call_chain(SNOOPING_ADD_ENTRY,snoop_entry);
+            }
+            rcu_read_unlock();
+            dev_put(from_dev);
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    /* if LAN-2-LAN snooping is enabled, make an entry */
+    if(br_mcast_get_lan2lan_snooping(BR_MCAST_PROTO_MLD, br))
+    {
+        rcu_read_lock();
+        prt = br_get_port(br, snoop_entry->port_no);
+        if ( NULL == prt )
+        {
+           printk("%s: port %d could not be found in br %s\n", 
+                  __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+           rcu_read_unlock();
+           dev_put(dev);
+           return;
+        }
+
+        if((snoop_entry->mode == MCPD_SNOOP_IN_CLEAR) ||
+            (snoop_entry->mode == MCPD_SNOOP_EX_CLEAR)) 
+        {
+            mcast_snooping_call_chain(SNOOPING_DEL_ENTRY,snoop_entry);
+            br_mld_mc_fdb_remove(dev,
+                                 br, 
+                                 prt, 
+                                 &snoop_entry->grp, 
+                                 &snoop_entry->rep, 
+                                 snoop_entry->mode, 
+                                 &snoop_entry->src,
+                                 0);
+        }
+        else
+        {
+            if (0 == (prt->dev->flags & IFF_UP)) {
+               printk("%s: port %d is not up %s\n", 
+                      __FUNCTION__, snoop_entry->port_no, snoop_entry->br_name);
+               rcu_read_unlock();
+               dev_put(dev);
+               return;
+            }
+
+            br_mld_mc_fdb_add(dev,
+                              MCPD_IF_TYPE_BRIDGED,
+                              br, 
+                              prt, 
+                              &snoop_entry->grp, 
+                              &snoop_entry->rep,
+                              snoop_entry->repMac,
+                              snoop_entry->mode, 
+                              snoop_entry->tci, 
+                              &snoop_entry->src,
+                              snoop_entry->lanppp,
+                              0);
+            mcast_snooping_call_chain(SNOOPING_ADD_ENTRY,snoop_entry);
+        }
+        rcu_read_unlock();
+    }
+        dev_put(dev);
+
+    return;
+} /* mcpd_nl_process_mld_snoop_entry*/
+#endif
+
+static void mcpd_nl_process_uplink_indication(struct sk_buff const *skb)
+{
+    struct nlmsghdr const *nlh = (struct nlmsghdr *)skb->data;
+    unsigned char *ptr;
+    int uplinkIndicator;
+
+    ptr = NLMSG_DATA(nlh);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    uplinkIndicator = (int)*(int *)ptr;
+
+    br_mcast_set_uplink_exists(uplinkIndicator);
+
+    return;
+}
+
+static void mcpd_nl_process_igmp_purge_reporter (struct sk_buff *skb)
+{
+   struct nlmsghdr const *nlh = (struct nlmsghdr *)skb->data;
+   t_MCPD_IGMP_PURGE_REPORTER* purge_data = NULL;
+   unsigned char *ptr;
+   struct net_device *dev = NULL;
+   struct net_bridge *br = NULL;
+
+   ptr = NLMSG_DATA(nlh);
+   ptr += sizeof(t_MCPD_MSG_HDR);
+
+   purge_data = (t_MCPD_IGMP_PURGE_REPORTER*) ptr;
+   
+   dev = dev_get_by_name (&init_net, purge_data->br_name);
+   if (dev == NULL) {
+      return;
+   }
+
+   br = (struct net_bridge *)netdev_priv(dev);
+   if (br == NULL) {
+      dev_put(dev);
+      return;
+   }
+
+   br_igmp_wipe_reporter_for_port(br, &purge_data->grp, (u16)purge_data->port_no);
+   dev_put(dev);
+}
+
+#ifdef CONFIG_BR_MLD_SNOOP
+static void mcpd_nl_process_mld_purge_reporter (struct sk_buff *skb)
+{
+   struct nlmsghdr const *nlh = (struct nlmsghdr *)skb->data;
+   t_MCPD_MLD_PURGE_REPORTER* purge_data = NULL;
+   unsigned char *ptr;
+   struct net_device *dev = NULL;
+   struct net_bridge *br = NULL;
+
+   ptr = NLMSG_DATA(nlh);
+   ptr += sizeof(t_MCPD_MSG_HDR);
+
+   purge_data = (t_MCPD_MLD_PURGE_REPORTER*) ptr;
+
+   if (purge_data == NULL) {
+      return;
+   }
+
+   dev = dev_get_by_name (&init_net, purge_data->br_name);
+   if (dev == NULL) {
+      return;
+   }
+
+   br = netdev_priv(dev);
+   if (br == NULL) {
+      dev_put(dev);
+      return;
+   }
+
+   br_mld_wipe_reporter_for_port(br, &purge_data->grp, (u16)purge_data->port_no);
+   dev_put(dev);
+}
+#endif
+   
+static void mcpd_nl_process_mcast_pri_queue (struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    unsigned char *ptr;
+    int val;
+
+    ptr = NLMSG_DATA(nlh);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    val = (int)*(int *)ptr;
+
+    br_mcast_set_pri_queue(val);
+
+    return;
+}
+
+static void mcpd_nl_process_if_change(struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    struct net_device *ndev = NULL;
+    struct net_device *dev = NULL;
+    struct net_bridge *br = NULL;
+    unsigned char *ptr;
+    t_MCPD_IF_CHANGE *ifChgMsg;
+
+    ptr = NLMSG_DATA(nlh);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+    ifChgMsg = (t_MCPD_IF_CHANGE *)ptr;
+    ndev = dev_get_by_name(&init_net, &ifChgMsg->ifName[0]);
+    if(!ndev)
+        return;
+
+    if (ndev->priv_flags & IFF_EBRIDGE)
+    {
+        br = netdev_priv(ndev);
+        if ( NULL == br )
+        {
+            dev_put(ndev);
+            return;
+        }
+
+        /* update is for a bridge interface so flush all entries */
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+        if ( MCPD_PROTO_MLD != ifChgMsg->proto )
+        {
+            br_igmp_process_if_change(br, NULL);
+            br_igmp_process_device_removal(ndev);
+        }
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+        if ( MCPD_PROTO_IGMP != ifChgMsg->proto )
+        {
+            br_mld_process_if_change(br, NULL);
+        }
+#endif
+    }
+    else
+    {
+        rcu_read_lock();
+        for_each_netdev_rcu(&init_net, dev)
+        {
+            if(dev->priv_flags & IFF_EBRIDGE)
+            {
+                br = netdev_priv(dev);
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+                if ( MCPD_PROTO_MLD != ifChgMsg->proto )
+                {
+                    br_igmp_process_if_change(br, ndev);
+                }
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+                if ( MCPD_PROTO_IGMP != ifChgMsg->proto )
+                {
+                    br_mld_process_if_change(br, ndev);
+                }
+#endif
+            }
+        }
+        rcu_read_unlock();
+    }
+
+    dev_put(ndev);
+
+    return;
+} /* mcpd_nl_process_if_change */
+
+static void mcpd_nl_process_mc_fdb_cleanup(void)
+{
+    struct net_device *dev = NULL;
+    struct net_bridge *br = NULL;
+
+    rcu_read_lock();
+    for_each_netdev_rcu(&init_net, dev)
+    {
+        br = netdev_priv(dev);
+        if((dev->priv_flags & IFF_EBRIDGE) && (br))
+        {
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+            if(br->igmp_snooping) {
+                br_igmp_mc_fdb_cleanup(br);
+                br_igmp_process_device_removal(dev);
+            }
+#endif            
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+            if(br->mld_snooping) {
+                br_mld_mc_fdb_cleanup(br);
+            }
+#endif
+        }
+    }
+    rcu_read_unlock();
+    return;
+}
+
+void mcpd_nl_send_igmp_purge_entry(struct net_bridge_mc_fdb_entry *igmp_entry, 
+                                   struct net_bridge_mc_rep_entry *rep_entry)
+{
+    t_MCPD_IGMP_PURGE_ENTRY *purge_entry;
+    int buf_size = 0;
+    struct sk_buff *new_skb;
+    struct nlmsghdr *nlh;
+    char *ptr = NULL;
+
+    if(!igmp_entry)
+        return;
+
+    if(!rep_entry)
+        return;
+
+    if(!mcpd_pid)
+        return;
+
+    buf_size = sizeof(t_MCPD_IGMP_PURGE_ENTRY) + sizeof(t_MCPD_MSG_HDR);
+    new_skb = alloc_skb(NLMSG_SPACE(buf_size), GFP_ATOMIC);
+    if(!new_skb) 
+    {
+        return;
+    }
+
+    nlh = (struct nlmsghdr *)new_skb->data;
+    ptr = NLMSG_DATA(nlh);
+    nlh->nlmsg_len = NLMSG_SPACE(buf_size);
+    nlh->nlmsg_pid = 0;
+    nlh->nlmsg_flags = 0;
+    skb_put(new_skb, NLMSG_SPACE(buf_size));
+    ((t_MCPD_MSG_HDR *)ptr)->type = MCPD_MSG_IGMP_PURGE_ENTRY;
+    ((t_MCPD_MSG_HDR *)ptr)->len = sizeof(t_MCPD_IGMP_PURGE_ENTRY);
+    ptr += sizeof(t_MCPD_MSG_HDR);
+
+    purge_entry = (t_MCPD_IGMP_PURGE_ENTRY *)ptr;
+
+    purge_entry->grp.s_addr = igmp_entry->txGrp.s_addr;
+    purge_entry->src.s_addr = igmp_entry->src_entry.src.s_addr;
+    purge_entry->rep.s_addr = rep_entry->rep.s_addr;
+    purge_entry->pkt_info.br_name[0] = '\0';
+    memcpy(purge_entry->pkt_info.port_name, igmp_entry->dst->dev->name, IFNAMSIZ);
+    purge_entry->pkt_info.port_no = igmp_entry->dst->port_no;
+    purge_entry->pkt_info.if_index = 0;
+    purge_entry->pkt_info.data_len = 0;
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    purge_entry->pkt_info.tci = igmp_entry->lan_tci;
+#endif /* CONFIG_BCM_VLAN */
+
+    NETLINK_CB(new_skb).dst_group = 0;
+    NETLINK_CB(new_skb).pid = mcpd_pid;
+    mcpd_dump_buf((char *)nlh, 128);
+
+    netlink_unicast(nl_sk, new_skb, mcpd_pid, MSG_DONTWAIT);
+
+    return;
+} /* mcpd_nl_send_igmp_purge_entry */
+
+static inline void mcpd_nl_rcv_skb(struct sk_buff *skb)
+{
+    struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+    char *ptr  = NULL;
+    unsigned short msg_type;
+
+    if (skb->len >= NLMSG_SPACE(0)) 
+    {
+        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+            return;
+
+        ptr = NLMSG_DATA(nlh);
+
+        msg_type = *(unsigned short *)ptr;
+        switch(msg_type)
+        {
+            case MCPD_MSG_REGISTER:
+                mcpd_nl_process_registration(skb);
+                break;
+
+            case MCPD_MSG_UNREGISTER:
+                mcpd_nl_process_unregistration(skb);
+                break;
+
+            case MCPD_MSG_IGMP_SNOOP_ENTRY:
+                mcpd_nl_process_igmp_snoop_entry(skb);
+                break;
+                
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+            case MCPD_MSG_MLD_SNOOP_ENTRY:
+                mcpd_nl_process_mld_snoop_entry(skb);
+                break;
+#endif
+
+            case MCPD_MSG_IF_CHANGE:
+                mcpd_nl_process_if_change(skb);
+                break;
+
+            case MCPD_MSG_MC_FDB_CLEANUP:
+                mcpd_nl_process_mc_fdb_cleanup();
+                break;
+
+            case MCPD_MSG_SET_PRI_QUEUE:
+                mcpd_nl_process_mcast_pri_queue(skb);
+                break;
+
+            case MCPD_MSG_UPLINK_INDICATION:
+                mcpd_nl_process_uplink_indication(skb);
+                break;
+
+            case MCPD_MSG_IGMP_PURGE_REPORTER:
+                mcpd_nl_process_igmp_purge_reporter(skb);
+                break;
+                
+#ifdef CONFIG_BR_MLD_SNOOP
+            case MCPD_MSG_MLD_PURGE_REPORTER:
+                mcpd_nl_process_mld_purge_reporter(skb);
+                break;
+#endif
+            case MCPD_MSG_CONTROLS_ADMISSION:
+                mcpd_nl_process_set_admission_control(skb);
+                break;
+
+            case MCPD_MSG_ADMISSION_RESULT:
+                mcpd_nl_process_admission_result(skb);
+                break;
+                
+            default:
+                printk("MCPD Unknown usr->krnl msg type -%d- \n", msg_type);
+        }
+    }
+
+    return;
+} /* mcpd_nl_rcv_skb */
+
+#if 0
+static void mcpd_nl_data_ready(struct sock *sk, int len)
+{
+    struct sk_buff *skb = NULL;
+    unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
+    while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) 
+    {
+        mcpd_nl_rcv_skb(skb);
+        kfree_skb(skb);
+    }
+} /* mcpd_nl_data_ready */
+#endif
+
+static int __init mcpd_module_init(void)
+{
+    printk(KERN_INFO "Initializing MCPD Module\n");
+
+    nl_sk = netlink_kernel_create(&init_net, NETLINK_MCPD, 0, 
+                                mcpd_nl_rcv_skb, NULL, THIS_MODULE);
+
+    if(nl_sk == NULL) 
+    {
+        printk("MCPD: failure to create kernel netlink socket\n");
+        return -ENOMEM;
+    }
+
+    bcm_mcast_def_pri_queue_hook = br_mcast_set_skb_mark_queue;    
+
+    return 0;
+} /* mcpd_module_init */
+
+static void __exit mcpd_module_exit(void)
+{
+    sock_release(nl_sk->sk_socket); 
+    printk(KERN_INFO "Removed MCPD\n");
+} /* mcpd_module_exit */
+
+module_init(mcpd_module_init);
+module_exit(mcpd_module_exit);
+
+#endif /* defined(CONFIG_BCM_KF_MLD) || defined(CONFIG_BCM_KF_IGMP) */
diff --git a/net/bridge/br_notifier.c b/net/bridge/br_notifier.c
new file mode 100644
index 0000000000000000000000000000000000000000..78f56689a3aa96aa66e0ecbd6b181ab9272df64c
--- /dev/null
+++ b/net/bridge/br_notifier.c
@@ -0,0 +1,173 @@
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/times.h>
+#include <net/net_namespace.h>
+#include <asm/uaccess.h>
+#include "br_private.h"
+
+#if defined(CONFIG_BCM_KF_BRIDGE_PORT_ISOLATION)
+static RAW_NOTIFIER_HEAD(bridge_event_chain);
+
+void br_dev_notify_if_change(char *brName)
+{
+	raw_notifier_call_chain(&bridge_event_chain, BREVT_IF_CHANGED, brName);
+}
+
+/* NOTE -- IMPORTANT : Caller MUST take the RCU_READ_LOCK */
+void bridge_get_br_list(char *brList, const unsigned int listSize)
+{
+    struct net_device *dev = NULL;
+    unsigned int arrayIdx=0, brNameLen;
+
+    /* Must enable Kernel debugging features and CONFIG_DEBUG_LOCK_ALLOC to make following statement take effect */
+    BUG_ON(!rcu_read_lock_held()); 
+
+    for_each_netdev_rcu(&init_net, dev) {
+        if(dev->priv_flags & IFF_EBRIDGE)
+        {
+            if (arrayIdx > 0 && arrayIdx+1 <= listSize)
+            {
+                /* Bridge names separated by comma */
+                brList[arrayIdx++] = ',';
+            }
+
+            brNameLen = strlen(dev->name);
+            if (arrayIdx+brNameLen+1 > listSize)
+            {
+                printk("bridge_get_br_list() : insufficient size; skipping <%s> <%d>\n",
+                       dev->name,brNameLen);
+                brList[arrayIdx-1] = '\0'; /* Remove the trailing "," if present */
+                break;
+            }
+            strcpy(&brList[arrayIdx],dev->name);
+            arrayIdx += brNameLen; /* Intentionally not accounting for NULL towards the end */
+        }
+    }
+    brList[arrayIdx] = '\0'; /* Force Null terminated string */
+
+}
+/* NOTE -- IMPORTANT : Caller MUST take the RCU_READ_LOCK */
+struct net_device *bridge_get_next_port(char *brName, unsigned int *brPort)
+{
+    struct net_bridge_port *cp;
+    struct net_bridge_port *np;
+    struct net_bridge *br;
+    struct net_device *dev;
+    struct net_device *prtDev;
+
+    /* Must enable Kernel debugging features and CONFIG_DEBUG_LOCK_ALLOC to make following statement take effect */
+    BUG_ON(!rcu_read_lock_held());
+
+    dev = dev_get_by_name(&init_net, brName);
+    if(!dev)
+        return NULL;
+
+    br = netdev_priv(dev);
+    if (list_empty(&br->port_list))
+    {
+        dev_put(dev);
+        return NULL;
+    }
+
+    if (*brPort == 0xFFFFFFFF)
+    {
+        np = list_first_entry_rcu(&br->port_list, struct net_bridge_port, list);
+        *brPort = np->port_no;
+        prtDev = np->dev;
+    }
+    else
+    {
+        cp = br_get_port(br, *brPort);
+        if ( cp )
+        {
+           if (list_is_last(&cp->list, &br->port_list))
+           {
+               prtDev = NULL;
+           }
+           else
+           {
+              np = list_first_entry_rcu(&cp->list, struct net_bridge_port, list);
+              *brPort = np->port_no;
+              prtDev = np->dev;
+           }
+        }
+        else
+        {
+           prtDev = NULL;
+        }
+    }
+
+    dev_put(dev);
+    return prtDev;
+}
+EXPORT_SYMBOL(bridge_get_next_port);
+EXPORT_SYMBOL(bridge_get_br_list);
+
+
+int register_bridge_notifier(struct notifier_block *nb)
+{
+    return raw_notifier_chain_register(&bridge_event_chain, nb);
+}
+EXPORT_SYMBOL(register_bridge_notifier);
+
+int unregister_bridge_notifier(struct notifier_block *nb)
+{
+    return raw_notifier_chain_unregister(&bridge_event_chain, nb);
+}
+EXPORT_SYMBOL(unregister_bridge_notifier);
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+static RAW_NOTIFIER_HEAD(bridge_stp_event_chain);
+
+void br_stp_notify_state_port(const struct net_bridge_port *p)
+{
+	struct stpPortInfo portInfo;
+
+	if ( BR_NO_STP != p->br->stp_enabled )
+	{
+		memcpy(&portInfo.portName[0], p->dev->name, IFNAMSIZ);
+		portInfo.stpState = p->state;
+		raw_notifier_call_chain(&bridge_stp_event_chain, BREVT_STP_STATE_CHANGED, &portInfo);
+	}
+}
+
+void br_stp_notify_state_bridge(const struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	struct stpPortInfo portInfo;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(p, &br->port_list, list) {
+		if ( BR_NO_STP == br->stp_enabled )
+		{
+			portInfo.stpState = 0xFF; /* disable */
+		}
+		else
+		{
+			portInfo.stpState = p->state;
+		}
+		memcpy(&portInfo.portName[0], p->dev->name, IFNAMSIZ);
+		raw_notifier_call_chain(&bridge_stp_event_chain, BREVT_STP_STATE_CHANGED, &portInfo);
+	}
+	rcu_read_unlock();
+
+}
+
+int register_bridge_stp_notifier(struct notifier_block *nb)
+{
+    return raw_notifier_chain_register(&bridge_stp_event_chain, nb);
+}
+EXPORT_SYMBOL(register_bridge_stp_notifier);
+
+int unregister_bridge_stp_notifier(struct notifier_block *nb)
+{
+    return raw_notifier_chain_unregister(&bridge_stp_event_chain, nb);
+}
+EXPORT_SYMBOL(unregister_bridge_stp_notifier);
+#endif
+
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index a76b62135558a06308cc76adff65de3d704d76bf..7e84b0ec5f7e2c09f1a42ff6d00e1b36d148ef73 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -17,6 +17,16 @@
 
 #include "br_private.h"
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#include "br_mld.h"
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) && (defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD))
+#include "br_mcast.h"
+#endif
+
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
 
 struct notifier_block br_device_notifier = {
@@ -37,9 +47,22 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	bool changed_addr;
 	int err;
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP))  || (defined(CONFIG_BCM_KF_MLD) &&  defined(CONFIG_BR_MLD_SNOOP))
+	br_mcast_handle_netdevice_events(dev, event);
+#endif
+
 	/* register of bridge completed, add sysfs entries */
 	if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
+#if defined(CONFIG_BCM_KF_KERN_WARNING) 
+#if defined(CONFIG_SYSFS)
+		// This was causing a warning: statement with no effect [-Werror=unused-value]
+		// if SYSFS is not defined, br_sysfs_addbr(dev) resolves to (0), so don't call
+		// it if sysfs is not defined
+		br_sysfs_addbr(dev);
+#endif
+#else
 		br_sysfs_addbr(dev);
+#endif
 		return NOTIFY_DONE;
 	}
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 51e8826e78ca2f6595eba5ba89540ae32edbca9b..ec42983d9d8ce5c4cc477582e90ec5d498daec35 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -19,9 +19,35 @@
 #include <linux/u64_stats_sync.h>
 #include <net/route.h>
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include <linux/igmp.h>
+#include <linux/in.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+#include <linux/ktime.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+#include "br_fp.h"
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
+
 #define BR_HASH_BITS 8
 #define BR_HASH_SIZE (1 << BR_HASH_BITS)
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#define BR_IGMP_HASH_SIZE BR_HASH_SIZE
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+#define BR_MLD_HASH_SIZE BR_HASH_SIZE
+#endif
+
 #define BR_HOLD_TIME (1*HZ)
 
 #define BR_PORT_BITS	10
@@ -29,6 +55,10 @@
 
 #define BR_VERSION	"2.3"
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#define BR_MAX_FDB_ENTRIES 4096
+#endif
+
 /* Control of forwarding link local multicast */
 #define BR_GROUPFWD_DEFAULT	0
 /* Don't allow forwarding control protocols like STP and LLDP */
@@ -74,7 +104,28 @@ struct net_bridge_fdb_entry
 	mac_addr			addr;
 	unsigned char			is_local;
 	unsigned char			is_static;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	unsigned int			fdb_key;
+#endif
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+	unsigned int            vid;
+#endif
 };
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	struct br_blog_rule_id
+	{
+		u32                     id;
+		struct br_blog_rule_id *next_p;
+	};
+
+	struct br_flow_path
+	{
+		struct net_device       *rxDev_p;   ////*txDev_p;
+		struct br_blog_rule_id  *blogRuleId_p;
+		struct br_flow_path     *next_p;
+	};
+#endif
 
 struct net_bridge_port_group {
 	struct net_bridge_port		*port;
@@ -126,7 +177,12 @@ struct net_bridge_port
 	u32				path_cost;
 	u32				designated_cost;
 	unsigned long			designated_age;
-
+#if defined(CONFIG_BCM_KF_IP)
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+	int                  dirty;
+#endif
+	struct br_flow_path  *flowPath_p;
+#endif
 	struct timer_list		forward_delay_timer;
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
@@ -136,6 +192,17 @@ struct net_bridge_port
 	unsigned long 			flags;
 #define BR_HAIRPIN_MODE		0x00000001
 
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+	struct {		
+		/* The following is set if the port is singular (only connects to one other device), and
+		   if that other device is guarenteed to support STP.  When set, the port will not enter
+		   forwarding state until it has received at least one bpdu */
+		int is_dedicated_stp_port    : 1;
+		int is_bpdu_blocked          : 1;    // used for debugging.  When set, do not send bpdus
+		int unused1                  : 6; 
+	};
+#endif
+
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	u32				multicast_startup_queries_sent;
 	unsigned char			multicast_router;
@@ -152,6 +219,12 @@ struct net_bridge_port
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll			*np;
 #endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	int                     num_port_fdb_entries;
+	int                     max_port_fdb_entries;
+	int                     min_port_fdb_entries;
+#endif
 };
 
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
@@ -176,6 +249,20 @@ struct br_cpu_netstats {
 	struct u64_stats_sync	syncp;
 };
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+typedef enum {
+	BR_MC_LAN2LAN_STATUS_DEFAULT = 0,
+	BR_MC_LAN2LAN_STATUS_DISABLE = 0,
+	BR_MC_LAN2LAN_STATUS_ENABLE = 1
+} t_BR_MC_LAN2LAN_STATUS;
+
+#define MCPD_MAX_DELAYED_SKB_COUNT 64
+typedef struct {
+    struct sk_buff *skb;
+    unsigned long expiryTime;
+} t_DELAYED_SKB;
+#endif
+
 struct net_bridge
 {
 	spinlock_t			lock;
@@ -183,6 +270,14 @@ struct net_bridge
 	struct net_device		*dev;
 
 	struct br_cpu_netstats __percpu *stats;
+#if defined(CONFIG_BCM_KF_BRIDGE_COUNTERS)
+	u32 mac_entry_discard_counter;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	BlogStats_t bstats; /* stats when the blog promiscuous layer has consumed packets */
+	struct net_device_stats cstats; /* Cumulative Device stats (rx-bytes, tx-pkts, etc...) */
+#endif
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -194,6 +289,41 @@ struct net_bridge
 	unsigned long			flags;
 #define BR_SET_MAC_ADDR		0x00000001
 
+#if defined(CONFIG_BCM_KF_NETFILTER) 
+	int                     num_fdb_entries;
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+	int                     max_br_fdb_entries;
+	int                     used_br_fdb_entries;
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	struct timer_list 		igmp_timer;
+	int                     igmp_snooping;
+	spinlock_t              mcl_lock;
+	struct hlist_head       mc_hash[BR_IGMP_HASH_SIZE];
+	t_BR_MC_LAN2LAN_STATUS	igmp_lan2lan_mc_enable;
+	t_DELAYED_SKB           igmp_delayed_skb[MCPD_MAX_DELAYED_SKB_COUNT];
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP_RATE_LIMIT)
+  /* for igmp packet rate limit */
+	unsigned int            igmp_rate_limit;
+	unsigned int            igmp_rate_bucket;
+	ktime_t                 igmp_rate_last_packet;
+	unsigned int            igmp_rate_rem_time;
+#endif
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	struct timer_list 		mld_timer;
+	int                     mld_snooping;
+	spinlock_t              mld_mcl_lock;
+	struct hlist_head       mld_mc_hash[BR_MLD_HASH_SIZE];
+	t_BR_MC_LAN2LAN_STATUS	mld_lan2lan_mc_enable;
+	t_DELAYED_SKB           mld_delayed_skb[MCPD_MAX_DELAYED_SKB_COUNT];
+#endif
+
 	u16				group_fwd_mask;
 
 	/* STP */
@@ -253,8 +383,29 @@ struct net_bridge
 	struct timer_list		topology_change_timer;
 	struct timer_list		gc_timer;
 	struct kobject			*ifobj;
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE)
+	struct br_fp_data		fp_hooks;
+#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+    
 };
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+/* these definitions are also in igmprt/hmld.h */
+#define SNOOP_IN_ADD		1
+#define SNOOP_IN_CLEAR		2
+#define SNOOP_EX_ADD		3
+#define SNOOP_EX_CLEAR		4
+
+/* br_netlink_mcpd.c */
+int is_multicast_switching_mode_host_control(void);
+#endif
+
+
 struct br_input_skb_cb {
 	struct net_device *brdev;
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -356,13 +507,39 @@ extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 extern int br_fdb_insert(struct net_bridge *br,
 			 struct net_bridge_port *source,
 			 const unsigned char *addr);
+#if defined(CONFIG_BCM_KF_VLAN_AGGREGATION) && defined(CONFIG_BCM_VLAN_AGGREGATION)
+void br_fdb_update(struct net_bridge *br, 
+			  struct net_bridge_port *source,
+			  const unsigned char *addr, 
+			  const unsigned int vid);
+#else
 extern void br_fdb_update(struct net_bridge *br,
 			  struct net_bridge_port *source,
 			  const unsigned char *addr);
+#endif
 extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
 extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STATIC_FDB)
+extern int br_fdb_adddel_static(struct net_bridge *br, 
+                                struct net_bridge_port *source,
+                                const unsigned char *addr, int bInsert);
+#endif
+
+#if defined(CONFIG_BCM_KF_BRIDGE_MAC_FDB_LIMIT) && defined(CONFIG_BCM_BRIDGE_MAC_FDB_LIMIT)
+int br_get_fdb_limit(struct net_bridge *br, 
+						const struct net_bridge_port *p,
+						int is_min);
+
+
+int br_set_fdb_limit(struct net_bridge *br, 
+						struct net_bridge_port *p,
+						int lmt_type,
+						int is_min,
+						int fdb_limit);
+#endif
+
 /* br_forward.c */
 extern void br_deliver(const struct net_bridge_port *to,
 		struct sk_buff *skb);
@@ -561,4 +738,25 @@ extern void br_sysfs_delbr(struct net_device *dev);
 #define br_sysfs_delbr(dev)	do { } while(0)
 #endif /* CONFIG_SYSFS */
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+/* br_notifier.c */
+extern void br_stp_notify_state_port(const struct net_bridge_port *p);
+extern void br_stp_notify_state_bridge(const struct net_bridge *br);
+#endif
+#if defined(CONFIG_BCM_KF_BRIDGE_PORT_ISOLATION)
+extern void br_dev_notify_if_change(char *brName);
+#endif
+
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+/* br_igmp.c */
+void br_igmp_snooping_br_init( struct net_bridge *br );
+void br_igmp_snooping_br_fini( struct net_bridge *br );
+#endif
+
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+/* br_mld.c */
+void br_mld_snooping_br_init( struct net_bridge *br );
+void br_mld_snooping_br_fini( struct net_bridge *br );
+#endif
+
 #endif
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d96ba769eaaf4b62ce76b26c7e5e4f814b0..d9423c7bdcde42777487669733a681fd5771cb76 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -16,6 +16,10 @@
 #include "br_private.h"
 #include "br_private_stp.h"
 
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+#include <linux/bcm_log.h>
+#endif
+
 /* since time values in bpdu are in jiffies and then scaled (1/256)
  * before sending, make sure that is at least one STP tick.
  */
@@ -29,11 +33,17 @@ static const char *const br_port_state_names[] = {
 	[BR_STATE_BLOCKING] = "blocking",
 };
 
+
+
 void br_log_state(const struct net_bridge_port *p)
 {
 	br_info(p->br, "port %u(%s) entered %s state\n",
 		(unsigned) p->port_no, p->dev->name,
 		br_port_state_names[p->state]);
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+	br_stp_notify_state_port(p);
+#endif   
 }
 
 /* called under bridge lock */
@@ -368,6 +378,15 @@ static void br_make_blocking(struct net_bridge_port *p)
 	}
 }
 
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+void br_loopback_detected(struct net_bridge_port *p) {
+    if  (p->port_no == p->br->root_port) {
+        BCM_LOG_ERROR(BCM_LOG_ID_LOG, "Loopback detected on root port %s -- making blocking\n", p->dev->name);
+        br_make_blocking(p);
+    }
+}
+#endif
+
 /* called under bridge lock */
 static void br_make_forwarding(struct net_bridge_port *p)
 {
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index e16aade51ae0462c2f0a7cdb72ec49346c56128e..25891555a98d4868825ee1a1dc8be461c71e8bfa 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -78,6 +78,15 @@ void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
 	if (p->br->stp_enabled != BR_KERNEL_STP)
 		return;
 
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+	// for debugging purposes only:
+	if (p->is_bpdu_blocked) {
+		printk("supressing transmission of config bpdu on port (%s)\n", p->dev->name);        
+		return;
+	}
+#endif
+    
+
 	buf[0] = 0;
 	buf[1] = 0;
 	buf[2] = 0;
@@ -122,6 +131,14 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 
 	if (p->br->stp_enabled != BR_KERNEL_STP)
 		return;
+    
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+	// for debugging purposes only:
+	if (p->is_bpdu_blocked) {
+		printk("supressing transmission of tcn bpdu on port (%s)\n", p->dev->name);		  
+		return;
+	}
+#endif
 
 	buf[0] = 0;
 	buf[1] = 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index f494496373d60c1faf368a05605073a1c71ad04f..8893aedd1cfa62dc9cf6600fcbaf38bb677b227f 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -19,6 +19,9 @@
 #include "br_private.h"
 #include "br_private_stp.h"
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+#include "br_mcast.h"
+#endif
 
 /* Port id is composed of priority and port number.
  * NB: some bits of priority are dropped to
@@ -114,6 +117,9 @@ void br_stp_disable_port(struct net_bridge_port *p)
 	br_fdb_delete_by_port(br, p, 0);
 	br_multicast_disable_port(p);
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+	br_mcast_handle_netdevice_events(p->dev, NETDEV_CHANGE);
+#endif
 	br_configuration_update(br);
 
 	br_port_state_selection(br);
@@ -141,6 +147,11 @@ static void br_stp_start(struct net_bridge *br)
 		br_port_state_selection(br);
 		spin_unlock_bh(&br->lock);
 	}
+
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+	/* STP enabled, send notification for all ports */
+	br_stp_notify_state_bridge(br);
+#endif   
 }
 
 static void br_stp_stop(struct net_bridge *br)
@@ -160,6 +171,10 @@ static void br_stp_stop(struct net_bridge *br)
 	}
 
 	br->stp_enabled = BR_NO_STP;
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+	/* STP disabled, send notification for all ports */
+	br_stp_notify_state_bridge(br);
+#endif
 }
 
 void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
@@ -220,6 +235,16 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
 	if (br->flags & BR_SET_MAC_ADDR)
 		return false;
 
+#if defined(CONFIG_BCM_KF_BRIDGE_STP)
+	/* if the current bridge address is being used by 
+	   a member device then keep it */
+	list_for_each_entry(p, &br->port_list, list) {
+		if (0 == memcmp(br->bridge_id.addr, p->dev->dev_addr, ETH_ALEN)) {
+			return false;
+		}
+	}
+#endif
+
 	list_for_each_entry(p, &br->port_list, list) {
 		if (addr == br_mac_zero ||
 		    memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0f99751d50137cfe5455c3acbc8387a744..2b6a5db7305d315ade10265a5340fea4589f79e5 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -67,6 +67,22 @@ static void br_message_age_timer_expired(unsigned long arg)
 	spin_lock(&br->lock);
 	if (p->state == BR_STATE_DISABLED)
 		goto unlock;
+#if defined(CONFIG_BCM_KF_STP_LOOP)
+	if (p->is_dedicated_stp_port)
+	{
+		/* we are no longer receiving bpdus from upstream device.  Could be due to interference
+		  or upstream device going down.  Regardless, we do not want to become the DP.
+		  If the device is lost, the connection is dead anyways -- no one to receive bpdus
+		  If this is interference / starvation, we do not want to become DP and send traffic
+			(potential loop)
+		  If a new root has been added 'downstream', then the tcn bpdu's will take care of
+			making this port the DP (and the DP has no timer, so we're good) */
+		printk("\n-------\n  [%s.%d] -- message age time expired for %s -- IGNORING!\n\n", __func__, __LINE__,
+				p->dev->name);
+		// note: this does NOT restart the timer
+		goto unlock;
+	}
+#endif
 	was_root = br_is_root_bridge(br);
 
 	br_become_designated_port(p);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e439848342f2d5c3825b33b6c8bdc6ab47..68edac06b48e3f09308d0384342ee4eabec9384b 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -20,6 +20,9 @@
 #include <linux/times.h>
 
 #include "br_private.h"
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+#include "br_igmp.h"
+#endif
 
 #define to_dev(obj)	container_of(obj, struct device, kobj)
 #define to_bridge(cd)	((struct net_bridge *)netdev_priv(to_net_dev(cd)))
@@ -64,6 +67,71 @@ static ssize_t store_forward_delay(struct device *d,
 static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
 		   show_forward_delay, store_forward_delay);
 
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+static ssize_t show_igmp_snooping(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+        if (br->igmp_snooping==SNOOPING_DISABLED_MODE) 
+		return sprintf(buf, "Disabled(%d)\n",br->igmp_snooping);
+	else if (br->igmp_snooping==SNOOPING_ENABLED_MODE) 
+		return sprintf(buf, "Enabled(%d)\n",br->igmp_snooping);
+	else 
+		return sprintf(buf, "Blocking(%d)\n",br->igmp_snooping);
+}
+static int set_igmp_snooping(struct net_bridge *br, unsigned long val)
+{
+	br->igmp_snooping=(val>SNOOPING_BLOCKING_MODE)?SNOOPING_DISABLED_MODE:(int)val;
+	return 0;
+}
+
+static ssize_t store_igmp_snooping(struct device *d,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_igmp_snooping);
+}
+
+static DEVICE_ATTR(igmp_snooping, S_IRUGO | S_IWUSR,
+		   show_igmp_snooping, store_igmp_snooping);
+
+
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+// Added to sysfs to show and configure MLD parameters.
+static ssize_t show_mld_snooping(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+        if (br->mld_snooping==SNOOPING_DISABLED_MODE) 
+		return sprintf(buf, "Disabled(%d)\n",br->mld_snooping);
+	else if (br->mld_snooping==SNOOPING_ENABLED_MODE) 
+		return sprintf(buf, "Enabled(%d)\n",br->mld_snooping);
+	else 
+		return sprintf(buf, "Blocking(%d)\n",br->mld_snooping);
+}
+
+static int set_mld_snooping(struct net_bridge *br, unsigned long val)
+{
+	br->mld_snooping=(val>SNOOPING_BLOCKING_MODE)?SNOOPING_DISABLED_MODE:(int)val;
+	if(br->mld_snooping==SNOOPING_DISABLED_MODE) 
+		br_mcast_wl_flush(br) ;
+	return 0;
+}
+
+static ssize_t store_mld_snooping(struct device *d,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_mld_snooping);
+}
+
+static DEVICE_ATTR(mld_snooping, S_IRUGO | S_IWUSR,
+		   show_mld_snooping, store_mld_snooping);
+
+#endif
+
+
 static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
 			       char *buf)
 {
@@ -679,6 +747,31 @@ static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
 		   show_nf_call_arptables, store_nf_call_arptables);
 #endif
 
+#if defined(CONFIG_BCM_KF_BRIDGE_COUNTERS)
+static ssize_t show_mac_entry_discard_counter(struct device *d,
+                struct device_attribute *attr, char *buf)
+{
+    struct net_bridge *br = to_bridge(d);
+    return sprintf(buf, "%u\n", br->mac_entry_discard_counter);
+}
+
+static int set_mac_entry_discard_counter(struct net_bridge *br, unsigned long val)
+{
+    br->mac_entry_discard_counter = val;
+    return 0;
+}
+
+static ssize_t store_mac_entry_discard_counter(struct device *d,
+                 struct device_attribute *attr,
+                 const char *buf, size_t len)
+{
+    return store_bridge_parm(d, buf, len, set_mac_entry_discard_counter);
+}
+static DEVICE_ATTR(mac_entry_discard_counter, S_IRUGO | S_IWUSR, show_mac_entry_discard_counter,
+        store_mac_entry_discard_counter);
+#endif
+
+
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
 	&dev_attr_hello_time.attr,
@@ -698,6 +791,15 @@ static struct attribute *bridge_attrs[] = {
 	&dev_attr_topology_change_timer.attr,
 	&dev_attr_gc_timer.attr,
 	&dev_attr_group_addr.attr,
+#if defined(CONFIG_BCM_KF_BRIDGE_COUNTERS)
+    &dev_attr_mac_entry_discard_counter.attr,
+#endif
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)
+	&dev_attr_igmp_snooping.attr,
+#endif
+#if defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP)
+	&dev_attr_mld_snooping.attr,
+#endif
 	&dev_attr_flush.attr,
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	&dev_attr_multicast_router.attr,
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index a9aff9c7d0273b2a41ef9d122ac38e769956a640..f23faaab64757801f80e1dba1c6c149e14f46a06 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -43,6 +43,7 @@ config BRIDGE_EBT_T_NAT
 	  See the man page for ebtables(8).
 
 	  To compile it as a module, choose M here.  If unsure, say N.
+
 #
 # matches
 #
@@ -132,6 +133,17 @@ config BRIDGE_EBT_VLAN
 	  802.1Q vlan fields.
 
 	  To compile it as a module, choose M here.  If unsure, say N.
+
+config BRIDGE_EBT_TIME
+	tristate "ebt: time filter support"
+	depends on BRIDGE_NF_EBTABLES
+	depends on BCM_KF_NETFILTER
+	help
+	  This option adds the system time match, which allows filtering
+	  frames based on the system time at which they arrive.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 #
 # targets
 #
@@ -177,6 +189,37 @@ config BRIDGE_EBT_SNAT
 	  source address of frames.
 
 	  To compile it as a module, choose M here.  If unsure, say N.
+
+config BRIDGE_EBT_FTOS_T
+	tristate "ebt: ftos target support"
+	depends on BRIDGE_NF_EBTABLES
+	depends on BCM_KF_NETFILTER
+	help
+	  This option adds the ftos target, which allows altering the full TOS byte
+	  in IP frames.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config BRIDGE_EBT_SKIPLOG_T
+	tristate "ebt: skip target support"
+	depends on BRIDGE_NF_EBTABLES
+	depends on BCM_KF_NETFILTER
+	help
+	  This option adds the skiplog target, which can prevent a packet
+	  from being accelerated.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config BRIDGE_EBT_WMM_MARK
+	tristate "ebt: Wireless Wi-Fi WMM marking support"
+	depends on BRIDGE_NF_EBTABLES
+	depends on BCM_KF_NETFILTER
+	help
+	  This option adds the wmm-mark target, which allows marking
+	  frames with Wi-Fi WMM priorities.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 #
 # watchers
 #
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 0718699540b023fd6d95be217b79e4bfe5029821..ffb41e64bd45d349c00befeea28f7ba1889341ca 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o
 obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
 obj-$(CONFIG_BRIDGE_EBT_STP) += ebt_stp.o
 obj-$(CONFIG_BRIDGE_EBT_VLAN) += ebt_vlan.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_BRIDGE_EBT_TIME) += ebt_time.o
+endif # BCM_KF
 
 # targets
 obj-$(CONFIG_BRIDGE_EBT_ARPREPLY) += ebt_arpreply.o
@@ -27,6 +30,11 @@ obj-$(CONFIG_BRIDGE_EBT_MARK_T) += ebt_mark.o
 obj-$(CONFIG_BRIDGE_EBT_DNAT) += ebt_dnat.o
 obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
 obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_BRIDGE_EBT_FTOS_T) += ebt_ftos.o
+obj-$(CONFIG_BRIDGE_EBT_SKIPLOG_T) += ebt_skiplog.o
+obj-$(CONFIG_BRIDGE_EBT_WMM_MARK) += ebt_wmm_mark.o 
+endif # BCM_KF # CONFIG_BCM_KF_NETFILTER
 
 # watchers
 obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
diff --git a/net/bridge/netfilter/ebt_ftos.c b/net/bridge/netfilter/ebt_ftos.c
new file mode 100644
index 0000000000000000000000000000000000000000..3e14e88a02fe108fddac865834f8eb085e8a1260
--- /dev/null
+++ b/net/bridge/netfilter/ebt_ftos.c
@@ -0,0 +1,211 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:GPL/GPL:standard
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+ *  ebt_ftos
+ *
+ *	Authors:
+ *	 Song Wang <songw@broadcom.com>
+ *
+ *  Feb, 2004
+ *
+ */
+
+// The ftos target can be used in any chain
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <linux/if_vlan.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_ftos_t.h>
+
+#include <net/dsfield.h>
+
+#define PPPOE_HLEN   6
+#define PPP_TYPE_IPV4   0x0021  /* IPv4 in PPP */
+#define PPP_TYPE_IPV6   0x0057  /* IPv6 in PPP */
+
+static unsigned int ebt_ftos_tg(struct sk_buff *skb, const struct xt_action_param *par)   
+{
+	//struct ebt_ftos_t_info *ftosinfo = (struct ebt_ftos_t_info *)data;
+	const  struct ebt_ftos_t_info *ftosinfo = par->targinfo;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ipv6h = NULL;
+        /* Need to recalculate IP header checksum after altering TOS byte */
+	u_int16_t diffs[2];
+
+	/* if VLAN frame, we need to point to correct network header */
+   if (skb->protocol == __constant_htons(ETH_P_IP))
+      iph = (struct iphdr *)(skb->network_header);
+   else if ((skb)->protocol == __constant_htons(ETH_P_IPV6))
+      ipv6h = (struct ipv6hdr *)(skb->network_header);
+   else if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+      if (*(unsigned short *)(skb->network_header + VLAN_HLEN - 2) == __constant_htons(ETH_P_IP))
+         iph = (struct iphdr *)(skb->network_header + VLAN_HLEN);
+      else if (*(unsigned short *)(skb->network_header + VLAN_HLEN - 2) == __constant_htons(ETH_P_IPV6))
+         ipv6h = (struct ipv6hdr *)(skb->network_header + VLAN_HLEN);
+   }
+   else if (skb->protocol == __constant_htons(ETH_P_PPP_SES)) {
+      if (*(unsigned short *)(skb->network_header + PPPOE_HLEN) == PPP_TYPE_IPV4)
+         iph = (struct iphdr *)(skb->network_header + PPPOE_HLEN + 2);
+      else if (*(unsigned short *)(skb->network_header + PPPOE_HLEN) == PPP_TYPE_IPV6)
+         ipv6h = (struct ipv6hdr *)(skb->network_header + PPPOE_HLEN + 2);
+   }
+   /* if not IP header, do nothing. */
+   if ((iph == NULL) && (ipv6h == NULL))
+	   return ftosinfo->target;
+
+   if ( iph != NULL ) //IPv4
+   {
+	if ((ftosinfo->ftos_set & FTOS_SETFTOS) && (iph->tos != ftosinfo->ftos)) {
+                //printk("ebt_target_ftos:FTOS_SETFTOS .....\n");
+		diffs[0] = htons(iph->tos) ^ 0xFFFF;
+		iph->tos = ftosinfo->ftos;
+		diffs[1] = htons(iph->tos);
+		iph->check = csum_fold(csum_partial((char *)diffs,
+		                                    sizeof(diffs),
+		                                    iph->check^0xFFFF));		
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// member below is removed
+//		(*pskb)->nfcache |= NFC_ALTERED;
+	} else if (ftosinfo->ftos_set & FTOS_WMMFTOS) {
+	    //printk("ebt_target_ftos:FTOS_WMMFTOS .....0x%08x\n", (*pskb)->mark & 0xf);
+      diffs[0] = htons(iph->tos) ^ 0xFFFF;
+      iph->tos |= ((skb->mark >> PRIO_LOC_NFMARK) & PRIO_LOC_NFMASK) << DSCP_MASK_SHIFT;
+      diffs[1] = htons(iph->tos);
+      iph->check = csum_fold(csum_partial((char *)diffs,
+		                                    sizeof(diffs),
+		                                    iph->check^0xFFFF));
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// member below is removed
+//        (*pskb)->nfcache |= NFC_ALTERED;
+	} else if ((ftosinfo->ftos_set & FTOS_8021QFTOS) && skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	    
+      struct vlan_hdr *frame;	
+      unsigned char prio = 0;
+      unsigned short TCI;
+
+      frame = (struct vlan_hdr *)(skb->network_header);
+      TCI = ntohs(frame->h_vlan_TCI);
+      prio = (unsigned char)((TCI >> 13) & 0x7);
+        //printk("ebt_target_ftos:FTOS_8021QFTOS ..... 0x%08x\n", prio);
+      diffs[0] = htons(iph->tos) ^ 0xFFFF;
+      iph->tos |= (prio & 0xf) << DSCP_MASK_SHIFT;
+      diffs[1] = htons(iph->tos);
+      iph->check = csum_fold(csum_partial((char *)diffs,
+		                                    sizeof(diffs),
+		                                    iph->check^0xFFFF)); 
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// member below is removed
+//        (*pskb)->nfcache |= NFC_ALTERED;
+	}
+   }
+   else //IPv6
+   {
+      __u8 tos;
+
+      /* TOS consists of priority field and first 4 bits of flow_lbl */
+      tos = ipv6_get_dsfield((struct ipv6hdr *)(ipv6h));
+
+      if ((ftosinfo->ftos_set & FTOS_SETFTOS) && (tos != ftosinfo->ftos))
+      {
+         //printk("ebt_target_ftos:FTOS_SETFTOS .....\n");
+         ipv6_change_dsfield((struct ipv6hdr *)(ipv6h), 0, ftosinfo->ftos);
+      } 
+      else if (ftosinfo->ftos_set & FTOS_WMMFTOS) 
+      {
+         //printk("ebt_target_ftos:FTOS_WMMFTOS .....0x%08x\n", 
+	     tos |= ((skb->mark >> PRIO_LOC_NFMARK) & PRIO_LOC_NFMASK) << DSCP_MASK_SHIFT;
+         ipv6_change_dsfield((struct ipv6hdr *)(ipv6h), 0, tos);
+      } 
+      else if ((ftosinfo->ftos_set & FTOS_8021QFTOS) && 
+               skb->protocol == __constant_htons(ETH_P_8021Q)) 
+      {
+         struct vlan_hdr *frame;	
+         unsigned char prio = 0;
+         unsigned short TCI;
+
+         frame = (struct vlan_hdr *)(skb->network_header);
+         TCI = ntohs(frame->h_vlan_TCI);
+         prio = (unsigned char)((TCI >> 13) & 0x7);
+         //printk("ebt_target_ftos:FTOS_8021QFTOS ..... 0x%08x\n", prio);
+         tos |= (prio & 0xf) << DSCP_MASK_SHIFT;
+         ipv6_change_dsfield((struct ipv6hdr *)(ipv6h), 0, tos);
+      }
+   }
+
+	return ftosinfo->target;
+}
+
+static int ebt_ftos_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct ebt_ftos_t_info *info = par->targinfo;
+/*
+	if (datalen != sizeof(struct ebt_ftos_t_info))
+		return -EINVAL;
+*/
+	if (BASE_CHAIN && info->target == EBT_RETURN)
+		return -EINVAL;
+	
+	//CLEAR_BASE_CHAIN_BIT;
+	
+	if (INVALID_TARGET)
+		return -EINVAL;
+	
+	return 0;
+}
+
+static struct xt_target ebt_ftos_tg_reg = {
+	.name       = EBT_FTOS_TARGET,
+	.revision   = 0,
+	.family     = NFPROTO_BRIDGE,
+	.target     = ebt_ftos_tg,
+	.checkentry = ebt_ftos_tg_check,
+	.targetsize = XT_ALIGN(sizeof(struct ebt_ftos_t_info)),
+	.me         = THIS_MODULE,
+};
+
+static int __init ebt_ftos_init(void)
+{
+	int ret;
+	ret = xt_register_target(&ebt_ftos_tg_reg);
+	if(ret == 0)
+		printk(KERN_INFO "ebt_ftos registered\n");
+
+	return ret;
+}
+
+static void __exit ebt_ftos_fini(void)
+{
+	xt_unregister_target(&ebt_ftos_tg_reg);
+}
+
+module_init(ebt_ftos_init);
+module_exit(ebt_ftos_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Song Wang, songw@broadcom.com");
+MODULE_DESCRIPTION("Target to overwrite the full TOS byte in IP header");
+#endif
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 23bca62d58d290f5085094b3b5d8c74b21a7c8fc..4a69814a49014b71db445c8347daa6f6e8a93b08 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -39,6 +39,11 @@ ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	if (info->bitmask & EBT_IP_TOS &&
 	   FWINV(info->tos != ih->tos, EBT_IP_TOS))
 		return false;
+#if defined(CONFIG_BCM_KF_NETFILTER) || !defined(CONFIG_BCM_IN_KERNEL)
+	if (info->bitmask & EBT_IP_DSCP &&
+	   FWINV(info->dscp != (ih->tos & 0xFC), EBT_IP_DSCP))
+		return false;
+#endif      
 	if (info->bitmask & EBT_IP_SOURCE &&
 	   FWINV((ih->saddr & info->smsk) !=
 	   info->saddr, EBT_IP_SOURCE))
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 66697cbd0a8b82c9e132aebda93b96ee09e48fe8..5cb6cbdfcfaabd627a53d40b1d1215afbe76e27b 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -30,8 +30,76 @@ ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 		skb->mark |= info->mark;
 	else if (action == MARK_AND_VALUE)
 		skb->mark &= info->mark;
+#if !defined(CONFIG_BCM_KF_NETFILTER)
 	else
 		skb->mark ^= info->mark;
+#else
+	else if (action == MARK_XOR_VALUE)
+		skb->mark ^= info->mark;
+	else
+   {
+		skb->vtag = (unsigned short)(info->mark);
+
+      /* if the 8021p priority field (bits 0-3) of skb->vtag is not zero, we need
+       * to do p-bit marking.
+       */
+      if (skb->vtag & 0xf)
+      {
+         unsigned short TCI = 0;
+
+         /* if this is a vlan frame, we want to re-mark its p-bit with the 8021p
+          * priority in skb->vtag.
+          * if this is not a vlan frame, we want to add a 8021p tag to it, with
+          * vid=0 and p-bit=the 8021p priority in skb->vtag.
+          */
+	      if ((skb->protocol == __constant_htons(ETH_P_8021Q)))
+	      {
+   	      struct vlan_hdr *frame = (struct vlan_hdr *)(skb->network_header);
+
+		      TCI = ntohs(frame->h_vlan_TCI);
+
+            /* Since the 8021p priority value in vtag had been incremented by 1,
+             * we need to minus 1 from it to get the exact value.
+             */
+            TCI = (TCI & 0x1fff) | (((skb->vtag & 0xf) - 1) << 13);
+
+		      frame->h_vlan_TCI = htons(TCI);
+   	   }
+         else
+         {
+            if ((skb->mac_header - skb->head) < VLAN_HLEN)
+            {
+               printk("ebt_mark_tg: No headroom for VLAN tag. Marking is not done.\n");
+            }
+            else
+            {
+   	         struct vlan_ethhdr *ethHeader;
+
+               skb->protocol = __constant_htons(ETH_P_8021Q);
+               skb->mac_header -= VLAN_HLEN;
+               skb->network_header -= VLAN_HLEN;
+               skb->data -= VLAN_HLEN;
+	            skb->len  += VLAN_HLEN;
+
+               /* Move the mac addresses to the beginning of the new header. */
+               memmove(skb->mac_header, skb->mac_header + VLAN_HLEN, 2 * ETH_ALEN);
+
+               ethHeader = (struct vlan_ethhdr *)(skb->mac_header);
+
+               ethHeader->h_vlan_proto = __constant_htons(ETH_P_8021Q);
+
+               /* Since the 8021p priority value in vtag had been incremented by 1,
+                * we need to minus 1 from it to get the exact value.
+                */
+               TCI = (TCI & 0x1fff) | (((skb->vtag & 0xf) - 1) << 13);
+
+               ethHeader->h_vlan_TCI = htons(TCI);
+            }
+         }
+         skb->vtag = 0;
+      }
+   }
+#endif // CONFIG_BCM_KF_NETFILTER
 
 	return info->target | ~EBT_VERDICT_BITS;
 }
@@ -48,7 +116,12 @@ static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
 		return -EINVAL;
 	tmp = info->target & ~EBT_VERDICT_BITS;
 	if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	    tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE &&
+            tmp != VTAG_SET_VALUE)
+#else
 	    tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
+#endif
 		return -EINVAL;
 	return 0;
 }
diff --git a/net/bridge/netfilter/ebt_skiplog.c b/net/bridge/netfilter/ebt_skiplog.c
new file mode 100644
index 0000000000000000000000000000000000000000..926c0479807c0ce7a6aff6054f320dd6a8335d89
--- /dev/null
+++ b/net/bridge/netfilter/ebt_skiplog.c
@@ -0,0 +1,74 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ *  ebt_skiplog
+ */
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+static unsigned int
+ebt_skiplog_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
+	return EBT_CONTINUE;
+}
+
+static struct xt_target ebt_skiplog_tg_reg __read_mostly = {
+	.name		= "SKIPLOG",
+	.revision	= 0,
+	.family		= NFPROTO_BRIDGE,
+	.target		= ebt_skiplog_tg,
+	.me		= THIS_MODULE,
+};
+
+static int __init ebt_skiplog_init(void)
+{
+	return xt_register_target(&ebt_skiplog_tg_reg);
+}
+
+static void __exit ebt_skiplog_fini(void)
+{
+	xt_unregister_target(&ebt_skiplog_tg_reg);
+}
+
+module_init(ebt_skiplog_init);
+module_exit(ebt_skiplog_fini);
+MODULE_DESCRIPTION("Ebtables: SKIPLOG target");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+#endif
diff --git a/net/bridge/netfilter/ebt_time.c b/net/bridge/netfilter/ebt_time.c
new file mode 100644
index 0000000000000000000000000000000000000000..d77ad5fd415e775dc2c4ae984b4c00d2be19966b
--- /dev/null
+++ b/net/bridge/netfilter/ebt_time.c
@@ -0,0 +1,228 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:GPL/GPL:standard
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+  Description: EBTables time match extension kernelspace module.
+  Authors:  Song Wang <songw@broadcom.com>, ported from netfilter/iptables
+            The following is the original disclaimer.
+
+  This is a module which is used for time matching
+  It is using some modified code from dietlibc (localtime() function)
+  that you can find at http://www.fefe.de/dietlibc/
+  This file is distributed under the terms of the GNU General Public
+  License (GPL). Copies of the GPL can be obtained from: ftp://prep.ai.mit.edu/pub/gnu/GPL
+  2001-05-04 Fabrice MARIE <fabrice@netfilter.org> : initial development.
+  2001-21-05 Fabrice MARIE <fabrice@netfilter.org> : bug fix in the match code,
+     thanks to "Zeng Yu" <zengy@capitel.com.cn> for bug report.
+  2001-26-09 Fabrice MARIE <fabrice@netfilter.org> : force the match to be in LOCAL_IN or PRE_ROUTING only.
+  2001-30-11 Fabrice : added the possibility to use the match in FORWARD/OUTPUT with a little hack,
+     added Nguyen Dang Phuoc Dong <dongnd@tlnet.com.vn> patch to support timezones.
+*/
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_time.h>
+#include <linux/time.h>
+
+//static unsigned char debug;
+//MODULE_PARM(debug, "0-1b");
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debug=1 is turn on debug messages");
+MODULE_AUTHOR("Song Wang <songw@broadcom.com>");
+MODULE_DESCRIPTION("Match timestamp");
+MODULE_LICENSE("GPL");
+
+#define DEBUG_MSG(...) if (debug) printk (KERN_DEBUG "ebt_time: " __VA_ARGS__)
+
+void localtime(const time_t *timepr, struct tm *r);
+bool (*match)(const struct sk_buff *skb, struct xt_action_param *);
+
+static bool ebt_time_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	//const struct ebt_time_info *info = (struct ebt_time_info *)data;   /* match info for rule */
+	const struct ebt_time_info *info = par->matchinfo;
+	struct tm currenttime;                          /* time human readable */
+	u_int8_t days_of_week[7] = {64, 32, 16, 8, 4, 2, 1};
+	u_int16_t packet_time;
+	struct timeval kerneltimeval;
+	time_t packet_local_time;
+
+	/* if kerneltime=1, we don't read the skb->timestamp but kernel time instead */
+	if (info->kerneltime)
+	{
+		do_gettimeofday(&kerneltimeval);
+		packet_local_time = kerneltimeval.tv_sec;
+	}
+	else
+	{
+		struct timespec ts;
+		ts = ktime_to_timespec(skb->tstamp);
+		packet_local_time = ts.tv_sec;
+	}
+
+	/* Transform the timestamp of the packet, in a human readable form */
+	localtime(&packet_local_time, &currenttime);
+	DEBUG_MSG("currenttime: Y-%ld M-%d D-%d H-%d M-%d S-%d, Day: W-%d\n",
+		currenttime.tm_year, currenttime.tm_mon, currenttime.tm_mday,
+		currenttime.tm_hour, currenttime.tm_min, currenttime.tm_sec,
+		currenttime.tm_wday);
+
+	/* check if we match this timestamp, we start by the days... */
+	if (info->days_match != 0) {
+		if ((days_of_week[currenttime.tm_wday] & info->days_match) != days_of_week[currenttime.tm_wday])
+		{
+			DEBUG_MSG("the day doesn't match\n");
+			return false; /* the day doesn't match */
+		}
+	}
+	/* ... check the time now */
+	packet_time = (currenttime.tm_hour * 60) + currenttime.tm_min;
+	if ((packet_time < info->time_start) || (packet_time > info->time_stop))
+	{
+		DEBUG_MSG("the time doesn't match\n");
+		return false;
+	}
+	
+	/* here we match ! */
+	DEBUG_MSG("the time match!!!!!!!!\n");
+	return true;
+}
+
+static int ebt_time_mt_check(const struct xt_mtchk_param *par)
+{
+	//struct ebt_time_info *info = (struct ebt_time_info *)data;   /* match info for rule */
+	struct ebt_time_info *info = par->matchinfo;
+
+	/* First, check that we are in the correct hook */
+	/* PRE_ROUTING, LOCAL_IN or FORWARD */
+#if 0
+	if (hookmask
+            & ~((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | (1 << NF_BR_LOCAL_OUT)))
+	{
+		printk("ebt_time: error, only valid for PRE_ROUTING, LOCAL_IN, FORWARD and OUTPUT)\n");
+		return -EINVAL;
+	}
+#endif
+	/* we use the kerneltime if we are in forward or output */
+	info->kerneltime = 1;
+#if 0
+	if (hookmask & ~((1 << NF_BR_FORWARD) | (1 << NF_BR_LOCAL_OUT))) 
+		/* if not, we use the skb time */
+		info->kerneltime = 0;
+#endif
+
+	/* Check the size */
+	//if (datalen != sizeof(struct ebt_time_info))
+	//	return -EINVAL;
+	/* Now check the coherence of the data ... */
+	if ((info->time_start > 1439) ||        /* 23*60+59 = 1439*/
+	    (info->time_stop  > 1439))
+	{
+		printk(KERN_WARNING "ebt_time: invalid argument\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match ebt_time_mt_reg = {
+	.name		= EBT_TIME_MATCH,
+	.revision	= 0,
+	.family		= NFPROTO_BRIDGE,
+	.match		= ebt_time_mt,
+	.checkentry	= ebt_time_mt_check,
+	.matchsize	= XT_ALIGN(sizeof(struct ebt_time_info)),
+	.me		= THIS_MODULE,
+};
+
+static int __init ebt_time_init(void)
+{
+	int ret;
+	ret = xt_register_match(&ebt_time_mt_reg);
+
+	if(ret == 0)
+		printk(KERN_INFO "ebt_time registered\n");
+
+	return ret;
+}
+
+static void __exit ebt_time_fini(void)
+{
+	xt_unregister_match(&ebt_time_mt_reg);
+}
+
+
+
+module_init(ebt_time_init);
+module_exit(ebt_time_fini);
+
+
+/* The part below is borrowed and modified from dietlibc */
+
+/* seconds per day */
+#define SPD 24*60*60
+
+void localtime(const time_t *timepr, struct tm *r) {
+	time_t i;
+	time_t timep;
+	extern struct timezone sys_tz;
+	const unsigned int __spm[12] =
+		{ 0,
+		  (31),
+		  (31+28),
+		  (31+28+31),
+		  (31+28+31+30),
+		  (31+28+31+30+31),
+		  (31+28+31+30+31+30),
+		  (31+28+31+30+31+30+31),
+		  (31+28+31+30+31+30+31+31),
+		  (31+28+31+30+31+30+31+31+30),
+		  (31+28+31+30+31+30+31+31+30+31),
+		  (31+28+31+30+31+30+31+31+30+31+30),
+		};
+	register time_t work;
+
+	timep = (*timepr) - (sys_tz.tz_minuteswest * 60);
+	work=timep%(SPD);
+	r->tm_sec=work%60; work/=60;
+	r->tm_min=work%60; r->tm_hour=work/60;
+	work=timep/(SPD);
+	r->tm_wday=(4+work)%7;
+	for (i=1970; ; ++i) {
+		register time_t k= (!(i%4) && ((i%100) || !(i%400)))?366:365;
+		if (work>k)
+			work-=k;
+		else
+			break;
+	}
+	r->tm_year=i-1900;
+	for (i=11; i && __spm[i]>work; --i) ;
+	r->tm_mon=i;
+	r->tm_mday=work-__spm[i]+1;
+}
+#endif
diff --git a/net/bridge/netfilter/ebt_wmm_mark.c b/net/bridge/netfilter/ebt_wmm_mark.c
new file mode 100644
index 0000000000000000000000000000000000000000..2e44095073f718189ab7efafa540b8de0545c94b
--- /dev/null
+++ b/net/bridge/netfilter/ebt_wmm_mark.c
@@ -0,0 +1,175 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:GPL/GPL:standard
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:>
+*/
+
+/*
+ *  ebt_wmm_mark
+ *
+ */
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_wmm_mark_t.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#if defined(CONFIG_BCM_KF_IP) && defined(CONFIG_IPV6)
+#include <linux/ipv6.h>
+#endif
+#include <linux/skbuff.h>
+
+static unsigned int ebt_wmm_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+   
+{
+	const struct ebt_wmm_mark_t_info *info = par->targinfo;
+
+//	struct iphdr *iph;
+//	struct vlan_hdr *frame;	
+	unsigned char prio = 0;
+//	unsigned short TCI;
+
+	if (info->markset != WMM_MARK_VALUE_NONE) {
+		/* use markset regardless of supported classification method */
+		prio = (unsigned char)info->markset;
+		
+#if 0 /* TOS/DSCP priority update will be handled in wlan driver (bcmutils.c, pktsetprio()) */
+      if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+
+         unsigned short pbits = (unsigned short)(info->markset & 0x0000f000);
+
+         if (pbits) {
+            frame = (struct vlan_hdr *)(skb->network_header);
+            TCI = ntohs(frame->h_vlan_TCI);
+		      TCI = (TCI & 0x1fff) | (((pbits >> 12) - 1) << 13);
+            frame->h_vlan_TCI = htons(TCI);
+         }
+      }
+	} else if (info->mark & WMM_MARK_8021D) {
+		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+			frame = (struct vlan_hdr *)(skb->network_header);
+			TCI = ntohs(frame->h_vlan_TCI);
+			prio = (unsigned char)((TCI >> 13) & 0x7);
+        	} else
+			return EBT_CONTINUE;        	
+        					
+	} else if (info->mark & WMM_MARK_DSCP) {
+		
+		/* if VLAN frame, we need to point to correct network header */
+		if (skb->protocol == __constant_htons(ETH_P_8021Q))
+        		iph = (struct iphdr *)(skb->network_header + VLAN_HLEN);
+        	/* ip */
+#if defined(CONFIG_BCM_KF_IP) && defined(CONFIG_IPV6)
+        	else if (skb->protocol == __constant_htons(ETH_P_IP)||skb->protocol == __constant_htons(ETH_P_IPV6))
+#else         
+        	else if (skb->protocol == __constant_htons(ETH_P_IP))
+#endif
+			iph = (struct iphdr *)(skb->network_header);
+		else
+		/* pass for others */
+			return EBT_CONTINUE;
+
+#if defined(CONFIG_BCM_KF_IP) && defined(CONFIG_IPV6)
+		if(skb->protocol == __constant_htons(ETH_P_IPV6)) 
+			prio=((struct ipv6hdr *)iph)->priority>>1;			
+		else
+#endif
+
+		prio = iph->tos>>WMM_DSCP_MASK_SHIFT ;
+		
+#endif /* if 0 */
+
+	}
+		
+    //printk("markset 0x%08x, mark 0x%x, mark 0x%x \n", info->markset, info->mark, (*pskb)->mark);
+	if(prio) {
+		skb->mark &= ~(PRIO_LOC_NFMASK << info->markpos);		
+		skb->mark |= (prio << info->markpos);
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// member below is removed
+//		(*pskb)->nfcache |= NFC_ALTERED;
+		//printk("mark 0x%x, mark 0x%x\n",( prio << info->markpos), (*pskb)->mark);			
+	}
+		
+	return info->target;
+}
+
+static int ebt_wmm_mark_tg_check(const struct xt_tgchk_param *par)
+
+{
+	const struct ebt_wmm_mark_t_info *info = par->targinfo;
+	const struct ebt_entry *e = par->entryinfo;
+
+	//if (datalen != EBT_ALIGN(sizeof(struct ebt_wmm_mark_t_info)))
+	//	return -EINVAL;
+	
+	//printk("e->ethproto=0x%x, e->invflags=0x%x\n",e->ethproto, e->invflags);
+
+#if defined(CONFIG_BCM_KF_IP) && defined(CONFIG_IPV6)
+	if ((e->ethproto != __constant_htons(ETH_P_IPV6) && e->ethproto != __constant_htons(ETH_P_IP) && e->ethproto != __constant_htons(ETH_P_8021Q)) ||
+#else   
+	if ((e->ethproto != __constant_htons(ETH_P_IP) && e->ethproto != __constant_htons(ETH_P_8021Q)) ||
+#endif      
+	   e->invflags & EBT_IPROTO)
+		return -EINVAL;
+				
+	if (BASE_CHAIN && info->target == EBT_RETURN)
+		return -EINVAL;
+		
+	//CLEAR_BASE_CHAIN_BIT;
+	
+	if (INVALID_TARGET)
+		return -EINVAL;
+	
+	return 0;
+	
+}
+
+static struct xt_target ebt_wmm_mark_tg_reg = {
+	.name		= EBT_WMM_MARK_TARGET,
+	.revision	= 0,
+	.family		= NFPROTO_BRIDGE,
+	.target		= ebt_wmm_mark_tg,
+	.checkentry	= ebt_wmm_mark_tg_check,
+	.targetsize	= XT_ALIGN(sizeof(struct ebt_wmm_mark_t_info)),
+	.me		= THIS_MODULE,
+};
+
+static int __init ebt_wmm_mark_init(void)
+{
+	int ret;
+	ret = xt_register_target(&ebt_wmm_mark_tg_reg);
+
+	if(ret == 0)
+		printk(KERN_INFO "ebt_wmm_mark registered\n");
+
+	return ret;
+}
+
+static void __exit ebt_wmm_mark_fini(void)
+{
+	xt_unregister_target(&ebt_wmm_mark_tg_reg);
+}
+
+module_init(ebt_wmm_mark_init);
+module_exit(ebt_wmm_mark_fini);
+MODULE_LICENSE("GPL");
+#endif
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 42e6bd0945745f99ae2cb718b55efed2410f704b..0000a1c796daf21f403f7c52151077acf55c24f0 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -63,6 +63,7 @@ static unsigned int
 ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, int (*okfn)(struct sk_buff *))
 {
+
 	return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter);
 }
 
@@ -70,6 +71,7 @@ static unsigned int
 ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, int (*okfn)(struct sk_buff *))
 {
+
 	return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter);
 }
 
diff --git a/net/core/Makefile b/net/core/Makefile
index 674641b13aea341257094fa4f070bd4289075d9b..073c7757caee96511490a452ad26a3dcd55de139 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -2,6 +2,11 @@
 # Makefile for the Linux networking core.
 #
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BLOG)
+EXTRA_CFLAGS	+= -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD)
+EXTRA_CFLAGS	+= -I$(INC_BRCMSHARED_PUB_PATH)/bcm963xx
+endif # BCM_KF
+
 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
 	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
@@ -21,3 +26,12 @@ obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
 obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NBUFF)
+obj-y += blog.o blog_rule.o vlanctl_bind.o
+obj-y += nbuff.o iqos.o gbpm.o flwstif.o
+endif # BCM_KF
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_DPI)
+obj-y += devinfo.o dpistats.o urlinfo.o
+endif # BCM_KF
diff --git a/net/core/blog.c b/net/core/blog.c
new file mode 100644
index 0000000000000000000000000000000000000000..53713156ec61fb08e339293edbdb4c4fe52d5d00
--- /dev/null
+++ b/net/core/blog.c
@@ -0,0 +1,3541 @@
+#if defined(CONFIG_BCM_KF_BLOG)
+
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/*
+ *******************************************************************************
+ * File Name  : blog.c
+ * Description: Implements the tracing of L2 and L3 modifications to a packet
+ *              buffer while it traverses the Linux networking stack.
+ *******************************************************************************
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/blog.h>
+#include <linux/blog_net.h>
+#include <linux/nbuff.h>
+#include <linux/skbuff.h>
+#include <skb_defines.h>
+#include <linux/iqos.h>
+#include <linux/notifier.h>
+#include <net/netevent.h> 
+
+#if defined(CONFIG_BLOG)
+
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)   
+#define BLOG_NF_CONNTRACK
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#endif /* defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) */
+
+#include "../bridge/br_private.h"
+#include "../bridge/br_igmp.h"
+#include "../bridge/br_mld.h"
+
+#include <linux/bcm_colors.h>
+#include <linux/bcm_assert_locks.h>
+
+#include <net/dsfield.h>
+#include <linux/netfilter/xt_dscp.h>
+
+/*--- globals ---*/
+
+/* RFC4008 */
+uint32_t blog_nat_tcp_def_idle_timeout = BLOG_NAT_TCP_DEFAULT_IDLE_TIMEOUT; /* 1 DAY */
+uint32_t blog_nat_udp_def_idle_timeout = BLOG_NAT_UDP_DEFAULT_IDLE_TIMEOUT; /* 300 seconds */
+
+uint32_t blog_nat_generic_def_idle_timeout = 600 *HZ;/* 600 seconds */
+
+EXPORT_SYMBOL(blog_nat_tcp_def_idle_timeout);
+EXPORT_SYMBOL(blog_nat_udp_def_idle_timeout);
+EXPORT_SYMBOL(blog_nat_generic_def_idle_timeout);
+
+/* Debug macros */
+int blog_dbg = 0;
+
+DEFINE_SPINLOCK(blog_lock_tbl_g);
+#define BLOG_LOCK_TBL()         spin_lock_bh( &blog_lock_tbl_g )
+#define BLOG_UNLOCK_TBL()       spin_unlock_bh( &blog_lock_tbl_g )
+
+/* Length prioritization table index */
+static uint8_t blog_len_tbl_idx = 0;
+/* Length prioritization table
+ * {tbl idx}{min, max, original mark, target mark}
+ */
+static uint32_t blog_len_tbl[BLOG_MAX_LEN_TBLSZ][BLOG_LEN_PARAM_NUM];
+
+/* DSCP mangle table
+ * {target dscp}
+ */
+static uint8_t blog_dscp_tbl[BLOG_MAX_DSCP_TBLSZ];
+
+/* TOS mangle table
+ * {target tos}
+ */
+static uint8_t blog_tos_tbl[BLOG_MAX_TOS_TBLSZ];
+
+/* Temporary storage for passing the values from pre-modify hook to
+ * post-modify hook.
+ * {ack priority, length priority, dscp value, tos value}
+ */
+static uint32_t blog_mangl_params[BLOG_MAX_FEATURES];
+
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+#define blog_print(fmt, arg...)                                         \
+    if ( blog_dbg )                                                     \
+    printk( CLRc "BLOG %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define blog_assertv(cond)                                              \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "BLOG ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return;                                                         \
+    }
+#define blog_assertr(cond, rtn)                                         \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "BLOG ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return rtn;                                                     \
+    }
+#define BLOG_DBG(debug_code)    do { debug_code } while(0)
+#else
+#define blog_print(fmt, arg...) NULL_STMT
+#define blog_assertv(cond)      NULL_STMT
+#define blog_assertr(cond, rtn) NULL_STMT
+#define BLOG_DBG(debug_code)    NULL_STMT
+#endif
+
+#define blog_error(fmt, arg...)                                         \
+    printk( CLRerr "BLOG ERROR %s :" fmt CLRnl, __FUNCTION__, ##arg)
+
+#undef  BLOG_DECL
+#define BLOG_DECL(x)        #x,         /* string declaration */
+
+/*--- globals ---*/
+
+DEFINE_SPINLOCK(blog_lock_g);               /* blogged packet flow */
+EXPORT_SYMBOL(blog_lock_g);
+static DEFINE_SPINLOCK(blog_pool_lock_g);   /* blog pool only */
+#define BLOG_POOL_LOCK()   spin_lock_irqsave(&blog_pool_lock_g, lock_flags)
+#define BLOG_POOL_UNLOCK() spin_unlock_irqrestore(&blog_pool_lock_g, lock_flags)
+
+/*
+ * blog_support_mcast_g inherits the default value from CC_BLOG_SUPPORT_MCAST
+ * Exported blog_support_mcast() may be used to set blog_support_mcast_g.
+ */
+int blog_support_mcast_g = CC_BLOG_SUPPORT_MCAST;
+void blog_support_mcast(int config) { blog_support_mcast_g = config; }
+
+/*
+ * blog_support_mcast_learn_g inherits the default value from
+ * CC_BLOG_SUPPORT_MCAST_LEARN
+ * Exported blog_support_mcast_learn() may be used to set
+ * blog_support_mcast_learn_g.
+ */
+int blog_support_mcast_learn_g = CC_BLOG_SUPPORT_MCAST_LEARN;
+void blog_support_mcast_learn(int config) 
+{ blog_support_mcast_learn_g = config; }
+
+/*
+ * blog_support_ipv6_g inherits the value from CC_BLOG_SUPPORT_IPV6
+ * Exported blog_support_ipv6() may be used to set blog_support_ipv6_g.
+ */
+int blog_support_ipv6_g = CC_BLOG_SUPPORT_IPV6;
+void blog_support_ipv6(int config) { blog_support_ipv6_g = config; }
+
+/*
+ * blog_tunl_tos_g gets the value from BLOG_DEFAULT_TUNL_TOS
+ * Exported blog_tunl_tos_g() may be used to set blog_tunl_tos_g.
+ */
+
+/*
+ * blog_support_gre_g inherits the default value from CC_BLOG_SUPPORT_GRE
+ * Exported blog_support_gre() may be used to set blog_support_gre_g.
+ */
+int blog_gre_tunnel_accelerated_g = BLOG_GRE_DISABLE;
+
+int blog_support_gre_g = CC_BLOG_SUPPORT_GRE;
+void blog_support_gre(int config) 
+{ 
+    blog_support_gre_g = config; 
+
+    if (blog_fc_enabled() && (blog_support_gre_g == BLOG_GRE_TUNNEL))
+        blog_gre_tunnel_accelerated_g = BLOG_GRE_TUNNEL;  
+    else
+        blog_gre_tunnel_accelerated_g = BLOG_GRE_DISABLE;  
+}
+
+/*
+ * blog_support_l2tp_g inherits the default value from CC_BLOG_SUPPORT_L2TP
+ * Exported blog_support_l2tp() may be used to set blog_support_l2tp_g.
+ */
+
+int blog_l2tp_tunnel_accelerated_g = BLOG_L2TP_DISABLE;
+int blog_support_l2tp_g = CC_BLOG_SUPPORT_L2TP;
+void blog_support_l2tp(int config) 
+{ 
+    blog_support_l2tp_g = config; 
+    if (blog_fc_enabled())
+    {
+        if( !blog_support_l2tp_g )
+            blog_l2tp_tunnel_accelerated_g = BLOG_L2TP_DISABLE; 
+        else if ( blog_support_l2tp_g == BLOG_L2TP_TUNNEL )
+            blog_l2tp_tunnel_accelerated_g = BLOG_L2TP_TUNNEL; 
+        else if ( blog_support_l2tp_g == BLOG_L2TP_TUNNEL_WITHCHKSUM )
+            blog_l2tp_tunnel_accelerated_g = BLOG_L2TP_TUNNEL_WITHCHKSUM;        
+    }   
+
+}
+
+/*
+ * Traffic flow generator, keep conntrack alive during idle traffic periods
+ * by refreshing the conntrack. 
+ * Netfilter may not be statically loaded.
+ */
+blog_cttime_upd_t blog_cttime_update_fn = (blog_cttime_upd_t) NULL;
+struct sk_buff * nfskb_p = (struct sk_buff *) NULL;
+blog_xtm_get_tx_chan_t blog_xtm_get_tx_chan_fn = (blog_xtm_get_tx_chan_t) NULL;
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+blog_gre_rcv_check_t blog_gre_rcv_check_fn = NULL;
+blog_gre_xmit_upd_t blog_gre_xmit_update_fn = NULL;
+#endif
+
+blog_pptp_rcv_check_t blog_pptp_rcv_check_fn = NULL;
+blog_pptp_xmit_upd_t blog_pptp_xmit_update_fn = NULL; 
+blog_pptp_xmit_get_t blog_pptp_xmit_get_fn = NULL; 
+
+blog_dpi_ctk_update_t blog_dpi_ctk_update_fn = NULL;
+
+blog_l2tp_rcv_check_t blog_l2tp_rcv_check_fn = NULL;
+
+
+/*----- Constant string representation of enums for print -----*/
+const char * strBlogAction[BLOG_ACTION_MAX] =
+{
+    BLOG_DECL(PKT_DONE)
+    BLOG_DECL(PKT_NORM)
+    BLOG_DECL(PKT_BLOG)
+    BLOG_DECL(PKT_DROP)
+};
+
+const char * strBlogDir[BLOG_DIR_MAX] =
+{
+    BLOG_DECL(DIR_RX)
+    BLOG_DECL(DIR_TX)
+};
+
+const char * strBlogNetEntity[BLOG_NET_ENTITY_MAX] =
+{
+    BLOG_DECL(FLOWTRACK)
+    BLOG_DECL(BRIDGEFDB)
+    BLOG_DECL(MCAST_FDB)
+    BLOG_DECL(IF_DEVICE)
+    BLOG_DECL(IF_DEVICE_MCAST)
+    BLOG_DECL(GRE_TUNL)
+    BLOG_DECL(TOS_MODE)
+};
+
+const char * strBlogNotify[BLOG_NOTIFY_MAX] =
+{
+    BLOG_DECL(DESTROY_FLOWTRACK)
+    BLOG_DECL(DESTROY_BRIDGEFDB)
+    BLOG_DECL(MCAST_CONTROL_EVT)
+    BLOG_DECL(MCAST_SYNC_EVT)
+    BLOG_DECL(DESTROY_NETDEVICE)
+    BLOG_DECL(LINK_STATE_CHANGE)
+    BLOG_DECL(FETCH_NETIF_STATS)
+    BLOG_DECL(DYNAMIC_DSCP_EVENT)
+    BLOG_DECL(UPDATE_NETDEVICE)
+    BLOG_DECL(ARP_BIND_CHG)
+    BLOG_DECL(CONFIG_CHANGE)
+};
+
+const char * strBlogQuery[BLOG_QUERY_MAX] =
+{
+    BLOG_DECL(QUERY_FLOWTRACK)
+    BLOG_DECL(QUERY_BRIDGEFDB)
+};
+
+const char * strBlogRequest[BLOG_REQUEST_MAX] =
+{
+    BLOG_DECL(FLOWTRACK_KEY_SET)
+    BLOG_DECL(FLOWTRACK_KEY_GET)
+    BLOG_DECL(FLOWTRACK_DSCP_GET)
+    BLOG_DECL(FLOW_CONFIRMED)
+    BLOG_DECL(FLOW_ASSURED)
+    BLOG_DECL(FLOW_ALG_HELPER)
+    BLOG_DECL(FLOW_EXCLUDE)
+    BLOG_DECL(FLOW_REFRESH)
+    BLOG_DECL(FLOW_TIME_SET)
+    BLOG_DECL(NETIF_PUT_STATS)
+    BLOG_DECL(LINK_XMIT_FN)
+    BLOG_DECL(LINK_NOCARRIER)
+    BLOG_DECL(NETDEV_NAME)
+    BLOG_DECL(MCAST_KEY_SET)
+    BLOG_DECL(MCAST_KEY_GET)
+    BLOG_DECL(MCAST_DFLT_MIPS)
+    BLOG_DECL(IQPRIO_SKBMARK_SET)
+    BLOG_DECL(TCPACK_PRIO)
+    BLOG_DECL(BRIDGEFDB_KEY_SET)
+    BLOG_DECL(BRIDGEFDB_KEY_GET)
+    BLOG_DECL(BRIDGEFDB_TIME_SET)
+    BLOG_DECL(SYS_TIME_GET)
+    BLOG_DECL(GRE_TUNL_XMIT)
+    BLOG_DECL(SKB_DST_ENTRY_SET)
+    BLOG_DECL(SKB_DST_ENTRY_RELEASE)
+};
+
+const char * strBlogEncap[PROTO_MAX] =
+{
+    BLOG_DECL(BCM_XPHY)
+    BLOG_DECL(BCM_SWC)
+    BLOG_DECL(ETH_802x)
+    BLOG_DECL(VLAN_8021Q)
+    BLOG_DECL(PPPoE_2516)
+    BLOG_DECL(PPP_1661)
+    BLOG_DECL(PLD_IPv4)
+    BLOG_DECL(PLD_IPv6)
+    BLOG_DECL(GRE)
+    BLOG_DECL(DEL_IPv4)
+    BLOG_DECL(DEL_IPv6)
+};
+
+/*
+ *------------------------------------------------------------------------------
+ * Support for RFC 2684 headers logging.
+ *------------------------------------------------------------------------------
+ */
+const char * strRfc2684[RFC2684_MAX] =
+{
+    BLOG_DECL(RFC2684_NONE)         /*                               */
+    BLOG_DECL(LLC_SNAP_ETHERNET)    /* AA AA 03 00 80 C2 00 07 00 00 */
+    BLOG_DECL(LLC_SNAP_ROUTE_IP)    /* AA AA 03 00 00 00 08 00       */
+    BLOG_DECL(LLC_ENCAPS_PPP)       /* FE FE 03 CF                   */
+    BLOG_DECL(VC_MUX_ETHERNET)      /* 00 00                         */
+    BLOG_DECL(VC_MUX_IPOA)          /*                               */
+    BLOG_DECL(VC_MUX_PPPOA)         /*                               */
+    BLOG_DECL(PTM)                  /*                               */
+};
+
+const uint8_t rfc2684HdrLength[RFC2684_MAX] =
+{
+     0, /* header was already stripped. :                               */
+    10, /* LLC_SNAP_ETHERNET            : AA AA 03 00 80 C2 00 07 00 00 */
+     8, /* LLC_SNAP_ROUTE_IP            : AA AA 03 00 00 00 08 00       */
+     4, /* LLC_ENCAPS_PPP               : FE FE 03 CF                   */
+     2, /* VC_MUX_ETHERNET              : 00 00                         */
+     0, /* VC_MUX_IPOA                  :                               */
+     0, /* VC_MUX_PPPOA                 :                               */
+     0, /* PTM                          :                               */
+};
+
+const uint8_t rfc2684HdrData[RFC2684_MAX][16] =
+{
+    {},
+    { 0xAA, 0xAA, 0x03, 0x00, 0x80, 0xC2, 0x00, 0x07, 0x00, 0x00 },
+    { 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 },
+    { 0xFE, 0xFE, 0x03, 0xCF },
+    { 0x00, 0x00 },
+    {},
+    {},
+    {}
+};
+
+const char * strBlogPhy[BLOG_MAXPHY] =
+{
+    BLOG_DECL(BLOG_XTMPHY)
+    BLOG_DECL(BLOG_ENETPHY)
+    BLOG_DECL(BLOG_GPONPHY)
+    BLOG_DECL(BLOG_EPONPHY)    
+    BLOG_DECL(BLOG_USBPHY)
+    BLOG_DECL(BLOG_WLANPHY)
+    BLOG_DECL(BLOG_MOCAPHY)
+    BLOG_DECL(BLOG_EXTRA1PHY)
+    BLOG_DECL(BLOG_EXTRA2PHY)
+    BLOG_DECL(BLOG_EXTRA3PHY)
+};
+
+const char * strIpctDir[] = {   /* in reference to enum ip_conntrack_dir */
+    BLOG_DECL(DIR_ORIG)
+    BLOG_DECL(DIR_RPLY)
+    BLOG_DECL(DIR_UNKN)
+};
+
+const char * strIpctStatus[] =  /* in reference to enum ip_conntrack_status */
+{
+    BLOG_DECL(EXPECTED)
+    BLOG_DECL(SEEN_REPLY)
+    BLOG_DECL(ASSURED)
+    BLOG_DECL(CONFIRMED)
+    BLOG_DECL(SRC_NAT)
+    BLOG_DECL(DST_NAT)
+    BLOG_DECL(SEQ_ADJUST)
+    BLOG_DECL(SRC_NAT_DONE)
+    BLOG_DECL(DST_NAT_DONE)
+    BLOG_DECL(DYING)
+    BLOG_DECL(FIXED_TIMEOUT)
+    BLOG_DECL(BLOG)
+};
+
+const char * strBlogTos[] =
+{
+    BLOG_DECL(BLOG_TOS_FIXED)
+    BLOG_DECL(BLOG_TOS_INHERIT)
+};
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Default Rx and Tx hooks.
+ * FIXME: Group these hooks into a structure and change blog_bind to use
+ *        a structure.
+ *------------------------------------------------------------------------------
+ */
+static BlogDevHook_t blog_rx_hook_g = (BlogDevHook_t)NULL;
+static BlogDevHook_t blog_tx_hook_g = (BlogDevHook_t)NULL;
+static BlogNotifyHook_t blog_xx_hook_g = (BlogNotifyHook_t)NULL;
+static BlogQueryHook_t blog_qr_hook_g = (BlogQueryHook_t)NULL;
+static BlogScHook_t blog_sc_hook_g[BlogClient_MAX] = { (BlogScHook_t)NULL };
+static BlogSdHook_t blog_sd_hook_g[BlogClient_MAX] = { (BlogSdHook_t)NULL };
+
+#if defined(CONFIG_BCM_KF_WL)
+void (*wl_pktc_del_hook)(uint32_t addr) = NULL;
+void (*dhd_pktc_del_hook)(uint32_t addr) = NULL;
+EXPORT_SYMBOL(wl_pktc_del_hook);
+EXPORT_SYMBOL(dhd_pktc_del_hook);
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Blog_t Free Pool Management.
+ * The free pool of Blog_t is self growing (extends upto an engineered
+ * value). Could have used a kernel slab cache. 
+ *------------------------------------------------------------------------------
+ */
+
+/* Global pointer to the free pool of Blog_t */
+static Blog_t * blog_list_gp = BLOG_NULL;
+
+static int blog_extends = 0;        /* Extension of Pool on depletion */
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+static int blog_cnt_free = 0;       /* Number of Blog_t free */
+static int blog_cnt_used = 0;       /* Number of in use Blog_t */
+static int blog_cnt_hwm  = 0;       /* In use high water mark for engineering */
+static int blog_cnt_fails = 0;
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : blog_extend
+ * Description: Create a pool of Blog_t objects. When a pool is exhausted
+ *              this function may be invoked to extend the pool. The pool is
+ *              identified by a global pointer, blog_list_gp. All objects in
+ *              the pool chained together in a single linked list.
+ * Parameters :
+ *   num      : Number of Blog_t objects to be allocated.
+ * Returns    : Number of Blog_t objects allocated in pool.
+ *
+ * CAUTION: blog_extend must be called with blog_pool_lock_g acquired.
+ *------------------------------------------------------------------------------
+ */
+uint32_t blog_extend( uint32_t num )
+{
+    register int i;
+    register Blog_t * list_p;
+
+    blog_print( "%u", num );
+
+    list_p = (Blog_t *) kmalloc( num * sizeof(Blog_t), GFP_ATOMIC);
+    if ( list_p == BLOG_NULL )
+    {
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+        blog_cnt_fails++;
+#endif
+        blog_print( "WARNING: Failure to initialize %d Blog_t", num );
+        return 0;
+    }
+
+    /* memset( (void *)list_p, 0, (sizeof(Blog_t) * num ); */
+    for ( i = 0; i < num; i++ )
+        list_p[i].blog_p = &list_p[i+1];
+
+    blog_extends++;
+
+    BLOG_DBG( blog_cnt_free += num; );
+    list_p[num-1].blog_p = blog_list_gp; /* chain last Blog_t object */
+    blog_list_gp = list_p;  /* Head of list */
+
+    return num;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _fast_memset
+ * Description  : sets the memory starting from dst_p to val.
+ * Note         : dst_p should be at least 32-bit aligned, and len is in bytes
+ *------------------------------------------------------------------------------
+ */
+static inline 
+void _fast_memset( uint32_t *dst_p, uint32_t val, uint32_t len )
+{
+    int num_words = len >> 2;
+    int num_bytes = len & 3;
+    uint8_t *byte_p;
+    int i;
+
+    for( i=0; i < num_words; i++ )
+        *dst_p++ = val;
+
+    if (num_bytes)
+    {
+        byte_p = (uint8_t *) dst_p;
+        for( i=0; i < num_bytes; i++ )
+            *byte_p++ = val;
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_clr
+ * Description  : Clear the data of a Blog_t
+ *                Need not be protected by blog_pool_lock_g
+ *------------------------------------------------------------------------------
+ */
+static inline void blog_clr( Blog_t * blog_p )
+{
+    blog_assertv( ((blog_p != BLOG_NULL) && (_IS_BPTR_(blog_p))) );
+    BLOG_DBG( memset( (void*)blog_p, 0, sizeof(Blog_t) ); );
+    _fast_memset( (void*)blog_p, 0, sizeof(Blog_t) );
+
+    /* clear phyHdr, count, bmap, and channel */
+    blog_p->minMtu = BLOG_ETH_MTU_LEN;
+    blog_p->vid = 0xFFFFFFFF;
+    blog_p->wl = 0; /* Clear the WL-METADATA */
+
+    blog_print( "blog<0x%08x>", (int)blog_p );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_get
+ * Description  : Allocate a Blog_t from the free list
+ * Returns      : Pointer to an Blog_t or NULL, on depletion.
+ *------------------------------------------------------------------------------
+ */
+Blog_t * blog_get( void )
+{
+    register Blog_t * blog_p;
+    unsigned long lock_flags;
+
+    BLOG_POOL_LOCK();   /* DO NOT USE blog_assertr() until BLOG_POOL_UNLOCK() */
+
+    if ( blog_list_gp == BLOG_NULL )
+    {
+#ifdef CC_BLOG_SUPPORT_EXTEND
+        if ( (blog_extends >= BLOG_EXTEND_MAX_ENGG)/* Try extending free pool */
+          || (blog_extend( BLOG_EXTEND_SIZE_ENGG ) != BLOG_EXTEND_SIZE_ENGG))
+        {
+            blog_print( "WARNING: free list exhausted" );
+        }
+#else
+        if ( blog_extend( BLOG_EXTEND_SIZE_ENGG ) == 0 )
+        {
+            blog_print( "WARNING: out of memory" );
+        }
+#endif
+        if (blog_list_gp == BLOG_NULL)
+        {
+        blog_p = BLOG_NULL;
+        BLOG_POOL_UNLOCK(); /* May use blog_assertr() now onwards */
+        goto blog_get_return;
+    }
+    }
+
+    BLOG_DBG(
+        blog_cnt_free--;
+        if ( ++blog_cnt_used > blog_cnt_hwm )
+            blog_cnt_hwm = blog_cnt_used;
+        );
+
+    blog_p = blog_list_gp;
+    blog_list_gp = blog_list_gp->blog_p;
+
+    BLOG_POOL_UNLOCK();     /* May use blog_assertr() now onwards */
+
+    blog_clr( blog_p );     /* quickly clear the contents */
+
+blog_get_return:
+
+    blog_print( "blog<0x%08x>", (int)blog_p );
+
+    return blog_p;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_put
+ * Description  : Release a Blog_t back into the free pool
+ * Parameters   :
+ *  blog_p      : Pointer to a non-null Blog_t to be freed.
+ *------------------------------------------------------------------------------
+ */
+void blog_put( Blog_t * blog_p )
+{
+    unsigned long lock_flags;
+
+    blog_assertv( ((blog_p != BLOG_NULL) && (_IS_BPTR_(blog_p))) );
+
+    blog_clr( blog_p );
+
+    BLOG_POOL_LOCK();   /* DO NOT USE blog_assertv() until BLOG_POOL_UNLOCK() */
+
+    BLOG_DBG( blog_cnt_used--; blog_cnt_free++; );
+    blog_p->blog_p = blog_list_gp;  /* clear pointer to skb_p */
+    blog_list_gp = blog_p;          /* link into free pool */
+
+    BLOG_POOL_UNLOCK();/* May use blog_assertv() now onwards */
+
+    blog_print( "blog<0x%08x>", (int)blog_p );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_skb
+ * Description  : Allocate and associate a Blog_t with an sk_buff.
+ * Parameters   :
+ *  skb_p       : pointer to a non-null sk_buff
+ * Returns      : A Blog_t object or NULL,
+ *------------------------------------------------------------------------------
+ */
+Blog_t * blog_skb( struct sk_buff * skb_p )
+{
+    blog_assertr( (skb_p != (struct sk_buff *)NULL), BLOG_NULL );
+    blog_assertr( (!_IS_BPTR_(skb_p->blog_p)), BLOG_NULL ); /* avoid leak */
+
+    skb_p->blog_p = blog_get(); /* Allocate and associate with sk_buff */
+
+    blog_print( "skb<0x%08x> blog<0x%08x>", (int)skb_p, (int)skb_p->blog_p );
+
+    /* CAUTION: blog_p does not point back to the skb, do it explicitly */
+    return skb_p->blog_p;       /* May be null */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_fkb
+ * Description  : Allocate and associate a Blog_t with an fkb.
+ * Parameters   :
+ *  fkb_p       : pointer to a non-null FkBuff_t
+ * Returns      : A Blog_t object or NULL,
+ *------------------------------------------------------------------------------
+ */
+Blog_t * blog_fkb( struct fkbuff * fkb_p )
+{
+    uint32_t in_skb_tag;
+    blog_assertr( (fkb_p != (FkBuff_t *)NULL), BLOG_NULL );
+    blog_assertr( (!_IS_BPTR_(fkb_p->blog_p)), BLOG_NULL ); /* avoid leak */
+
+    in_skb_tag = _is_in_skb_tag_( fkb_p->flags );
+
+    fkb_p->blog_p = blog_get(); /* Allocate and associate with fkb */
+
+    if ( fkb_p->blog_p != BLOG_NULL )   /* Move in_skb_tag to blog rx info */
+        fkb_p->blog_p->rx.fkbInSkb = in_skb_tag;
+
+    blog_print( "fkb<0x%08x> blog<0x%08x> in_skb_tag<%u>",
+                (int)fkb_p, (int)fkb_p->blog_p, in_skb_tag );
+    return fkb_p->blog_p;       /* May be null */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_snull
+ * Description  : Dis-associate a sk_buff with any Blog_t
+ * Parameters   :
+ *  skb_p       : Pointer to a non-null sk_buff
+ * Returns      : Previous Blog_t associated with sk_buff
+ *------------------------------------------------------------------------------
+ */
+inline Blog_t * _blog_snull( struct sk_buff * skb_p )
+{
+    register Blog_t * blog_p;
+    blog_p = skb_p->blog_p;
+    skb_p->blog_p = BLOG_NULL;
+    return blog_p;
+}
+
+Blog_t * blog_snull( struct sk_buff * skb_p )
+{
+    blog_assertr( (skb_p != (struct sk_buff *)NULL), BLOG_NULL );
+    blog_print( "skb<0x%08x> blog<0x%08x>", (int)skb_p, (int)skb_p->blog_p );
+    return _blog_snull( skb_p );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_fnull
+ * Description  : Dis-associate a fkbuff with any Blog_t
+ * Parameters   :
+ *  fkb_p       : Pointer to a non-null fkbuff
+ * Returns      : Previous Blog_t associated with fkbuff
+ *------------------------------------------------------------------------------
+ */
+inline Blog_t * _blog_fnull( struct fkbuff * fkb_p )
+{
+    register Blog_t * blog_p;
+    blog_p = fkb_p->blog_p;
+    fkb_p->blog_p = BLOG_NULL;
+    return blog_p;
+}
+
+Blog_t * blog_fnull( struct fkbuff * fkb_p )
+{
+    blog_assertr( (fkb_p != (struct fkbuff *)NULL), BLOG_NULL );
+    blog_print( "fkb<0x%08x> blogp<0x%08x>", (int)fkb_p, (int)fkb_p->blog_p );
+    return _blog_fnull( fkb_p );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_free
+ * Description  : Free any Blog_t associated with a sk_buff
+ * Parameters   :
+ *  skb_p       : Pointer to a non-null sk_buff
+ *------------------------------------------------------------------------------
+ */
+inline void _blog_free( struct sk_buff * skb_p )
+{
+    /* Detach the blog from the skb and recycle it, unless none was
+     * attached.  blog_put() returns the blog to the free list. */
+    Blog_t * detached_p = _blog_snull( skb_p );
+    if ( likely(detached_p != BLOG_NULL) )
+        blog_put( detached_p );
+}
+
+void blog_free( struct sk_buff * skb_p )
+{
+    blog_assertv( (skb_p != (struct sk_buff *)NULL) );
+    /* Debug builds trace the caller whenever a blog is actually attached. */
+    BLOG_DBG(
+        if ( skb_p->blog_p != BLOG_NULL )
+            blog_print( "skb<0x%08x> blog<0x%08x> [<%08x>]",
+                        (int)skb_p, (int)skb_p->blog_p,
+                        (int)__builtin_return_address(0) ); );
+    _blog_free( skb_p );    /* detach and recycle the blog, if any */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_skip
+ * Description  : Disable further tracing of sk_buff by freeing associated
+ *                Blog_t (if any)
+ * Parameters   :
+ *  skb_p       : Pointer to a sk_buff
+ *------------------------------------------------------------------------------
+ */
+void blog_skip( struct sk_buff * skb_p )
+{
+    blog_print( "skb<0x%08x> [<%08x>]",
+                (int)skb_p, (int)__builtin_return_address(0) );
+    blog_assertv( (skb_p != (struct sk_buff *)NULL) );
+    /* Freeing the blog ends all further flow tracing for this skb. */
+    _blog_free( skb_p );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_xfer
+ * Description  : Transfer ownership of a Blog_t between two sk_buff(s)
+ * Parameters   :
+ *  skb_p       : New owner of Blog_t object 
+ *  prev_p      : Former owner of Blog_t object
+ *------------------------------------------------------------------------------
+ */
+void blog_xfer( struct sk_buff * skb_p, const struct sk_buff * prev_p )
+{
+    Blog_t * blog_p;
+    struct sk_buff * mod_prev_p;
+    blog_assertv( (prev_p != (struct sk_buff *)NULL) );
+    blog_assertv( (skb_p != (struct sk_buff *)NULL) );
+
+    mod_prev_p = (struct sk_buff *) prev_p; /* const removal without warning */
+    blog_p = _blog_snull( mod_prev_p );     /* detach from the old owner */
+    skb_p->blog_p = blog_p;                 /* attach (possibly NULL) to new */
+
+    if ( likely(blog_p != BLOG_NULL) )
+    {
+        blog_print( "skb<0x%08x> to new<0x%08x> blog<0x%08x> [<%08x>]",
+                    (int)prev_p, (int)skb_p, (int)blog_p,
+                    (int)__builtin_return_address(0) );
+        blog_assertv( (_IS_BPTR_(blog_p)) );
+        blog_p->skb_p = skb_p;  /* back-pointer now tracks the new owner */
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_clone
+ * Description  : Duplicate a Blog_t for another sk_buff
+ * Parameters   :
+ *  skb_p       : New owner of cloned Blog_t object 
+ *  prev_p      : Blog_t object to be cloned
+ *------------------------------------------------------------------------------
+ */
+void blog_clone( struct sk_buff * skb_p, const struct blog_t * prev_p )
+{
+    blog_assertv( (skb_p != (struct sk_buff *)NULL) );
+
+    if ( likely(prev_p != BLOG_NULL) )
+    {
+        Blog_t * blog_p;
+        int      i;
+
+        blog_assertv( (_IS_BPTR_(prev_p)) );
+        
+        skb_p->blog_p = blog_get(); /* Allocate and associate with skb */
+        blog_p = skb_p->blog_p;
+
+        blog_print( "orig blog<0x%08x> new skb<0x%08x> blog<0x%08x> [<%08x>]",
+                    (int)prev_p, (int)skb_p, (int)blog_p,
+                    (int)__builtin_return_address(0) );
+
+        /* blog_get() may fail; the skb is then simply left without a blog. */
+        if ( likely(blog_p != BLOG_NULL) )
+        {
+            blog_p->skb_p = skb_p;
+/* Field-wise copy of the RX-side state from the original blog. */
+#define CPY(x) blog_p->x = prev_p->x
+            CPY(key.match);
+            CPY(hash);
+            CPY(mark);
+            CPY(priority);
+            CPY(rx);
+            CPY(vid);
+            CPY(vtag_num);
+            CPY(tupleV6);
+            /* Duplicate the virtual-device chain; the list is packed, so
+             * the first empty slot terminates it. */
+            for(i=0; i<MAX_VIRT_DEV; i++)
+            {
+               if( prev_p->virt_dev_p[i] )
+               {
+                  blog_p->virt_dev_p[i] = prev_p->virt_dev_p[i];
+                  blog_p->delta[i] = prev_p->delta[i];
+               }
+               else
+                  break;
+            }
+            blog_p->tx.word = 0;    /* TX info is cleared, not cloned */
+        }
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_copy
+ * Description  : Copy a Blog_t object to another blog object.
+ * Parameters   :
+ *  new_p       : Blog_t object to be filled in
+ *  prev_p      : Blog_t object with the data information
+ *------------------------------------------------------------------------------
+ */
+void blog_copy(struct blog_t * new_p, const struct blog_t * prev_p)
+{
+    /* Bulk-copy the entire Blog_t; a NULL source leaves new_p untouched. */
+    blog_assertv( (new_p != BLOG_NULL) );
+    blog_print( "new_p<0x%08x> prev_p<0x%08x>", (int)new_p, (int)prev_p );
+
+    if ( unlikely(prev_p == BLOG_NULL) )
+        return;
+
+    memcpy( new_p, prev_p, sizeof(Blog_t) );
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_iq
+ * Description  : get the iq prio from blog
+ * Parameters   :
+ *  skb_p       : Pointer to a sk_buff
+ *------------------------------------------------------------------------------
+ */
+int blog_iq( const struct sk_buff * skb_p )
+{
+    /* Return the ingress-QoS priority recorded in the skb's blog, or the
+     * low-priority default when the skb or its blog is absent. */
+    int prio = IQOS_PRIO_LOW;
+
+    blog_print( "skb<0x%08x> [<%08x>]",
+                (int)skb_p, (int)__builtin_return_address(0) );
+
+    if (skb_p)
+    {
+        Blog_t *blog_p = skb_p->blog_p;
+
+        /* Fix: the nested test was mis-indented at the outer level, which
+         * misread as a sibling of `if (skb_p)`.  Logic is unchanged. */
+        if (blog_p)
+            prio = blog_p->iq_prio;
+    }
+    return prio;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_fc_enabled
+ * Description  : get the enabled/disabled status of flow cache
+ * Parameters   :
+ *  none        :
+ *------------------------------------------------------------------------------
+ */
+inline int blog_fc_enabled( void )
+{
+    /* Flow cache is considered enabled iff a receive hook is bound. */
+    return (blog_rx_hook_g != (BlogDevHook_t)NULL) ? 1 : 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_gre_tunnel_accelerated
+ * Description  : get the accelerated status of GRE tunnels
+ * Parameters   :
+ *  none        :
+ *------------------------------------------------------------------------------
+ */
+
+inline int blog_gre_tunnel_accelerated( void )
+{
+    /* Snapshot of the global GRE tunnel acceleration state flag. */
+    return blog_gre_tunnel_accelerated_g;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_l2tp_tunnel_accelerated
+ * Description  : get the accelerated status of L2TP tunnels
+ * Parameters   :
+ *  none        :
+ *------------------------------------------------------------------------------
+ */
+
+inline int blog_l2tp_tunnel_accelerated( void )
+{
+    /* Snapshot of the global L2TP tunnel acceleration state flag. */
+    return blog_l2tp_tunnel_accelerated_g;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_tcpack_prio
+ * Description  : A TCP ACK flow in upstream (ONLY when egress port is XTM) is
+ *                prioritized based on the IP len and the number of back-to-back
+ *                pure TCP ACKs received. Once both the above condition are
+ *                fulfilled the packets of the flow are queued to
+ *                BLOG_TCPACK_XTM_TX_QID. 
+ *
+ *                TCP ACK prioritization is Enabled by default and can be
+ *                Disabled by defining BLOG_TCPACK_MAX_COUNT as 0
+ * NOTES        : 1. The above two conditions should be fulfilled for the first
+ *                   n packets (current default value is 50).
+ *                2. An already "IP QoS classified" TCP ACK flow is not 
+ *                   re-prioritized.
+ *                3. User has to explicitly configure the BLOG_TCPACK_XTM_TX_QID
+ *                   in the WebGUI, otherwise the TCP ACK packets will be 
+ *                   queued to the default queue (queue=0).
+ * Parameters   :
+ *  blog_p      : Pointer to a Blog_t
+ *  len         : IP Payload Len of the TCP ACK packet
+ * Returns      :
+ *  NONE        :
+ *------------------------------------------------------------------------------
+ */
+static void blog_tcpack_prio( Blog_t * blog_p, int len )
+{
+    int max_ack_len = 0;
+/* ack_cnt must fit its bit-field; guard the configured threshold. */
+#if (BLOG_TCPACK_MAX_COUNT > 15)
+#error "BLOG_TCPACK_MAX_COUNT > 15"
+#endif
+
+    /* A "pure ACK" is identified only by IP payload length; the bound
+     * differs between IPv4 and IPv6 headers. */
+    if (RX_IPV4(blog_p))
+        max_ack_len = BLOG_TCPACK_IPV4_LEN;
+    else if (RX_IPV6(blog_p))
+        max_ack_len = BLOG_TCPACK_IPV6_LEN;
+
+    if (len <= max_ack_len)
+    {
+        /* Already prioritized, or flow carries an explicit QoS flow id:
+         * mark done so later packets skip this path entirely. */
+        if ( (blog_p->ack_cnt >= BLOG_TCPACK_MAX_COUNT) || 
+             (SKBMARK_GET_FLOW_ID(blog_p->mark) ) )
+            blog_p->ack_done = 1;    /* optimization */
+        else
+        {
+            blog_p->ack_cnt++;
+            /* Threshold of back-to-back pure ACKs reached: steer the flow
+             * to the dedicated XTM TX queue (queue ids are 0-based). */
+            if (blog_p->ack_cnt >= BLOG_TCPACK_MAX_COUNT)
+            {
+                blog_p->mark = 
+                    SKBMARK_SET_Q(blog_p->mark, (BLOG_TCPACK_XTM_TX_QID-1) );
+
+                if ( blog_xtm_get_tx_chan_fn  )
+                    blog_p->tx.info.channel = 
+                        (*blog_xtm_get_tx_chan_fn)( blog_p->tx.dev_p, 
+                            blog_p->tx.info.channel, blog_p->mark );
+                blog_p->ack_done = 1;
+            }
+        }
+    }
+    else
+        blog_p->ack_cnt = 0;    /* data-bearing segment resets the run */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_link
+ * Description  : Associate a network entity with an skb's blog object
+ * Parameters   :
+ *  entity_type : Network entity type
+ *  blog_p      : Pointer to a Blog_t
+ *  net_p       : Pointer to a network stack entity 
+ *  param1      : optional parameter 1
+ *  param2      : optional parameter 2
+ * PreRequisite : acquire blog_lock_g before calling blog_link()
+ *------------------------------------------------------------------------------
+ */
+void blog_link( BlogNetEntity_t entity_type, Blog_t * blog_p,
+                void * net_p, uint32_t param1, uint32_t param2 )
+{
+    blog_assertv( (entity_type < BLOG_NET_ENTITY_MAX) );
+    blog_assertv( (net_p != (void *)NULL) );
+
+    /* A NULL blog means this packet is not being traced: nothing to link. */
+    if ( unlikely(blog_p == BLOG_NULL) )
+        return;
+
+    blog_assertv( (_IS_BPTR_(blog_p)) );
+
+    blog_print( "link<%s> skb<0x%08x> blog<0x%08x> net<0x%08x> %u %u [<%08x>]",
+                strBlogNetEntity[entity_type], (int)blog_p->skb_p, (int)blog_p,
+                (int)net_p, param1, param2, (int)__builtin_return_address(0) );
+
+    switch ( entity_type )
+    {
+        case FLOWTRACK:
+        {
+#if defined(BLOG_NF_CONNTRACK)
+            uint32_t idx = BLOG_CT_PLD;
+
+            /* NOTE(review): this assert is satisfied when ANY one of the
+             * five disjuncts holds (all ||); confirm that an && grouping of
+             * the param1 pair with the param2 triple was not intended. */
+            blog_assertv( ((param1 == BLOG_PARAM1_DIR_ORIG) ||
+                           (param1 == BLOG_PARAM1_DIR_REPLY)||
+                           (param2 == BLOG_PARAM2_IPV4)     ||
+                           (param2 == BLOG_PARAM2_IPV6)     ||
+                           (param2 == BLOG_PARAM2_GRE_IPV4)) );
+
+            /* Multicast flows are not linked to conntrack entries. */
+            if ( unlikely(blog_p->rx.multicast) )
+                return;
+
+            /* Decide which conntrack slot this entry occupies: payload
+             * (BLOG_CT_PLD) or delivery/tunnel (BLOG_CT_DEL). */
+            switch (param2)
+            {
+                case BLOG_PARAM2_IPV4:
+                    if (RX_IPV4(blog_p))
+                        idx = BLOG_CT_PLD;
+                    else
+                    {
+                        if ( (RX_IPV4_DEL(blog_p)) || 
+                                        (blog_p->ct_p[BLOG_CT_DEL] == NULL) )
+                            idx = BLOG_CT_DEL;
+                        else
+                        {
+                            blog_print( "invalid param2 %u", param2 );
+                        }
+                    }
+                    break;
+
+                case BLOG_PARAM2_IPV6:
+                    if (RX_IPV6(blog_p))
+                         idx = BLOG_CT_PLD;
+                    else
+                    {
+                        if ( (RX_IPV6_DEL(blog_p)) || 
+                                       (blog_p->ct_p[BLOG_CT_DEL] == NULL) )
+                            idx = BLOG_CT_DEL;
+                        else
+                        {
+                            blog_print( "invalid param2 %u", param2 );
+                        }
+                    }
+                    break;
+
+                case BLOG_PARAM2_GRE_IPV4:
+                    if (blog_support_gre_g == BLOG_GRE_TUNNEL)
+                        idx = BLOG_CT_DEL;
+                    else
+                        idx = BLOG_CT_PLD;
+                    break;
+
+                default:
+                    blog_print( "unknown param2 %u", param2 );
+                    return;
+            }
+
+            /* param2 indicates the ct_p belongs to IPv4 or IPv6 */
+            blog_p->ct_p[idx] = net_p; /* Pointer to conntrack */
+            blog_p->ct_ver[idx] = (param2 == BLOG_PARAM2_GRE_IPV4) ?
+                                                BLOG_PARAM2_IPV4 : param2;
+            /* 
+             * Save flow direction. Here we make one assumption:
+             * If a packet traverses both IPv4 and IPv6 conntracks,
+             * for example, 6in4 or 4in6 tunnel, the nf_dir must be the same
+             * for both conntracks.
+             */
+            blog_p->nf_dir = param1;
+
+            blog_print( "idx<%d> ct_p<%p> ct_ver<%d>\n",
+                    idx, blog_p->ct_p[idx], blog_p->ct_ver[idx] );
+
+#endif
+            break;
+        }
+
+        case BRIDGEFDB:
+        {
+            /* param1 selects the source or destination FDB slot. */
+            blog_assertv( ((param1 == BLOG_PARAM1_SRCFDB) ||
+                           (param1 == BLOG_PARAM1_DSTFDB)) );
+
+            blog_p->fdb[param1] = net_p;
+
+            break;
+        }
+
+        case MCAST_FDB:
+        {
+            blog_p->mc_fdb = net_p; /* Pointer to mc_fdb */
+            break;
+        }
+
+        case IF_DEVICE: /* link virtual interfaces traversed by flow */
+        case IF_DEVICE_MCAST:
+        {
+            int i;
+
+            blog_assertv( (param1 < BLOG_DIR_MAX) );
+
+            /* Append into the first free slot of the packed device list;
+             * the direction is encoded into the stored pointer. */
+            for (i=0; i<MAX_VIRT_DEV; i++)
+            {
+                /* A flow should not rx and tx with the same device!!  */
+                blog_assertv((net_p != DEVP_DETACH_DIR(blog_p->virt_dev_p[i])));
+
+                if ( blog_p->virt_dev_p[i] == NULL )
+                {
+                    blog_p->virt_dev_p[i] = DEVP_APPEND_DIR(net_p, param1);
+                    /* delta records the per-device header length adjustment;
+                     * param2 carries the length (presumably the device MTU or
+                     * packet length at this hop — TODO confirm at callers). */
+                    if (IF_DEVICE_MCAST == entity_type )
+                    {
+                       blog_p->delta[i] = -(param2 & 0xFF);
+                    }
+                    else
+                    {
+                       blog_p->delta[i] = (param2 - blog_p->tx.pktlen) & 0xFF;
+                    }
+                    break;
+                }
+            }
+
+            /* The list must never overflow: every flow fits MAX_VIRT_DEV. */
+            blog_assertv( (i != MAX_VIRT_DEV) );
+            break;
+        }
+
+        case GRE_TUNL:
+        {
+            blog_p->tunl_p = net_p; /* Pointer to tunnel */
+            break;
+        }
+
+        case TOS_MODE:
+        {
+            /* param1 is the direction; param2 the TOS mode to record. */
+            if (param1 == DIR_RX) 
+                blog_p->tos_mode_ds = param2;
+            else
+                blog_p->tos_mode_us = param2;
+
+            break;
+        }
+
+        default:
+            break;
+    }
+    return;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_notify
+ * Description  : Notify a Blog client (xx_hook) of an event.
+ * Parameters   :
+ *  event       : notification
+ *  net_p       : Pointer to a network stack entity
+ *  param1      : optional parameter 1
+ *  param2      : optional parameter 2
+ * PreRequisite : acquire blog_lock_g before calling blog_notify()
+ *------------------------------------------------------------------------------
+ */
+void blog_notify( BlogNotify_t event, void * net_p,
+                  uint32_t param1, uint32_t param2 )
+{
+    blog_assertv( (event < BLOG_NOTIFY_MAX) );
+    blog_assertv( (net_p != (void *)NULL) );
+
+    /* No client registered: drop the notification silently. */
+    if ( unlikely(blog_xx_hook_g == (BlogNotifyHook_t)NULL) )
+        return;
+
+    blog_print( "notify<%s> net_p<0x%08x>"
+                " param1<%u:0x%08x> param2<%u:0x%08x> [<%08x>]",
+                strBlogNotify[event], (int)net_p,
+                param1, (int)param1, param2, (int)param2,
+                (int)__builtin_return_address(0) );
+
+    blog_xx_hook_g( event, net_p, param1, param2 );
+
+#if defined(CONFIG_BCM_KF_WL)
+	/* first flush the flows from Flow-cache/FAP and then clear the BRC_HOT */
+	if (event == DESTROY_BRIDGEFDB) { /* for WLAN PKTC use */
+		if ( likely(wl_pktc_del_hook != NULL) ) 
+			wl_pktc_del_hook((uint32_t)(((struct net_bridge_fdb_entry *)net_p)->addr.addr));
+		if ( likely(dhd_pktc_del_hook != NULL) ) 
+			dhd_pktc_del_hook((uint32_t)(((struct net_bridge_fdb_entry *)net_p)->addr.addr));
+	}
+#endif
+
+    return;
+}
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+uint32_t blog_cttime( struct nf_conn *ct, BlogCtTime_t *ct_time_p )
+{
+    uint32_t proto = ct_time_p->proto; 
+    uint32_t proto_type = ct_time_p->unknown; 
+
+    /* Convert the idle time (seconds) into jiffies for conntrack. */
+    ct_time_p->idle_jiffies = ct_time_p->idle*HZ;
+
+    if ( proto_type == 0 )
+    {
+        if ( proto == IPPROTO_TCP )
+        {
+            if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+            {
+                /*
+                Conntrack CLOSED TCP connection entries can have 
+                large timeout, when :
+                1.	Accelerator overflows (i.e. full)
+                2.	somehow  *only* one leg of connection is
+                accelerated 
+                3.	TCP-RST is received on non-accelerated flow 
+                (i.e. conntrack will mark the connection as CLOSED)
+                4.	Accelerated leg of connection received some 
+                packets - triggering accelerator to refresh the
+                connection in conntrack with large timeout.
+                 */
+                return 0; /* Only set timeout in established state */
+            }
+            ct_time_p->extra_jiffies = blog_nat_tcp_def_idle_timeout;
+        }
+        /* NOTE(review): brace-less else-if with an #ifdef'ed inner if/else
+         * below — the final else still binds to the UDP test in both
+         * configs, but braces would make that far less fragile. */
+        else if ( proto == IPPROTO_UDP )
+#if defined(CONFIG_BCM_KF_NETFILTER)
+            if(ct->derived_timeout > 0) 
+                ct_time_p->extra_jiffies = ct->derived_timeout;
+            else
+#endif
+                ct_time_p->extra_jiffies = blog_nat_udp_def_idle_timeout;
+        else /* default:non-TCP|UDP timer refresh */
+            ct_time_p->extra_jiffies = blog_nat_generic_def_idle_timeout;
+    }
+    else
+    {
+        /* refresh timeout of unknown protocol */
+        ct_time_p->extra_jiffies = blog_nat_generic_def_idle_timeout;
+    }
+    return 0;
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_request
+ * Description  : Blog client requests an operation to be performed on a network
+ *                stack entity.
+ * Parameters   :
+ *  request     : request type
+ *  net_p       : Pointer to a network stack entity
+ *  param1      : optional parameter 1
+ *  param2      : optional parameter 2
+ *------------------------------------------------------------------------------
+ */
+extern int blog_rule_delete_action( void *rule_p );
+
+uint32_t blog_request( BlogRequest_t request, void * net_p,
+                       uint32_t param1, uint32_t param2 )
+{
+    uint32_t ret=0;
+
+    blog_assertr( (request < BLOG_REQUEST_MAX), 0 );
+    blog_assertr( (net_p != (void *)NULL), 0 );
+
+/* High-frequency requests (refresh, time) are not traced in debug builds. */
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+    if ( (request!=FLOWTRACK_REFRESH) && (request != SYS_TIME_GET) )
+#endif
+        blog_print( "request<%s> net_p<0x%08x>"
+                    " param1<%u:0x%08x> param2<%u:0x%08x>",
+                    strBlogRequest[request], (int)net_p,
+                    param1, (int)param1, param2, (int)param2);
+
+    /* Cases that `return` directly perform a side effect only; cases that
+     * `break` fall through to the traced return of `ret`. */
+    switch ( request )
+    {
+#if defined(BLOG_NF_CONNTRACK)
+        case FLOWTRACK_KEY_SET:
+            blog_assertr( ((param1 == BLOG_PARAM1_DIR_ORIG) ||
+                           (param1 == BLOG_PARAM1_DIR_REPLY)), 0 );
+            ((struct nf_conn *)net_p)->blog_key[param1] = param2;
+            return 0;
+
+        case FLOWTRACK_KEY_GET:
+            blog_assertr( ((param1 == BLOG_PARAM1_DIR_ORIG) ||
+                           (param1 == BLOG_PARAM1_DIR_REPLY)), 0 );
+            ret = ((struct nf_conn *)net_p)->blog_key[param1];
+            break;
+
+#if defined(CONFIG_NF_DYNDSCP) || defined(CONFIG_NF_DYNDSCP_MODULE)
+        case FLOWTRACK_DSCP_GET:
+            blog_assertr( ((param1 == BLOG_PARAM1_DIR_ORIG) ||
+                           (param1 == BLOG_PARAM1_DIR_REPLY)), 0 );
+            ret = ((struct nf_conn *)net_p)->dyndscp.dscp[param1];
+            break;
+#endif
+
+        case FLOWTRACK_CONFIRMED:    /* E.g. UDP connection confirmed */
+            ret = test_bit( IPS_CONFIRMED_BIT,
+                            &((struct nf_conn *)net_p)->status );
+            break;
+
+        case FLOWTRACK_ASSURED:      /* E.g. TCP connection confirmed */
+            ret = test_bit( IPS_ASSURED_BIT,
+                            &((struct nf_conn *)net_p)->status );
+            break;
+
+        case FLOWTRACK_ALG_HELPER:
+        {
+            struct nf_conn * nfct_p;
+            struct nf_conn_help * help;
+
+            nfct_p = (struct nf_conn *)net_p;
+            help = nfct_help(nfct_p);
+
+            /* Report 1 when a conntrack ALG helper (other than BCM-NAT)
+             * is attached: such flows are not safe to accelerate. */
+            if ( (help != (struct nf_conn_help *)NULL )
+                && (help->helper != (struct nf_conntrack_helper *)NULL) 
+                && (help->helper->name && strcmp(help->helper->name, "BCM-NAT")) )
+            {
+                blog_print( "HELPER ct<0x%08x> helper<%s>",
+                            (int)net_p, help->helper->name );
+                return 1;
+            }
+            return 0;
+        }
+
+        case FLOWTRACK_EXCLUDE:  /* caution: modifies net_p */
+            clear_bit(IPS_BLOG_BIT, &((struct nf_conn *)net_p)->status);
+            return 0;
+
+        case FLOWTRACK_REFRESH:
+        {
+            /* param1 carries a BlogCtTime_t pointer, not an integer. */
+            blog_cttime( (struct nf_conn *)net_p, (BlogCtTime_t *) param1);
+            return 0;
+        }
+
+        case FLOWTRACK_TIME_SET:
+        {
+            struct nf_conn *ct = (struct nf_conn *)net_p;
+            BlogCtTime_t *ct_time_p = (BlogCtTime_t *) param1;
+
+            blog_assertr( (ct_time_p != NULL), 0 );
+
+            if (blog_cttime_update_fn && ct && ct_time_p)
+            {
+                blog_cttime( ct, ct_time_p );
+                (*blog_cttime_update_fn)(ct, ct_time_p);
+            }
+            return 0;
+        }
+#endif /* defined(BLOG_NF_CONNTRACK) */
+
+        case BRIDGEFDB_KEY_SET:
+        {
+            blog_assertr( ((param1 == BLOG_PARAM1_SRCFDB) ||
+                           (param1 == BLOG_PARAM1_DSTFDB)), 0 );
+            ((struct net_bridge_fdb_entry *)net_p)->fdb_key = param2;
+            return 0;
+        }
+
+        case BRIDGEFDB_KEY_GET:
+        {
+            blog_assertr( ((param1 == BLOG_PARAM1_SRCFDB) ||
+                           (param1 == BLOG_PARAM1_DSTFDB)), 0 );
+            ret = ((struct net_bridge_fdb_entry *)net_p)->fdb_key;
+            break;
+        }
+
+        case BRIDGEFDB_TIME_SET:
+        {
+            /* param2 is the new `updated` timestamp (jiffies). */
+            ((struct net_bridge_fdb_entry *)net_p)->updated = param2;
+            return 0;
+        }
+
+        case NETIF_PUT_STATS:
+        {
+            struct net_device * dev_p = (struct net_device *)net_p;
+            BlogStats_t * bstats_p = (BlogStats_t *) param1;
+            blog_assertr( (bstats_p != (BlogStats_t *)NULL), 0 );
+
+            blog_print("dev_p<0x%08x> rx_pkt<%lu> rx_byte<%lu> tx_pkt<%lu>"
+                       " tx_byte<%lu> multicast<%lu>", (int)dev_p,
+                        bstats_p->rx_packets, bstats_p->rx_bytes,
+                        bstats_p->tx_packets, bstats_p->tx_bytes,
+                        bstats_p->multicast);
+
+            /* put_stats is optional; devices without it drop the stats. */
+            if ( dev_p->put_stats )
+                dev_p->put_stats( dev_p, bstats_p );
+            return 0;
+        }
+        
+        case LINK_XMIT_FN:
+        {
+            struct net_device * dev_p = (struct net_device *)net_p;
+
+/* The transmit entry point moved into netdev_ops in 2.6.30. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+            ret = (uint32_t)(dev_p->netdev_ops->ndo_start_xmit);
+#else
+            ret = (uint32_t)(dev_p->hard_start_xmit);
+#endif
+            break;
+        }
+
+        case LINK_NOCARRIER:
+            ret = test_bit( __LINK_STATE_NOCARRIER,
+                            &((struct net_device *)net_p)->state );
+            break;
+
+        case NETDEV_NAME:
+        {
+            struct net_device * dev_p = (struct net_device *)net_p;
+            ret = (uint32_t)(dev_p->name);  /* pointer returned as uint32_t */
+            break;
+        }
+
+        case MCAST_KEY_SET:
+        {
+#if defined(CONFIG_BR_IGMP_SNOOP) && defined(CONFIG_BCM_KF_IGMP)
+
+            if ( param1 == BlogTraffic_IPV4_MCAST )
+               ((struct net_bridge_mc_fdb_entry *)net_p)->blog_idx = param2;
+#endif
+#if defined(CONFIG_BR_MLD_SNOOP) && defined(CONFIG_BCM_KF_MLD)
+            if ( param1 == BlogTraffic_IPV6_MCAST )
+                ((struct net_br_mld_mc_fdb_entry *)net_p)->blog_idx = param2;
+#endif
+            return 0;
+        }
+/* MCAST_KEY_GET is currently compiled out; kept for reference. */
+#if 0
+        case MCAST_KEY_GET:
+            if ( param1 == BlogTraffic_IPV4_MCAST )
+               ret = ((struct net_bridge_mc_fdb_entry *)net_p)->blog_idx;
+#if defined(CONFIG_BR_MLD_SNOOP)
+            else
+               ret = ((struct net_br_mld_mc_fdb_entry *)net_p)->blog_idx;
+#endif
+
+            break;
+#endif
+        case IQPRIO_SKBMARK_SET:
+        {
+            Blog_t *blog_p = (Blog_t *)net_p;
+            blog_p->mark = SKBMARK_SET_IQPRIO_MARK(blog_p->mark, param1 );
+            return 0;
+        }
+
+        case MCAST_DFLT_MIPS:
+        {
+            blog_rule_delete_action( net_p );
+            return 0;
+        }
+
+        case TCPACK_PRIO:
+        {
+            /* param1 is the IP payload length of the candidate pure ACK. */
+            blog_tcpack_prio( (Blog_t *)net_p, param1 );
+            return 0;
+        }
+
+        case SYS_TIME_GET:
+        {
+           *(uint32_t *)net_p = jiffies;
+            return 0;
+        }
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+        case GRE_TUNL_XMIT:
+        {
+            blog_assertr( ((BlogIpv4Hdr_t *)param1 != NULL), 0 );
+
+            return blog_gre_xmit_update_fn(net_p, (BlogIpv4Hdr_t *)param1,
+                        param2);
+        }
+#endif
+
+        case SKB_DST_ENTRY_SET:
+        {
+            Blog_t *blog_p = (Blog_t *)net_p;
+            struct sk_buff *skb_p = (struct sk_buff *)param1;
+            struct dst_entry *dst_p;
+
+            blog_assertr( (skb_p != (void *)NULL), 0 );
+
+            /* Hold a reference so the dst outlives the skb; released via
+             * SKB_DST_ENTRY_RELEASE below. */
+            dst_p = skb_dst(skb_p);
+            dst_hold(dst_p);
+            blog_p->dst_entry = (void *)dst_p;
+            return 0;
+        }
+
+        case SKB_DST_ENTRY_RELEASE:
+        {
+            dst_release((struct dst_entry *)net_p);
+            return 0;
+        }
+
+        default:
+            return 0;
+    }
+
+    blog_print("ret<%u:0x%08x>", ret, (int)ret);
+
+    return ret;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_query
+ * Description  : Query a Blog client (qr_hook) of an event.
+ * Parameters   :
+ *  query       : query
+ *  net_p       : Pointer to a network stack entity
+ *  param1      : optional parameter 1
+ *  param2      : optional parameter 2
+ *  param3      : optional parameter 3
+ * PreRequisite : acquire blog_lock_g before calling blog_query()
+ *------------------------------------------------------------------------------
+ */
+void blog_query( BlogQuery_t query, void * net_p,
+                 uint32_t param1, uint32_t param2, uint32_t param3 )
+{
+    blog_assertv( (query < BLOG_QUERY_MAX) );
+    blog_assertv( (net_p != (void *)NULL) );
+
+    /* No query client registered: drop the query silently. */
+    if ( unlikely(blog_qr_hook_g == (BlogQueryHook_t)NULL) )
+        return;
+
+    blog_print( "Query<%s> net_p<0x%08x> param1<%u:0x%08x> "
+                "param2<%u:0x%08x> param3<%u:0x%08x> [<%08x>] ",
+                strBlogQuery[query], (int)net_p, param1, (int)param1, 
+                param2, (int)param2, param3, (int)param3,
+                (int)__builtin_return_address(0) );
+
+    blog_qr_hook_g( query, net_p, param1, param2, param3 );
+
+    return;
+}
+
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_filter
+ * Description  : Filter packets that need blogging.
+ *                E.g. To skip logging of control versus data type packet.
+ *   blog_p     : Received packet parsed and logged into a blog
+ * Returns      :
+ *   PKT_NORM   : If normal stack processing without logging
+ *   PKT_BLOG   : If stack processing with logging
+ *------------------------------------------------------------------------------
+ */
+BlogAction_t blog_filter( Blog_t * blog_p )
+{
+    blog_assertr( ((blog_p != BLOG_NULL) && (_IS_BPTR_(blog_p))), PKT_NORM );
+    blog_assertr( (blog_p->rx.info.hdrs != 0), PKT_NORM );
+
+    /*
+     * Placeholder filter: every parsed packet continues through the stack
+     * with logging enabled.  Site-specific tests may be added here to
+     * return PKT_NORM for control traffic that should not be blogged,
+     * for example:
+     *  - SSDP over HTTPMU (UDP to multicast 239.255.255.250:1900), or
+     *  - IPv4 Local Network Control Block 224.0.0/24 and
+     *    Internetwork Control Block 224.0.1/24.
+     */
+    return PKT_BLOG;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_finit, blog_sinit
+ * Description  : This function may be inserted in a physical network device's
+ *                packet receive handler. A receive handler typically extracts
+ *                the packet data from the rx DMA buffer ring, allocates and
+ *                sets up a sk_buff, decodes the l2 headers and passes the
+ *                sk_buff into the network stack via netif_receive_skb/netif_rx.
+ *
+ *                Prior to constructing a sk_buff, blog_finit() may be invoked
+ *                using a fast kernel buffer to carry the received buffer's
+ *                context <data,len>, and the receive net_device and l1 info.
+ *
+ *                This function invokes the bound receive blog hook.
+ *
+ * Parameters   :
+ *  blog_finit() fkb_p: Pointer to a fast kernel buffer<data,len>
+ *  blog_sinit() skb_p: Pointer to a Linux kernel skbuff
+ *  dev_p       : Pointer to the net_device on which the packet arrived.
+ *  encap       : First encapsulation type
+ *  channel     : Channel/Port number on which the packet arrived.
+ *  phyHdr      : e.g. XTM device RFC2684 header type
+ *
+ * Returns      :
+ *  PKT_DONE    : The fkb|skb is consumed and device should not process fkb|skb.
+ *
+ *  PKT_NORM    : Device may invoke netif_receive_skb for normal processing.
+ *                No Blog is associated and fkb reference count = 0.
+ *                [invoking fkb_release() has no effect]
+ *
+ *  PKT_BLOG    : PKT_NORM behaviour + Blogging enabled.
+ *                Must call fkb_release() to free associated Blog
+ *
+ *------------------------------------------------------------------------------
+ */
+inline
+BlogAction_t blog_finit_locked( struct fkbuff * fkb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    BlogHash_t blogHash;
+    BlogAction_t action = PKT_NORM;
+
+    /* Caller must already hold the blog lock; checked on SMP/PREEMPT builds. */
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    BCM_ASSERT_HAS_SPINLOCK_R(&blog_lock_g, action);
+#endif
+
+    blogHash.match = 0U;     /* also clears hash, protocol = 0 */
+
+    /* No receive hook bound: pass the packet through untouched (PKT_NORM). */
+    if ( unlikely(blog_rx_hook_g == (BlogDevHook_t)NULL) )
+        goto bypass;
+
+    /* Build the L1 lookup key from channel and phy header information. */
+    blogHash.l1_tuple.channel = (uint8_t)channel;
+    blogHash.l1_tuple.phyType = BLOG_GET_PHYTYPE(phyHdr);
+    blogHash.l1_tuple.phyLen = BLOG_GET_PHYLEN(phyHdr);
+
+    blog_assertr( (blogHash.l1_tuple.phyType < BLOG_MAXPHY), PKT_NORM);
+    blog_print( "fkb<0x%08x:%x> pData<0x%08x> length<%d> dev<0x%08x>"
+                " chnl<%u> %s PhyHdrLen<%u> key<0x%08x>",
+                (int)fkb_p, _is_in_skb_tag_(fkb_p->flags),
+                (int)fkb_p->data, fkb_p->len, (int)dev_p,
+                channel, strBlogPhy[blogHash.l1_tuple.phyType],
+                rfc2684HdrLength[blogHash.l1_tuple.phyLen],
+                blogHash.match );
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+    /* GRE receive-side policing: sequence/checksum problems drop the packet
+     * (PKT_DROP); informational statuses fall through to the rx hook. */
+    if (blog_gre_tunnel_accelerated())
+    {
+        int gre_status;
+        void *tunl_p = NULL;
+        uint32_t pkt_seqno;
+        gre_status = blog_gre_rcv( fkb_p, (void *)dev_p, encap, &tunl_p,
+            &pkt_seqno );
+
+        switch (gre_status)
+        {
+            case BLOG_GRE_RCV_NOT_GRE:
+            case BLOG_GRE_RCV_NO_SEQNO:
+            case BLOG_GRE_RCV_IN_SEQ:
+                break;
+
+            case BLOG_GRE_RCV_NO_TUNNEL:
+                blog_print( "RX GRE no matching tunnel" );
+                break;
+
+            case BLOG_GRE_RCV_FLAGS_MISSMATCH:
+                blog_print( "RX GRE flags miss-match" );
+                action = PKT_DROP;
+                goto bypass;
+
+            case BLOG_GRE_RCV_CHKSUM_ERR:
+                blog_print( "RX GRE checksum error" );
+                action = PKT_DROP;
+                goto bypass;
+
+            case BLOG_GRE_RCV_OOS_LT:
+                blog_print( "RX GRE out-of-seq LT pkt seqno <%u>", pkt_seqno );
+                action = PKT_DROP;
+                goto bypass;
+
+            case BLOG_GRE_RCV_OOS_GT:
+                blog_print( "RX GRE out-of-seq GT pkt seqno <%u>", pkt_seqno );
+                break;
+
+            default:
+                blog_print( "RX GRE unkown status <%u>", gre_status );
+                break;
+        }
+    }
+#endif
+
+#if defined(CONFIG_ACCEL_PPTP) 
+    /* NOTE(review): the PPTP section is gated on the GRE acceleration
+     * check (PPTP is GRE-encapsulated) — confirm this reuse is intended
+     * rather than a copy/paste of the GRE guard. */
+	if (blog_gre_tunnel_accelerated())
+	{
+		int pptp_status;
+        uint32_t rcv_pktSeq;
+        pptp_status = blog_pptp_rcv( fkb_p, encap, &rcv_pktSeq );
+        switch (pptp_status)
+        {
+            case BLOG_PPTP_RCV_NOT_PPTP:
+            case BLOG_PPTP_RCV_NO_SEQNO:
+            case BLOG_PPTP_RCV_IN_SEQ:
+            	break;
+
+            case BLOG_PPTP_RCV_NO_TUNNEL:
+                blog_print( "RX PPTP no matching tunnel" );
+            	break;
+
+            case BLOG_PPTP_RCV_FLAGS_MISSMATCH:
+               	blog_print( "RX PPTP flags miss-match" );
+                action = PKT_DROP;
+                goto bypass;
+
+            case BLOG_PPTP_RCV_OOS_LT:
+                blog_print( "RX PPTP out-of-seq LT pkt seqno <%u>", rcv_pktSeq );
+                action = PKT_DROP;
+                goto bypass;
+
+            case BLOG_PPTP_RCV_OOS_GT:
+                blog_print( "RX PPTP out-of-seq GT pkt seqno <%u>", rcv_pktSeq );
+                break;
+
+            default:
+                blog_print( "RX PPTP unkown status <%u>", pptp_status );
+                break;
+        }       
+        
+	}	
+#endif
+
+    /* Hand the buffer context to the bound receive hook for flow lookup. */
+    action = blog_rx_hook_g( fkb_p, (void *)dev_p, encap, blogHash.match );
+
+    if ( action == PKT_BLOG )
+    {
+        fkb_p->blog_p->rx.dev_p = (void *)dev_p;           /* Log device info */
+#if defined(CC_BLOG_SUPPORT_USER_FILTER)
+        action = blog_filter(fkb_p->blog_p);
+#endif
+    }
+
+    /* PKT_NORM: release the fkb (per the contract above, a no-op when no
+     * reference/Blog is held). */
+    if ( unlikely(action == PKT_NORM) )
+        fkb_release( fkb_p );
+
+bypass:
+    return action;
+}
+
+BlogAction_t blog_finit( struct fkbuff * fkb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    BlogAction_t action;
+
+    /* Locking wrapper: serialize blog access, then defer to the
+     * locked variant which does all the real work. */
+    blog_lock();
+    action = blog_finit_locked( fkb_p, dev_p, encap, channel, phyHdr );
+    blog_unlock();
+
+    return action;
+}
+
+/*
+ * blog_sinit serves as a wrapper to blog_finit() by overlaying an fkb into a
+ * skb and invoking blog_finit().
+ */
+static inline BlogAction_t _blog_sinit( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr, int is_locked )
+{
+    struct fkbuff * fkb_p;
+    BlogAction_t action = PKT_NORM;
+
+    /* No receive hook bound: skip the fkb overlay work entirely. */
+    if ( unlikely(blog_rx_hook_g == (BlogDevHook_t)NULL) )
+        goto bypass;
+
+    blog_assertr( (BLOG_GET_PHYTYPE(phyHdr) < BLOG_MAXPHY), PKT_NORM );
+    blog_print( "skb<0x%08x> pData<0x%08x> length<%d> dev<0x%08x>"
+                " chnl<%u> %s PhyHdrLen<%u>",
+                (int)skb_p, (int)skb_p->data, skb_p->len, (int)dev_p,
+                channel, strBlogPhy[BLOG_GET_PHYTYPE(phyHdr)],
+                rfc2684HdrLength[BLOG_GET_PHYLEN(phyHdr)] );
+
+    /* CAUTION: Tag that the fkbuff is from sk_buff */
+    fkb_p = (FkBuff_t *) &skb_p->fkbInSkb;
+    fkb_p->flags = _set_in_skb_tag_(0); /* clear and set in_skb tag */
+    FKB_CLEAR_LEN_WORD_FLAGS(fkb_p->len_word); /*clears bits 31-24 of skb->len */
+
+    /* Dispatch to the locked or self-locking variant per the caller. */
+    if (is_locked)
+        action = blog_finit_locked( fkb_p, dev_p, encap, channel, phyHdr );
+    else
+    action = blog_finit( fkb_p, dev_p, encap, channel, phyHdr );
+
+    /* On PKT_BLOG link the skb into the new Blog; otherwise clear the
+     * overlaid fkb's blog pointer so no stale association remains. */
+    if ( action == PKT_BLOG )
+    {
+         blog_assertr( (fkb_p->blog_p != BLOG_NULL), PKT_NORM );
+         fkb_p->blog_p->skb_p = skb_p;
+    } 
+    else
+         fkb_p->blog_p = NULL;
+
+bypass:
+    return action;
+}
+
+BlogAction_t blog_sinit( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    BlogAction_t action;
+
+    /* Unlocked entry point: is_locked=0 lets blog_finit() take the lock. */
+    action = _blog_sinit( skb_p, dev_p, encap, channel, phyHdr, 0 );
+    return action;
+}
+
+BlogAction_t blog_sinit_locked( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    BlogAction_t action;
+
+    /* Entry point for callers that already hold the blog lock (is_locked=1). */
+    action = _blog_sinit( skb_p, dev_p, encap, channel, phyHdr, 1 );
+    return action;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_emit
+ * Description  : This function may be inserted in a physical network device's
+ *                hard_start_xmit function just before the packet data is
+ *                extracted from the sk_buff and enqueued for DMA transfer.
+ *
+ *                This function invokes the transmit blog hook.
+ * Parameters   :
+ *  nbuff_p     : Pointer to a NBuff
+ *  dev_p       : Pointer to the net_device on which the packet is transmitted.
+ *  encap       : First encapsulation type
+ *  channel     : Channel/Port number on which the packet is transmitted.
+ *  phyHdr      : e.g. XTM device RFC2684 header type
+ *
+ * Returns      :
+ *  PKT_DONE    : The skb_p is consumed and device should not process skb_p.
+ *  PKT_NORM    : Device may use skb_p and proceed with hard xmit 
+ *                Blog object is disassociated and freed.
+ *------------------------------------------------------------------------------
+ */
+BlogAction_t _blog_emit( void * nbuff_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    BlogHash_t blogHash;
+    struct sk_buff * skb_p;
+    Blog_t * blog_p;
+    BlogAction_t action = PKT_NORM;
+
+    // outer inline function has already verified this is a skbuff
+    skb_p = PNBUFF_2_SKBUFF(nbuff_p);   /* same as nbuff_p */
+
+    /* Nothing to do unless a Blog is associated and a device was given. */
+    blog_p = skb_p->blog_p;
+    if ( ( blog_p == BLOG_NULL ) || ( dev_p == NULL ) )
+        goto bypass;
+
+    blog_assertr( (_IS_BPTR_(blog_p)), PKT_NORM );
+
+    blogHash.match = 0U;
+
+    if ( likely(blog_tx_hook_g != (BlogDevHook_t)NULL) )
+    {
+        blog_lock();
+
+        blog_p->tx.dev_p = (void *)dev_p;           /* Log device info */
+
+        /* Track the smallest MTU seen along this flow's transmit path. */
+        if (blog_p->minMtu > ((struct net_device *)dev_p)->mtu)
+            blog_p->minMtu = ((struct net_device *)dev_p)->mtu;
+
+        blogHash.l1_tuple.channel = (uint8_t)channel;
+        blogHash.l1_tuple.phyType = BLOG_GET_PHYTYPE(phyHdr);
+        blogHash.l1_tuple.phyLen  = BLOG_GET_PHYLEN(phyHdr);
+
+        blog_p->priority = skb_p->priority;         /* Log skb info */
+        blog_p->mark     = skb_p->mark;
+
+        blog_assertr( (BLOG_GET_PHYTYPE(phyHdr) < BLOG_MAXPHY), PKT_NORM);
+        blog_print( "skb<0x%08x> blog<0x%08x> pData<0x%08x> length<%d>"
+                    " dev<0x%08x> chnl<%u> %s PhyHdrLen<%u> key<0x%08x>",
+            (int)skb_p, (int)blog_p, (int)skb_p->data, skb_p->len,
+            (int)dev_p, channel, strBlogPhy[BLOG_GET_PHYTYPE(phyHdr)],
+            rfc2684HdrLength[BLOG_GET_PHYLEN(phyHdr)],
+            blogHash.match );
+
+        /* NOTE(review): the tx hook receives skb_p->dev rather than the
+         * dev_p argument logged above — confirm this asymmetry is intended. */
+        action = blog_tx_hook_g( skb_p, (void*)skb_p->dev,
+                                 encap, blogHash.match );
+
+        blog_unlock();
+    }
+    blog_free( skb_p );                             /* Dis-associate w/ skb */
+
+bypass:
+    /* GRE/PPTP transmit-side processing runs even when no Blog was
+     * associated with the skb (bypass path included). */
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+    blog_gre_xmit(skb_p, encap);
+#endif
+
+#if defined(CONFIG_ACCEL_PPTP)
+    blog_pptp_xmit(skb_p, encap); 
+#endif    
+
+    return action;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_activate
+ * Description  : This function is a static configuration function of blog
+ *                application. It invokes blog configuration hook
+ * Parameters   :
+ *  blog_p      : pointer to a blog with configuration information
+ *  traffic     : type of the traffic
+ *  client      : configuration client
+ *
+ * Returns      :
+ *  ActivateKey : If the configuration is successful, a key is returned.
+ *                Otherwise, BLOG_KEY_INVALID is returned
+ *------------------------------------------------------------------------------
+ */
+uint32_t blog_activate( Blog_t * blog_p, BlogTraffic_t traffic,
+                        BlogClient_t client )
+{
+    uint32_t     key;
+
+    key = BLOG_KEY_INVALID;
+    
+    /* Validate arguments: any invalid input yields BLOG_KEY_INVALID. */
+    if ( blog_p == BLOG_NULL ||
+         traffic >= BlogTraffic_MAX ||
+         client >= BlogClient_MAX )
+    {
+        blog_assertr( ( blog_p != BLOG_NULL ), key );
+        goto bypass;
+    }
+
+    /* No static-configuration hook registered for this client. */
+    if ( unlikely(blog_sc_hook_g[client] == (BlogScHook_t)NULL) )
+        goto bypass;
+
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+    blog_print( "blog_p<0x%08x> traffic<%u> client<%u>", (int)blog_p, traffic, client );
+    blog_dump( blog_p );
+#endif
+
+    /* Invoke the client's configuration hook under the blog lock. */
+    blog_lock();
+    key = blog_sc_hook_g[client]( blog_p, traffic );
+    blog_unlock();
+
+bypass:
+    return key;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_deactivate
+ * Description  : This function is a deconfiguration function of blog
+ *                application
+ * Parameters   :
+ *  key         : blog key information
+ *  traffic     : type of traffic
+ *  client      : configuration client
+ *
+ * Returns      :
+ *  blog_p      : If the deconfiguration is successful, the associated blog 
+ *                pointer is returned to the caller
+ *------------------------------------------------------------------------------
+ */
+Blog_t * blog_deactivate( uint32_t key, BlogTraffic_t traffic,
+                          BlogClient_t client )
+{
+    Blog_t * blog_p = NULL;
+
+    /* Validate arguments: any invalid input yields a NULL blog pointer. */
+    if ( key == BLOG_KEY_INVALID ||
+         traffic >= BlogTraffic_MAX ||
+         client >= BlogClient_MAX )
+    {
+        blog_assertr( (key != BLOG_KEY_INVALID), blog_p );
+        goto bypass;
+    }
+
+    /* No static-deconfiguration hook registered for this client. */
+    if ( unlikely(blog_sd_hook_g[client] == (BlogSdHook_t)NULL) )
+        goto bypass;
+
+    blog_print( "key<%08x> traffic<%u> client<%u>", key, traffic, client );
+
+    /* Invoke the client's deconfiguration hook under the blog lock. */
+    blog_lock();
+    blog_p = blog_sd_hook_g[client]( key, traffic );
+    blog_unlock();
+
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+    blog_dump( blog_p );
+#endif
+
+bypass:
+    return blog_p;
+}
+
+/*
+ * blog_iq_prio determines the Ingress QoS priority of the packet
+ */
+int blog_iq_prio( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+{
+    struct fkbuff * fkb_p;
+    BlogAction_t action = PKT_NORM;
+    int iq_prio = 1;                  /* default priority when no flow match */
+    uint32_t dummy;
+    void *dummy_dev_p = &dummy;       /* placeholder device for the lookup */
+
+    /* Without a receive hook the default priority is returned as-is. */
+    if ( unlikely(blog_rx_hook_g == (BlogDevHook_t)NULL) )
+        goto bypass;
+
+    blog_assertr( (BLOG_GET_PHYTYPE(phyHdr) < BLOG_MAXPHY), 1 );
+    blog_print( "skb<0x%08x> pData<0x%08x> length<%d> dev<0x%08x>"
+                " chnl<%u> %s PhyHdrLen<%u>",
+                (int)skb_p, (int)skb_p->data, skb_p->len, (int)dev_p,
+                channel, strBlogPhy[BLOG_GET_PHYTYPE(phyHdr)],
+                rfc2684HdrLength[BLOG_GET_PHYLEN(phyHdr)] );
+
+    /* CAUTION: Tag that the fkbuff is from sk_buff */
+    fkb_p = (FkBuff_t *) &skb_p->fkbInSkb;
+
+    /* set in_skb and chk_iq_prio tag */
+    fkb_p->flags = _set_in_skb_n_chk_iq_prio_tag_(0); 
+    action = blog_finit( fkb_p, dummy_dev_p, encap, channel, phyHdr );
+
+    /* On PKT_BLOG extract the learned priority, then detach the Blog:
+     * only the priority query result is wanted here, not the association. */
+    if ( action == PKT_BLOG )
+    {
+         blog_assertr( (fkb_p->blog_p != BLOG_NULL), PKT_NORM );
+         fkb_p->blog_p->skb_p = skb_p;
+         iq_prio = fkb_p->blog_p->iq_prio;
+         blog_free( skb_p );
+    } 
+    else
+         fkb_p->blog_p = NULL;
+
+bypass:
+    return iq_prio;
+}
+
+static int blog_notify_netevent(struct notifier_block *nb, unsigned long event, void *_neigh)
+{
+    struct neighbour *neigh = _neigh;
+
+    /* Only ARP binding changes are of interest here; every other
+     * netevent is ignored (returns 1, as before). */
+    if (event != NETEVENT_ARP_BINDING_CHANGE)
+        return 1;
+
+    /* Propagate the new IPv4 address / hardware address binding under
+     * the blog lock. */
+    blog_lock();
+    blog_notify(ARP_BIND_CHG, nb, *(uint32_t *)neigh->primary_key, (uint32_t)neigh->ha); 
+    blog_unlock();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_bind
+ * Description  : Override default rx and tx hooks.
+ *  blog_rx     : Function pointer to be invoked in blog_finit(), blog_sinit()
+ *  blog_tx     : Function pointer to be invoked in blog_emit()
+ *  blog_xx     : Function pointer to be invoked in blog_notify()
+ *  info        : Mask of the function pointers for configuration
+ *------------------------------------------------------------------------------
+ */
+void blog_bind( BlogDevHook_t blog_rx, BlogDevHook_t blog_tx,
+                BlogNotifyHook_t blog_xx, BlogQueryHook_t blog_qr, 
+                BlogBind_t bind)
+{
+    blog_print( "Bind Rx[<%08x>] Tx[<%08x>] Notify[<%08x>] bind[<%u>]",
+                (int)blog_rx, (int)blog_tx, (int)blog_xx,
+                (uint8_t)bind.hook_info );
+
+    /* bind.bmap selects which of the global hooks are (re)assigned;
+     * unselected hooks are left untouched. */
+    if ( bind.bmap.RX_HOOK )
+        blog_rx_hook_g = blog_rx;   /* Receive  hook */
+    if ( bind.bmap.TX_HOOK )
+        blog_tx_hook_g = blog_tx;   /* Transmit hook */
+    if ( bind.bmap.XX_HOOK )
+        blog_xx_hook_g = blog_xx;   /* Notify hook */
+    if ( bind.bmap.QR_HOOK )
+        blog_qr_hook_g = blog_qr;   /* Query hook */
+}
+
+/* Most recently registered hardware/software accelerator clients;
+ * BlogClient_MAX until blog_bind_config() registers one. */
+static BlogClient_t hw_accelerator_client = BlogClient_MAX;
+static BlogClient_t sw_accelerator_client = BlogClient_MAX;
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : is_hw_accelerator
+ * Description  : 
+ *------------------------------------------------------------------------------
+ */
+static int is_hw_accelerator(BlogClient_t client)
+{
+    /* FAP is always considered a hardware accelerator client. */
+    if (client == BlogClient_fap)
+        return 1;
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+    /* Runner counts as hardware acceleration only when RDPA is built in. */
+    if (client == BlogClient_runner)
+        return 1;
+#endif /* (CONFIG_BCM_RDPA) || (CONFIG_BCM_RDPA_MODULE) */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : hw_accelerator_client_get
+ * Description  :
+ *------------------------------------------------------------------------------
+ */
+int hw_accelerator_client_get(void)
+{
+    /* Returns the registered HW accelerator client (BlogClient_MAX if none). */
+    return hw_accelerator_client;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : sw_accelerator_client_get
+ * Description  :
+ *------------------------------------------------------------------------------
+ */
+int sw_accelerator_client_get(void)
+{
+    /* Returns the registered SW accelerator client (BlogClient_MAX if none). */
+    return sw_accelerator_client;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_bind_config
+ * Description  : Override default sc and sd hooks.
+ *  blog_sc     : Function pointer to be invoked in blog_activate()
+ *  blog_sd     : Function pointer to be invoked in blog_deactivate()
+ *  client      : configuration client
+ *  info        : Mask of the function pointers for configuration
+ *------------------------------------------------------------------------------
+ */
+void blog_bind_config( BlogScHook_t blog_sc, BlogSdHook_t blog_sd,
+                       BlogClient_t client, BlogBind_t bind)
+{
+    blog_print( "Bind Sc[<%08x>] Sd[<%08x>] Client[<%u>] bind[<%u>]",
+                (int)blog_sc, (int)blog_sd, client,
+                (uint8_t)bind.hook_info );
+
+    /* bind.bmap selects which per-client hooks are (re)assigned. */
+    if ( bind.bmap.SC_HOOK )
+        blog_sc_hook_g[client] = blog_sc;   /* Static config hook */
+    if ( bind.bmap.SD_HOOK )
+        blog_sd_hook_g[client] = blog_sd;   /* Static deconf hook */
+
+    /* Record the caller as the current HW or SW accelerator client;
+     * any client that is not a HW accelerator is treated as the SW one. */
+    if (is_hw_accelerator(client))
+        hw_accelerator_client = client;
+    else
+        sw_accelerator_client = client;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog
+ * Description  : Log the L2 or L3+4 tuple information
+ * Parameters   :
+ *  skb_p       : Pointer to the sk_buff
+ *  dir         : rx or tx path
+ *  encap       : Encapsulation type
+ *  len         : Length of header
+ *  data_p      : Pointer to encapsulation header data.
+ *------------------------------------------------------------------------------
+ */
+void blog( struct sk_buff * skb_p, BlogDir_t dir, BlogEncap_t encap,
+           size_t len, void * data_p )
+{
+    BlogHeader_t * bHdr_p;
+    Blog_t * blog_p;
+
+    blog_assertv( (skb_p != (struct sk_buff *)NULL ) );
+    blog_assertv( (skb_p->blog_p != BLOG_NULL) );
+    blog_assertv( (_IS_BPTR_(skb_p->blog_p)) );
+    blog_assertv( (data_p != (void *)NULL ) );
+    blog_assertv( (len <= BLOG_HDRSZ_MAX) );
+    blog_assertv( (encap < PROTO_MAX) );
+
+    blog_p = skb_p->blog_p;
+    blog_assertv( (blog_p->skb_p == skb_p) );
+
+    /* Select the rx or tx header log of this blog. */
+    bHdr_p = &blog_p->rx + dir;
+
+    if ( encap == PLD_IPv4 )    /* Log the IP Tuple */
+    {
+        BlogTuple_t * bTuple_p = &bHdr_p->tuple;
+        BlogIpv4Hdr_t * ip_p   = (BlogIpv4Hdr_t *)data_p;
+
+        /* Discontinue if non IPv4 or with IP options, or fragmented */
+        if ( (ip_p->ver != 4) || (ip_p->ihl != 5)
+             || (ip_p->flagsFrag & htons(BLOG_IP_FRAG_OFFSET|BLOG_IP_FLAG_MF)) )
+            goto skip;
+
+        if ( ip_p->proto == BLOG_IPPROTO_TCP )
+        {
+            BlogTcpHdr_t * th_p;
+            /* TCP header follows the (option-less, ihl==5) IPv4 header. */
+            th_p = (BlogTcpHdr_t*)( (uint8_t *)ip_p + BLOG_IPV4_HDR_LEN );
+
+            /* Discontinue if TCP RST/FIN */
+            if ( TCPH_RST(th_p) | TCPH_FIN(th_p) )
+                goto skip;
+            bTuple_p->port.source = th_p->sPort;
+            bTuple_p->port.dest = th_p->dPort;
+        }
+        else if ( ip_p->proto == BLOG_IPPROTO_UDP )
+        {
+            BlogUdpHdr_t * uh_p;
+            /* Fix: the UDP header also follows the IPv4 header, so the
+             * offset is the IPv4 header length (mirroring the TCP branch).
+             * Previously BLOG_UDP_HDR_LEN was used as the offset, which
+             * read the ports from inside the IP header. */
+            uh_p = (BlogUdpHdr_t *)( (uint8_t *)ip_p + BLOG_IPV4_HDR_LEN );
+            bTuple_p->port.source = uh_p->sPort;
+            bTuple_p->port.dest = uh_p->dPort;
+        }
+        else
+            goto skip;  /* Discontinue if non TCP or UDP upper layer protocol */
+
+        /* Record the remaining IPv4 tuple fields for flow matching. */
+        bTuple_p->ttl = ip_p->ttl;
+        bTuple_p->tos = ip_p->tos;
+        bTuple_p->check = ip_p->chkSum;
+        bTuple_p->saddr = blog_read32_align16( (uint16_t *)&ip_p->sAddr );
+        bTuple_p->daddr = blog_read32_align16( (uint16_t *)&ip_p->dAddr );
+        blog_p->key.protocol = ip_p->proto;
+    }
+    else if ( encap == PLD_IPv6 )    /* Log the IPv6 Tuple */
+    {
+        printk("FIXME blog encap PLD_IPv6 \n");
+    }
+    else    /* L2 encapsulation */
+    {
+        register short int * d;
+        register const short int * s;
+
+        blog_assertv( (bHdr_p->count < BLOG_ENCAP_MAX) );
+        blog_assertv( ((len<=20) && ((len & 0x1)==0)) );
+        blog_assertv( ((bHdr_p->length + len) < BLOG_HDRSZ_MAX) );
+
+        /* Append this encapsulation to the header log. */
+        bHdr_p->info.hdrs |= (1U << encap);
+        bHdr_p->encap[ bHdr_p->count++ ] = encap;
+        s = (const short int *)data_p;
+        d = (short int *)&(bHdr_p->l2hdr[bHdr_p->length]);
+        bHdr_p->length += len;
+
+        /* Copy the raw L2 header: each case deliberately falls through to
+         * the shorter ones, copying `len` bytes in half-word units. */
+        switch ( len ) /* common lengths, using half word alignment copy */
+        {
+            case 20: *(d+9)=*(s+9);
+                     *(d+8)=*(s+8);
+                     *(d+7)=*(s+7);
+                     /* fall through */
+            case 14: *(d+6)=*(s+6);
+                     /* fall through */
+            case 12: *(d+5)=*(s+5);
+                     /* fall through */
+            case 10: *(d+4)=*(s+4);
+                     /* fall through */
+            case  8: *(d+3)=*(s+3);
+                     /* fall through */
+            case  6: *(d+2)=*(s+2);
+                     /* fall through */
+            case  4: *(d+1)=*(s+1);
+                     /* fall through */
+            case  2: *(d+0)=*(s+0);
+                 break;
+            default:
+                 goto skip;
+        }
+    }
+
+    return;
+
+skip:   /* Discontinue further logging by dis-associating Blog_t object */
+
+    blog_skip( skb_p );
+
+    /* DO NOT ACCESS blog_p !!! */
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_nfct_dump
+ * Description  : Dump the nf_conn context
+ *  dev_p       : Pointer to a net_device object
+ * CAUTION      : nf_conn is not held !!!
+ *------------------------------------------------------------------------------
+ */
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+void blog_nfct_dump( struct sk_buff * skb_p, struct nf_conn * ct, uint32_t dir )
+{
+#if defined(BLOG_NF_CONNTRACK)
+    struct nf_conn_help *help_p;
+    struct nf_conn_nat  *nat_p;
+    int bitix;
+    if ( ct == NULL )
+    {
+        blog_error( "NULL NFCT error" );
+        return;
+    }
+
+    /* NAT extension is only available when NAT support is compiled in. */
+#ifdef CONFIG_NF_NAT_NEEDED
+    nat_p = nfct_nat(ct);
+#else
+    nat_p = (struct nf_conn_nat *)NULL;
+#endif
+
+    help_p = nfct_help(ct);
+    printk("\tNFCT: ct<0x%p>, info<%x> master<0x%p>\n"
+           "\t\tF_NAT<%p> keys[%u %u] dir<%s>\n"
+           "\t\thelp<0x%p> helper<%s>\n",
+            ct, 
+            (int)skb_p->nfctinfo, 
+            ct->master,
+            nat_p, 
+            ct->blog_key[IP_CT_DIR_ORIGINAL], 
+            ct->blog_key[IP_CT_DIR_REPLY],
+            (dir<IP_CT_DIR_MAX)?strIpctDir[dir]:strIpctDir[IP_CT_DIR_MAX],
+            help_p,
+            (help_p && help_p->helper) ? help_p->helper->name : "NONE" );
+
+    /* Decode the set bits of the conntrack status bitmask by name. */
+    printk( "\t\tSTATUS[ " );
+    for ( bitix = 0; bitix <= IPS_BLOG_BIT; bitix++ )
+        if ( ct->status & (1 << bitix) )
+            printk( "%s ", strIpctStatus[bitix] );
+    printk( "]\n" );
+#endif /* defined(BLOG_NF_CONNTRACK) */
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_netdev_dump
+ * Description  : Dump the contents of a net_device object.
+ *  dev_p       : Pointer to a net_device object
+ *
+ * CAUTION      : Net device is not held !!!
+ *
+ *------------------------------------------------------------------------------
+ */
+static void blog_netdev_dump( struct net_device * dev_p )
+{
+    /* net_device layout differs across kernel versions: 2.6.30+ uses the
+     * netdev_ops table, older kernels expose function pointers directly. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+    int i;
+    printk( "\tDEVICE: %s dev<0x%08x> ndo_start_xmit[<0x%08x>]\n"
+            "\t  dev_addr[ ", dev_p->name,
+            (int)dev_p, (int)dev_p->netdev_ops->ndo_start_xmit );
+    for ( i=0; i<dev_p->addr_len; i++ )
+        printk( "%02x ", *((uint8_t *)(dev_p->dev_addr) + i) );
+    printk( "]\n" );
+#else
+    int i;
+    printk( "\tDEVICE: %s dev<0x%08x>: poll[<%08x>] hard_start_xmit[<%08x>]\n"
+            "\t  hard_header[<%08x>] hard_header_cache[<%08x>]\n"
+            "\t  dev_addr[ ", dev_p->name,
+            (int)dev_p, (int)dev_p->poll, (int)dev_p->hard_start_xmit,
+            (int)dev_p->hard_header, (int)dev_p->hard_header_cache );
+    for ( i=0; i<dev_p->addr_len; i++ )
+        printk( "%02x ", *((uint8_t *)(dev_p->dev_addr) + i) );
+    printk( "]\n" );
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30) */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_tuple_dump
+ * Description  : Dump the contents of a BlogTuple_t object.
+ *  bTuple_p    : Pointer to the BlogTuple_t object
+ *------------------------------------------------------------------------------
+ */
+static void blog_tuple_dump( BlogTuple_t * bTuple_p )
+{
+    /* Print src/dst IPv4 address:port plus ttl, tos and checksum. */
+    printk( "\tIPv4:\n"
+            "\t\tSrc" BLOG_IPV4_ADDR_PORT_FMT
+             " Dst" BLOG_IPV4_ADDR_PORT_FMT "\n"
+            "\t\tttl<%3u> tos<%3u> check<0x%04x>\n",
+            BLOG_IPV4_ADDR(bTuple_p->saddr), ntohs(bTuple_p->port.source),
+            BLOG_IPV4_ADDR(bTuple_p->daddr), ntohs(bTuple_p->port.dest),
+            bTuple_p->ttl, bTuple_p->tos, bTuple_p->check );
+}
+ 
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_tupleV6_dump
+ * Description  : Dump the contents of a BlogTupleV6_t object.
+ *  bTupleV6_p    : Pointer to the BlogTupleV6_t object
+ *------------------------------------------------------------------------------
+ */
+static void blog_tupleV6_dump( BlogTupleV6_t * bTupleV6_p )
+{
+    /* Print src/dst IPv6 address:port plus the received hop limit. */
+    printk( "\tIPv6:\n"
+            "\t\tSrc" BLOG_IPV6_ADDR_PORT_FMT "\n"
+            "\t\tDst" BLOG_IPV6_ADDR_PORT_FMT "\n"
+            "\t\thop_limit<%3u>\n",
+            BLOG_IPV6_ADDR(bTupleV6_p->saddr), ntohs(bTupleV6_p->port.source),
+            BLOG_IPV6_ADDR(bTupleV6_p->daddr), ntohs(bTupleV6_p->port.dest),
+            bTupleV6_p->rx_hop_limit );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_l2_dump
+ * Description  : parse and dump the contents of all L2 headers
+ *  bHdr_p      : Pointer to logged header
+ *------------------------------------------------------------------------------
+ */
+void blog_l2_dump( BlogHeader_t * bHdr_p )
+{
+    register int i, ix, length, offset = 0;
+    BlogEncap_t type;
+    char * value = bHdr_p->l2hdr;    /* walks the concatenated raw headers */
+
+    /* Walk each logged encapsulation, printing its type and raw bytes. */
+    for ( ix=0; ix<bHdr_p->count; ix++ )
+    {
+        type = bHdr_p->encap[ix];
+
+        switch ( type )
+        {
+            case PPP_1661   : length = BLOG_PPP_HDR_LEN;    break;
+            case PPPoE_2516 : length = BLOG_PPPOE_HDR_LEN;  break;
+            case VLAN_8021Q : length = BLOG_VLAN_HDR_LEN;   break;
+            case ETH_802x   : length = BLOG_ETH_HDR_LEN;    break;
+            case BCM_SWC    : 
+                              /* NOTE(review): the ethertype peek reads from
+                               * l2hdr+12 (start of the logged headers), not
+                               * value+12 — confirm this is correct when
+                               * BCM_SWC is not the first logged encap. */
+                              if ( *((uint16_t *)(bHdr_p->l2hdr + 12) ) 
+                                   == BLOG_ETH_P_BRCM4TAG)
+                                  length = BLOG_BRCM4_HDR_LEN;
+                              else
+                                  length = BLOG_BRCM6_HDR_LEN;
+                              break;
+
+            case PLD_IPv4   :
+            case PLD_IPv6   :
+            case DEL_IPv4   :
+            case DEL_IPv6   :
+            case BCM_XPHY   :
+            default         : printk( "Unsupported type %d\n", type );
+                              return;
+        }
+
+        printk( "\tENCAP %d. %10s +%2d %2d [ ",
+                ix, strBlogEncap[type], offset, length );
+
+        for ( i=0; i<length; i++ )
+            printk( "%02x ", (uint8_t)value[i] );
+
+        offset += length;
+        value += length;
+
+        printk( "]\n" );
+    }
+}
+
+void blog_virdev_dump( Blog_t * blog_p )
+{
+    int ix;
+
+    /* Print the blog's virtual device pointer table on one line. */
+    printk( " VirtDev: ");
+    for ( ix = 0; ix < MAX_VIRT_DEV; ix++ )
+        printk( "<0x%08x> ", (int)blog_p->virt_dev_p[ix] );
+    printk( "\n" );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_gre_dump
+ * Description  : Dump the contents of a BlogTuple_t object.
+ *  blog_p      : Pointer to the Blog
+ *------------------------------------------------------------------------------
+ */
+static void blog_gre_dump( Blog_t *blog_p )
+{
+    BlogGre_t *bGreRx_p = &blog_p->grerx; 
+    BlogGre_t *bGreTx_p = &blog_p->gretx; 
+
+    /* ipid is shown twice: hex and decimal, both after ntohs(). */
+    printk( "\tGRE RX:"
+            "\t\thlen<%u> ipid<0x%04x:%u> flags<0x%04x>\n",
+            bGreRx_p->hlen, ntohs(bGreRx_p->ipid), ntohs(bGreRx_p->ipid),
+            bGreRx_p->gre_flags.u16 ); 
+
+    printk( "\tGRE TX:"
+            "\t\thlen<%u> ipid<0x%04x:%u> flags<0x%04x>\n",
+            bGreTx_p->hlen, ntohs(bGreTx_p->ipid), ntohs(bGreTx_p->ipid),
+            bGreTx_p->gre_flags.u16 ); 
+}
+ 
+void blog_lock(void)
+{
+    /* Acquire the global blog lock (BH-disabling variant per macro name). */
+    BLOG_LOCK_BH();
+}
+
+void blog_unlock(void)
+{
+    /* Release the global blog lock taken by blog_lock(). */
+    BLOG_UNLOCK_BH();
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_dump
+ * Description  : Dump the contents of a Blog object.
+ *  blog_p      : Pointer to the Blog_t object
+ *------------------------------------------------------------------------------
+ */
+void blog_dump( Blog_t * blog_p )
+{
+    if ( blog_p == BLOG_NULL )
+        return;
+
+    blog_assertv( (_IS_BPTR_(blog_p)) );
+
+    /* Summary line: ownership, conntrack pointers, L1 key, fdb, tos, skb. */
+    printk( "BLOG <0x%08x> owner<0x%08x> delCt<0x%08x> pldCt<0x%08x>\n"
+            "\t\tL1 channel<%u> phyLen<%u> phy<%u> <%s>\n"
+            "\t\tfdb_src<0x%08x> fdb_dst<0x%08x> tos_mode<%u:%u>\n"
+            "\t\thash<%u> prot<%u> prio<0x%08x> mark<0x%08x> Mtu<%u>\n",
+            (int)blog_p, (int)blog_p->skb_p, 
+            (int)blog_p->ct_p[BLOG_CT_DEL],
+            (int)blog_p->ct_p[BLOG_CT_PLD],
+            blog_p->key.l1_tuple.channel,
+            rfc2684HdrLength[blog_p->key.l1_tuple.phyLen],
+            blog_p->key.l1_tuple.phy,
+            strBlogPhy[blog_p->key.l1_tuple.phyType],
+            (int)blog_p->fdb[0], (int)blog_p->fdb[1], 
+            (int)blog_p->tos_mode_us, (int)blog_p->tos_mode_ds, 
+            blog_p->hash, blog_p->key.protocol,
+            blog_p->priority, blog_p->mark, blog_p->minMtu);
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+    /* Dump any attached IPv4/IPv6 conntrack context. */
+    if ( blog_p->ct_p[BLOG_PARAM2_IPV4] )
+        blog_nfct_dump( blog_p->skb_p, blog_p->ct_p[BLOG_PARAM2_IPV4], 
+                        blog_p->nf_dir );
+
+    if ( blog_p->ct_p[BLOG_PARAM2_IPV6] )
+        blog_nfct_dump( blog_p->skb_p, blog_p->ct_p[BLOG_PARAM2_IPV6], 
+                        blog_p->nf_dir );
+#endif
+
+    /* Receive-side header log. */
+    printk( "  RX count<%u> channel<%02u> bmap<0x%04x> phyLen<%u> "
+            "phyHdr<%u> %s\n"
+            "     wan_qdisc<%u> multicast<%u> fkbInSkb<%u>\n",
+            blog_p->rx.count, blog_p->rx.info.channel,
+            blog_p->rx.info.hdrs,
+            rfc2684HdrLength[blog_p->rx.info.phyHdrLen],
+            blog_p->rx.info.phyHdr, 
+            strBlogPhy[blog_p->rx.info.phyHdrType],
+            blog_p->rx.wan_qdisc,
+            blog_p->rx.multicast, blog_p->rx.fkbInSkb );
+    if ( blog_p->rx.info.bmap.PLD_IPv4 )
+        blog_tuple_dump( &blog_p->rx.tuple );
+    blog_l2_dump( &blog_p->rx );
+
+    /* Transmit-side header log. */
+    printk("  TX count<%u> channel<%02u> bmap<0x%04x> phyLen<%u> "
+           "phyHdr<%u> %s\n",
+            blog_p->tx.count, blog_p->tx.info.channel,
+            blog_p->tx.info.hdrs, 
+            rfc2684HdrLength[blog_p->tx.info.phyHdrLen],
+            blog_p->tx.info.phyHdr, 
+            strBlogPhy[blog_p->tx.info.phyHdrType] );
+    if ( blog_p->tx.dev_p )
+        blog_netdev_dump( blog_p->tx.dev_p );
+    /* NOTE(review): the TX tuple dump is gated on the RX bitmap flag
+     * (rx.info.bmap.PLD_IPv4) — confirm tx.info.bmap was not intended. */
+    if ( blog_p->rx.info.bmap.PLD_IPv4 )
+        blog_tuple_dump( &blog_p->tx.tuple );
+    blog_l2_dump( &blog_p->tx );
+    blog_virdev_dump( blog_p );
+
+    if ( blog_p->rx.info.bmap.PLD_IPv6 )
+        blog_tupleV6_dump( &blog_p->tupleV6 );
+
+    blog_gre_dump( blog_p );
+    printk("  Del Tuple\n" );
+    blog_tuple_dump( &blog_p->delrx_tuple );
+    blog_tuple_dump( &blog_p->deltx_tuple );
+
+#if defined(CC_BLOG_SUPPORT_DEBUG)
+    /* Global blog pool statistics (debug builds only). */
+    printk( "\t\textends<%d> free<%d> used<%d> HWM<%d> fails<%d>\n",
+            blog_extends, blog_cnt_free, blog_cnt_used, blog_cnt_hwm,
+            blog_cnt_fails );
+#endif
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_getTxMtu
+ * Description  : Gets unadjusted mtu from tx network devices associated with
+ *                blog: the smallest MTU of the egress device and every
+ *                TX-direction virtual device on the flow path.
+ *  blog_p      : Pointer to the Blog_t object
+ *------------------------------------------------------------------------------
+ */
+uint16_t blog_getTxMtu(Blog_t * blog_p)
+{
+    int idx;
+    uint16_t mtu;
+    void * vdev_p;
+    struct net_device * ndev_p;
+
+    /* Seed with the egress device's MTU, or the largest possible value
+     * when no egress device has been recorded on the blog yet. */
+    ndev_p = (struct net_device *)blog_p->tx.dev_p;
+    mtu = (ndev_p != NULL) ? ndev_p->mtu : 0xFFFF;
+
+    /* Walk the attached virtual devices, considering only the TX direction,
+     * and keep the smallest MTU seen. */
+    for (idx = 0; idx < MAX_VIRT_DEV; idx++)
+    {
+        vdev_p = blog_p->virt_dev_p[idx];
+        if ( (vdev_p == (void *)NULL) || IS_RX_DIR(vdev_p) )
+            continue;
+
+        ndev_p = (struct net_device *)DEVP_DETACH_DIR(vdev_p);
+        /* Exclude Bridge device - bridge always has the least MTU of all
+         * attached interfaces - irrespective of this specific flow path */
+        if ( (ndev_p != NULL) && !(ndev_p->priv_flags & IFF_EBRIDGE)
+             && (ndev_p->mtu < mtu) )
+            mtu = ndev_p->mtu;
+    }
+
+    blog_print( "minMtu <%d>", (int)mtu );
+
+    return mtu;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_set_len_tbl
+ * Description  : Set the values learnt from iptables rule for length
+ *                prioritization.
+ * Parameters   :
+ *  val[]       : Array that stores {minimum length, maximum length, original
+ *                mark, target mark}.
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail (table is full)
+ *------------------------------------------------------------------------------
+ */
+int blog_set_len_tbl(uint32_t val[])
+{
+    BLOG_LOCK_TBL();
+
+    /* The capacity test must run under the table lock: testing it before
+     * acquisition leaves a window where two concurrent writers both pass
+     * the check and the second one indexes one slot past the table end. */
+    if ( blog_len_tbl_idx >= BLOG_MAX_LEN_TBLSZ )
+    {
+        BLOG_UNLOCK_TBL();
+        blog_print("%s: Length priority entries exceed the table size.\n", __func__);
+        return -1;
+    }
+
+    /* Record the rule at the next free slot and publish it by bumping the
+     * insertion index last. */
+    blog_len_tbl[blog_len_tbl_idx][BLOG_MIN_LEN_INDEX] = val[BLOG_MIN_LEN_INDEX];
+    blog_len_tbl[blog_len_tbl_idx][BLOG_MAX_LEN_INDEX] = val[BLOG_MAX_LEN_INDEX];
+    blog_len_tbl[blog_len_tbl_idx][BLOG_ORIGINAL_MARK_INDEX] = val[BLOG_ORIGINAL_MARK_INDEX];
+    blog_len_tbl[blog_len_tbl_idx][BLOG_TARGET_MARK_INDEX] = val[BLOG_TARGET_MARK_INDEX];
+    blog_len_tbl_idx++;
+
+    BLOG_UNLOCK_TBL();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_clr_len_tbl
+ * Description  : Clear the table for length prioritization: invalidate every
+ *                field of every entry and rewind the insertion index.
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail
+ *------------------------------------------------------------------------------
+ */
+int blog_clr_len_tbl()
+{
+    int entry = BLOG_MAX_LEN_TBLSZ;
+
+    BLOG_LOCK_TBL();
+
+    while ( entry-- )
+    {
+        blog_len_tbl[entry][BLOG_MIN_LEN_INDEX] = BLOG_INVALID_UINT32;
+        blog_len_tbl[entry][BLOG_MAX_LEN_INDEX] = BLOG_INVALID_UINT32;
+        blog_len_tbl[entry][BLOG_ORIGINAL_MARK_INDEX] = BLOG_INVALID_UINT32;
+        blog_len_tbl[entry][BLOG_TARGET_MARK_INDEX] = BLOG_INVALID_UINT32;
+    }
+
+    /* Next insertion restarts from slot 0. */
+    blog_len_tbl_idx = 0;
+
+    BLOG_UNLOCK_TBL();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_set_dscp_tbl
+ * Description  : Set the values learnt from iptables rule for DSCP mangle.
+ * Parameters   :
+ *  idx         : DSCP match value
+ *  val         : DSCP target value
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail (idx out of range)
+ *------------------------------------------------------------------------------
+ */
+int blog_set_dscp_tbl(uint8_t idx, uint8_t val)
+{
+    /* idx is a full octet but lookups index this table with a 6-bit DSCP
+     * (tos >> XT_DSCP_SHIFT in blog_pre_mod_hook) - reject out-of-range
+     * match values rather than writing past the table end. */
+    if ( idx >= BLOG_MAX_DSCP_TBLSZ )
+        return -1;
+
+    BLOG_LOCK_TBL();
+
+    blog_dscp_tbl[idx] = val;
+
+    BLOG_UNLOCK_TBL();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_clr_dscp_tbl
+ * Description  : Clear the table for DSCP mangle by marking every entry
+ *                invalid.
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail
+ *------------------------------------------------------------------------------
+ */
+int blog_clr_dscp_tbl()
+{
+    int entry = BLOG_MAX_DSCP_TBLSZ;
+
+    BLOG_LOCK_TBL();
+
+    while ( entry-- )
+        blog_dscp_tbl[entry] = BLOG_INVALID_UINT8;
+
+    BLOG_UNLOCK_TBL();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_set_tos_tbl
+ * Description  : Set the values learnt from iptables rule for TOS mangle.
+ * Parameters   :
+ *  idx         : TOS match value (the TOS octet to remap)
+ *  val         : TOS target value (the replacement octet)
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail
+ *------------------------------------------------------------------------------
+ */
+int blog_set_tos_tbl(uint8_t idx, uint8_t val)
+{
+    BLOG_LOCK_TBL();
+
+    /* Record the remap entry under the table lock. */
+    blog_tos_tbl[idx] = val;
+
+    BLOG_UNLOCK_TBL();
+
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_clr_tos_tbl
+ * Description  : Clear the table for TOS mangle by marking every entry
+ *                invalid.
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail
+ *------------------------------------------------------------------------------
+ */
+int blog_clr_tos_tbl()
+{
+    int entry = BLOG_MAX_TOS_TBLSZ;
+
+    BLOG_LOCK_TBL();
+
+    while ( entry-- )
+        blog_tos_tbl[entry] = BLOG_INVALID_UINT8;
+
+    BLOG_UNLOCK_TBL();
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_pre_mod_hook
+ * Description  : Called by flow cache prior to the modification phase.
+ *                Stages the length-priority mark and DSCP/TOS rewrite values
+ *                in blog_mangl_params[] for blog_post_mod_hook() to apply.
+ * Parameters   :
+ *  blog_p      : Pointer to the Blog_t object
+ *  nbuff_p     : Pointer to a NBuff
+ * Returns      :
+ *  PKT_DONE    : Success
+ *  PKT_DROP    : Drop the packet
+ *  PKT_NORM    : Return to normal network stack
+ *------------------------------------------------------------------------------
+ */
+int blog_pre_mod_hook(Blog_t *blog_p, void *nbuff_p)
+{
+    FkBuff_t *fkb_p = PNBUFF_2_FKBUFF(nbuff_p);
+    BlogIpv4Hdr_t *ip_p = (BlogIpv4Hdr_t *)&fkb_p->data[blog_p->ip_offset];
+
+    if ( blog_p->lenPrior )
+    {
+        int i;
+
+        /* Scan rules from the most recently added down to entry 0.
+         * blog_len_tbl_idx is the COUNT of valid entries, so the scan must
+         * begin at blog_len_tbl_idx - 1; starting at blog_len_tbl_idx (as
+         * this loop previously did) read one cleared entry past the last
+         * rule and, with an empty table, staged an invalid mark. */
+        for ( i = blog_len_tbl_idx - 1; i >= 0; i-- )
+        {
+            if ( (ip_p->len >= blog_len_tbl[i][BLOG_MIN_LEN_INDEX]) &&
+                 (ip_p->len <= blog_len_tbl[i][BLOG_MAX_LEN_INDEX]) )
+            {
+                blog_mangl_params[BLOG_LEN_PARAM_INDEX] = blog_len_tbl[i][BLOG_TARGET_MARK_INDEX];
+                break;
+            }
+            else
+                blog_mangl_params[BLOG_LEN_PARAM_INDEX] = blog_len_tbl[i][BLOG_ORIGINAL_MARK_INDEX];
+        }
+    }
+
+    if ( blog_p->dscpMangl )
+    {
+        /* Index the DSCP remap table by the packet's 6-bit DSCP field. */
+        blog_mangl_params[BLOG_DSCP_PARAM_INDEX] = blog_dscp_tbl[ip_p->tos>>XT_DSCP_SHIFT];
+    }
+
+    if ( blog_p->tosMangl )
+    {
+        blog_mangl_params[BLOG_TOS_PARAM_INDEX] = blog_tos_tbl[ip_p->tos];
+    }
+
+    return PKT_DONE;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_post_mod_hook
+ * Description  : Called by flow cache after the modification phase.  Applies
+ *                the mark and the DSCP/TOS rewrites that blog_pre_mod_hook()
+ *                staged in blog_mangl_params[].
+ * Parameters   :
+ *  blog_p      : Pointer to the Blog_t object
+ *  nbuff_p     : Pointer to a NBuff
+ * Returns      :
+ *  Zero        : Success
+ *  Non-zero    : Fail
+ *------------------------------------------------------------------------------
+ */
+int blog_post_mod_hook(Blog_t *blog_p, void *nbuff_p)
+{
+    FkBuff_t *fkb_p = PNBUFF_2_FKBUFF(nbuff_p);
+
+    if ( blog_p->lenPrior )
+    {
+        /* Propagate the mark chosen during the pre-modification scan. */
+        fkb_p->mark = blog_mangl_params[BLOG_LEN_PARAM_INDEX];
+    }
+
+    if ( blog_p->dscpMangl )
+    {
+        if ( blog_mangl_params[BLOG_DSCP_PARAM_INDEX] != BLOG_INVALID_UINT8 )
+        {
+            /* NOTE(review): the IP header is re-located by the difference in
+             * TX vs RX l2hdr sizes, presumably because the modification phase
+             * rewrote the L2 encapsulation in place - confirm this offset. */
+            struct iphdr *ip_p = (struct iphdr *)(fkb_p->data + blog_p->ip_offset +
+                (sizeof(blog_p->tx.l2hdr) - sizeof(blog_p->rx.l2hdr)));
+            /* Keep non-DSCP bits (mask), write the staged DSCP into the
+             * high six bits of the TOS octet. */
+            ipv4_change_dsfield(ip_p, (uint8_t)(~XT_DSCP_MASK),
+                (uint8_t)(blog_mangl_params[BLOG_DSCP_PARAM_INDEX] << XT_DSCP_SHIFT));
+        }
+    }
+
+    if ( blog_p->tosMangl )
+    {
+        if ( blog_mangl_params[BLOG_TOS_PARAM_INDEX] != BLOG_INVALID_UINT8 )
+        {
+            struct iphdr *ip_p = (struct iphdr *)(fkb_p->data + blog_p->ip_offset +
+                (sizeof(blog_p->tx.l2hdr) - sizeof(blog_p->rx.l2hdr)));
+            /* Mask 0: replace the entire TOS octet with the staged value. */
+            ipv4_change_dsfield(ip_p, 0, (uint8_t)blog_mangl_params[BLOG_TOS_PARAM_INDEX]);
+        }
+    }
+
+    return 0;
+}
+
+
+/* Netevent notifier block: routes kernel network-event callbacks into
+ * blog_notify_netevent(); registered from __init_blog(). */
+static struct notifier_block net_nb =
+{
+    .notifier_call = blog_notify_netevent,
+};
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) || defined(CONFIG_ACCEL_PPTP)
+/*
+ * Macro specific to parsing: Used in _blog_parse_l2hdr().
+ * - Fetch the next encapsulation type into h_proto
+ * - Advance hdr_p to the start of the next header
+ * NOTE: this expands to multiple statements (not wrapped in do { } while(0)),
+ * so it must only be used as a full statement, never as the sole body of an
+ * unbraced if/else.
+ */
+#define BLOG_PARSE(tag, length, proto)       h_proto = (proto);  \
+                                        hdr_p += (length);  \
+    blog_print( "BLOG_PARSE %s: length<%d> proto<0x%04x>", \
+                          #tag, length, ntohs(h_proto) );
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_parse_l2hdr
+ * Description  : Given a packet quickly parse the L2 header
+ * Parameters   :
+ *  fkb_p       : Pointer to a fast kernel buffer<data,len>
+ *  h_proto     : First encapsulation type
+ * Return values:
+ *              : Pointer to the first IPv4 header if parsing succeeded up to
+ *              : an IPv4 header; NULL if parsing failed or the packet does
+ *              : not carry IPv4.
+ *------------------------------------------------------------------------------
+ */
+static inline 
+BlogIpv4Hdr_t * _blog_parse_l2hdr( struct fkbuff *fkb_p, uint32_t h_proto )
+{
+    int          ix;
+    char         * hdr_p;
+    BlogIpv4Hdr_t *ipv4_p;
+
+    /* NOTE(review): this debug assert only evaluates when its condition is
+     * already true, so it can never fire; the intent was presumably to
+     * assert on the inverse - confirm. */
+    BLOG_DBG(
+          if ((fkb_p!=FKB_NULL) &&
+              ((h_proto==TYPE_ETH)||(h_proto==TYPE_PPP)||(h_proto==TYPE_IP)))
+          {
+            blog_assertr(((fkb_p!=FKB_NULL) 
+                         && ((h_proto==TYPE_ETH)||(h_proto==TYPE_PPP)
+                              ||(h_proto==TYPE_IP))), NULL );
+          } );
+    blog_print( "fkb<0x%08x> data<0x%08x> len<%d> h_proto<%u>",
+                (int)fkb_p, (int)fkb_p->data, (int)fkb_p->len, h_proto );
+
+    /* PACKET PARSE PHASE */
+
+    /* initialize locals */
+    hdr_p           = fkb_p->data;
+    ix              = -1;
+    ipv4_p          = (BlogIpv4Hdr_t *)NULL;
+    h_proto         = htons(h_proto);
+
+    do /* Quickly parse upto IP tuple loop */
+    {
+        ix++;
+        /* Bound the number of encapsulations we are willing to walk. */
+        if ( unlikely(ix > BLOG_ENCAP_MAX)) goto done;
+        switch ( h_proto )
+        {
+        case htons(TYPE_ETH):  /* first encap: XYZoE */
+            if ( unlikely(ix != 0) )
+                goto done;
+            /* Check whether multicast logging support is enabled */
+            if (((BlogEthHdr_t*)hdr_p)->macDa.u8[0] & 0x1) /* mcast or bcast */
+            {
+                blog_print( "ABORT multicast MAC" );
+                goto done;
+            }
+            /* PS. Multicast over PPPoE would not have multicast MacDA */
+            BLOG_PARSE( ETH, BLOG_ETH_HDR_LEN, *((uint16_t*)hdr_p+6) ); break;
+        case htons(TYPE_PPP):  /* first encap: PPPoA */
+            if ( unlikely(ix != 0) )
+                goto done;
+            BLOG_PARSE( PPP, BLOG_PPP_HDR_LEN, *(uint16_t*)hdr_p ); break;
+        case htons(BLOG_ETH_P_BRCM6TAG):
+            BLOG_PARSE( BRCM6, BLOG_BRCM6_HDR_LEN, *((uint16_t*)hdr_p+2) );
+            break;
+        case htons(BLOG_ETH_P_BRCM4TAG):
+            BLOG_PARSE( BRCM4, BLOG_BRCM4_HDR_LEN, *((uint16_t*)hdr_p+1) );
+            break;
+        case htons(BLOG_ETH_P_PPP_SES):
+            BLOG_PARSE( PPPOE, BLOG_PPPOE_HDR_LEN, *((uint16_t*)hdr_p+3) );
+            break;
+        case htons(BLOG_ETH_P_8021Q): 
+        case htons(BLOG_ETH_P_8021AD):
+            BLOG_PARSE( VLAN, BLOG_VLAN_HDR_LEN, *((uint16_t*)hdr_p+1) ); break;
+        case htons(TYPE_IP):   /* first encap: IPoA */
+            if ( unlikely(ix != 0) )
+                goto done;
+            /* fall through: an IPoA first encapsulation is an IPv4 payload */
+        case htons(BLOG_PPP_IPV4):
+        case htons(BLOG_ETH_P_IPV4):
+            ipv4_p = (BlogIpv4Hdr_t *)hdr_p;
+            goto done;
+
+        default :
+            blog_print( "ABORT UNKNOWN Rx h_proto 0x%04x", 
+                (uint16_t) ntohs(h_proto) );
+            goto done;
+        } /* switch ( h_proto ) */
+    } while(1);
+
+done:
+    return ipv4_p;
+}
+#endif
+
+#if defined(CONFIG_NET_IPGRE_MODULE)
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_gre_rcv
+ * Description  : Given a packet quickly detect whether it is a GRE packet.
+ *                If yes then do the other processing based on the GRE flags.
+ * Parameters   :
+ *  fkb_p       : Pointer to a fast kernel buffer<data,len>
+ *  dev_p       : Pointer to the net_device on which the packet arrived.
+ *  h_proto     : First encapsulation type
+ *  tunl_pp     : Pointer to pointer to GRE tunnel
+ *  pkt_seqno_p : Pointer to received packet seqno
+ * Return values:
+ *  BLOG_GRE_RCV_NOT_GRE: 
+ *              : Either the packet is not GRE or it cannot be 
+ *                accelerated.
+ *  BLOG_GRE_RCV_NO_SEQNO: 
+ *              : Received packet does not have seqno.
+ *  BLOG_GRE_RCV_IN_SEQ: 
+ *              : GRE tunnel is configured with seqno and the received packet
+ *              : seqno is in sync with the tunnel seqno.
+ *  BLOG_GRE_RCV_NO_TUNNEL: 
+ *              : Could not find the GRE tunnel matching with packet. 
+ *  BLOG_GRE_RCV_FLAGS_MISSMATCH: 
+ *              : GRE flags in the received packet does not match the flags 
+ *              : in the configured GRE tunnel.
+ *  BLOG_GRE_RCV_CHKSUM_ERR: 
+ *              : Received packet has bad GRE checksum.
+ *  BLOG_GRE_RCV_OOS_LT: 
+ *              : GRE tunnel is configured with seqno and the received packet
+ *              : seqno is out-of-seq (OOS) and less than the next seqno
+ *              : expected by the tunnel seqno.
+ *  BLOG_GRE_RCV_OOS_GT: 
+ *              : GRE tunnel is configured with seqno and the received packet
+ *              : seqno is out-of-seq and greater than the next seqno 
+ *              : expected by the tunnel.
+ * Note         : The *tunl_pp pointer makes all the tunnel fields available
+ *                (including seqno). The tunnel seqno and pkt_seqno can
+ *                be used to implement functions to put received packets 
+ *                in sequence before giving the packets to flow cache 
+ *                (i.e. invoking the blog_rx_hook_g()).
+ *------------------------------------------------------------------------------
+ */
+int blog_gre_rcv( struct fkbuff *fkb_p, void * dev_p, uint32_t h_proto,
+    void **tunl_pp, uint32_t *pkt_seqno_p)
+{
+    BlogIpv4Hdr_t* ip_p;
+    int ret = BLOG_GRE_RCV_NOT_GRE;
+
+    ip_p = _blog_parse_l2hdr( fkb_p, h_proto );
+
+    if (ip_p != NULL) 
+    {
+        blog_print( "BLOG PARSE IPv4:" );
+
+        /* 
+         * Abort parse
+         * - If not IPv4 or with options.
+         * - If this is a unicast and fragmented IP Pkt, let it pass through the
+         *   network stack, as intermediate fragments do not carry a
+         *   full upper layer protocol to determine the port numbers.
+         */
+        /* 0x45 = version 4, IHL 5 (20-byte header, no options). */
+        if ( unlikely(*(uint8_t*)ip_p != 0x45) )
+        {
+            blog_print( "ABORT IP ver<%d> len<%d>", ip_p->ver, ip_p->ihl );
+            goto pkt_not_gre;
+        }
+
+        if ( ip_p->proto == BLOG_IPPROTO_GRE ) 
+        {
+            blog_print( "BLOG PARSE GRE:" );
+            /* NOTE(review): pointer-to-uint32_t casts assume a 32-bit
+             * address space - confirm for the target architecture. */
+            ret = blog_gre_rcv_check_fn( dev_p, ip_p, 
+                fkb_p->len - ((uint32_t)ip_p - (uint32_t)fkb_p->data), 
+                tunl_pp, pkt_seqno_p );
+        }
+    }
+
+pkt_not_gre:
+    return ret;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_gre_xmit
+ * Description  : On transmit through a GRE tunnel, re-parse the L2/IPv4
+ *                headers and let blog_gre_xmit_update_fn() sync the tunnel
+ *                state (e.g. seqno) for the non-accelerated upstream path.
+ * Parameters   :
+ *  skb_p       : Pointer to the sk_buff being transmitted (skb_p->tunl must
+ *                be set for any work to be done)
+ *  h_proto     : First encapsulation type
+ *------------------------------------------------------------------------------
+ */
+void blog_gre_xmit(struct sk_buff *skb_p, uint32_t h_proto)
+{
+    if (skb_p && skb_p->tunl && blog_gre_tunnel_accelerated())
+    {
+        BlogIpv4Hdr_t* ip_p;
+        struct fkbuff * fkb_p;
+
+        /* non-accelerated GRE tunnel US case we need to sync seqno */
+        blog_print( "non-XL GRE Tunnel" );
+
+        /* Reinterpret the fkb headroom embedded inside the skb. */
+        fkb_p = (struct fkbuff*) ((uint32_t)skb_p + 
+                                        BLOG_OFFSETOF(sk_buff,fkbInSkb));
+        ip_p = _blog_parse_l2hdr( fkb_p, h_proto );
+
+        if (ip_p != NULL)
+        {
+            blog_print( "tunl<%p> skb<%p> data<%p> len<%d> ip_p<%p> "
+                        "l2_data_len<%d>",
+                skb_p->tunl, skb_p, skb_p->data, skb_p->len, ip_p, 
+                skb_p->len - ((uint32_t) ip_p - (uint32_t) skb_p->data)); 
+
+            blog_gre_xmit_update_fn(skb_p->tunl, ip_p, 
+                skb_p->len - ((uint32_t) ip_p - (uint32_t) skb_p->data)); 
+        }
+    }
+}
+#endif
+
+#if defined(CONFIG_ACCEL_PPTP) 
+/* Assemble a 32-bit value from two consecutive 16-bit halves (most
+ * significant half first) when the source is only 16-bit aligned. */
+static inline uint32_t _read32_align16(uint16_t *from)
+{
+    return ((uint32_t)from[0] << 16) | (uint32_t)from[1];
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_pptp_rcv
+ * Description  : Detect a PPTP (enhanced GRE, RFC 2637 style) packet and, if
+ *                found, extract call id / seq / ack and run the registered
+ *                blog_pptp_rcv_check_fn() against the tunnel state.
+ * Parameters   :
+ *  fkb_p       : Pointer to a fast kernel buffer<data,len>
+ *  h_proto     : First encapsulation type
+ *  rcv_pktSeq  : Out: received packet's sequence number
+ * Return values:
+ *  BLOG_PPTP_RCV_NOT_PPTP when the packet is not PPTP; otherwise the result
+ *  of blog_pptp_rcv_check_fn().
+ *------------------------------------------------------------------------------
+ */
+int blog_pptp_rcv( struct fkbuff *fkb_p, uint32_t h_proto, uint32_t *rcv_pktSeq) 
+{
+	BlogIpv4Hdr_t* ip_p;
+	char * hdr_p;
+    uint16_t *grehdr_p;
+    BlogGreIeFlagsVer_t gre_flags = {.u16 = 0 };
+	uint16_t call_id = 0;
+	uint32_t saddr, rcv_pktAck = 0;
+	
+    int ret = BLOG_PPTP_RCV_NOT_PPTP;
+
+    ip_p = _blog_parse_l2hdr( fkb_p, h_proto );
+
+    if (ip_p != NULL) 
+    {
+        blog_print( "BLOG PARSE IPv4:" );
+
+        /* 
+         * Abort parse
+         * - If not IPv4 or with options.
+         * - If this is a unicast and fragmented IP Pkt, let it pass through the
+         *   network stack, as intermediate fragments do not carry a
+         *   full upper layer protocol to determine the port numbers.
+         */
+        /* 0x45 = version 4, IHL 5 (20-byte header, no options). */
+        if ( unlikely(*(uint8_t*)ip_p != 0x45) )
+        {
+            blog_print( "ABORT IP ver<%d> len<%d>", ip_p->ver, ip_p->ihl );
+            goto pkt_not_pptp;
+        }
+
+        if ( ip_p->proto == BLOG_IPPROTO_GRE ) 
+        {
+            /* GRE header immediately follows the 20-byte IPv4 header. */
+            hdr_p = (char *)ip_p;
+            hdr_p += BLOG_IPV4_HDR_LEN;
+            grehdr_p = (uint16_t*)hdr_p;
+            gre_flags.u16 = ntohs(*(uint16_t*)grehdr_p);
+            
+            /* the pkt is PPTP with seq number */
+            if (gre_flags.seqIe && gre_flags.keyIe && gre_flags.ver) 
+            {
+            	blog_print( "BLOG PARSE PPTP:" );
+            	/* Key field: high half is payload len, low half call id. */
+            	call_id = *(uint16_t*) (grehdr_p + 3);
+            	*rcv_pktSeq = *(uint32_t*) (grehdr_p + 4);
+            	saddr  = _read32_align16( (uint16_t *)&ip_p->sAddr );
+            	
+            	blog_print( "\nincoming pptp pkt's seq = %d\n", *rcv_pktSeq );
+            	if(gre_flags.ackIe) /* the pkt is PPTP with ack number */
+                {	
+                	rcv_pktAck = *(uint32_t*) (grehdr_p + 6);
+                	blog_print( "rcv_pktAck = %d \n", rcv_pktAck );
+                }
+                
+            	ret = blog_pptp_rcv_check_fn(call_id, rcv_pktSeq, 
+            	                             rcv_pktAck, saddr );
+            	
+            }
+        }
+    }
+
+pkt_not_pptp:
+    return ret;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_pptp_xmit
+ * Description  : On transmit of a PPTP (enhanced GRE) packet, extract call
+ *                id / seq / ack and let blog_pptp_xmit_update_fn() sync the
+ *                tunnel state for the non-accelerated upstream path.
+ * Parameters   :
+ *  skb_p       : Pointer to the sk_buff being transmitted
+ *  h_proto     : First encapsulation type
+ *------------------------------------------------------------------------------
+ */
+void blog_pptp_xmit(struct sk_buff *skb_p, uint32_t h_proto) 
+{
+    if (skb_p && blog_gre_tunnel_accelerated())
+    {
+        BlogIpv4Hdr_t* ip_p;
+        struct fkbuff * fkb_p;
+        char * hdr_p;
+        uint16_t *grehdr_p;
+        BlogGreIeFlagsVer_t gre_flags = {.u16 = 0 };
+        uint16_t call_id = 0;
+        uint32_t seqNum = 0, ackNum = 0;
+        uint32_t        saddr;        
+        uint32_t        daddr;
+    
+        /* non-accelerated PPTP tunnel US case we need to sync seqno */
+        blog_print( "non-XL PPTP Tunnel" );
+
+        /* Reinterpret the fkb headroom embedded inside the skb. */
+        fkb_p = (struct fkbuff*) ((uint32_t)skb_p + BLOG_OFFSETOF(sk_buff,fkbInSkb));
+        ip_p = _blog_parse_l2hdr( fkb_p, h_proto );
+        
+        if (ip_p != NULL && ip_p->proto == BLOG_IPPROTO_GRE )
+        {
+            /* GRE header immediately follows the 20-byte IPv4 header. */
+            hdr_p = (char *)ip_p;
+            hdr_p += BLOG_IPV4_HDR_LEN;
+            grehdr_p = (uint16_t*)hdr_p;
+            gre_flags.u16 = ntohs(*(uint16_t*)grehdr_p);
+            
+            /* the pkt is PPTP with seq number */
+            if (gre_flags.seqIe && gre_flags.keyIe && gre_flags.ver) 
+            {	
+            	call_id = *(uint16_t*) (grehdr_p + 3);
+            	seqNum = *(uint32_t*) (grehdr_p + 4);
+            	
+            	saddr  = _read32_align16( (uint16_t *)&ip_p->sAddr );
+            	daddr  = _read32_align16( (uint16_t *)&ip_p->dAddr );
+            	blog_print( "call id = %d, seqNum = %d, daddr = %X\n", 
+            	             call_id, seqNum, daddr );
+                if(gre_flags.ackIe) /* the pkt is PPTP with ack number */
+                {	
+                	ackNum = *(uint32_t*) (grehdr_p + 6);
+                	blog_print( "ackNum = %d \n", ackNum );
+                }
+            	
+            	blog_pptp_xmit_update_fn(call_id, seqNum, ackNum, daddr);
+            } 
+        }
+    }
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_ptm_us_bonding
+ * Description  : Sets/Clears the PTM US bonding mode for the flow
+ * Parameters   :
+ *  skb_p       : Pointer to the sk_buff carrying the blog
+ *  mode        : enable=1, disable=0 
+ * Note         : FIXME This is a temporary fix and should be removed shortly.
+ *------------------------------------------------------------------------------
+ */
+void blog_ptm_us_bonding( struct sk_buff *skb_p, int mode )
+{
+    blog_assertv( (skb_p != (struct sk_buff *)NULL) );
+
+    /* The explicit NULL test also guards builds where blog_assertv()
+     * compiles out; packets with no blog attached are silently ignored. */
+    if ((skb_p != NULL) &&
+        ( likely(skb_p->blog_p != BLOG_NULL) ))
+    {
+        skb_p->blog_p->ptm_us_bond = mode;
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_dm
+ * Description  : update DPI configuration to blog
+ * Parameters   :
+ *  type        : configuration type
+ *  param1      : optional parameter 1
+ *  param2      : optional parameter 2 (currently unused)
+ * Returns      :
+ *  Zero        : Success (or nothing to do)
+ *------------------------------------------------------------------------------
+ */
+int blog_dm(BlogDpiType_t type, uint32_t param1, uint32_t param2)
+{
+    uint32_t ret = 0;
+
+    /* Validate the actual parameter; the assert previously referenced an
+     * undeclared identifier ("request"), which could not compile when the
+     * assertion macro is enabled. */
+    blog_assertr( (type < DPI_MAX), 0 );
+    blog_print( "type<%d> param1<%u>", (int)type, param1 );
+
+    if (!blog_dpi_ctk_update_fn)
+        return ret;
+
+    switch ( type )
+    {
+#if defined(BLOG_NF_CONNTRACK)
+        case DPI_PARENTAL:
+            blog_dpi_ctk_update_fn(param1);
+            return 0;
+#endif
+
+        default:
+            return ret;
+    }
+    /* All switch arms return; the trailing trace/return that used to follow
+     * here was unreachable and has been removed. */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : __init_blog
+ * Description  : Incarnates the blog system during kernel boot sequence,
+ *                in phase subsys_initcall()
+ *------------------------------------------------------------------------------
+ */
+static int __init __init_blog( void )
+{
+    /* Clear the feature tables for per-packet modification */
+    blog_clr_len_tbl();
+    blog_clr_dscp_tbl();
+    blog_clr_tos_tbl();
+
+    /* NOTE(review): alloc_skb() may return NULL; the result is stored
+     * unchecked - confirm downstream users of nfskb_p tolerate NULL. */
+    nfskb_p = alloc_skb( 0, GFP_ATOMIC );
+    blog_cttime_update_fn = (blog_cttime_upd_t) NULL;
+    blog_extend( BLOG_POOL_SIZE_ENGG ); /* Build preallocated pool */
+    BLOG_DBG( printk( CLRb "BLOG blog_dbg<0x%08x> = %d\n"
+                           "%d Blogs allocated of size %d" CLRnl,
+                           (int)&blog_dbg, blog_dbg,
+                           BLOG_POOL_SIZE_ENGG, sizeof(Blog_t) ););
+    /* Receive network event callbacks via net_nb -> blog_notify_netevent. */
+    register_netevent_notifier(&net_nb);
+
+    printk( CLRb "BLOG %s Initialized" CLRnl, BLOG_VERSION );
+    return 0;
+}
+
+subsys_initcall(__init_blog);
+
+/* Symbols exported by the full (CONFIG_BLOG enabled) implementation above. */
+EXPORT_SYMBOL(_blog_emit);
+EXPORT_SYMBOL(blog_extend);
+
+EXPORT_SYMBOL(strBlogAction);
+EXPORT_SYMBOL(strBlogEncap);
+
+EXPORT_SYMBOL(strRfc2684);
+EXPORT_SYMBOL(rfc2684HdrLength);
+EXPORT_SYMBOL(rfc2684HdrData);
+
+EXPORT_SYMBOL(blog_set_len_tbl);
+EXPORT_SYMBOL(blog_clr_len_tbl);
+EXPORT_SYMBOL(blog_set_dscp_tbl);
+EXPORT_SYMBOL(blog_clr_dscp_tbl);
+EXPORT_SYMBOL(blog_set_tos_tbl);
+EXPORT_SYMBOL(blog_clr_tos_tbl);
+EXPORT_SYMBOL(blog_pre_mod_hook);
+EXPORT_SYMBOL(blog_post_mod_hook);
+
+#else   /* !defined(CONFIG_BLOG) */
+
+/*
+ * CONFIG_BLOG disabled: provide inert globals and no-op stubs so callers
+ * elsewhere in the kernel link and run without the blog subsystem.  The
+ * "enable" arguments are deliberately ignored; every feature stays off.
+ */
+int blog_dbg = 0;
+int blog_support_mcast_g = BLOG_MCAST_DISABLE; /* = CC_BLOG_SUPPORT_MCAST; */
+void blog_support_mcast(int enable) {blog_support_mcast_g = BLOG_MCAST_DISABLE;}
+
+/* = CC_BLOG_SUPPORT_MCAST_LEARN; */
+int blog_support_mcast_learn_g = BLOG_MCAST_LEARN_DISABLE; 
+void blog_support_mcast_learn(int enable) {blog_support_mcast_learn_g = BLOG_MCAST_LEARN_DISABLE;}
+
+int blog_support_ipv6_g = BLOG_IPV6_DISABLE; /* = CC_BLOG_SUPPORT_IPV6; */
+void blog_support_ipv6(int enable) {blog_support_ipv6_g = BLOG_IPV6_DISABLE;}
+
+
+blog_cttime_upd_t blog_cttime_update_fn = (blog_cttime_upd_t) NULL;
+blog_xtm_get_tx_chan_t blog_xtm_get_tx_chan_fn = (blog_xtm_get_tx_chan_t) NULL;
+
+int blog_gre_tunnel_accelerated_g = BLOG_GRE_DISABLE;
+int blog_gre_tunnel_accelerated(void) { return blog_gre_tunnel_accelerated_g; }
+
+int blog_support_gre_g = BLOG_GRE_DISABLE; /* = CC_BLOG_SUPPORT_GRE; */
+void blog_support_gre(int enable) {blog_support_gre_g = BLOG_GRE_DISABLE;}
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+blog_gre_rcv_check_t blog_gre_rcv_check_fn = NULL;
+blog_gre_xmit_upd_t blog_gre_xmit_update_fn = NULL;
+#endif
+
+blog_pptp_rcv_check_t blog_pptp_rcv_check_fn = NULL;
+blog_pptp_xmit_upd_t blog_pptp_xmit_update_fn = NULL;
+blog_pptp_xmit_get_t blog_pptp_xmit_get_fn = NULL;
+
+blog_l2tp_rcv_check_t blog_l2tp_rcv_check_fn = NULL;
+
+int blog_l2tp_tunnel_accelerated_g = BLOG_L2TP_DISABLE;
+int blog_support_l2tp_g = BLOG_L2TP_DISABLE; /* = CC_BLOG_SUPPORT_l2TP; */
+/* Single unconditional stub: an identical second definition, previously
+ * guarded by #if defined(CONFIG_L2TP), duplicated this one and caused a
+ * redefinition error whenever CONFIG_L2TP was enabled. */
+void blog_support_l2tp(int enable) {blog_support_l2tp_g = BLOG_L2TP_DISABLE;}
+
+/* Stub functions for Blog APIs that may be used by modules.  Each stub
+ * preserves the real API's signature but performs no work: getters return
+ * BLOG_NULL / 0 / PKT_NORM, and all mutating calls are no-ops. */
+Blog_t * blog_get( void ) { return BLOG_NULL; }
+void     blog_put( Blog_t * blog_p ) { return; }
+
+Blog_t * blog_skb( struct sk_buff * skb_p) { return BLOG_NULL; }
+Blog_t * blog_fkb( struct fkbuff * fkb_p ) { return BLOG_NULL; }
+
+Blog_t * blog_snull( struct sk_buff * skb_p ) { return BLOG_NULL; }
+Blog_t * blog_fnull( struct fkbuff * fkb_p ) { return BLOG_NULL; }
+
+void     blog_free( struct sk_buff * skb_p ) { return; }
+
+void     blog_skip( struct sk_buff * skb_p ) { return; }
+void     blog_xfer( struct sk_buff * skb_p, const struct sk_buff * prev_p )
+         { return; }
+void     blog_clone( struct sk_buff * skb_p, const struct blog_t * prev_p )
+         { return; }
+void     blog_copy(struct blog_t * new_p, const struct blog_t * prev_p)
+         { return; }
+
+int blog_iq( const struct sk_buff * skb_p ) { return IQOS_PRIO_LOW; }
+int blog_fc_enabled(void) { return 0; };
+
+void     blog_link( BlogNetEntity_t entity_type, Blog_t * blog_p,
+                    void * net_p, uint32_t param1, uint32_t param2 ) { return; }
+
+void     blog_notify( BlogNotify_t event, void * net_p,
+                      uint32_t param1, uint32_t param2 ) { return; }
+
+uint32_t blog_request( BlogRequest_t event, void * net_p,
+                       uint32_t param1, uint32_t param2 ) { return 0; }
+
+void     blog_query( BlogQuery_t event, void * net_p,
+           uint32_t param1, uint32_t param2, uint32_t param3 ) { return; }
+
+BlogAction_t blog_filter( Blog_t * blog_p )
+         { return PKT_NORM; }
+
+BlogAction_t blog_sinit( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return PKT_NORM; }
+
+BlogAction_t blog_sinit_locked( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return PKT_NORM; }
+
+BlogAction_t blog_finit( struct fkbuff * fkb_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return PKT_NORM; }
+
+BlogAction_t blog_finit_locked( struct fkbuff * fkb_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return PKT_NORM; }
+
+BlogAction_t blog_emit( void * nbuff_p, void * dev_p,
+                        uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return PKT_NORM; }
+
+int blog_iq_prio( struct sk_buff * skb_p, void * dev_p,
+                         uint32_t encap, uint32_t channel, uint32_t phyHdr )
+         { return 1; }
+
+void blog_bind( BlogDevHook_t blog_rx, BlogDevHook_t blog_tx,
+                BlogNotifyHook_t blog_xx, BlogQueryHook_t blog_qr, 
+                BlogBind_t bind) { return; }
+
+void blog_bind_config( BlogScHook_t blog_sc, BlogSdHook_t blog_sd,
+                       BlogClient_t client, BlogBind_t bind ) { return; }
+
+void     blog( struct sk_buff * skb_p, BlogDir_t dir, BlogEncap_t encap,
+               size_t len, void * data_p ) { return; }
+
+void     blog_dump( Blog_t * blog_p ) { return; }
+
+void     blog_lock(void) {return; }
+
+void     blog_unlock(void) {return; }
+
+uint16_t   blog_getTxMtu(Blog_t * blog_p) {return 0;}
+
+uint32_t blog_activate( Blog_t * blog_p, BlogTraffic_t traffic,
+                        BlogClient_t client ) { return 0; }
+
+Blog_t * blog_deactivate( uint32_t key, BlogTraffic_t traffic,
+                          BlogClient_t client ) { return BLOG_NULL; }
+
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+int blog_gre_rcv( struct fkbuff *fkb_p, void * dev_p, uint32_t h_proto, 
+                  void **tunl_pp, uint32_t *pkt_seqno_p ) { return 1; }
+void blog_gre_xmit(struct sk_buff *skb_p, uint32_t h_proto) { return; }
+#endif
+
+#if defined(CONFIG_ACCEL_PPTP) 
+int blog_pptp_rcv( struct fkbuff *fkb_p, uint32_t h_proto, 
+                    uint32_t *rcv_pktSeq) { return 1; }
+void blog_pptp_xmit( struct sk_buff *skb_p, uint32_t h_proto ) { return; }
+#endif
+
+void blog_ptm_us_bonding( struct sk_buff *skb_p, int mode ) { return; }
+
+int blog_dm(BlogDpiType_t type, uint32_t param1, uint32_t param2) { return 0; }
+
+EXPORT_SYMBOL(blog_emit);
+
+/* Exports common to both the full implementation and the stub build. */
+EXPORT_SYMBOL(blog_dbg);
+EXPORT_SYMBOL(blog_support_mcast_g);
+EXPORT_SYMBOL(blog_support_mcast);
+EXPORT_SYMBOL(blog_support_mcast_learn_g);
+EXPORT_SYMBOL(blog_support_mcast_learn);
+EXPORT_SYMBOL(blog_support_ipv6_g);
+EXPORT_SYMBOL(blog_support_ipv6);
+EXPORT_SYMBOL(blog_cttime_update_fn);
+EXPORT_SYMBOL(blog_gre_tunnel_accelerated_g);
+EXPORT_SYMBOL(blog_support_gre_g);
+EXPORT_SYMBOL(blog_support_gre);
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+EXPORT_SYMBOL(blog_gre_rcv_check_fn);
+EXPORT_SYMBOL(blog_gre_xmit_update_fn);
+EXPORT_SYMBOL(blog_gre_rcv);
+EXPORT_SYMBOL(blog_gre_xmit);
+#endif
+
+EXPORT_SYMBOL(blog_pptp_rcv_check_fn);
+EXPORT_SYMBOL(blog_pptp_xmit_update_fn); 
+EXPORT_SYMBOL(blog_pptp_xmit_get_fn);
+
+#if defined(CONFIG_ACCEL_PPTP) 
+EXPORT_SYMBOL(blog_pptp_rcv);
+EXPORT_SYMBOL(blog_pptp_xmit);
+#endif
+
+EXPORT_SYMBOL(blog_l2tp_tunnel_accelerated_g);
+EXPORT_SYMBOL(blog_support_l2tp_g);
+EXPORT_SYMBOL(blog_support_l2tp);
+EXPORT_SYMBOL(blog_l2tp_rcv_check_fn);
+
+
+EXPORT_SYMBOL(blog_xtm_get_tx_chan_fn);
+
+EXPORT_SYMBOL(blog_get);
+EXPORT_SYMBOL(blog_put);
+EXPORT_SYMBOL(blog_skb);
+EXPORT_SYMBOL(blog_fkb);
+EXPORT_SYMBOL(blog_snull);
+EXPORT_SYMBOL(blog_fnull);
+EXPORT_SYMBOL(blog_free);
+EXPORT_SYMBOL(blog_dump);
+EXPORT_SYMBOL(blog_skip);
+EXPORT_SYMBOL(blog_xfer);
+EXPORT_SYMBOL(blog_clone);
+EXPORT_SYMBOL(blog_copy);
+EXPORT_SYMBOL(blog_iq);
+EXPORT_SYMBOL(blog_fc_enabled);
+EXPORT_SYMBOL(blog_gre_tunnel_accelerated);
+EXPORT_SYMBOL(blog_link);
+EXPORT_SYMBOL(blog_notify);
+EXPORT_SYMBOL(blog_request);
+EXPORT_SYMBOL(blog_query);
+EXPORT_SYMBOL(blog_filter);
+EXPORT_SYMBOL(blog_sinit);
+EXPORT_SYMBOL(blog_sinit_locked);
+EXPORT_SYMBOL(blog_finit);
+EXPORT_SYMBOL(blog_finit_locked);
+EXPORT_SYMBOL(blog_lock);
+EXPORT_SYMBOL(blog_unlock);
+EXPORT_SYMBOL(blog_bind);
+EXPORT_SYMBOL(blog_bind_config);
+EXPORT_SYMBOL(blog_iq_prio);
+EXPORT_SYMBOL(blog_getTxMtu);
+EXPORT_SYMBOL(blog_activate);
+EXPORT_SYMBOL(blog_deactivate);
+EXPORT_SYMBOL(blog_ptm_us_bonding);
+EXPORT_SYMBOL(blog_dm);
+/* NOTE(review): blog_dpi_ctk_update_fn is exported here but its definition
+ * is not visible in the stub (!CONFIG_BLOG) branch - confirm it is defined
+ * unconditionally earlier in this file. */
+EXPORT_SYMBOL(blog_dpi_ctk_update_fn);
+
+EXPORT_SYMBOL(blog);
+
+#endif // defined(BCM_KF_BLOG)
diff --git a/net/core/blog_rule.c b/net/core/blog_rule.c
new file mode 100644
index 0000000000000000000000000000000000000000..464b58980442ae495755f81ca4ed24c407f1ad9f
--- /dev/null
+++ b/net/core/blog_rule.c
@@ -0,0 +1,442 @@
+#if defined(CONFIG_BCM_KF_BLOG)
+/* 
+* <:copyright-BRCM:2010:DUAL/GPL:standard
+* 
+*    Copyright (c) 2010 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+/*
+ *******************************************************************************
+ *
+ * File Name  : blog_rule.c
+ *
+ * Description: Implements packet modification rules that can be associated to
+ *              a Blog.
+ *
+ *******************************************************************************
+ */
+
+#include <linux/slab.h>
+#include <linux/blog.h>
+#include <linux/if_ether.h>
+#include <linux/if_pppox.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <net/ip.h>
+#include <linux/blog_rule.h>
+#include <linux/export.h> 
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Private functions, macros and global variables.
+ *------------------------------------------------------------------------------
+ */
+
+#if defined(CC_CONFIG_BLOG_RULE_DEBUG)
+#define blog_rule_assertv(cond)                                         \
+    if ( !(cond) ) {                                                    \
+        printk( "BLOG RULE ASSERT %s : " #cond, __FUNCTION__ );         \
+        return;                                                         \
+    }
+#define blog_rule_assertr(cond, rtn)                                    \
+    if ( !(cond) ) {                                                    \
+        printk( "BLOG RULE ASSERT %s : " #cond CLRN, __FUNCTION__ );    \
+        return rtn;                                                     \
+    }
+#else
+#define blog_rule_assertv(cond)
+#define blog_rule_assertr(cond, rtn)
+#endif
+
+typedef struct {
+    struct kmem_cache *kmemCache;
+} blogRule_Ctrl_t;
+
+static blogRule_Ctrl_t blogRuleCtrl;
+
+/* External hooks */
+blogRuleVlanHook_t blogRuleVlanHook = NULL;
+blogRuleVlanNotifyHook_t blogRuleVlanNotifyHook = NULL;
+#if (defined(CONFIG_BCM_ARL) || defined(CONFIG_BCM_ARL_MODULE))
+blogArlHook_t bcm_arl_process_hook_g = NULL;
+#endif
+
+#undef  BLOG_RULE_DECL
+#define BLOG_RULE_DECL(x) #x
+
+static char *blogRuleCommandName[] = {
+    BLOG_RULE_DECL(BLOG_RULE_CMD_NOP),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_DA),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_SA),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_ETHERTYPE),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_PUSH_VLAN_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_POP_VLAN_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DEI),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VLAN_PROTO),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_DEI),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VLAN_PROTO),
+//    BLOG_RULE_DECL(BLOG_RULE_CMD_XLATE_DSCP_TO_PBITS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_POP_PPPOE_HDR),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DSCP),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_TTL),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_HOP_LIMIT),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_DROP),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_PORT),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_QUEUE),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_OVRD_LEARNING_VID),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_SET_STA_MAC_ADDRESS),
+    BLOG_RULE_DECL(BLOG_RULE_CMD_MAX)
+};
+
+static void __printEthAddr(char *name, char *addr) /* print "<name> : AA:BB:CC:DD:EE:FF" */
+{
+    int i;
+
+    printk("%s : ", name);
+
+    for(i=0; i<ETH_ALEN; ++i)
+    {
+        printk("%02X", addr[i]); /* two hex digits per octet */
+        if(i != ETH_ALEN-1) printk(":"); /* colon separator, omitted after last octet */
+    }
+
+    printk("\n");
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Public API
+ *------------------------------------------------------------------------------
+ */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_alloc
+ * Description  : Allocates a Blog Rule from the Blog Rule kmem cache using
+ *                GFP_ATOMIC (safe for atomic context). Returns NULL on
+ *                failure. Free the rule back via blog_rule_free().
+ *------------------------------------------------------------------------------
+ */
+blogRule_t *blog_rule_alloc(void)
+{
+    return kmem_cache_alloc(blogRuleCtrl.kmemCache, GFP_ATOMIC);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_free
+ * Description  : Returns a Blog Rule allocated via blog_rule_alloc() to the
+ *------------------------------------------------------------------------------
+ */
+void blog_rule_free(blogRule_t *blogRule_p)
+{
+    kmem_cache_free(blogRuleCtrl.kmemCache, blogRule_p); /* blogRule_p must not be used afterwards */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_free_list
+ * Description  : Frees every Blog Rule chained off blog_p->blogRule_p and
+ *                returns the number of rules freed.
+ *------------------------------------------------------------------------------
+ */
+int blog_rule_free_list(Blog_t *blog_p)
+{
+    blogRule_t *blogRule_p;
+    blogRule_t *nextBlogRule_p;
+    int blogRuleCount;
+
+    blogRule_p = (blogRule_t *)blog_p->blogRule_p;
+    blogRuleCount = 0;
+
+    while(blogRule_p != NULL)
+    {
+        nextBlogRule_p = blogRule_p->next_p; /* save link before freeing the node */
+
+        blog_rule_free(blogRule_p);
+
+        blogRule_p = nextBlogRule_p;
+
+        blogRuleCount++;
+    }
+
+    return blogRuleCount; /* NOTE(review): blog_p->blogRule_p is left dangling — presumably reset by callers; confirm */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_init
+ * Description  : Initializes a Blog Rule with no filters and no modifications.
+ *------------------------------------------------------------------------------
+ */
+void blog_rule_init(blogRule_t *blogRule_p)
+{
+    blog_rule_assertv(blogRule_p != NULL);
+
+    memset(blogRule_p, 0, sizeof(blogRule_t)); /* zero-fill: all filters off, actionCount 0, next_p NULL */
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_dump
+ * Description  : Dumps a Blog Rule's filters and actions to the kernel log.
+ *------------------------------------------------------------------------------
+ */
+void blog_rule_dump(blogRule_t *blogRule_p)
+{
+    int i;
+    blogRuleFilter_t *filter_p;
+    blogRuleFilterVlan_t *vlanFilter_p;
+    blogRuleAction_t *action_p;
+
+    blog_rule_assertv(blogRule_p != NULL);
+
+    printk("Blog Rule <0x%08X>, next <0x%08X>\n",
+           (unsigned int)blogRule_p, (unsigned int)blogRule_p->next_p); /* NOTE(review): pointer cast to unsigned int truncates on 64-bit; %p preferred */
+
+    filter_p = &blogRule_p->filter;
+
+    if(filter_p->flags)
+    {
+        printk("Flags: ");
+        if(filter_p->flags & BLOG_RULE_FILTER_FLAGS_IS_UNICAST)
+        {
+            printk("IS_UNICAST ");
+        }
+        if(filter_p->flags & BLOG_RULE_FILTER_FLAGS_IS_MULTICAST)
+        {
+            printk("IS_MULTICAST ");
+        }
+        if(filter_p->flags & BLOG_RULE_FILTER_FLAGS_IS_BROADCAST)
+        {
+            printk("IS_BROADCAST ");
+        }
+        printk("\n");
+    }
+
+    printk("Ethernet Filters:\n");
+    if(blog_rule_filterInUse(filter_p->eth.mask.h_dest))
+    {
+        __printEthAddr("\tDA", filter_p->eth.value.h_dest);
+    }
+    if(blog_rule_filterInUse(filter_p->eth.mask.h_source))
+    {
+        __printEthAddr("\tSA", filter_p->eth.value.h_source);
+    }
+    if(blog_rule_filterInUse(filter_p->eth.mask.h_proto))
+    {
+        printk("\tEthertype : 0x%04X\n", filter_p->eth.value.h_proto);
+    }
+
+    printk("PPPoE Header: %s\n", filter_p->hasPppoeHeader ? "Yes" : "No");
+
+    printk("VLAN Filters:\n");
+    printk("\tNumber of Tags : <%d>\n", filter_p->nbrOfVlanTags);
+    for(i=0; i<filter_p->nbrOfVlanTags; ++i) /* one TCI/proto filter set per VLAN tag */
+    {
+        vlanFilter_p = &filter_p->vlan[i];
+
+        if(vlanFilter_p->mask.h_vlan_TCI & BLOG_RULE_PBITS_MASK)
+        {
+            printk("\tPBITS : <%d>, tag <%d>\n",
+                   BLOG_RULE_GET_TCI_PBITS(vlanFilter_p->value.h_vlan_TCI), i);
+        }
+
+        if(vlanFilter_p->mask.h_vlan_TCI & BLOG_RULE_DEI_MASK)
+        {
+            printk("\tDEI   : <%d>, tag <%d>\n",
+                   BLOG_RULE_GET_TCI_DEI(vlanFilter_p->value.h_vlan_TCI), i);
+        }
+
+        if(vlanFilter_p->mask.h_vlan_TCI & BLOG_RULE_VID_MASK)
+        {
+            printk("\tVID   : <%d>, tag <%d>\n",
+                   BLOG_RULE_GET_TCI_VID(vlanFilter_p->value.h_vlan_TCI), i);
+        }
+
+        if(vlanFilter_p->mask.h_vlan_encapsulated_proto)
+        {
+            printk("  etherType   : <%04x>, tag <%d>\n",
+                   vlanFilter_p->value.h_vlan_encapsulated_proto, i);
+        }
+    }
+
+    printk("IPv4 Filters:\n");
+    if(blog_rule_filterInUse(filter_p->ipv4.mask.tos))
+    {
+        printk("\tTOS : 0x%04X -> DSCP <%d>\n",
+               filter_p->ipv4.value.tos,
+               filter_p->ipv4.value.tos >> BLOG_RULE_DSCP_IN_TOS_SHIFT);
+    }
+    if(blog_rule_filterInUse(filter_p->ipv4.mask.ip_proto))
+    {
+        printk("\tIP-PROTO : %d \n", filter_p->ipv4.value.ip_proto);
+    }
+
+    printk("SKB Filters:\n");
+    if(blog_rule_filterInUse(filter_p->skb.priority))
+    {
+        printk("\tpriority : %d\n", filter_p->skb.priority - 1); /* stored with +1 bias so 0 means "unused" — TODO confirm */
+    }
+    if(blog_rule_filterInUse(filter_p->skb.markFlowId))
+    {
+        printk("\tmark->flowId : %d\n", filter_p->skb.markFlowId);
+    }
+    if(blog_rule_filterInUse(filter_p->skb.markPort))
+    {
+        printk("\tmark->port : %d\n", filter_p->skb.markPort - 1); /* same +1 bias as priority — TODO confirm */
+    }
+
+    printk("Actions:\n");
+    for(i=0; i<blogRule_p->actionCount; ++i)
+    {
+        action_p = &blogRule_p->action[i];
+
+        if(action_p->cmd == BLOG_RULE_CMD_SET_MAC_DA ||
+           action_p->cmd == BLOG_RULE_CMD_SET_MAC_SA ||
+           action_p->cmd == BLOG_RULE_CMD_SET_STA_MAC_ADDRESS)
+        {
+            printk("\t"); /* MAC-address actions: print the address, not the scalar arg */
+            __printEthAddr(blogRuleCommandName[action_p->cmd],
+                           action_p->macAddr);
+        }
+        else
+        {
+            printk("\t%s : arg <%d>/<0x%02X>, tag <%d>\n",
+                   blogRuleCommandName[action_p->cmd],
+                   action_p->arg, action_p->arg, action_p->toTag);
+        }
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_add_action
+ * Description  : Appends an action to a Blog Rule. Returns 0 on success,
+ *------------------------------------------------------------------------------
+ */
+int blog_rule_add_action(blogRule_t *blogRule_p, blogRuleAction_t *action_p)
+{
+    int ret = 0;
+
+    if(blogRule_p->actionCount == BLOG_RULE_ACTION_MAX)
+    {
+        printk("ERROR : Maximum number of actions reached for blogRule_p <0x%08X>\n",
+               (unsigned int)blogRule_p); /* NOTE(review): pointer cast truncates on 64-bit */
+
+        ret = -ENOMEM; /* NOTE(review): table full, not allocation failure — -ENOSPC would be more precise */
+        goto out;
+    }
+
+    blogRule_p->action[blogRule_p->actionCount] = *action_p; /* struct copy; caller keeps ownership of action_p */
+
+    blogRule_p->actionCount++;
+
+out:
+    return ret;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : blog_rule_delete_action
+ * Description  : Clears actionCount on every rule in the chain (returns 0).
+ *------------------------------------------------------------------------------
+ */
+int blog_rule_delete_action( void *rule_p )
+{
+    blogRule_t *blogrule_p = (blogRule_t *)rule_p;
+    int ret = 0;
+
+    while ( blogrule_p != NULL )
+    {
+        blogrule_p->actionCount = 0; /* actions are logically deleted; array contents left as-is */
+        blogrule_p = blogrule_p->next_p;
+    }
+
+    return ret;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : __init_blog_rule
+ * Description  : Initializes the Blog Rule subsystem.
+ *------------------------------------------------------------------------------
+ */
+static int __init __init_blog_rule(void)
+{
+    int ret = 0;
+
+    /* create a slab cache for blog rule objects */
+    blogRuleCtrl.kmemCache = kmem_cache_create("blog_rule",
+                                               sizeof(blogRule_t),
+                                               0, /* align */
+                                               SLAB_HWCACHE_ALIGN, /* flags */
+                                               NULL); /* ctor */
+    if(blogRuleCtrl.kmemCache == NULL)
+    {
+        printk("ERROR : Unable to create Blog Rule cache\n");
+
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    printk("BLOG Rule %s Initialized\n", BLOG_RULE_VERSION);
+
+out:
+    return ret;
+}
+
+/* /\* */
+/*  *------------------------------------------------------------------------------ */
+/*  * Function     : __exit_blog_rule */
+/*  * Description  : Brings down the Blog Rule subsystem. */
+/*  *------------------------------------------------------------------------------ */
+/*  *\/ */
+/* void __exit __exit_blog_rule(void) */
+/* { */
+/*     kmem_cache_destroy(blogRuleCtrl.kmemCache); */
+/* } */
+
+subsys_initcall(__init_blog_rule);
+
+EXPORT_SYMBOL(blog_rule_alloc);
+EXPORT_SYMBOL(blog_rule_free);
+EXPORT_SYMBOL(blog_rule_free_list);
+EXPORT_SYMBOL(blog_rule_init);
+EXPORT_SYMBOL(blog_rule_dump);
+EXPORT_SYMBOL(blog_rule_add_action);
+EXPORT_SYMBOL(blog_rule_delete_action);
+EXPORT_SYMBOL(blogRuleVlanHook);
+EXPORT_SYMBOL(blogRuleVlanNotifyHook);
+#if (defined(CONFIG_BCM_ARL) || defined(CONFIG_BCM_ARL_MODULE))
+EXPORT_SYMBOL(bcm_arl_process_hook_g);
+#endif
+#endif /* defined(CONFIG_BCM_KF_BLOG) */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e4fbfd6e2bd43a0cda49f705058631a1bfec7811..3381814ba48c53ee618ecd68f36b97a4886994c7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -127,6 +127,67 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 	goto out;
 }
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+/*
+ *	skb_copy_datagram_to_kernel_iovec - Copy a datagram to a kernel iovec structure.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying from
+ *	@to: io vector to copy to
+ *	@len: amount of data to copy from buffer to iovec
+ *	@dma_cookie: opaque cookie passed through to memcpy_tokerneliovec
+ *	Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_to_kernel_iovec(const struct sk_buff *skb, int offset,
+				      struct iovec *to, int len, unsigned int *dma_cookie)
+{
+	int i, fraglen, end = 0;
+	struct sk_buff *next = skb_shinfo(skb)->frag_list;
+
+	if (!len)
+		return 0;
+
+next_skb:
+	fraglen = skb_headlen(skb);
+	i = -1; /* -1 = linear head area; 0.. index into page frags */
+
+	while (1) {
+		int start = end;
+
+		if ((end += fraglen) > offset) { /* this segment overlaps the copy window */
+			int copy = end - offset;
+			int o = offset - start;
+
+			if (copy > len)
+				copy = len;
+			if (i == -1)
+				memcpy_tokerneliovec(to, skb->data + o, copy, dma_cookie);
+			else {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+				struct page *page = skb_frag_page(frag);
+				void *p = kmap(page) + frag->page_offset + o; /* kmap: frag pages may be highmem */
+				memcpy_tokerneliovec(to, p, copy, dma_cookie);
+				kunmap(page);
+			}
+
+			if (!(len -= copy))
+				return 0; /* requested length fully copied */
+			offset += copy;
+		}
+		if (++i >= skb_shinfo(skb)->nr_frags)
+			break;
+		fraglen = skb_shinfo(skb)->frags[i].size;
+	}
+	if (next) {
+		skb = next;
+		BUG_ON(skb_shinfo(skb)->frag_list); /* only one level of frag_list nesting supported */
+		next = skb->next;
+		goto next_skb;
+	}
+
+	return -EFAULT; /* skb chain exhausted before len bytes were copied */
+}
+#endif
+
 /**
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
diff --git a/net/core/dev.c b/net/core/dev.c
index 1fc16ee0f9e515f1b3a8cbd83cb0ccfaf4bbe401..2c00e08f90f3db5028b19b6f13fc171db01e3f28 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -135,8 +135,20 @@
 #include <linux/net_tstamp.h>
 #include <linux/static_key.h>
 #include <net/flow_keys.h>
+#if defined(CONFIG_BCM_KF_IGMP)
+#include <linux/mroute.h>
+#endif
 
 #include "net-sysfs.h"
+#if defined(CONFIG_BCM_KF_BALOO) && defined(BCM_BALOO)
+#include <asm/baloo.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+#include <linux/kthread.h>
+#include <linux/bcm_realtime.h>
+#include "skb_defines.h"
+#endif
 
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
@@ -177,6 +189,7 @@
 
 static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
 /*
@@ -201,6 +214,7 @@ static struct list_head ptype_all __read_mostly;	/* Taps */
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
+
 static inline void dev_base_seq_inc(struct net *net)
 {
 	while (++net->dev_base_seq == 0);
@@ -365,6 +379,91 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+/* Adds a NON-ROOT device to a path. A Root device is indirectly
+   added to a path once another device points to it */
+int netdev_path_add(struct net_device *new_dev, struct net_device *next_dev)
+{
+    if(netdev_path_is_linked(new_dev))
+    {
+        /* new device already in a path, fail */
+        return -EBUSY;
+    }
+
+    netdev_path_next_dev(new_dev) = next_dev;
+
+    next_dev->path.refcount++; /* NOTE(review): plain increment, not atomic — presumably RTNL-serialized; confirm */
+
+    return 0;
+}
+
+/* Removes a device from a path. Fails with -EBUSY while other
+   devices still reference it (i.e. it is not a leaf). */
+int netdev_path_remove(struct net_device *dev)
+{
+    if(!netdev_path_is_leaf(dev))
+    {
+        /* device referenced by one or more interfaces, fail */
+        return -EBUSY;
+    }
+
+    if(netdev_path_is_root(dev))
+    {
+        /* device is the first in the list */
+        /* Nothing to do */
+        return 0;
+    }
+
+    netdev_path_next_dev(dev)->path.refcount--; /* drop our reference on the next hop */
+
+    netdev_path_next_dev(dev) = NULL;
+
+    return 0;
+}
+
+/* Prints all devices in a path, from 'dev' down to the root device */
+void netdev_path_dump(struct net_device *dev)
+{
+    printk("netdev path : ");
+
+    while(1)
+    {
+        printk("%s", dev->name);
+
+        if(netdev_path_is_root(dev))
+        {
+            break; /* root has no next device */
+        }
+
+        printk(" -> ");
+
+        dev = netdev_path_next_dev(dev);
+    }
+
+    printk("\n");
+}
+
+int netdev_path_set_hw_subport_mcast_idx(struct net_device *dev,
+                                         unsigned int subport_idx) /* returns 0, or -1 if subport_idx out of range */
+{
+    if(subport_idx >= NETDEV_PATH_HW_SUBPORTS_MAX)
+    {
+        printk(KERN_ERR "%s : Invalid subport <%u>, max <%u>",
+               __FUNCTION__, subport_idx, NETDEV_PATH_HW_SUBPORTS_MAX);
+        return -1;
+    }
+
+    dev->path.hw_subport_mcast_idx = subport_idx;
+
+    return 0;
+}
+
+EXPORT_SYMBOL(netdev_path_add);
+EXPORT_SYMBOL(netdev_path_remove);
+EXPORT_SYMBOL(netdev_path_dump);
+EXPORT_SYMBOL(netdev_path_set_hw_subport_mcast_idx);
+#endif /* CONFIG_BCM_KF_NETDEV_PATH */
+
+
 /*******************************************************************************
 
 		Protocol management and registration routines
@@ -865,6 +964,39 @@ bool dev_valid_name(const char *name)
 }
 EXPORT_SYMBOL(dev_valid_name);
 
+#if defined(CONFIG_BCM_KF_FAP)
+/**
+ * Add (op != 0) or remove (op == 0) @features on all HW-switch/bridge netdevs
+**/
+void dev_change_features(unsigned int features, unsigned int op)
+{
+    struct net *net;
+    struct net_device *dev;
+
+    write_lock_bh(&dev_base_lock); /* NOTE(review): dev->features is normally changed under RTNL — confirm this locking is sufficient */
+
+    for_each_net(net) {
+        for_each_netdev(net, dev) {
+            if(dev->priv_flags & (IFF_HW_SWITCH | IFF_EBRIDGE) )
+            {
+                if(op)
+                {
+                    /* FAP power up = add features */
+                    dev->features |= features;
+                }
+                else
+                {
+                    /* FAP powerdown = remove features */
+                    dev->features &= ~features;
+                }
+            }
+        }
+    }
+
+    write_unlock_bh(&dev_base_lock);
+}
+EXPORT_SYMBOL(dev_change_features);
+#endif
 /**
  *	__dev_alloc_name - allocate a name for a device
  *	@net: network namespace to allocate the device name in
@@ -1635,6 +1767,9 @@ static inline int deliver_skb(struct sk_buff *skb,
 			      struct net_device *orig_dev)
 {
 	atomic_inc(&skb->users);
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+	BALOOKLVL(3, balook1(BALOO_NET_SKBRECV_EVT, (int)pt_prev->func));
+#endif
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -1679,13 +1814,16 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 			    skb2->network_header > skb2->tail) {
 				if (net_ratelimit())
 					pr_crit("protocol %04x is buggy, dev %s\n",
-						ntohs(skb2->protocol),
-						dev->name);
+					       ntohs(skb2->protocol),
+					       dev->name);
 				skb_reset_network_header(skb2);
 			}
 
 			skb2->transport_header = skb2->network_header;
 			skb2->pkt_type = PACKET_OUTGOING;
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+            BALOOKLVL(3, balook1(BALOO_NET_SKBSEND_EVT, (int)ptype->func));
+#endif
 			pt_prev = ptype;
 		}
 	}
@@ -1725,7 +1863,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 		tc = &dev->tc_to_txq[q];
 		if (tc->offset + tc->count > txq) {
 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
-				i, q);
+				   i, q);
 			netdev_set_prio_tc_map(dev, i, 0);
 		}
 	}
@@ -1835,6 +1973,156 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_kfree_skb_irq);
 
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+static struct task_struct *skbFreeTask = NULL; /* worker created by create_skbFreeTask() */
+static struct sk_buff *skb_completion_queue = NULL; /* singly linked via skb->next, guarded by skbfree_lock */
+static spinlock_t skbfree_lock;
+
+/* Setting value to WDF budget + some room for SKBs
+   freed by other threads */
+#define MAX_SKB_FREE_BUDGET  256 
+
+static int skbFree_thread_func(void *thread_data)
+{
+    unsigned int budget;
+    struct sk_buff *skb;
+    struct sk_buff *free_list= NULL;
+    unsigned long flags;
+
+    while(!kthread_should_stop())
+    {
+        budget = MAX_SKB_FREE_BUDGET; /* cap per-iteration work so we don't hog the CPU */
+
+    update_list:
+        spin_lock_irqsave(&skbfree_lock, flags);
+        if(free_list == NULL)
+        {
+            if(skb_completion_queue)
+            {
+                free_list = skb_completion_queue; /* take the whole pending queue in one swap */
+                skb_completion_queue = NULL;
+            }
+        }
+        spin_unlock_irqrestore(&skbfree_lock, flags);
+
+        local_bh_disable();
+        while(free_list && budget)
+        {
+            skb = free_list;
+            free_list = free_list->next;
+            __kfree_skb(skb);
+			budget--;
+        }
+        local_bh_enable();	
+
+        if(free_list || skb_completion_queue)
+        {
+            if(budget)
+                goto update_list; /* budget left: refill from the shared queue and keep going */
+            /*we still have packets in Q, reschedule the task */
+            yield();
+            //schedule();
+        }
+        else
+        {
+            set_current_state(TASK_INTERRUPTIBLE); /* sleep until dev_kfree_skb_thread() wakes us */
+            schedule();
+        }
+    }
+    return 0;
+}
+
+struct task_struct* create_skbFreeTask(void) /* creates (but does not wake) the skb-free worker; NULL on failure */
+{
+    struct task_struct *tsk;
+    struct sched_param param;
+
+	spin_lock_init(&skbfree_lock);
+
+    tsk = kthread_create(skbFree_thread_func, NULL,"skbFreeTask");
+
+    if (IS_ERR(tsk)) {
+        printk("skbFreeTask creation failed\n");
+        return NULL;
+    }
+
+    param.sched_priority = BCM_RTPRIO_DATA; /* real-time priority shared with other BCM data-path threads */
+    sched_setscheduler(tsk, SCHED_RR, &param);
+
+    //kthread_bind(tsk, 0);/*pin the thread to cpu0 */
+    printk("skbFreeTask created successfully\n");
+    return tsk; /* caller is expected to store this in skbFreeTask — TODO confirm */
+}
+
+#include <linux/gbpm.h> /* NOTE(review): mid-file include; should move to the top-of-file include block */
+gbpm_evt_hook_t gbpm_fap_evt_hook_g = (gbpm_evt_hook_t)NULL; /* set by the FAP/GBPM driver when loaded */
+EXPORT_SYMBOL(gbpm_fap_evt_hook_g);
+static int fapdrv_thread_func(void *thread_data)
+{
+    while(!kthread_should_stop())
+    {
+        static int scheduled=0; /* log the first dispatch only */
+        local_bh_disable();
+
+        if (!scheduled)
+        {
+            printk("gbpm_do_work scheduled\n");
+            scheduled = 1;
+        }
+        if (gbpm_fap_evt_hook_g) 
+            gbpm_fap_evt_hook_g();
+
+        local_bh_enable();	
+
+        set_current_state(TASK_INTERRUPTIBLE); /* sleep until the next wake_up_process() */
+        schedule();
+    }
+    return 0;
+}
+
+
+struct task_struct *fapDrvTask = NULL; /* FAP driver worker; exported so the FAP driver can wake it */
+EXPORT_SYMBOL(fapDrvTask);
+struct task_struct* create_fapDrvTask(void) /* returns the started task, or 0/NULL on failure */
+{
+    // Create FAP driver thread for dqm work handling
+    fapDrvTask = kthread_create(fapdrv_thread_func, NULL,"bcmFapDrv");
+
+    if (IS_ERR(fapDrvTask)) {
+        printk("fapDrvTask creation failed\n");
+        return 0; /* NOTE(review): fapDrvTask is left holding the ERR_PTR — confirm callers never dereference it */
+    }
+    {
+        struct sched_param param;
+        param.sched_priority = BCM_RTPRIO_DATA;
+        sched_setscheduler(fapDrvTask, SCHED_RR, &param);
+
+        kthread_bind(fapDrvTask, 0);/* pin the thread to CPU 0 */
+    }
+
+    wake_up_process(fapDrvTask);
+    return fapDrvTask;
+}
+
+/* Queue the skb so it can be freed in thread context.
+ * Note: the worker thread is not bound to any CPU; we rely on the
+ * scheduler to run it on the CPU with the least load.
+ */
+void dev_kfree_skb_thread(struct sk_buff *skb)
+{
+    unsigned long flags;
+    if (atomic_dec_and_test(&skb->users)) { /* only free when the last reference drops */
+        spin_lock_irqsave(&skbfree_lock, flags);
+        skb->next = skb_completion_queue; /* push onto the shared LIFO list */
+        skb_completion_queue = skb;
+        spin_unlock_irqrestore(&skbfree_lock, flags);
+        if(skbFreeTask->state != TASK_RUNNING)
+            wake_up_process(skbFreeTask); /* NOTE(review): unlocked state check is a heuristic; wake_up_process itself is safe */
+    }
+}
+EXPORT_SYMBOL(dev_kfree_skb_thread);
+#endif
+
 void dev_kfree_skb_any(struct sk_buff *skb)
 {
 	if (in_irq() || irqs_disabled())
@@ -1878,7 +2166,11 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_DEBUGGING_DISABLED_FIX)
+	static const netdev_features_t null_features __attribute__((unused)) = 0;
+#else
 	static const netdev_features_t null_features = 0;
+#endif
 	struct net_device *dev = skb->dev;
 	const char *driver = "";
 
@@ -2214,7 +2506,44 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		skb_len = skb->len;
-		rc = ops->ndo_start_xmit(skb, dev);
+
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+        {
+            unsigned int devId = bcm_is_gso_loopback_dev(dev);
+
+            if(devId && bcm_gso_loopback_hw_offload)
+            { 
+                if(skb_shinfo(skb)->nr_frags || skb_is_gso(skb) || (skb->ip_summed == CHECKSUM_PARTIAL))
+                {
+                    rc = bcm_gso_loopback_hw_offload(skb, devId); 
+                }
+                else if(!skb->recycle_hook) 
+                {
+                 /*  To avoid any out-of-order packets, send all locally generated packets
+                  *  through the GSO loopback.
+                  */
+
+                    /* TODO: we are classifying the traffic as local based on the recycle hook.
+                     * But cloned forwarded traffic can also have a NULL recycle_hook, so that traffic
+                     * will make an extra trip through the FAP unnecessarily. We do not expect a lot
+                     * of traffic in this case, so this should be okay for now. Later, add a flag
+                     * in the skb and mark the skb as local in the local_out hook.
+                     */
+                        rc = bcm_gso_loopback_hw_offload(skb, devId); 
+                }
+                else
+                {
+                    rc = ops->ndo_start_xmit(skb, dev);
+                }
+            }
+            else
+            {
+                rc = ops->ndo_start_xmit(skb, dev);
+            }
+        }
+#else
+        rc = ops->ndo_start_xmit(skb, dev);
+#endif
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
 		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
@@ -2259,6 +2588,54 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	return rc;
 }
 
+#if defined(CONFIG_BCM_KF_SPDSVC) && defined(CONFIG_BCM_SPDSVC_SUPPORT)
+/* Perform GSO segmentation and checksum computation on the given SKB in
+   software, regardless of the advertised network interface features. */
+int skb_bypass_hw_features(struct sk_buff *skb) /* returns 0 on success; -1 and frees skb on failure */
+{
+    netdev_features_t features;
+
+    features = netif_skb_features(skb);
+
+    features &= ~(NETIF_F_SG |
+                  NETIF_F_IP_CSUM |
+                  NETIF_F_IPV6_CSUM |
+                  NETIF_F_TSO |
+                  NETIF_F_TSO6 |
+                  NETIF_F_UFO); /* mask off offloads so the software paths below are taken */
+
+    if (netif_needs_gso(skb, features)) {
+        if (unlikely(dev_gso_segment(skb, features))) {
+            goto out_kfree_skb;
+        }
+    } else {
+        if (skb_needs_linearize(skb, features) &&
+            __skb_linearize(skb)) {
+            goto out_kfree_skb;
+        }
+
+        /* If packet is not checksummed and device does not
+         * support checksumming for this protocol, complete
+         * checksumming here.
+         */
+        if (skb->ip_summed == CHECKSUM_PARTIAL) {
+            skb_set_transport_header(skb,
+                                     skb_checksum_start_offset(skb));
+            if (skb_checksum_help(skb)) {
+                goto out_kfree_skb;
+            }
+        }
+    }
+
+    return 0;
+
+out_kfree_skb:
+    kfree_skb(skb); /* skb is consumed on error; caller must not touch it after -1 */
+    return -1;
+}
+EXPORT_SYMBOL(skb_bypass_hw_features);
+#endif
+
 static u32 hashrnd __read_mostly;
 
 /*
@@ -2509,6 +2886,9 @@ int dev_queue_xmit(struct sk_buff *skb)
 	trace_net_dev_queue(skb);
 	if (q->enqueue) {
 		rc = __dev_xmit_skb(skb, q, dev, txq);
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+            BALOOKLVL(3, balook1(BALOO_NET_SKBSEND_EVT, (int)q->enqueue));
+#endif
 		goto out;
 	}
 
@@ -2536,6 +2916,10 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+                BALOOKLVL(3, balook1(BALOO_NET_SKBSEND_EVT,
+                                     (int)dev->hard_start_xmit));
+#endif
 				rc = dev_hard_start_xmit(skb, dev, txq);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
@@ -2604,8 +2988,8 @@ void __skb_get_rxhash(struct sk_buff *skb)
 	if (keys.ports) {
 		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
 			swap(keys.port16[0], keys.port16[1]);
-		skb->l4_rxhash = 1;
-	}
+			skb->l4_rxhash = 1;
+		}
 
 	/* get a consistent hash (same value on both flow directions) */
 	if ((__force u32)keys.dst < (__force u32)keys.src)
@@ -2907,10 +3291,29 @@ int netif_rx(struct sk_buff *skb)
 
 	/* if netpoll wants it, pretend we never saw it */
 	if (netpoll_rx(skb))
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+	{
+		balook2(BALOO_NET_RX_DROP_EVT, (int)skb, (int)skb->dev);
 		return NET_RX_DROP;
+	}
+#else
+		return NET_RX_DROP;
+#endif
 
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/*mark IFFWAN flag in skb based on dev->priv_flags */
+	if(skb->dev)
+	{
+		unsigned int mark = skb->mark;
+		skb->mark |= SKBMARK_SET_IFFWAN_MARK(mark, ((skb->dev->priv_flags & IFF_WANDEV) ? 1:0));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+		if ( skb->blog_p )
+			skb->blog_p->isWan = (skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0;
+#endif /* defined(CONFIG_BLOG_FEATURE) */
+	}
+#endif /* CONFIG_BCM_KF_WANDEV */
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
@@ -2935,6 +3338,11 @@ int netif_rx(struct sk_buff *skb)
 		ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
 		put_cpu_light();
 	}
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+	if (ret == NET_RX_DROP)
+		balook2(BALOO_NET_RX_DROP_EVT, (int)skb, (int)skb->dev);
+#endif
+
 	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
@@ -2953,6 +3361,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
+
 #ifdef CONFIG_PREEMPT_RT_FULL
 /*
  * RT runs ksoftirqd as a real time thread and the root_lock is a
@@ -3037,6 +3446,7 @@ static void net_tx_action(struct softirq_action *h)
 						  &q->state);
 				}
 			}
+			
 		}
 	}
 }
@@ -3068,7 +3478,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 	if (unlikely(MAX_RED_LOOP < ttl++)) {
 		if (net_ratelimit())
 			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
-				skb->skb_iif, dev->ifindex);
+			       skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -3143,6 +3553,13 @@ int netdev_rx_handler_register(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+int (*bcm_vlan_handle_frame_hook)(struct sk_buff **) = NULL;
+#endif
+#if (defined(CONFIG_BR_IGMP_SNOOP) && defined(CONFIG_BCM_KF_IGMP)) || (defined(CONFIG_BR_MLD_SNOOP) && defined(CONFIG_BCM_KF_MLD))
+void (*bcm_mcast_def_pri_queue_hook)(struct sk_buff *) = NULL;
+#endif
+
 /**
  *	netdev_rx_handler_unregister - unregister receive handler
  *	@dev: device to unregister a handler from
@@ -3170,6 +3587,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	int ret = NET_RX_DROP;
 	__be16 type;
 
+
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
@@ -3178,6 +3596,19 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (netpoll_receive_skb(skb))
 		return NET_RX_DROP;
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* Mark the IFFWAN flag in the skb based on dev->priv_flags */
+	if(skb->dev)
+	{
+		unsigned int mark = skb->mark;
+		skb->mark |= SKBMARK_SET_IFFWAN_MARK(mark, ((skb->dev->priv_flags & IFF_WANDEV) ? 1:0));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+		if ( skb->blog_p )
+			skb->blog_p->isWan = (skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0;
+#endif
+	}
+#endif
+
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 	orig_dev = skb->dev;
@@ -3190,6 +3621,36 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 	rcu_read_lock();
 
+#if (defined(CONFIG_BCM_KF_IGMP) && defined(CONFIG_BR_IGMP_SNOOP)) || (defined(CONFIG_BCM_KF_MLD) && defined(CONFIG_BR_MLD_SNOOP))
+   if ( bcm_mcast_def_pri_queue_hook ) {
+      bcm_mcast_def_pri_queue_hook(skb);
+   }
+#endif   
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+#if defined(CONFIG_BCM_TMS_MODULE)
+	if (skb->protocol == htons(ETH_P_8021AG) || skb->protocol == htons(ETH_P_8023AH)) {
+         goto skip_vlanctl;
+   }
+	else if (skb->protocol == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vh = (struct vlan_hdr *)skb->data;
+	   if (vh->h_vlan_encapsulated_proto == htons(ETH_P_8021AG)) {
+         goto skip_vlanctl;
+      }
+	}
+#endif
+
+   if (bcm_vlan_handle_frame_hook && (ret = bcm_vlan_handle_frame_hook(&skb)) != 0)
+   {
+      goto out;
+   }
+
+#if defined(CONFIG_BCM_TMS_MODULE)
+skip_vlanctl:      
+#endif
+        
+#endif
+
 another_round:
 
 	__this_cpu_inc(softnet_data.processed);
@@ -3234,6 +3695,15 @@ static int __netif_receive_skb(struct sk_buff *skb)
 			goto out;
 	}
 
+#if defined(CONFIG_BCM_KF_PPP)
+   if (skb->protocol == __constant_htons(ETH_P_PPP_SES) ||
+       skb->protocol == __constant_htons(ETH_P_PPP_DISC)) {
+      if (!memcmp(skb->mac_header, skb->dev->dev_addr, ETH_ALEN)) {
+         goto skip_rx_handler;
+      }
+   }
+#endif
+
 	if (rx_handler) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3253,6 +3723,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
 		}
 	}
 
+#if defined(CONFIG_BCM_KF_PPP)
+skip_rx_handler:
+#endif
+
 	/* deliver only exact match when indicated */
 	null_or_dev = deliver_exact ? skb->dev : NULL;
 
@@ -3269,8 +3743,14 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	}
 
 	if (pt_prev) {
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+        BALOOKLVL(3, balook1(BALOO_NET_SKBRECV_EVT,(int)pt_prev->func));
+#endif
 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
+#if defined(CONFIG_BCM_KF_BALOO) && defined(CONFIG_BALOO_NET_SUPPORT)
+        	balook2(BALOO_NET_RX_DROP_EVT, (int)skb, (int)skb->dev);
+#endif
 		atomic_long_inc(&skb->dev->rx_dropped);
 		kfree_skb(skb);
 		/* Jamal, now you will not able to escape explaining
@@ -3320,8 +3800,8 @@ int netif_receive_skb(struct sk_buff *skb)
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
-	}
+			rcu_read_unlock();
+		}
 #endif
 	return __netif_receive_skb(skb);
 }
@@ -3509,8 +3989,8 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
 		if (maclen == ETH_HLEN)
-			diffs |= compare_ether_header(skb_mac_header(p),
-						      skb_gro_mac_header(skb));
+		diffs |= compare_ether_header(skb_mac_header(p),
+					      skb_gro_mac_header(skb));
 		else if (!diffs)
 			diffs = memcmp(skb_mac_header(p),
 				       skb_gro_mac_header(skb),
@@ -3874,6 +4354,7 @@ static void net_rx_action(struct softirq_action *h)
 
 		have = netpoll_poll_lock(n);
 
+
 		weight = n->weight;
 
 		/* This NAPI_STATE_SCHED test is for avoiding a race
@@ -4065,7 +4546,7 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
 		if (++count == offset)
 			return dev;
-	}
+		}
 
 	return NULL;
 }
@@ -4140,6 +4621,66 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 		   stats->tx_compressed);
 }
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+
+static void devextstats_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+{
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+    
+    unsigned long long rx_unicast_packets=0, tx_unicast_packets=0;  /* Calculated unicast packets */
+    
+    /* Calculate unicast packet counts as total packets less broadcast and multicast.
+       Normalize to zero in case an erroneous sum of multicast and broadcast packets is reported */
+    if((stats->multicast + stats->rx_broadcast_packets) < stats->rx_packets)
+        rx_unicast_packets = stats->rx_packets - (stats->multicast + stats->rx_broadcast_packets);
+    else
+        rx_unicast_packets = 0;
+        
+    if((stats->tx_multicast_packets + stats->tx_broadcast_packets) < stats->tx_packets)
+        tx_unicast_packets = stats->tx_packets - (stats->tx_multicast_packets + stats->tx_broadcast_packets);
+    else
+        tx_unicast_packets = 0;
+        
+    /* Print basic statistics, which are identical to baseline with only a few spacing differences */
+	seq_printf(seq, "%6s:%8llu %7llu %4llu %4llu %4llu %5llu %5llu %5llu "
+		   "%8llu %7llu %4llu %4llu %4llu %4llu %4llu %5llu ",
+		   dev->name, stats->rx_bytes, stats->rx_packets,
+		   stats->rx_errors,
+		   stats->rx_dropped + stats->rx_missed_errors,
+		   stats->rx_fifo_errors,
+		   stats->rx_length_errors + stats->rx_over_errors +
+		    stats->rx_crc_errors + stats->rx_frame_errors,
+		   stats->rx_compressed, stats->multicast,
+		   stats->tx_bytes, stats->tx_packets,
+		   stats->tx_errors, stats->tx_dropped,
+		   stats->tx_fifo_errors, stats->collisions,
+		   stats->tx_carrier_errors +
+		    stats->tx_aborted_errors +
+		    stats->tx_window_errors +
+		    stats->tx_heartbeat_errors,
+		   stats->tx_compressed);    
+
+    /* Are extended stats supported? */
+    if (dev->features & NETIF_F_EXTSTATS)
+        /* Print extended statistics */ 
+        seq_printf(seq, "%6llu %6llu %6llu "  /* Multicast */
+                        "%5llu %5llu %5llu %5llu "  /* Unicast and broadcast*/
+                        "%5llu\n",  /* Unknown RX errors */                    
+               stats->tx_multicast_packets, stats->rx_multicast_bytes, stats->tx_multicast_bytes, 
+               rx_unicast_packets, tx_unicast_packets, stats->rx_broadcast_packets, stats->tx_broadcast_packets, 
+               stats->rx_unknown_packets);
+    else
+        /* Print placeholder with dashes */
+        seq_printf(seq, "     -      -      - "  /* Multicast */
+                        "    -     -     -     - "  /* Unicast and broadcast*/
+                        "    -\n");  /* Unknown RX errors */     
+        
+}
+#endif
+
+
+
 /*
  *	Called from the PROCfs module. This now uses the new arbitrary sized
  *	/proc/net interface to create /proc/net/dev
@@ -4157,6 +4698,23 @@ static int dev_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+/*
+ *	Called from the PROCfs module to create extended statistics file /proc/net/dev_extstats
+ */
+static int devextstats_seq_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN)
+ 		seq_puts(seq, "   Basic Statistics                                                                                     |   Extended Statistics\n"
+                      "Inter-|   Receive                                       |  Transmit                                     |multicast           |unicast    |broadcast  |unkn\n"
+			          " face |  bytes    pckts errs drop fifo frame  comp multi|  bytes    pckts errs drop fifo coll carr  comp|txpckt rxbyte txbyte|   rx    tx|   rx    tx|rxerr\n");
+                    // 123456:12345678 1234567 1234 1234 1234 12345 12345 12345 12345678 1234567 1234 1234 1234 1234 1234 12345 123456 123456 123456 12345 12345 12345 12345 12345
+	else
+		devextstats_seq_printf_stats(seq, v);
+	return 0;
+}
+#endif
+
 static struct softnet_data *softnet_get_online(loff_t *pos)
 {
 	struct softnet_data *sd = NULL;
@@ -4196,6 +4754,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+
 static const struct seq_operations dev_seq_ops = {
 	.start = dev_seq_start,
 	.next  = dev_seq_next,
@@ -4203,12 +4762,29 @@ static const struct seq_operations dev_seq_ops = {
 	.show  = dev_seq_show,
 };
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+static const struct seq_operations devextstats_seq_ops = {
+	.start = dev_seq_start,
+	.next  = dev_seq_next,
+	.stop  = dev_seq_stop,
+	.show  = devextstats_seq_show,
+};
+#endif
+
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &dev_seq_ops,
 			    sizeof(struct seq_net_private));
 }
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+static int devextstats_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &devextstats_seq_ops,
+			    sizeof(struct seq_net_private));
+}
+#endif
+
 static const struct file_operations dev_seq_fops = {
 	.owner	 = THIS_MODULE,
 	.open    = dev_seq_open,
@@ -4217,6 +4793,17 @@ static const struct file_operations dev_seq_fops = {
 	.release = seq_release_net,
 };
 
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+static const struct file_operations devextstats_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open    = devextstats_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_net,
+};
+#endif
+
+
 static const struct seq_operations softnet_seq_ops = {
 	.start = softnet_seq_start,
 	.next  = softnet_seq_next,
@@ -4342,6 +4929,7 @@ static const struct file_operations ptype_seq_fops = {
 };
 
 
+
 static int __net_init dev_proc_net_init(struct net *net)
 {
 	int rc = -ENOMEM;
@@ -4355,9 +4943,21 @@ static int __net_init dev_proc_net_init(struct net *net)
 
 	if (wext_proc_init(net))
 		goto out_ptype;
+        
+#if defined(CONFIG_BCM_KF_EXTSTATS) && defined(CONFIG_BLOG)
+	if (!proc_net_fops_create(net, "dev_extstats", S_IRUGO, &devextstats_seq_fops))
+		goto out_extstats;
+#endif        
+        
 	rc = 0;
 out:
 	return rc;
+    
+#if defined(CONFIG_BCM_KF_EXTSTATS) && defined(CONFIG_BLOG)
+out_extstats:
+	proc_net_remove(net, "dev_extstats");
+#endif
+    
 out_ptype:
 	proc_net_remove(net, "ptype");
 out_softnet:
@@ -4871,6 +5471,40 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
 		ifr->ifr_qlen = dev->tx_queue_len;
 		return 0;
 
+#if defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD)
+		case SIOCDEVISWANDEV:
+			if(netdev_path_is_leaf(dev))
+			{
+#if defined(CONFIG_BCM_KF_WANDEV)
+				if ((dev->priv_flags & IFF_WANDEV) || 
+				    (dev->priv_flags & IFF_EPON_IF))
+				{
+					ifr->ifr_flags = 1;
+				}
+#endif
+				else
+				{
+					ifr->ifr_flags = 0;
+				}
+			}
+			else
+			{
+				ifr->ifr_flags = 0;
+			}
+			return 0;
+
+		case SIOCDEVISBRDEV:
+			if (dev->priv_flags & IFF_EBRIDGE)
+			{
+				ifr->ifr_flags = 1;
+			}
+			else
+			{
+				ifr->ifr_flags = 0;
+			}
+			return 0;
+#endif
+
 	default:
 		/* dev_ioctl() should ensure this case
 		 * is never reached
@@ -4959,6 +5593,36 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 			return err;
 		/* fall through */
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+		case SIOCGIFTRANSSTART:
+			ifr->ifr_ifru.ifru_ivalue = dev->trans_start;
+			return 0;
+		case SIOCCIFSTATS:	/* Clean up the Stats of a device */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+			if ( dev->clr_stats )
+				dev->clr_stats( dev );
+			else
+#endif
+			{
+				struct net_device_stats * pStats;
+				if (dev->netdev_ops == NULL || dev->netdev_ops->ndo_get_stats == NULL)
+				{
+					printk("[%s.%d]: dev->netdev_ops->ndo_get_stats is %p (%s)\n", __func__, __LINE__, dev->netdev_ops->ndo_get_stats, dev->name);
+					return 0;
+				}
+				else
+				{
+					pStats = dev->netdev_ops->ndo_get_stats(dev);
+				}
+				if (pStats)
+				    memset(pStats, 0, sizeof(struct net_device_stats));
+				else
+					printk("ERROR: [%s.%d]: could not reset stats for device %s\n", __func__, __LINE__, dev->name);
+			}
+
+			return 0;
+#endif
+
 	/*
 	 *	Unknown or private ioctl
 	 */
@@ -5057,6 +5721,13 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	case SIOCGIFMAP:
 	case SIOCGIFINDEX:
 	case SIOCGIFTXQLEN:
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	case SIOCGIFTRANSSTART:
+#endif
+#if defined(CONFIG_BCM_KF_IGMP) || defined(CONFIG_BCM_KF_MLD)
+	case SIOCDEVISWANDEV:
+	case SIOCDEVISBRDEV:
+#endif
 		dev_load(net, ifr.ifr_name);
 		rcu_read_lock();
 		ret = dev_ifsioc_locked(net, &ifr, cmd);
@@ -5157,6 +5828,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	 */
 	default:
 		if (cmd == SIOCWANDEV ||
+#if defined(CONFIG_BCM_KF_NETFILTER)
+		    cmd == SIOCCIFSTATS ||
+#endif                
 		    (cmd >= SIOCDEVPRIVATE &&
 		     cmd <= SIOCDEVPRIVATE + 15)) {
 			dev_load(net, ifr.ifr_name);
@@ -5187,7 +5861,29 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 static int dev_new_index(struct net *net)
 {
 	static int ifindex;
+#if defined(CONFIG_BCM_KF_LIMITED_IFINDEX)
+   int loop = 0;
+#endif   
+   
 	for (;;) {
+#if defined(CONFIG_BCM_KF_LIMITED_IFINDEX)
+#define BCM_MAX_IFINDEX 128
+
+      /* On DSL CPE, xtm interfaces are created/deleted when
+       * the link goes up/down. On a noisy dsl link that goes
+       * down and up frequently, the xtm ifindex may get higher
+       * and higher if we don't reuse the lower ifindex values
+       * that were released when the interfaces were deleted.
+       * Therefore, we limit index value to BCM_MAX_IFINDEX,
+       * and try reuse ifindex that had been released.
+       */
+      WARN_ONCE((++loop > BCM_MAX_IFINDEX),
+         "Cannot get new ifindex. All %d index values had been used.\n", BCM_MAX_IFINDEX);
+		if (ifindex >= BCM_MAX_IFINDEX)
+			ifindex = 0;   /* try to reuse an ifindex that had been released. */
+         
+#undef BCM_MAX_IFINDEX
+#endif   
 		if (++ifindex <= 0)
 			ifindex = 1;
 		if (!__dev_get_by_index(net, ifindex))
@@ -5494,6 +6190,99 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	return 0;
 }
 
+
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+
+int (*bcm_gso_loopback_hw_offload)(struct sk_buff *skb,  unsigned int txDevId)= NULL;
+EXPORT_SYMBOL(bcm_gso_loopback_hw_offload);
+
+
+struct net_device *bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_MAXDEVS]; 
+
+void bcm_gso_loopback_devs_init(void)
+{
+    unsigned int i;
+    for(i=0; i<BCM_GSO_LOOPBACK_MAXDEVS; i++)
+    {
+        bcm_gso_loopback_devs[i] = NULL;
+    }
+}
+
+static inline void bcm_gso_loopback_add_devptr(struct net_device *dev)
+{
+
+    if(!strcmp("wl0", dev->name))
+    {
+        bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL0] = dev;
+    }
+    else if(!strcmp("wl1", dev->name))
+    {
+        bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL1] = dev;
+    }
+    else
+    {
+        /* not a known device */
+        return;
+
+    }
+    printk("+++++ Added gso loopback support for dev=%s <%p>\n",
+        dev->name, dev);
+}
+
+static inline void bcm_gso_loopback_del_devptr(struct net_device *dev)
+{
+    if(!strcmp("wl0", dev->name))
+    {
+        bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL0] = NULL;
+    }
+    else if(!strcmp("wl1", dev->name))
+    {
+        bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL1] = NULL;
+    }
+    else
+    {
+        /* not a known device */
+        return;
+    }
+    printk("------ Removed gso loopback support for dev=%s <%p>\n",
+        dev->name, dev);
+}
+
+inline unsigned int bcm_is_gso_loopback_dev(void *dev)
+{
+    int i;
+    for(i =1; i < BCM_GSO_LOOPBACK_MAXDEVS;i++)
+    {
+        if( bcm_gso_loopback_devs[i] == dev)
+        {
+            return i;
+        }
+    }
+    return BCM_GSO_LOOPBACK_NONE;
+}
+
+unsigned int bcm_gso_loopback_devptr2devid(void *dev)
+{
+    return bcm_is_gso_loopback_dev(dev);
+}
+EXPORT_SYMBOL(bcm_gso_loopback_devptr2devid);
+
+struct net_device * bcm_gso_loopback_devid2devptr(unsigned int devId)
+{
+    if( devId < BCM_GSO_LOOPBACK_MAXDEVS)
+    {
+        return bcm_gso_loopback_devs[devId];
+    }
+    else
+    {
+        printk(KERN_ERR "%s: invalid devId<%d> max devs=%d\n",
+                __FUNCTION__, devId, BCM_GSO_LOOPBACK_MAXDEVS);
+        return NULL;
+    }
+}
+EXPORT_SYMBOL(bcm_gso_loopback_devid2devptr);
+#endif
+
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -5557,11 +6346,11 @@ int register_netdevice(struct net_device *dev)
 
 	/* Turn on no cache copy if HW is doing checksum */
 	if (!(dev->flags & IFF_LOOPBACK)) {
-		dev->hw_features |= NETIF_F_NOCACHE_COPY;
+	dev->hw_features |= NETIF_F_NOCACHE_COPY;
 		if (dev->features & NETIF_F_ALL_CSUM) {
-			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-			dev->features |= NETIF_F_NOCACHE_COPY;
-		}
+		dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+		dev->features |= NETIF_F_NOCACHE_COPY;
+	}
 	}
 
 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -5592,6 +6381,10 @@ int register_netdevice(struct net_device *dev)
 	list_netdevice(dev);
 	add_device_randomness(dev->dev_addr, dev->addr_len);
 
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+    bcm_gso_loopback_add_devptr(dev);
+#endif
+
 	/* Notify protocols, that a new device appeared. */
 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
 	ret = notifier_to_errno(ret);
@@ -5744,7 +6537,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
 		if (time_after(jiffies, warning_time + 10 * HZ)) {
 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
-				 dev->name, refcnt);
+			       dev->name, refcnt);
 			warning_time = jiffies;
 		}
 	}
@@ -5825,11 +6618,11 @@ void netdev_run_todo(void)
  * fields in the same order, with only the type differing.
  */
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-			     const struct net_device_stats *netdev_stats)
+				    const struct net_device_stats *netdev_stats)
 {
 #if BITS_PER_LONG == 64
-	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
-	memcpy(stats64, netdev_stats, sizeof(*stats64));
+        BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+        memcpy(stats64, netdev_stats, sizeof(*stats64));
 #else
 	size_t i, n = sizeof(*stats64) / sizeof(u64);
 	const unsigned long *src = (const unsigned long *)netdev_stats;
@@ -5909,6 +6702,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	size_t alloc_size;
 	struct net_device *p;
 
+#if defined( CONFIG_BCM_KF_MISALIGN_MQS  )
+	static int offset = 0;
+#endif
+
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	if (txqs < 1) {
@@ -5932,13 +6729,33 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	/* ensure 32-byte alignment of whole construct */
 	alloc_size += NETDEV_ALIGN - 1;
 
+#if defined(CONFIG_BCM_KF_MISALIGN_MQS)
+// KU_TBD - This causes the ethernet driver to panic on boot in register_netdev
+//          BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
+
+	/* Add an offset to break possible alignment of dev structs in cache */
+	/* Note that "offset" is a static variable so it will retain its value */
+	/* on each call of this function */
+	alloc_size += offset;
+#endif
+
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
 		pr_err("alloc_netdev: Unable to allocate device\n");
 		return NULL;
 	}
 
+#if defined(CONFIG_BCM_KF_MISALIGN_MQS)
+	dev = PTR_ALIGN(p, NETDEV_ALIGN) + offset;
+	/* Increment offset in preparation for the next call to this function */
+	/* but don't allow it to increment excessively to avoid wasting memory */
+	offset += NETDEV_ALIGN;
+	if (offset >= 512) {
+		offset -= 512;
+	}
+#else
 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
+#endif
 	dev->padded = (char *)dev - (char *)p;
 
 	dev->pcpu_refcnt = alloc_percpu(int);
@@ -6111,6 +6928,9 @@ EXPORT_SYMBOL(unregister_netdevice_many);
 void unregister_netdev(struct net_device *dev)
 {
 	rtnl_lock();
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+    bcm_gso_loopback_del_devptr(dev);
+#endif
 	unregister_netdevice(dev);
 	rtnl_unlock();
 }
@@ -6472,7 +7292,7 @@ static void __net_exit default_device_exit(struct net *net)
 		err = dev_change_net_namespace(dev, &init_net, fb_name);
 		if (err) {
 			pr_emerg("%s: failed to move %s to init_net: %d\n",
-				 __func__, dev->name, err);
+				__func__, dev->name, err);
 			BUG();
 		}
 	}
@@ -6590,6 +7410,16 @@ static int __init net_dev_init(void)
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
+
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+    skbFreeTask = create_skbFreeTask();
+
+    create_fapDrvTask();
+#endif
+
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+    bcm_gso_loopback_devs_init();
+#endif
 	rc = 0;
 out:
 	return rc;
@@ -6605,3 +7435,156 @@ static int __init initialize_hashrnd(void)
 
 late_initcall_sync(initialize_hashrnd);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+
+struct net_device_stats * net_dev_collect_stats(struct net_device *dev_p)
+{
+	BlogStats_t bStats;
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL || dev_p->get_stats_pointer==NULL)
+		return (struct net_device_stats *)NULL;
+
+	dStats_p = (struct net_device_stats *) dev_p->get_stats_pointer(dev_p,'d');
+	cStats_p = (struct net_device_stats *) dev_p->get_stats_pointer(dev_p,'c');
+	bStats_p = (BlogStats_t *)dev_p->get_stats_pointer(dev_p,'b');
+
+	if(dStats_p && cStats_p && bStats_p) {
+
+		memset(&bStats, 0, sizeof(BlogStats_t));
+
+		blog_lock();
+		blog_notify(FETCH_NETIF_STATS, (void*)dev_p,
+				(uint32_t)&bStats, BLOG_PARAM2_NO_CLEAR);
+		blog_unlock();
+
+		memcpy( cStats_p, dStats_p, sizeof(struct net_device_stats) );
+
+#if defined(CONFIG_BCM_KF_EXTSTATS)    
+		/* Handle packet count statistics */
+		cStats_p->rx_packets += ( bStats.rx_packets + bStats_p->rx_packets );
+		cStats_p->tx_packets += ( bStats.tx_packets + bStats_p->tx_packets );
+		cStats_p->multicast  += ( bStats.multicast  + bStats_p->multicast );
+		cStats_p->tx_multicast_packets += ( bStats.tx_multicast_packets + bStats_p->tx_multicast_packets );
+		/* NOTE: There are no broadcast packets in BlogStats_t since the
+		   flowcache doesn't accelerate broadcast.  Thus, they aren't added here */
+
+		/* set byte counts to 0 if the bstat packet counts are non 0 and the
+		   octet counts are 0 */
+		/* Handle RX byte counts */
+		if ( ((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+				((bStats.rx_packets + bStats_p->rx_packets) > 0) )
+		{
+			cStats_p->rx_bytes = 0;
+		}
+		else
+		{
+			cStats_p->rx_bytes   += ( bStats.rx_bytes   + bStats_p->rx_bytes );
+		}
+
+		/* Handle TX byte counts */
+		if ( ((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+				((bStats.tx_packets + bStats_p->tx_packets) > 0) )
+		{
+			cStats_p->tx_bytes = 0;
+		}
+		else
+		{
+			cStats_p->tx_bytes   += ( bStats.tx_bytes   + bStats_p->tx_bytes );
+		}
+
+		/* Handle RX multicast byte counts */
+		if ( ((bStats.rx_multicast_bytes + bStats_p->rx_multicast_bytes) == 0) &&
+				((bStats.multicast + bStats_p->multicast) > 0) )
+		{
+			cStats_p->rx_multicast_bytes = 0;
+		}
+		else
+		{
+			cStats_p->rx_multicast_bytes   += ( bStats.rx_multicast_bytes   + bStats_p->rx_multicast_bytes );
+		}
+
+		/* Handle TX multicast byte counts */
+		if ( ((bStats.tx_multicast_bytes + bStats_p->tx_multicast_bytes) == 0) &&
+				((bStats.tx_multicast_packets + bStats_p->tx_multicast_packets) > 0) )
+		{
+			cStats_p->tx_multicast_bytes = 0;
+		}
+		else
+		{
+			cStats_p->tx_multicast_bytes   += ( bStats.tx_multicast_bytes   + bStats_p->tx_multicast_bytes );
+		}  
+
+#else
+		cStats_p->rx_packets += ( bStats.rx_packets + bStats_p->rx_packets );
+		cStats_p->tx_packets += ( bStats.tx_packets + bStats_p->tx_packets );
+
+		/* set byte counts to 0 if the bstat packet counts are non 0 and the
+		   octet counts are 0 */
+		if ( ((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+				((bStats.rx_packets + bStats_p->rx_packets) > 0) )
+		{
+			cStats_p->rx_bytes = 0;
+		}
+		else
+		{
+			cStats_p->rx_bytes   += ( bStats.rx_bytes   + bStats_p->rx_bytes );
+		}
+
+		if ( ((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+				((bStats.tx_packets + bStats_p->tx_packets) > 0) )
+		{
+			cStats_p->tx_bytes = 0;
+		}
+		else
+		{
+			cStats_p->tx_bytes   += ( bStats.tx_bytes   + bStats_p->tx_bytes );
+		}
+		cStats_p->multicast  += ( bStats.multicast  + bStats_p->multicast );
+#endif
+
+		return cStats_p;
+	} else {
+		printk("!!!!The device should have three stats,bcd,refer br_netdevice.c\r\n");
+		return NULL;
+	}
+	
+
+}
+
+void net_dev_clear_stats(struct net_device * dev_p)
+{
+	BlogStats_t * bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if ( dev_p == (struct net_device *)NULL )
+		return;
+
+	dStats_p = (struct net_device_stats *) dev_p->get_stats_pointer(dev_p,'d');
+	cStats_p = (struct net_device_stats *) dev_p->get_stats_pointer(dev_p,'c');
+	bStats_p = (BlogStats_t *)dev_p->get_stats_pointer(dev_p,'b');
+
+	if(dStats_p && cStats_p && bStats_p) {
+		blog_lock();
+		blog_notify(FETCH_NETIF_STATS, (void*)dev_p, 0, BLOG_PARAM2_DO_CLEAR);
+		blog_unlock();
+
+		memset(bStats_p, 0, sizeof(BlogStats_t));
+		memset(dStats_p, 0, sizeof(struct net_device_stats));
+		memset(cStats_p, 0, sizeof(struct net_device_stats));
+	}
+
+	return;
+}
+
+EXPORT_SYMBOL(net_dev_collect_stats);
+EXPORT_SYMBOL(net_dev_clear_stats);
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+EXPORT_SYMBOL(bcm_vlan_handle_frame_hook);
+#endif
+
diff --git a/net/core/devinfo.c b/net/core/devinfo.c
new file mode 100644
index 0000000000000000000000000000000000000000..58e08aabb2cb95db6b492aa070740d66976b2539
--- /dev/null
+++ b/net/core/devinfo.c
@@ -0,0 +1,389 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard 
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/devinfo.h>
+#include <linux/bcm_colors.h>
+
+/*
+ * Pool of device-info entries: a hash table of collision chains built over
+ * a statically allocated element table; unused elements sit on frlist.
+ */
+typedef struct {
+    DevInfo_t      * htable[ DEVINFO_HTABLE_SIZE ];
+    DevInfo_t        etable[ DEVINFO_MAX_ENTRIES ];
+
+    Dll_t         frlist;           /* List of free devinfo entries */
+} __attribute__((aligned(16))) DeviceInfo_t;
+
+DeviceInfo_t deviceInfo;    /* Global device info context */
+
+/*
+ * Debug plumbing: devinfo_print() logs when devinfo_dbg is non-zero; the
+ * assert macros log and return from the *calling* function on failure.
+ * NOTE(review): these macros are bare if-statements (no do { } while(0)
+ * wrapper) and test '!cond' without parenthesizing the argument, so they
+ * must not be used as the body of an un-braced if/else, and callers must
+ * pass fully parenthesized conditions (current callers do).  In non-debug
+ * builds the asserts compile out entirely, i.e. nothing is checked.
+ */
+#if defined(CC_DEVINFO_SUPPORT_DEBUG)
+#define devinfo_print(fmt, arg...)                                           \
+    if ( devinfo_dbg )                                                       \
+        printk( CLRc "DEVINFO %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define devinfo_assertv(cond)                                                \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "DEVINFO ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return;                                                              \
+    }
+#define devinfo_assertr(cond, rtn)                                           \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "DEVINFO ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return rtn;                                                          \
+    }
+#define DEVINFO_DBG(debug_code)    do { debug_code } while(0)
+#else
+#define devinfo_print(fmt, arg...) DEVINFO_NULL_STMT
+#define devinfo_assertv(cond) DEVINFO_NULL_STMT
+#define devinfo_assertr(cond, rtn) DEVINFO_NULL_STMT
+#define DEVINFO_DBG(debug_code) DEVINFO_NULL_STMT
+#endif
+
+int devinfo_dbg = 0;    /* Runtime switch for devinfo_print() logging */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_alloc
+ * Description  : Allocate a device info entry from the global free list.
+ *                Returns DEVINFO_NULL when the pool is exhausted.
+ *------------------------------------------------------------------------------
+ */
+static DevInfo_t * devinfo_alloc( void )
+{
+    DevInfo_t * dev_p = DEVINFO_NULL;
+
+    if (unlikely(dll_empty(&deviceInfo.frlist)))
+    {
+        devinfo_print("no free entry! No collect now");
+        return dev_p;
+    }
+
+    /* Redundant re-test: the early return above already guarantees the
+     * free list is non-empty here. */
+    if (likely(!dll_empty(&deviceInfo.frlist)))
+    {
+        dev_p = (DevInfo_t*)dll_head_p(&deviceInfo.frlist);
+        dll_delete(&dev_p->node);
+    }
+
+    devinfo_print("idx<%u>", dev_p->entry.idx);
+
+    return dev_p;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _hash
+ * Description  : Computes a simple hash from a 32bit value by folding the
+ *                upper bits down with xor-shifts; caller reduces modulo
+ *                table size.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _hash( uint32_t hash_val )
+{
+    hash_val ^= ( hash_val >> 16 );
+    hash_val ^= ( hash_val >>  8 );
+    hash_val ^= ( hash_val >>  3 );
+
+    return ( hash_val );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _devinfo_hash
+ * Description  : Compute the hash bucket of a MAC from its last four bytes
+ *                (mac[2..5]), i.e. the OUI is mostly ignored.
+ *                NOTE(review): reads &mac[2] as a 32-bit word -- an
+ *                unaligned load whenever mac itself is 4-byte aligned;
+ *                confirm the target CPU tolerates unaligned accesses.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _devinfo_hash( const uint8_t *mac )
+{
+    uint32_t hashix;
+
+    hashix = _hash( (*((uint32_t *) (&(mac[2])))) );
+
+    return hashix % DEVINFO_HTABLE_SIZE;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _devinfo_match
+ * Description  : Checks whether the entry's MAC equals 'mac' (full
+ *                ETH_ALEN-byte compare).  Returns non-zero on match.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _devinfo_match( const DevInfo_t *dev_p,
+                                       const uint8_t *mac )
+{
+    return ( !memcmp(dev_p->mac, mac, ETH_ALEN) );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_hashin
+ * Description  : Insert a new entry at the head of the collision chain of
+ *                hash bucket 'hashix'.
+ *------------------------------------------------------------------------------
+ */
+static void devinfo_hashin( DevInfo_t * dev_p, uint32_t hashix )
+{
+    devinfo_print("enter");
+
+    dev_p->chain_p = deviceInfo.htable[ hashix ];  /* Insert into hash table */
+    deviceInfo.htable[ hashix ] = dev_p;
+}
+
+/*
+ * devinfo_new - allocate an entry, record 'mac' in it and insert it into
+ * bucket 'hashix'.  Returns the new entry's index, or DEVINFO_IX_INVALID
+ * if the element table is depleted.
+ */
+static uint32_t devinfo_new( const uint8_t *mac, uint32_t hashix )
+{
+    DevInfo_t * dev_p;
+
+    devinfo_print("enter");
+
+    dev_p = devinfo_alloc();
+    if ( unlikely(dev_p == DEVINFO_NULL) )
+    {
+        devinfo_print("failed devinfo_alloc");
+        return DEVINFO_IX_INVALID;              /* Element table depletion */
+    }
+
+    memcpy(dev_p->mac, mac, ETH_ALEN);
+    devinfo_hashin(dev_p, hashix);              /* Insert into hash table */
+
+    devinfo_print("idx<%u>", dev_p->entry.idx);
+
+    return dev_p->entry.idx;
+}
+
+#if 0
+/*
+ * NOTE(review): dead code, compiled out.  devinfo_free()/devinfo_unhash()
+ * are the (currently unused) tear-down counterparts of devinfo_new() and
+ * devinfo_hashin(); as the file stands, entries are never returned to the
+ * free list.
+ */
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_free
+ * Description  : Free a device info entry
+ *------------------------------------------------------------------------------
+ */
+void devinfo_free( DevInfo_t * dev_p )
+{
+    dev_p->entry.flags = 0;
+    dev_p->entry.vendor_id = 0;
+    dev_p->entry.os_id = 0;
+    dev_p->entry.class_id = 0;
+    dev_p->entry.type_id = 0;
+    dev_p->entry.dev_id = 0;
+
+    memset(dev_p->mac, 0, ETH_ALEN);
+
+    dll_prepend(&deviceInfo.frlist, &dev_p->node);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_unhash
+ * Description  : Remove a devinfo from the device info at a given hash index.
+ *------------------------------------------------------------------------------
+ */
+static void devinfo_unhash(DevInfo_t * dev_p, uint32_t hashix)
+{
+    register DevInfo_t * hDev_p = deviceInfo.htable[hashix];
+
+    if ( unlikely(hDev_p == DEVINFO_NULL) )
+    {
+        devinfo_print( "ERROR: deviceInfo.htable[%u] is NULL", hashix );
+        goto devinfo_notfound;
+    }
+
+    if ( likely(hDev_p == dev_p) )                /* At head */
+    {
+        deviceInfo.htable[ hashix ] = dev_p->chain_p;  /* Delete at head */
+    }
+    else
+    {
+        uint32_t found = 0;
+
+        /* Traverse the single linked hash collision chain */
+        for ( hDev_p = deviceInfo.htable[ hashix ];
+              likely(hDev_p->chain_p != DEVINFO_NULL);
+              hDev_p = hDev_p->chain_p )
+        {
+            if ( hDev_p->chain_p == dev_p )
+            {
+                hDev_p->chain_p = dev_p->chain_p;
+                found = 1;
+                break;
+            }
+        }
+
+        if ( unlikely(found == 0) )
+        {
+            devinfo_print( "ERROR:deviceInfo.htable[%u] find failure", hashix );
+            goto devinfo_notfound;
+        }
+    }
+
+    return; /* SUCCESS */
+
+devinfo_notfound:
+    devinfo_print( "not found: hash<%u>", hashix );
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_lookup
+ * Description  : Given a mac, return the index of its device-info entry,
+ *                allocating a new entry on first sight.
+ *                NOTE(review): devinfo_new() returns uint32_t but the
+ *                result is stored in a uint16_t -- confirm
+ *                DEVINFO_IX_INVALID fits in 16 bits.
+ *                NOTE(review): no locking visible here; caller is
+ *                presumably responsible for serializing table access.
+ *------------------------------------------------------------------------------
+ */
+uint16_t devinfo_lookup( const uint8_t *mac )
+{
+    DevInfo_t * dev_p;
+    uint16_t idx;
+    uint32_t hashix;
+
+    hashix = _devinfo_hash(mac);
+
+    devinfo_print("hashix<%u> mac<%02x:%02x:%02x:%02x:%02x:%02x>", 
+                  hashix, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+    /* Walk the collision chain looking for an exact MAC match. */
+    for ( dev_p = deviceInfo.htable[ hashix ]; dev_p != DEVINFO_NULL;
+          dev_p = dev_p->chain_p)
+    {
+        /* NB: this debug line prints the mac being searched for, not
+         * dev_p->mac of the element under inspection. */
+        devinfo_print("elem: idx<%u> mac<%02x:%02x:%02x:%02x:%02x:%02x>",
+                      dev_p->entry.idx,
+                      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+        if (likely( _devinfo_match(dev_p, mac) ))
+        {
+            devinfo_print("idx<%u>", dev_p->entry.idx);
+            return dev_p->entry.idx;
+        }
+    }
+
+    /* New device found, alloc an entry */
+    idx = devinfo_new(mac, hashix);
+
+    devinfo_print("idx<%u>", idx);
+
+    return idx;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_get
+ * Description  : Given devinfo index, copy that entry's fields into *entry
+ *                (entry is zeroed first; the mac is not copied -- see
+ *                devinfo_getmac()).
+ *                NOTE(review): idx is not range-checked against
+ *                DEVINFO_MAX_ENTRIES; callers must pass a valid index.
+ *------------------------------------------------------------------------------
+ */
+void devinfo_get( uint16_t idx, DevInfoEntry_t *entry )
+{
+    DevInfo_t * dev_p;
+
+    memset(entry, 0, sizeof(DevInfoEntry_t));
+
+    dev_p = &deviceInfo.etable[idx];
+    entry->idx = dev_p->entry.idx;
+    entry->flags = dev_p->entry.flags;
+    entry->vendor_id = dev_p->entry.vendor_id;
+    entry->os_id = dev_p->entry.os_id;
+    entry->class_id = dev_p->entry.class_id;
+    entry->type_id = dev_p->entry.type_id;
+    entry->dev_id = dev_p->entry.dev_id;
+
+    devinfo_print("idx<%u> flag<%u> ven<%u> os<%u> class<%u> type<%u> dev<%u>",
+                  entry->idx, entry->flags, entry->vendor_id, entry->os_id,
+                  entry->class_id, entry->type_id, entry->dev_id);
+
+    return;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_set
+ * Description  : Store *entry's classification fields into the table slot
+ *                selected by entry->idx (idx itself and the mac are left
+ *                unchanged).
+ *                NOTE(review): entry->idx is not validated here.
+ *------------------------------------------------------------------------------
+ */
+void devinfo_set( const DevInfoEntry_t *entry )
+{
+    DevInfo_t * dev_p;
+
+    devinfo_print("idx<%u> flag<%u> ven<%u> os<%u> class<%u> type<%u> dev<%u>",
+                  entry->idx, entry->flags, entry->vendor_id, entry->os_id,
+                  entry->class_id, entry->type_id, entry->dev_id);
+
+    dev_p = &deviceInfo.etable[entry->idx];
+    dev_p->entry.flags = entry->flags;
+    dev_p->entry.vendor_id = entry->vendor_id;
+    dev_p->entry.os_id = entry->os_id;
+    dev_p->entry.class_id = entry->class_id;
+    dev_p->entry.type_id = entry->type_id;
+    dev_p->entry.dev_id = entry->dev_id;
+
+    return;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : devinfo_getmac
+ * Description  : Given devinfo index, copy the entry's MAC into 'mac'
+ *                (buffer must hold ETH_ALEN bytes).  idx is not validated.
+ *------------------------------------------------------------------------------
+ */
+void devinfo_getmac( uint16_t idx, uint8_t *mac )
+{
+    DevInfo_t * dev_p;
+
+    dev_p = &deviceInfo.etable[idx];
+    memcpy(mac, dev_p->mac, ETH_ALEN);
+
+    /* NB: '%2x' (no zero pad) differs from the '%02x' used elsewhere. */
+    devinfo_print("idx<%d> mac<%2x:%2x:%2x:%2x:%2x:%2x>", idx, 
+            mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 
+}
+
+/*
+ * devinfo_init - reset the pool and build the free list.  The entry at
+ * index DEVINFO_IX_INVALID is initialized but kept off the free list so
+ * that index can serve as the "invalid" sentinel -- hence
+ * DEVINFO_MAX_ENTRIES-1 usable entries.  Always returns 0.
+ */
+int devinfo_init( void )
+{
+    register int id;
+    DevInfo_t * dev_p;
+
+    memset( (void*)&deviceInfo, 0, sizeof(DeviceInfo_t) );
+
+    /* Initialize list */
+    dll_init( &deviceInfo.frlist );
+
+    /* Initialize each devinfo entry and insert into free list */
+    for ( id=DEVINFO_IX_INVALID; id < DEVINFO_MAX_ENTRIES; id++ )
+    {
+        dev_p = &deviceInfo.etable[id];
+        dev_p->entry.idx = id;
+
+        if ( unlikely(id == DEVINFO_IX_INVALID) )
+            continue;           /* Exclude this entry from the free list */
+
+        dll_append(&deviceInfo.frlist, &dev_p->node);/* Insert into free list */
+    }
+
+    /* NB: casts a pointer to int for printing -- assumes 32-bit pointers. */
+    DEVINFO_DBG( printk( "DEVINFO devinfo_dbg<0x%08x> = %d\n"
+                         "%d Available entries\n",
+                         (int)&devinfo_dbg, devinfo_dbg,
+                         DEVINFO_MAX_ENTRIES-1 ); );
+    
+    return 0;
+}
+
+EXPORT_SYMBOL(devinfo_init);
+EXPORT_SYMBOL(devinfo_lookup);
+EXPORT_SYMBOL(devinfo_get);
+EXPORT_SYMBOL(devinfo_getmac);
+EXPORT_SYMBOL(devinfo_set);
+#endif /* if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI) */
diff --git a/net/core/dpistats.c b/net/core/dpistats.c
new file mode 100644
index 0000000000000000000000000000000000000000..1e09f7fcdf48ebc7d76cf75fa060a794066eaea1
--- /dev/null
+++ b/net/core/dpistats.c
@@ -0,0 +1,503 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard 
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/dpistats.h>
+#include <linux/bcm_colors.h>
+#include <linux/devinfo.h>
+#include <linux/seq_file.h>
+
+/*
+ * Pool of DPI-stats entries: hash table of collision chains over a static
+ * element table; allocated entries are also linked on usedlist (for
+ * iteration by dpistats_show()/reset), free ones on frlist.
+ */
+typedef struct {
+    DpiStats_t      * htable[ DPISTATS_HTABLE_SIZE ];
+    DpiStats_t        etable[ DPISTATS_MAX_ENTRIES ];
+
+    Dll_t         usedlist;           /* List of used dpistats entries */
+    Dll_t         frlist;           /* List of free dpistats entries */
+} __attribute__((aligned(16))) DpiStatistic_t;
+
+DpiStatistic_t dpistats;    /* Global dpi stats context */
+
+/*
+ * Debug plumbing, mirroring devinfo.c: dpistats_print() logs when
+ * dpistats_dbg is non-zero; the assert macros log and return from the
+ * calling function on failure.
+ * NOTE(review): same macro-hygiene caveats as devinfo.c -- bare
+ * if-statements without do { } while(0) and an unparenthesized '!cond';
+ * callers must pass parenthesized conditions, and in non-debug builds the
+ * asserts compile out entirely.
+ */
+#if defined(CC_DPISTATS_SUPPORT_DEBUG)
+#define dpistats_print(fmt, arg...)                                           \
+    if ( dpistats_dbg )                                                       \
+        printk( CLRc "DPISTATS %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define dpistats_assertv(cond)                                                \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "DPISTATS ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return;                                                              \
+    }
+#define dpistats_assertr(cond, rtn)                                           \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "DPISTATS ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return rtn;                                                          \
+    }
+#define DPISTATS_DBG(debug_code)    do { debug_code } while(0)
+#else
+#define dpistats_print(fmt, arg...) DPISTATS_NULL_STMT
+#define dpistats_assertv(cond) DPISTATS_NULL_STMT
+#define dpistats_assertr(cond, rtn) DPISTATS_NULL_STMT
+#define DPISTATS_DBG(debug_code) DPISTATS_NULL_STMT
+#endif
+
+int dpistats_dbg = 0;    /* Runtime switch for dpistats_print() logging */
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_alloc
+ * Description  : Allocate a dpi stats entry from the global free list.
+ *                Returns DPISTATS_NULL when the pool is exhausted.
+ *------------------------------------------------------------------------------
+ */
+static DpiStats_t * dpistats_alloc( void )
+{
+    DpiStats_t * stats_p = DPISTATS_NULL;
+
+    if (unlikely(dll_empty(&dpistats.frlist)))
+    {
+        dpistats_print("no free entry! No collect now");
+        return stats_p;
+    }
+
+    /* Redundant re-test: the early return above already guarantees the
+     * free list is non-empty here. */
+    if (likely(!dll_empty(&dpistats.frlist)))
+    {
+        stats_p = (DpiStats_t*)dll_head_p(&dpistats.frlist);
+        dll_delete(&stats_p->node);
+    }
+
+    dpistats_print("idx<%u>", stats_p->entry.idx);
+
+    return stats_p;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _hash
+ * Description  : Computes a simple hash from a 32bit value via xor-shifts.
+ *                (file-local duplicate of the identical helper in
+ *                devinfo.c; both are static so they do not clash)
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _hash( uint32_t hash_val )
+{
+    hash_val ^= ( hash_val >> 16 );
+    hash_val ^= ( hash_val >>  8 );
+    hash_val ^= ( hash_val >>  3 );
+
+    return ( hash_val );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _dpistats_hash
+ * Description  : Compute the hash bucket for an (app_id, dev_key) pair;
+ *                the two keys are simply summed before hashing.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _dpistats_hash( unsigned int app_id, uint16_t dev_key )
+{
+    uint32_t hashix;
+
+    hashix = _hash( app_id + dev_key );
+
+    return hashix % DPISTATS_HTABLE_SIZE;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _dpistats_match
+ * Description  : Checks whether both app_id and dev_key match.
+ *                Returns non-zero on match.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _dpistats_match( const dpi_info_t *elem_p,
+                                        const dpi_info_t *res_p )
+{
+    return ( (elem_p->app_id == res_p->app_id) &&
+             (elem_p->dev_key == res_p->dev_key) );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_hashin
+ * Description  : Insert a new entry into the dpistats at a given hash index:
+ *                prepended to the global usedlist (for iteration) and to the
+ *                head of the bucket's collision chain.
+ *------------------------------------------------------------------------------
+ */
+static void dpistats_hashin( DpiStats_t * stats_p, uint32_t hashix )
+{
+    dpistats_print("enter");
+
+    dll_prepend(&dpistats.usedlist, &stats_p->node);
+    stats_p->chain_p = dpistats.htable[ hashix ];  /* Insert into hash table */
+    dpistats.htable[ hashix ] = stats_p;
+}
+
+/*
+ * dpistats_new - allocate an entry, record the (app_id, dev_key) pair and
+ * insert it into bucket 'hashix'.  Returns the new entry's index, or
+ * DPISTATS_IX_INVALID if the element table is depleted.
+ */
+static uint32_t dpistats_new( const dpi_info_t *res_p, uint32_t hashix )
+{
+    DpiStats_t * stats_p;
+
+    dpistats_print("enter");
+
+    stats_p = dpistats_alloc();
+    if ( unlikely(stats_p == DPISTATS_NULL) )
+    {
+        dpistats_print("failed dpistats_alloc");
+        return DPISTATS_IX_INVALID;              /* Element table depletion */
+    }
+
+    stats_p->entry.result.app_id = res_p->app_id;
+    stats_p->entry.result.dev_key = res_p->dev_key;
+
+    dpistats_hashin(stats_p, hashix);              /* Insert into hash table */
+
+    dpistats_print("idx<%u>", stats_p->entry.idx);
+
+    return stats_p->entry.idx;
+}
+
+#if 0
+/*
+ * NOTE(review): dead code, compiled out, and apparently copy-pasted from
+ * devinfo.c -- dpistats_free() clears DevInfo-style fields
+ * (entry.vendor_id, entry.os_id, mac[], ...) that do not appear on
+ * DpiStats_t entries elsewhere in this file; it would need rework before
+ * being enabled.
+ */
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_free
+ * Description  : Free a device info entry
+ *------------------------------------------------------------------------------
+ */
+void dpistats_free( DpiStats_t * dev_p )
+{
+    dev_p->entry.flags = 0;
+    dev_p->entry.vendor_id = 0;
+    dev_p->entry.os_id = 0;
+    dev_p->entry.class_id = 0;
+    dev_p->entry.type_id = 0;
+    dev_p->entry.dev_id = 0;
+
+    memset(dev_p->mac, 0, ETH_ALEN);
+
+    dll_prepend(&dpistats.frlist, &dev_p->node);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_unhash
+ * Description  : Remove a dpistats from the device info at a given hash index.
+ *------------------------------------------------------------------------------
+ */
+static void dpistats_unhash(DpiStats_t * dev_p, uint32_t hashix)
+{
+    register DpiStats_t * hDev_p = dpistats.htable[hashix];
+
+    if ( unlikely(hDev_p == DPISTATS_NULL) )
+    {
+        dpistats_print( "dpistats.htable[%u] is NULL", hashix );
+        goto dpistats_notfound;
+    }
+
+    if ( likely(hDev_p == dev_p) )                /* At head */
+    {
+        dpistats.htable[ hashix ] = dev_p->chain_p;  /* Delete at head */
+    }
+    else
+    {
+        uint32_t found = 0;
+
+        /* Traverse the single linked hash collision chain */
+        for ( hDev_p = dpistats.htable[ hashix ];
+              likely(hDev_p->chain_p != DPISTATS_NULL);
+              hDev_p = hDev_p->chain_p )
+        {
+            if ( hDev_p->chain_p == dev_p )
+            {
+                hDev_p->chain_p = dev_p->chain_p;
+                found = 1;
+                break;
+            }
+        }
+
+        if ( unlikely(found == 0) )
+        {
+            dpistats_print( "dpistats.htable[%u] find failure", hashix );
+            goto dpistats_notfound;
+        }
+    }
+
+    return; /* SUCCESS */
+
+dpistats_notfound:
+    dpistats_print( "not found: hash<%u>\n", hashix );
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_lookup
+ * Description  : Given appID and devKey, return the index of the matching
+ *                stats entry, allocating a new one on first sight.
+ *                Returns DPISTATS_IX_INVALID when allocation fails.
+ *                NOTE(review): no locking visible here; caller is
+ *                presumably responsible for serializing table access.
+ *------------------------------------------------------------------------------
+ */
+uint32_t dpistats_lookup( const dpi_info_t *res_p )
+{
+    DpiStats_t * stats_p;
+    uint32_t idx;
+    uint32_t hashix;
+
+    hashix = _dpistats_hash(res_p->app_id, res_p->dev_key);
+
+    dpistats_print("hashix<%u> appID<%06x> devkey<%u>",
+                  hashix, res_p->app_id, res_p->dev_key);
+
+    /* Walk the collision chain looking for an (app_id, dev_key) match. */
+    for ( stats_p = dpistats.htable[ hashix ]; stats_p != DPISTATS_NULL;
+          stats_p = stats_p->chain_p)
+    {
+        dpistats_print("elem: idx<%u> appID<%06x> devkey<%u>",
+                      stats_p->entry.idx, stats_p->entry.result.app_id,
+                      stats_p->entry.result.dev_key);
+
+        if (likely( _dpistats_match(&stats_p->entry.result, res_p) ))
+        {
+            dpistats_print("idx<%u>", stats_p->entry.idx);
+            return stats_p->entry.idx;
+        }
+    }
+
+    /* New device found, alloc an entry */
+    idx = dpistats_new(res_p, hashix);
+
+    dpistats_print("idx<%u>", idx);
+
+    return idx;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_info
+ * Description  : when querying stats, get current statistic of one conntrack.
+ *                Two modes:
+ *                - stats_p != NULL: accumulate the conntrack's up/down
+ *                  pkts/bytes into etable[idx], keeping the newest ts.
+ *                - stats_p == NULL: start of a new query -- zero the live
+ *                  pkts/bytes of every entry on usedlist (ts and the
+ *                  evict_up/evict_dn accumulators are left untouched).
+ *------------------------------------------------------------------------------
+ */
+void dpistats_info( uint32_t idx, const DpiStatsEntry_t *stats_p )
+{
+    /*
+     * Null stats_p means beginning of query: reset stats.
+     */
+    if (!(stats_p == (DpiStatsEntry_t *)NULL))
+    {
+        DpiStats_t *elem_p;
+        CtkStats_t *ctk1_p;
+        const CtkStats_t *ctk2_p;
+
+        dpistats_print("idx<%d> appID<%06x> dev_key<%u>", idx, 
+                        stats_p->result.app_id, stats_p->result.dev_key);
+
+        /* Debug builds only: returns if idx is the invalid sentinel. */
+        dpistats_assertv( (idx != DPISTATS_IX_INVALID) );
+
+        elem_p = &dpistats.etable[idx];
+
+        /* Fold upstream counters; ts tracks the most recent timestamp. */
+        ctk1_p = &elem_p->entry.upstream;
+        ctk2_p = &stats_p->upstream;
+
+        ctk1_p->pkts += ctk2_p->pkts;
+        ctk1_p->bytes += ctk2_p->bytes;
+        if (ctk1_p->ts < ctk2_p->ts) ctk1_p->ts = ctk2_p->ts;
+
+        /* Same for downstream. */
+        ctk1_p = &elem_p->entry.dnstream;
+        ctk2_p = &stats_p->dnstream;
+
+        ctk1_p->pkts += ctk2_p->pkts;
+        ctk1_p->bytes += ctk2_p->bytes;
+        if (ctk1_p->ts < ctk2_p->ts) ctk1_p->ts = ctk2_p->ts;
+    }
+    else
+    {
+        Dll_t  *tmp_p;
+        Dll_t  *list_p;
+        DpiStats_t *elem_p;
+
+        dpistats_print("Reset");
+
+        list_p = &dpistats.usedlist;
+
+        if (!dll_empty(list_p))
+        {
+            dll_for_each(tmp_p, list_p) 
+            {
+                CtkStats_t *ctk_p;
+                /* node is the first member, so the Dll_t* is the entry. */
+                elem_p = (DpiStats_t *)tmp_p;
+
+                dpistats_print("idx<%d> appID<%06x> dev_key<%u>",
+                                elem_p->entry.idx, elem_p->entry.result.app_id,
+                                elem_p->entry.result.dev_key); 
+
+                ctk_p = &elem_p->entry.upstream;
+                ctk_p->pkts = 0;
+                ctk_p->bytes = 0;
+
+                ctk_p = &elem_p->entry.dnstream;
+                ctk_p->pkts = 0;
+                ctk_p->bytes = 0;
+            }
+        }
+    }
+    dpistats_print("exit");
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_update
+ * Description  : when a conntrack evicts, record the statistics: fold its
+ *                final up/down pkts/bytes into the entry's evict_up /
+ *                evict_dn accumulators (kept separate from the live
+ *                upstream/dnstream counters), keeping the newest ts.
+ *                The assert is debug-build only.
+ *------------------------------------------------------------------------------
+ */
+void dpistats_update( uint32_t idx, const DpiStatsEntry_t *stats_p )
+{
+    DpiStats_t *elem_p;
+    CtkStats_t *ctk1_p;
+    const CtkStats_t *ctk2_p;
+
+    dpistats_print("idx<%d> uppkt<%lu> upbyte<%llu> upts<%lu> "
+                   "dnpkt<%lu> dnbyte<%llu> dnts<%lu>", idx, 
+                   stats_p->upstream.pkts, stats_p->upstream.bytes, 
+                   stats_p->upstream.ts, stats_p->dnstream.pkts, 
+                   stats_p->dnstream.bytes, stats_p->dnstream.ts); 
+
+    dpistats_assertv( ((idx != DPISTATS_IX_INVALID) && (stats_p != NULL)) );
+
+    elem_p = &dpistats.etable[idx];
+
+    ctk1_p = &elem_p->evict_up;
+    ctk2_p = &stats_p->upstream;
+
+    ctk1_p->pkts += ctk2_p->pkts;
+    ctk1_p->bytes += ctk2_p->bytes;
+    if (ctk1_p->ts < ctk2_p->ts) ctk1_p->ts = ctk2_p->ts;
+
+    ctk1_p = &elem_p->evict_dn;
+    ctk2_p = &stats_p->dnstream;
+
+    ctk1_p->pkts += ctk2_p->pkts;
+    ctk1_p->bytes += ctk2_p->bytes;
+    if (ctk1_p->ts < ctk2_p->ts) ctk1_p->ts = ctk2_p->ts;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : dpistats_show
+ * Description  : show dpi statistics via seq_file: one line per used entry
+ *                with app_id, the device's MAC and classification fields
+ *                (or "NoMac" when dev_key is invalid), then upstream and
+ *                downstream pkts/bytes totals (live + evicted).
+ *------------------------------------------------------------------------------
+ */
+void dpistats_show( struct seq_file *s )
+{
+    Dll_t  *tmp_p;
+    Dll_t  *list_p;
+    DpiStats_t *elem_p;
+
+    dpistats_print("enter");
+
+    list_p = &dpistats.usedlist;
+
+    if (!dll_empty(list_p))
+    {
+        dll_for_each(tmp_p, list_p) 
+        {
+            CtkStats_t *ctk_p, *evict_p;
+            elem_p = (DpiStats_t *)tmp_p;
+
+            seq_printf(s, "%08x ", elem_p->entry.result.app_id);
+
+            if (elem_p->entry.result.dev_key != DEVINFO_IX_INVALID)
+            {
+                uint8_t mac[6];
+                DevInfoEntry_t entry;
+
+                /* Pull MAC + classification from the devinfo table. */
+                devinfo_getmac(elem_p->entry.result.dev_key, mac);
+                devinfo_get(elem_p->entry.result.dev_key, &entry);
+
+                seq_printf(s, "%02x:%02x:%02x:%02x:%02x:%02x ",
+                        mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+                seq_printf(s, "%u %u %u %u %u ",
+                        entry.vendor_id, entry.os_id, entry.class_id,
+                        entry.type_id, entry.dev_id);
+            }
+            else
+            {
+                seq_printf(s, "NoMac ");
+            }
+
+            /* Totals = live counters + counters saved at eviction. */
+            ctk_p = &elem_p->entry.upstream;
+            evict_p = &elem_p->evict_up;
+/* NOTE(review): commented-out printk lines below are leftover debug code. */
+//            printk("%lu %llu %lu ", ctk_p->pkts + evict_p->pkts,
+//                    ctk_p->bytes + evict_p->bytes,
+//                    ctk_p->ts);
+            seq_printf(s, "%lu %llu ", ctk_p->pkts + evict_p->pkts,
+                    ctk_p->bytes + evict_p->bytes);
+            
+            ctk_p = &elem_p->entry.dnstream;
+            evict_p = &elem_p->evict_dn;
+//            printk("%lu %llu %lu ", ctk_p->pkts + evict_p->pkts,
+//                    ctk_p->bytes + evict_p->bytes,
+//                    ctk_p->ts);
+            seq_printf(s, "%lu %llu ", ctk_p->pkts + evict_p->pkts,
+                    ctk_p->bytes + evict_p->bytes);
+
+//            printk("%x ", elem_p->entry.result.flags);
+            seq_printf(s, "\n");
+        }
+    }
+}
+
+/*
+ * dpistats_init - reset the pool and build the free/used lists.  The entry
+ * at index DPISTATS_IX_INVALID is initialized but kept off the free list
+ * so that index can serve as the "invalid" sentinel -- hence
+ * DPISTATS_MAX_ENTRIES-1 usable entries.  Always returns 0.
+ */
+int dpistats_init( void )
+{
+    register uint32_t id;
+    DpiStats_t * stats_p;
+
+    memset( (void*)&dpistats, 0, sizeof(DpiStatistic_t) );
+
+    /* Initialize list */
+    dll_init( &dpistats.frlist );
+    dll_init( &dpistats.usedlist );
+
+    /* Initialize each dpistats entry and insert into free list */
+    for ( id=DPISTATS_IX_INVALID; id < DPISTATS_MAX_ENTRIES; id++ )
+    {
+        stats_p = &dpistats.etable[id];
+        stats_p->entry.idx = id;
+
+        if ( unlikely(id == DPISTATS_IX_INVALID) )
+            continue;           /* Exclude this entry from the free list */
+
+        dll_append(&dpistats.frlist, &stats_p->node);/* Insert into free list */
+    }
+
+    /* NB: casts a pointer to int for printing -- assumes 32-bit pointers. */
+    DPISTATS_DBG( printk( "DPISTATS dpistats_dbg<0x%08x> = %d\n"
+                         "%d Available entries\n",
+                         (int)&dpistats_dbg, dpistats_dbg,
+                         DPISTATS_MAX_ENTRIES-1 ); );
+    
+    return 0;
+}
+
+EXPORT_SYMBOL(dpistats_init);
+EXPORT_SYMBOL(dpistats_info);
+EXPORT_SYMBOL(dpistats_update);
+EXPORT_SYMBOL(dpistats_show);
+EXPORT_SYMBOL(dpistats_lookup);
+#endif /* if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI) */
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6d6d7d25caaa30319870c6b1d62b38eb8972e543..8b7233dd196c05fd75ba60e26e63d6b1346cc1ca 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1286,7 +1286,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
+#if defined(CONFIG_BCM_KF_ETHTOOL)   
+	netdev_features_t old_features;
+#else
 	u32 old_features;
+#endif
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a225089df5b6693a715f95ab89de71be4a400338..d4914d9fc22232706617d8c9685b5208a39665cb 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -35,7 +35,11 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 		struct iphdr _iph;
 ip:
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+		if (!iph || iph->ihl < 5)
+#else
 		if (!iph)
+#endif
 			return false;
 
 		if (ip_is_fragment(iph))
diff --git a/net/core/flwstif.c b/net/core/flwstif.c
new file mode 100644
index 0000000000000000000000000000000000000000..3b10f584be9b3f76e88fdeefdb8a878da71222cd
--- /dev/null
+++ b/net/core/flwstif.c
@@ -0,0 +1,90 @@
+/*--------------------------------------*/
+/* flwstif.h and flwstif.c for Linux OS */
+/*--------------------------------------*/
+
+/* 
+* <:copyright-BRCM:2014:DUAL/GPL:standard
+* 
+*    Copyright (c) 2014 Broadcom Corporation
+*    All Rights Reserved
+* 
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed
+* to you under the terms of the GNU General Public License version 2
+* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+* with the following added to such license:
+* 
+*    As a special exception, the copyright holders of this software give
+*    you permission to link this software with independent modules, and
+*    to copy and distribute the resulting executable under terms of your
+*    choice, provided that you also meet, for each linked independent
+*    module, the terms and conditions of the license of that module.
+*    An independent module is a module which is not derived from this
+*    software.  The special exception does not apply to any modifications
+*    of the software.
+* 
+* Not withstanding the above, under no circumstances may you combine
+* this software in any way with any other Broadcom software provided
+* under a license other than the GPL, without Broadcom's express prior
+* written consent.
+* 
+:>
+*/
+
+#include <linux/flwstif.h>
+#include <linux/module.h>
+
+static flwStIfGetHook_t flwStIfGet_hook_g = NULL;
+static flwStIfPushHook_t flwStIfPush_hook_g = NULL; 
+
+uint32_t flwStIf_request( FlwStIfReq_t req, void *ptr, uint32_t param1,
+                          uint32_t param2, uint32_t param3 )
+{
+    int ret=0;
+    switch (req)
+    {
+        case FLWSTIF_REQ_GET:
+            if (flwStIfGet_hook_g)
+            {
+                ret = flwStIfGet_hook_g(param1, (FlwStIf_t *)ptr);
+            }
+            else
+            {
+                ret = -1;
+            }
+            break;
+        case FLWSTIF_REQ_PUSH:
+            if (flwStIfPush_hook_g)
+            {
+                ret = flwStIfPush_hook_g(ptr, (void *)param1, param2, 
+                                         (FlwStIf_t *)param3);
+            }
+            else
+            {
+                ret = -1;
+            }
+            break;
+        default:
+            printk("Invalid Flw Stats Req type %d\n", (int)req);
+            ret = -1;
+            break;
+    }
+    return ret;
+}
+
+void flwStIf_bind( flwStIfGetHook_t flwStIfGetHook,
+                   flwStIfPushHook_t flwStIfPushHook )
+{
+    if (flwStIfGetHook)
+    {
+        flwStIfGet_hook_g = flwStIfGetHook;
+    }
+
+    if (flwStIfPushHook)
+    {
+        flwStIfPush_hook_g = flwStIfPushHook;
+    }
+}
+
+EXPORT_SYMBOL(flwStIf_bind);
+EXPORT_SYMBOL(flwStIf_request);
diff --git a/net/core/gbpm.c b/net/core/gbpm.c
new file mode 100644
index 0000000000000000000000000000000000000000..6613a78d7ce1d3a8897da281dc1a9f27611eea04
--- /dev/null
+++ b/net/core/gbpm.c
@@ -0,0 +1,374 @@
+/*
+<:copyright-BRCM:2009:DUAL/GPL:standard
+
+   Copyright (c) 2009 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+
+
+/*
+ *******************************************************************************
+ * File Name  : gbpm.c
+ *******************************************************************************
+ */
+#if (defined(CONFIG_BCM_BPM) || defined(CONFIG_BCM_BPM_MODULE))
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/gbpm.h>
+#include <linux/bcm_colors.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/ip.h>
+#include <linux/bcm_log_mod.h>
+#include <linux/bcm_log.h>
+#include <linux/bcm_realtime.h>
+
+/* Debug macros */
+int gbpm_debug_g = 0;
+
+#if defined(CC_GBPM_SUPPORT_DEBUG)
+#define gbpm_print(fmt, arg...)                                         \
+    if ( gbpm_debug_g )                                                     \
+    printk( CLRc "GBPM %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define gbpm_assertv(cond)                                              \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "GBPM ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return;                                                         \
+    }
+#define gbpm_assertr(cond, rtn)                                         \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "GBPM ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return rtn;                                                     \
+    }
+#define GBPM_DBG(debug_code)    do { debug_code } while(0)
+#else
+#define gbpm_print(fmt, arg...) NULL_STMT
+#define gbpm_assertv(cond)      NULL_STMT
+#define gbpm_assertr(cond, rtn) NULL_STMT
+#define GBPM_DBG(debug_code)    NULL_STMT
+#endif
+
+#define gbpm_error(fmt, arg...)                                         \
+    printk( CLRerr "GBPM ERROR %s :" fmt CLRnl, __FUNCTION__, ##arg)
+
+#undef  GBPM_DECL
+#define GBPM_DECL(x)        #x,         /* string declaration */
+
+
+/* global Buffer Pool Manager (BPM) */
+gbpm_status_hook_t gbpm_enet_status_hook_g = (gbpm_status_hook_t)NULL;
+EXPORT_SYMBOL(gbpm_enet_status_hook_g);
+
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+gbpm_status_hook_t gbpm_fap_status_hook_g = (gbpm_status_hook_t)NULL;
+
+gbpm_thresh_hook_t gbpm_fap_thresh_hook_g = (gbpm_thresh_hook_t)NULL;
+gbpm_thresh_hook_t gbpm_fap_enet_thresh_hook_g = (gbpm_thresh_hook_t)NULL;
+gbpm_thresh_hook_t gbpm_enet_thresh_hook_g = (gbpm_thresh_hook_t)NULL;
+gbpm_upd_buf_lvl_hook_t gbpm_fap_upd_buf_lvl_hook_g = (gbpm_upd_buf_lvl_hook_t)NULL;
+
+EXPORT_SYMBOL(gbpm_fap_status_hook_g);
+EXPORT_SYMBOL(gbpm_fap_thresh_hook_g);
+EXPORT_SYMBOL(gbpm_fap_enet_thresh_hook_g);
+EXPORT_SYMBOL(gbpm_enet_thresh_hook_g);
+EXPORT_SYMBOL(gbpm_fap_upd_buf_lvl_hook_g);
+#endif
+
+
+
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+gbpm_status_hook_t gbpm_xtm_status_hook_g = (gbpm_status_hook_t)NULL;
+gbpm_thresh_hook_t gbpm_xtm_thresh_hook_g = (gbpm_thresh_hook_t)NULL;
+
+EXPORT_SYMBOL(gbpm_xtm_status_hook_g);
+EXPORT_SYMBOL(gbpm_xtm_thresh_hook_g);
+#endif
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Default hooks.
+ * FIXME: Group these hooks into a structure and change gbpm_bind to use
+ *        a structure.
+ *------------------------------------------------------------------------------
+ */
+static gbpm_dyn_buf_lvl_hook_t gbpm_dyn_buf_lvl_hook_g = 
+                                    (gbpm_dyn_buf_lvl_hook_t ) NULL;
+static gbpm_alloc_mult_hook_t gbpm_alloc_mult_hook_g = (gbpm_alloc_mult_hook_t) NULL;
+static gbpm_free_mult_hook_t gbpm_free_mult_hook_g=(gbpm_free_mult_hook_t) NULL;
+static gbpm_alloc_hook_t gbpm_alloc_hook_g = (gbpm_alloc_hook_t) NULL;
+static gbpm_free_hook_t gbpm_free_hook_g = (gbpm_free_hook_t) NULL;
+static gbpm_resv_rx_hook_t gbpm_resv_rx_hook_g = (gbpm_resv_rx_hook_t) NULL;
+static gbpm_unresv_rx_hook_t gbpm_unresv_rx_hook_g=(gbpm_unresv_rx_hook_t) NULL;
+static gbpm_get_total_bufs_hook_t gbpm_get_total_bufs_hook_g=(gbpm_get_total_bufs_hook_t) NULL;
+static gbpm_get_avail_bufs_hook_t gbpm_get_avail_bufs_hook_g=(gbpm_get_avail_bufs_hook_t) NULL;
+static gbpm_get_max_dyn_bufs_hook_t gbpm_get_max_dyn_bufs_hook_g=(gbpm_get_max_dyn_bufs_hook_t) NULL;
+
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+static void gbpm_do_work(struct work_struct *);
+static DECLARE_WORK(gbpm_work, gbpm_do_work);
+static struct workqueue_struct *gbpm_workqueue;
+
+extern gbpm_evt_hook_t gbpm_fap_evt_hook_g;
+
+/* Do the BPM work */
+void gbpm_do_work(struct work_struct *work_unused)
+{
+    /* process BPM pending events */
+    if ( likely(gbpm_fap_evt_hook_g != (gbpm_evt_hook_t)NULL) )
+        gbpm_fap_evt_hook_g();
+}
+EXPORT_SYMBOL(gbpm_do_work);
+
+/* Add the BPM work */
+void gbpm_queue_work(void)
+{
+	queue_work(gbpm_workqueue, &gbpm_work);
+}
+
+EXPORT_SYMBOL(gbpm_queue_work);
+#endif
+
+
+/* Stub functions */
+int gbpm_dyn_buf_lvl_stub( void ) 
+{
+    return 1;
+}
+
+int gbpm_alloc_mult_stub( uint32_t num, uint32_t *buf_p )
+{
+    return GBPM_ERROR;
+}
+
+void gbpm_free_mult_stub( uint32_t num, uint32_t *buf_p )
+{
+    return;
+}
+
+uint32_t *gbpm_alloc_stub( void )
+{
+    return NULL;
+}
+
+void gbpm_free_stub( uint32_t *buf_p )
+{
+    return;
+}
+
+int gbpm_resv_rx_stub( gbpm_port_t port, uint32_t chnl,
+        uint32_t num_rx_buf, uint32_t bulk_alloc_cnt ) 
+{
+    return GBPM_ERROR;
+}
+
+int gbpm_unresv_rx_stub( gbpm_port_t port, uint32_t chnl ) 
+{
+    return GBPM_ERROR;
+}
+
+uint32_t gbpm_get_total_bufs_stub( void )
+{
+    return 0;
+}
+
+uint32_t gbpm_get_avail_bufs_stub( void )
+{
+    return 0;
+}
+
+uint32_t gbpm_get_max_dyn_bufs_stub( void )
+{
+    return 0;
+}
+
+
+/* BQoS BPM APIs invoked through hooks */
+int gbpm_get_dyn_buf_level(void) 
+{
+    return gbpm_dyn_buf_lvl_hook_g();
+}
+
+int gbpm_alloc_mult_buf( uint32_t num, uint32_t *buf_p )
+{
+    return gbpm_alloc_mult_hook_g( num, buf_p );
+}
+
+void gbpm_free_mult_buf( uint32_t num, uint32_t *buf_p )
+{
+    gbpm_free_mult_hook_g( num, buf_p );
+}
+
+uint32_t *gbpm_alloc_buf( void )
+{
+    return gbpm_alloc_hook_g();
+}
+
+void gbpm_free_buf( uint32_t *buf_p )
+{
+    return gbpm_free_hook_g( buf_p );
+}
+
+int gbpm_resv_rx_buf( gbpm_port_t port, uint32_t chnl,
+        uint32_t num_rx_buf, uint32_t bulk_alloc_cnt ) 
+{
+    return gbpm_resv_rx_hook_g(port, chnl, num_rx_buf, bulk_alloc_cnt);
+}
+
+int gbpm_unresv_rx_buf( gbpm_port_t port, uint32_t chnl ) 
+{
+    return gbpm_unresv_rx_hook_g( port, chnl );
+}
+
+uint32_t gbpm_get_total_bufs( void )
+{
+    return gbpm_get_total_bufs_hook_g();
+}
+
+uint32_t gbpm_get_avail_bufs( void )
+{
+    return gbpm_get_avail_bufs_hook_g();
+}
+
+uint32_t gbpm_get_max_dyn_bufs( void )
+{
+    return gbpm_get_max_dyn_bufs_hook_g();
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : gbpm_bind
+ * Description  : Override default hooks.
+ *  gbpm_dyn_buf_lvl: Function pointer to get the buffer level in BPM
+ *  gbpm_alloc_mult: Function pointer to allocate multiple buffers
+ *  gbpm_free_mult : Function pointer to free multiple buffers
+ *  gbpm_alloc     : Function pointer to allocate one buffer
+ *  gbpm_free      : Function pointer to free one buffer
+ *  gbpm_resv_rx   : Function pointer to reserve buffers
+ *  gbpm_unresv_rx : Function pointer to unreserve buffers
+ *  gbpm_get_total_bufs : get total number of bufs BPM manages
+ *  gbpm_get_avail_bufs : get current number of free bufs in BPM pool
+ *  gbpm_get_max_dyn_bufs : get number of free bufs in BPM pool at init time.
+ *------------------------------------------------------------------------------
+ */
+void gbpm_bind( gbpm_dyn_buf_lvl_hook_t gbpm_dyn_buf_lvl, 
+                gbpm_alloc_mult_hook_t gbpm_alloc_mult,
+                gbpm_free_mult_hook_t gbpm_free_mult,
+                gbpm_alloc_hook_t gbpm_alloc,
+                gbpm_free_hook_t gbpm_free,
+                gbpm_resv_rx_hook_t gbpm_resv_rx, 
+                gbpm_unresv_rx_hook_t gbpm_unresv_rx,
+                gbpm_get_total_bufs_hook_t gbpm_get_total_bufs,
+                gbpm_get_avail_bufs_hook_t gbpm_get_avail_bufs,
+                gbpm_get_max_dyn_bufs_hook_t gbpm_get_max_dyn_bufs )
+{
+    gbpm_print( "Bind dyn[<%08x>] "
+                "mult_alloc[<%08x>] mult_free[<%08x>]" 
+                "alloc[<%08x>] free[<%08x>]" 
+                "resv_rx[<%08x>] unresv_rx[<%08x>]" 
+                "get_total[<%08x>] get_avail[<%08x>]" 
+                "get_max_dyn[<%08x>]", 
+                (int)gbpm_dyn_buf_lvl, 
+                (int)gbpm_alloc_mult, (int)gbpm_free_mult,
+                (int)gbpm_alloc, (int)gbpm_free,
+                (int)gbpm_resv_rx, (int)gbpm_unresv_rx,
+                (int)gbpm_get_total_bufs, (int)gbpm_get_avail_bufs,
+                (int)gbpm_get_max_dyn_bufs
+                );
+
+    gbpm_dyn_buf_lvl_hook_g = gbpm_dyn_buf_lvl; 
+    gbpm_alloc_mult_hook_g = gbpm_alloc_mult;
+    gbpm_free_mult_hook_g  = gbpm_free_mult;
+    gbpm_alloc_hook_g  = gbpm_alloc;
+    gbpm_free_hook_g   = gbpm_free;
+    gbpm_resv_rx_hook_g = gbpm_resv_rx; 
+    gbpm_unresv_rx_hook_g = gbpm_unresv_rx; 
+    gbpm_get_total_bufs_hook_g = gbpm_get_total_bufs;
+    gbpm_get_avail_bufs_hook_g = gbpm_get_avail_bufs;
+    gbpm_get_max_dyn_bufs_hook_g = gbpm_get_max_dyn_bufs;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : gbpm_unbind
+ * Description  : use default hooks.
+ *------------------------------------------------------------------------------
+ */
+void gbpm_unbind( void )
+{
+    gbpm_bind( gbpm_dyn_buf_lvl_stub, 
+        gbpm_alloc_mult_stub, gbpm_free_mult_stub, 
+        gbpm_alloc_stub, gbpm_free_stub, 
+        gbpm_resv_rx_stub, gbpm_unresv_rx_stub,
+        gbpm_get_total_bufs_stub, gbpm_get_avail_bufs_stub,
+        gbpm_get_max_dyn_bufs_stub
+        );
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : __init_gbpm
+ * Description  : Static construction of global buffer pool manager subsystem.
+ *------------------------------------------------------------------------------
+ */
+static int __init __init_gbpm( void )
+{
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+    /* Set up  BPM workqueue - single threaded/high priority */
+    gbpm_workqueue = alloc_workqueue("bpm", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+#endif
+
+    gbpm_unbind();
+
+    printk( GBPM_MODNAME GBPM_VER_STR " initialized\n" );
+    return 0;
+}
+
+subsys_initcall(__init_gbpm);
+
+EXPORT_SYMBOL(gbpm_get_dyn_buf_level); 
+EXPORT_SYMBOL(gbpm_alloc_mult_buf);
+EXPORT_SYMBOL(gbpm_free_mult_buf);
+EXPORT_SYMBOL(gbpm_alloc_buf);
+EXPORT_SYMBOL(gbpm_free_buf);
+EXPORT_SYMBOL(gbpm_resv_rx_buf);
+EXPORT_SYMBOL(gbpm_unresv_rx_buf);
+EXPORT_SYMBOL(gbpm_get_total_bufs);
+EXPORT_SYMBOL(gbpm_get_avail_bufs);
+EXPORT_SYMBOL(gbpm_get_max_dyn_bufs);
+
+EXPORT_SYMBOL(gbpm_bind);
+EXPORT_SYMBOL(gbpm_unbind);
+#endif /* (defined(CONFIG_BCM_BPM) || defined(CONFIG_BCM_BPM_MODULE)) */
+
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 7e7aeb01de45cf3236469d7f62b51254b73abac4..7c4561a7d7914cf0caaaea328a0b7b698cd15000 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -27,6 +27,12 @@
 #include <net/checksum.h>
 #include <net/sock.h>
 
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+     && (defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)))
+#include <linux/bcm_m2mdma.h>
+#endif
+
+
 /*
  *	Verify iovec. The caller must ensure that the iovec is big enough
  *	to hold the message iovec.
@@ -124,6 +130,37 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 }
 EXPORT_SYMBOL(memcpy_toiovecend);
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+/* This was removed in 2.6. Re-add it for splice from socket to file. */
+/*
+ *	In kernel copy to iovec. Returns -EFAULT on error.
+ *
+ *	Note: this modifies the original iovec.
+ */
+
+void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len,
+			unsigned int *dma_cookie)
+{
+	while(len>0)
+	{
+		if(iov->iov_len)
+		{
+			int copy = min_t(unsigned int, iov->iov_len, len);
+#if defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)
+            *dma_cookie = bcm_m2m_dma_memcpy_async_no_flush(iov->iov_base, kdata, copy);
+#else
+            memcpy(iov->iov_base, kdata, copy);
+#endif
+			len -= copy;
+			kdata += copy;
+			iov->iov_base += copy;
+			iov->iov_len -= copy;
+		}
+		iov++;
+	}
+}
+#endif
+
 /*
  *	Copy iovec to kernel. Returns -EFAULT on error.
  *
diff --git a/net/core/iqos.c b/net/core/iqos.c
new file mode 100644
index 0000000000000000000000000000000000000000..143d39c2d3ed081e1bbb2bde105dfdbe84bfd1c9
--- /dev/null
+++ b/net/core/iqos.c
@@ -0,0 +1,320 @@
+
+/*
+<:copyright-BRCM:2009:DUAL/GPL:standard
+
+   Copyright (c) 2009 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+
+/*
+ *******************************************************************************
+ * File Name  : iqos.c
+ *******************************************************************************
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/iqos.h>
+#include <linux/bcm_colors.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/ip.h>
+#if defined(CONFIG_BCM_KF_LOG)
+#include <linux/bcm_log.h>
+#endif
+
+
+/* Debug macros */
+#if defined(CC_IQOS_SUPPORT_DEBUG)
+#define iqos_print(fmt, arg...)                                         \
+    if ( iqos_debug_g )                                                     \
+    printk( CLRc "IQOS %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define iqos_assertv(cond)                                              \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "IQOS ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return;                                                         \
+    }
+#define iqos_assertr(cond, rtn)                                         \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "IQOS ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return rtn;                                                     \
+    }
+#define IQOS_DBG(debug_code)    do { debug_code } while(0)
+#else
+#ifndef NULL_STMT
+#define NULL_STMT		do { /* NULL BODY */ } while (0)
+#endif
+#define iqos_print(fmt, arg...) NULL_STMT
+#define iqos_assertv(cond)      NULL_STMT
+#define iqos_assertr(cond, rtn) NULL_STMT
+#define IQOS_DBG(debug_code)    NULL_STMT
+#endif
+
+#define iqos_error(fmt, arg...)                                         \
+    printk( CLRerr "IQOS ERROR %s :" fmt CLRnl, __FUNCTION__, ##arg)
+
+#undef  IQOS_DECL
+#define IQOS_DECL(x)        #x,         /* string declaration */
+
+
+/*--- globals ---*/
+uint32_t iqos_enable_g = 0;      /* Enable Ingress QoS feature */
+uint32_t iqos_cpu_cong_g = 0;
+uint32_t iqos_debug_g = 0;
+
+DEFINE_SPINLOCK(iqos_lock_g);
+EXPORT_SYMBOL(iqos_lock_g);
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Default Ingress QoS hooks.
+ *------------------------------------------------------------------------------
+ */
+static iqos_add_L4port_hook_t iqos_add_L4port_hook_g = 
+                                            (iqos_add_L4port_hook_t) NULL;
+static iqos_rem_L4port_hook_t iqos_rem_L4port_hook_g =
+                                            (iqos_rem_L4port_hook_t) NULL;
+static iqos_prio_L4port_hook_t iqos_prio_L4port_hook_g =
+                                            (iqos_prio_L4port_hook_t) NULL;
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Get the Ingress QoS priority for L4 Dest port (layer4 UDP or TCP)
+ *------------------------------------------------------------------------------
+ */
+int iqos_prio_L4port( iqos_ipproto_t ipProto, uint16_t destPort )
+{
+    unsigned long flags;
+    uint8_t prio = IQOS_PRIO_HIGH;
+
+    if ( unlikely(iqos_prio_L4port_hook_g == (iqos_prio_L4port_hook_t)NULL) )
+        goto iqos_prio_L4port_exit;
+
+    IQOS_LOCK_IRQSAVE();
+    prio = iqos_prio_L4port_hook_g( ipProto, destPort ); 
+    IQOS_UNLOCK_IRQRESTORE();
+
+iqos_prio_L4port_exit:
+    return prio;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Add the Ingress QoS priority and entry type for the L4 dest port.
+ *------------------------------------------------------------------------------
+ */
+uint8_t iqos_add_L4port( iqos_ipproto_t ipProto, uint16_t destPort, 
+        iqos_ent_t ent, iqos_prio_t prio )
+{
+    unsigned long flags;
+    uint8_t addIx = IQOS_INVALID_NEXT_IX;
+
+#if defined(CONFIG_BCM_KF_LOG)
+    BCM_LOG_DEBUG( BCM_LOG_ID_IQ, 
+            "AddPort ent<%d> ipProto<%d> dport<%d> prio<%d> ", 
+            ent, ipProto, destPort, prio );  
+#endif
+
+    if ( unlikely(iqos_add_L4port_hook_g == (iqos_add_L4port_hook_t)NULL) )
+        goto iqos_add_L4port_exit;
+
+    IQOS_LOCK_IRQSAVE();
+    addIx = iqos_add_L4port_hook_g( ipProto, destPort, ent, prio ); 
+    IQOS_UNLOCK_IRQRESTORE();
+
+iqos_add_L4port_exit:
+#if defined(CONFIG_BCM_KF_LOG)
+    BCM_LOG_DEBUG( BCM_LOG_ID_IQ, "addIx<%d>", addIx );  
+#endif
+    return addIx;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Remove the L4 dest port from the Ingress QoS priority table
+ *------------------------------------------------------------------------------
+ */
+uint8_t iqos_rem_L4port( iqos_ipproto_t ipProto, uint16_t destPort, 
+        iqos_ent_t ent )
+{
+    unsigned long flags;
+    uint8_t remIx = IQOS_INVALID_NEXT_IX;
+
+#if defined(CONFIG_BCM_KF_LOG)
+    BCM_LOG_DEBUG( BCM_LOG_ID_IQ, "RemPort ent<%d> ipProto<%d> dport<%d> ", 
+                ent, ipProto, destPort);  
+#endif
+
+    if ( unlikely(iqos_rem_L4port_hook_g == (iqos_rem_L4port_hook_t)NULL) )
+        goto iqos_rem_L4port_exit;
+
+    IQOS_LOCK_IRQSAVE();
+    remIx = iqos_rem_L4port_hook_g( ipProto, destPort, ent ); 
+    IQOS_UNLOCK_IRQRESTORE();
+
+iqos_rem_L4port_exit:
+#if defined(CONFIG_BCM_KF_LOG)
+    BCM_LOG_DEBUG( BCM_LOG_ID_IQ, "remIx<%d> ", remIx);  
+#endif
+    return remIx;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : iqos_bind
+ * Description  : Override default hooks.
+ *  iqos_add    : Function pointer to be invoked in iqos_add_L4port
+ *  iqos_rem    : Function pointer to be invoked in iqos_rem_L4port
+ *  iqos_prio   : Function pointer to be invoked in iqos_prio_L4port
+ *------------------------------------------------------------------------------
+ */
+void iqos_bind( iqos_add_L4port_hook_t  iqos_add, 
+                iqos_rem_L4port_hook_t  iqos_rem,
+                iqos_prio_L4port_hook_t iqos_prio )
+{
+    iqos_print( "Bind add[<%08x>] rem[<%08x>] prio[<%08x>]", 
+                (int)iqos_add, (int)iqos_rem, (int)iqos_prio );
+
+    iqos_add_L4port_hook_g = iqos_add;
+    iqos_rem_L4port_hook_g = iqos_rem;
+    iqos_prio_L4port_hook_g = iqos_prio;
+}
+
+EXPORT_SYMBOL(iqos_cpu_cong_g);
+EXPORT_SYMBOL(iqos_enable_g);
+EXPORT_SYMBOL(iqos_debug_g);
+EXPORT_SYMBOL(iqos_add_L4port);
+EXPORT_SYMBOL(iqos_rem_L4port);
+EXPORT_SYMBOL(iqos_prio_L4port);
+EXPORT_SYMBOL(iqos_bind);
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : __init_iqos
+ * Description  : Static construction of ingress QoS subsystem.
+ *------------------------------------------------------------------------------
+ */
+static int __init __init_iqos( void )
+{
+    printk( IQOS_MODNAME IQOS_VER_STR " initialized\n" );
+    return 0;
+}
+
+subsys_initcall(__init_iqos);
+
+#if (defined(CONFIG_BCM_INGQOS) || defined(CONFIG_BCM_INGQOS_MODULE))
+
+
+/* Hooks for getting/dumping the Ingress QoS status */
+iqos_status_hook_t iqos_enet_status_hook_g = (iqos_status_hook_t)NULL;
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+iqos_status_hook_t iqos_fap_status_hook_g = (iqos_status_hook_t)NULL;
+#endif
+
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+iqos_status_hook_t iqos_xtm_status_hook_g = (iqos_status_hook_t)NULL;
+#endif
+
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+/* Hooks for getting the current RX DQM queue depth */
+iqos_fap_ethRxDqmQueue_hook_t iqos_fap_ethRxDqmQueue_hook_g = NULL;
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+iqos_fap_xtmRxDqmQueue_hook_t iqos_fap_xtmRxDqmQueue_hook_g = NULL;
+#endif
+
+iqos_fap_set_status_hook_t    iqos_fap_set_status_hook_g = NULL;
+iqos_fap_add_L4port_hook_t    iqos_fap_add_L4port_hook_g = NULL;
+iqos_fap_rem_L4port_hook_t    iqos_fap_rem_L4port_hook_g = NULL;
+iqos_fap_dump_porttbl_hook_t  iqos_fap_dump_porttbl_hook_g = NULL;
+#endif
+
+
+/* get the congestion status for system */ 
+iqos_cong_status_t iqos_get_sys_cong_status( void )
+{
+    return  ((iqos_cpu_cong_g) ? IQOS_CONG_STATUS_HI : IQOS_CONG_STATUS_LO);
+}
+
+
+/* get the congestion status for an RX channel of an interface */ 
+iqos_cong_status_t iqos_get_cong_status( iqos_if_t iface, uint32_t chnl )
+{
+    return ((iqos_cpu_cong_g & (1<<(iface + chnl))) ? 
+                                IQOS_CONG_STATUS_HI : IQOS_CONG_STATUS_LO);
+}
+
+
+/* set/reset the congestion status for an RX channel of an interface */ 
+uint32_t  iqos_set_cong_status( iqos_if_t iface, uint32_t chnl, 
+        iqos_cong_status_t status )
+{
+    unsigned long flags;
+
+    IQOS_LOCK_IRQSAVE();
+
+    if (status == IQOS_CONG_STATUS_HI)
+        iqos_cpu_cong_g |= (1<<(iface + chnl));
+    else
+        iqos_cpu_cong_g &= ~(1<<(iface + chnl));
+
+    IQOS_UNLOCK_IRQRESTORE();
+
+    return iqos_cpu_cong_g;
+}
+
+EXPORT_SYMBOL(iqos_get_cong_status);
+EXPORT_SYMBOL(iqos_set_cong_status);
+EXPORT_SYMBOL(iqos_enet_status_hook_g);
+
+#if defined(CONFIG_BCM_KF_FAP) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
+EXPORT_SYMBOL(iqos_fap_status_hook_g);
+EXPORT_SYMBOL(iqos_fap_set_status_hook_g);
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+EXPORT_SYMBOL(iqos_fap_xtmRxDqmQueue_hook_g);
+#endif
+EXPORT_SYMBOL(iqos_fap_ethRxDqmQueue_hook_g);
+EXPORT_SYMBOL(iqos_fap_add_L4port_hook_g);
+EXPORT_SYMBOL(iqos_fap_rem_L4port_hook_g);
+EXPORT_SYMBOL(iqos_fap_dump_porttbl_hook_g);
+#endif
+
+#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE)
+EXPORT_SYMBOL(iqos_xtm_status_hook_g);
+#endif
+
+#endif /* (defined(CONFIG_BCM_INGQOS) || defined(CONFIG_BCM_INGQOS_MODULE)) */
+
+
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b169a5c895efd781c7958218d7f8dc6..24a933b6d876f91247b7b57d5b61a4120e7241aa 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -23,6 +23,30 @@
 #include <linux/bitops.h>
 #include <asm/types.h>
 
+#if defined(CONFIG_BCM_KF_LINKWATCH_WQ)
+#include <linux/init.h>
+/*
+ * Problem scenario: wlmngr does an ioctl, and the upper layers
+ * of ioctl code grabs the rtnl lock before calling the wlan ioctl code.
+ * Wlan ioctl code calls flush_workqueue, but there is a linkwatch_event
+ * function on the work queue.  Linkwatch_event function tries to grab the
+ * rtnl lock, but cannot, so now the event thread is deadlocked.
+ * Partial solution: put linkwatch_event on its own workqueue so that
+ * the wlan events can get flushed without having to run the linkwatch_event.
+ * Eventually, the wlan events should also get moved off of the common
+ * event workqueue.
+ */
+static struct workqueue_struct *lw_wq=NULL;
+
+int __init init_linkwatch(void)
+{
+	lw_wq = create_singlethread_workqueue("linkwatch");
+
+	return 0;
+}
+
+__initcall(init_linkwatch);
+#endif
 
 enum lw_bits {
 	LW_URGENT = 0,
@@ -123,7 +147,11 @@ static void linkwatch_schedule_work(int urgent)
 	 * This is true if we've scheduled it immeditately or if we don't
 	 * need an immediate execution and it's already pending.
 	 */
+#if defined(CONFIG_BCM_KF_LINKWATCH_WQ)
+	if (queue_delayed_work(lw_wq, &linkwatch_work, delay) == !delay)
+#else
 	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
+#endif
 		return;
 
 	/* Don't bother if there is nothing urgent. */
@@ -135,7 +163,11 @@ static void linkwatch_schedule_work(int urgent)
 		return;
 
 	/* Otherwise we reschedule it again for immediate execution. */
+#if defined(CONFIG_BCM_KF_LINKWATCH_WQ)
+	queue_delayed_work(lw_wq, &linkwatch_work, 0);
+#else
 	schedule_delayed_work(&linkwatch_work, 0);
+#endif
 }
 
 
diff --git a/net/core/nbuff.c b/net/core/nbuff.c
new file mode 100644
index 0000000000000000000000000000000000000000..c1149ac84df38a6b2bd1b49a7d059fb7aac492bd
--- /dev/null
+++ b/net/core/nbuff.c
@@ -0,0 +1,891 @@
+#if defined(CONFIG_BCM_KF_NBUFF)
+
+/*
+ * <:copyright-BRCM:2009:GPL/GPL:standard
+ * 
+ *    Copyright (c) 2009 Broadcom Corporation
+ *    All Rights Reserved
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as published by
+ * the Free Software Foundation (the "GPL").
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * 
+ * A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+ * writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * 
+ * :>
+*/
+
+#define FKB_IMPLEMENTATION_FILE
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/nbuff.h>
+#include <linux/export.h>
+
+#ifdef CC_CONFIG_FKB_COLOR
+#define COLOR(clr_code)     clr_code
+#else
+#define COLOR(clr_code)
+#endif
+#define CLRb                COLOR("\e[0;34m")       /* blue */
+#define CLRc                COLOR("\e[0;36m")       /* cyan */
+#define CLRn                COLOR("\e[0m")          /* normal */
+#define CLRerr              COLOR("\e[0;33;41m")    /* yellow on red */
+#define CLRN                CLRn"\n"                /* normal newline */
+
+int nbuff_dbg = 0;
+#if defined(CC_CONFIG_FKB_DEBUG)
+#define fkb_print(fmt, arg...)                                          \
+    printk( CLRc "FKB %s :" fmt CLRN, __FUNCTION__, ##arg )
+#define fkb_assertv(cond)                                               \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "FKB ASSERT %s : " #cond CLRN, __FUNCTION__ );   \
+        return;                                                         \
+    }
+#define fkb_assertr(cond, rtn)                                          \
+    if ( !cond ) {                                                      \
+        printk( CLRerr "FKB ASSERT %s : " #cond CLRN, __FUNCTION__ );   \
+        return rtn;                                                     \
+    }
+#else
+#define fkb_print(fmt, arg...)  NULL_STMT
+#define fkb_assertv(cond)       NULL_STMT
+#define fkb_assertr(cond, rtn)  NULL_STMT
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Test whether an FKB may be translated onto a skb.
+ *------------------------------------------------------------------------------
+ */
+int fkb_in_skb_test(int fkb_offset, int list_offset, int blog_p_offset,
+                    int data_offset, int len_word_offset, int mark_offset,
+                    int priority_offset, int recycle_hook_offset,
+                    int recycle_context_offset)
+{
+
+#undef OFFSETOF
+#define OFFSETOF(stype, member) ((int)&((struct stype*)0)->member)       
+#define FKBOFFSETOF(member)   (member##_offset)
+#define SKBOFFSETOF(member)   (((int)&((struct sk_buff*)0)->member)-fkb_offset)
+#define FKBSIZEOF(member)     (sizeof(((struct fkbuff*)0)->member))
+#define SKBSIZEOF(member)     (sizeof(((struct sk_buff*)0)->member))
+#define FKBINSKB_TEST(member) ((FKBOFFSETOF(member) == SKBOFFSETOF(member)) \
+                              && (FKBSIZEOF(member) == SKBSIZEOF(member)))
+
+    if ( OFFSETOF(sk_buff, fkbInSkb) != fkb_offset)
+        return 0;
+    if (  FKBINSKB_TEST(list) && FKBINSKB_TEST(blog_p) && FKBINSKB_TEST(data)
+       && FKBINSKB_TEST(len_word) && FKBINSKB_TEST(mark) && FKBINSKB_TEST(priority)
+       && FKBINSKB_TEST(recycle_hook) && FKBINSKB_TEST(recycle_context)
+       // && sizeof(struct fkbuff) == (2 * sizeof cacheline)
+       && ((fkb_offset & 0xF) == 0) ) /* ____cacheline_aligned */
+        return 1;
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Pre-allocated Pool of Cloned and Master FkBuff_t objects.
+ *------------------------------------------------------------------------------ */
+
+typedef struct fkbPool {
+    FkBuff_t  * freelist_p;         /* List of free objects                   */
+    uint32_t    extends;            /* Number of pool extensions performed    */
+
+    /* Pool dimensioning parameters */
+    uint32_t    pool_size;          /* Startup default pool size              */
+    uint32_t    object_size;        /* Size of each object in the pool        */
+    uint32_t    extend_size;        /* Number of objects per extension        */
+    uint32_t    extend_max;         /* Maximum number of extensions permitted */
+    char        name[8];
+
+#if defined(CC_CONFIG_FKB_STATS)
+    int         cnt_free;           /* Number of free objects                 */
+    int         cnt_used;           /* Number of in use objects               */
+    int         cnt_hwm;            /* In use high water mark for engineering */
+    int         cnt_fails;          /* Failure due to out of memory           */
+#endif
+} FkbPool_t;
+
+/*
+ *------------------------------------------------------------------------------
+ * Global pools for Cloned and Master FKB Objects. 
+ *------------------------------------------------------------------------------
+ */
+FkbPool_t fkb_pool_g[ FkbMaxPools_e ] = {
+    {
+        .freelist_p     = FKB_NULL,
+        .extends        = 0,
+        .pool_size      = FKBM_POOL_SIZE_ENGG,
+        .object_size    = BCM_PKTBUF_SIZE,     /* Rx Buffer with in-place FKB */
+        .extend_size    = FKBM_EXTEND_SIZE_ENGG,
+        .extend_max     = FKBM_EXTEND_MAX_ENGG,
+        .name           = "Master",
+#if defined(CC_CONFIG_FKB_STATS)
+        .cnt_free = 0, .cnt_used = 0, .cnt_hwm = 0, .cnt_fails = 0,
+#endif
+    }
+    ,
+    {
+        .freelist_p     = FKB_NULL,
+        .extends        = 0,
+        .pool_size      = FKBC_POOL_SIZE_ENGG,
+        .object_size    = sizeof(FkBuff_t),     /* Only FKB object */
+        .extend_size    = FKBC_EXTEND_SIZE_ENGG,
+        .extend_max     = FKBC_EXTEND_MAX_ENGG,
+        .name           = "Cloned",
+#if defined(CC_CONFIG_FKB_STATS)
+        .cnt_free = 0, .cnt_used = 0, .cnt_hwm = 0, .cnt_fails = 0,
+#endif
+    }
+};
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Statistics collection for engineering free pool parameters.
+ *------------------------------------------------------------------------------
+ */
+void fkb_stats(void)
+{
+    int pool;
+    FkbPool_t *pool_p;
+    for (pool = 0; pool < FkbMaxPools_e; pool++ )
+    {
+        pool_p = &fkb_pool_g[pool];
+
+        printk("FKB %s Pool: extends<%u>\n", pool_p->name, pool_p->extends );
+
+        FKB_STATS(
+            printk("\t free<%d> used<%d> HWM<%d> fails<%d>\n",
+                   pool_p->cnt_free,
+                   pool_p->cnt_used, pool_p->cnt_hwm, pool_p->cnt_fails ); );
+    }
+}
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#include "linux/spinlock.h"
+static DEFINE_SPINLOCK(fkb_pool_lock_g);   /* FkBuff packet flow */
+#define FKB_POOL_LOCK()     spin_lock_irqsave(&fkb_pool_lock_g, lock_flags)
+#define FKB_POOL_UNLOCK()   spin_unlock_irqrestore(&fkb_pool_lock_g, lock_flags)
+#else
+#define FKB_POOL_LOCK()     local_irq_disable()
+#define FKB_POOL_UNLOCK()   local_irq_enable()
+#endif
+
+#if defined(CC_CONFIG_FKB_AUDIT)
+void fkb_audit(const char * function, int line)
+{ /* place any audits here */ }
+EXPORT_SYMBOL(fkb_audit);
+#define FKB_AUDIT_RUN()     fkb_audit(__FUNCTION__,__LINE__)  
+#else
+#define FKB_AUDIT_RUN()     NULL_STMT
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkbM_recycle
+ * Description: Recycling a Master FKB that was allocated from Master FKB Pool.
+ * Parameters :
+ *   pNBuff   : pointer to a network buffer
+ *   context  : registered context argument with network buffer.
+ *   flags    : unused by fkb recycling.
+ *------------------------------------------------------------------------------
+ */
+void fkbM_recycle(pNBuff_t pNBuff, unsigned context, unsigned flags)
+{
+    register FkBuff_t  * fkbM_p;
+    register FkbPool_t * pool_p = (FkbPool_t *)context;
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    unsigned long lock_flags;
+#endif
+
+    fkb_assertv( (pool_p == &fkb_pool_g[FkbMasterPool_e]) ); 
+
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        struct sk_buff * skb_p = (struct sk_buff *)PNBUFF_2_SKBUFF(pNBuff);
+        fkb_assertv( (flags & SKB_DATA_RECYCLE) );
+        fkbM_p = (FkBuff_t *)((uint32_t)(skb_p->head) - PFKBUFF_PHEAD_OFFSET); 
+    }
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+        fkbM_p = PNBUFF_2_FKBUFF(pNBuff);
+
+    fkb_dbg(1, "fkbM_p<0x%08x>", (int)fkbM_p);
+
+    FKB_AUDIT(
+        if ( fkbM_p->list != NULL )
+            printk("FKB ASSERT cpu<%u> %s(0x%08x) list<0x%08x> recycle<%pS>\n",
+                   smp_processor_id(), __FUNCTION__,
+                   (int)fkbM_p, (int)fkbM_p->list, fkbM_p->recycle_hook);
+        if ( fkbM_p->recycle_hook != (RecycleFuncP)fkbM_recycle )
+            printk("FKB ASSERT cpu<%u> %s <0x%08x>.recycle<%pS>\n",
+                   smp_processor_id(), __FUNCTION__,
+                   (int)fkbM_p, fkbM_p->recycle_hook); );
+            
+    FKB_AUDIT_RUN();
+
+    FKB_POOL_LOCK();
+
+    FKB_STATS( pool_p->cnt_used--; pool_p->cnt_free++; );
+
+    fkbM_p->list = pool_p->freelist_p;  /* resets users */
+    pool_p->freelist_p = fkbM_p;        /* link into Master FKB free pool */
+
+    FKB_POOL_UNLOCK();
+
+    FKB_AUDIT_RUN();
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkbC_recycle
+ * Description: Recycling a Cloned FKB back to the Cloned FKB Pool.
+ * Parameters :
+ *   fkbC_p   : Pointer to a Cloned FKB Object.
+ *------------------------------------------------------------------------------
+ */
+void fkbC_recycle(FkBuff_t * fkbC_p)
+{
+    register FkbPool_t * pool_p;
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    unsigned long lock_flags;
+#endif
+
+    pool_p = &fkb_pool_g[ FkbClonedPool_e ];
+
+    fkb_dbg(2, "fkb_p<0x%08x>", (int)fkbC_p);
+
+    FKB_AUDIT(
+        if ( fkbC_p->recycle_hook != (RecycleFuncP)NULL )
+            printk("FKB ASSERT cpu<%u> %s <0x%08x>.recycle<%pS>\n",
+                   smp_processor_id(), __FUNCTION__,
+                   (int)fkbC_p, fkbC_p->recycle_hook); );
+
+    FKB_AUDIT_RUN();
+
+    FKB_POOL_LOCK();
+
+    FKB_STATS( pool_p->cnt_used--; pool_p->cnt_free++; );
+
+    fkbC_p->list = pool_p->freelist_p;  /* resets master_p */
+    pool_p->freelist_p = fkbC_p;        /* link into Cloned free pool */
+
+    FKB_POOL_UNLOCK();
+
+    FKB_AUDIT_RUN();
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkb_extend
+ * Description: Create a pool of FKB objects. When a pool is exhausted
+ *              this function may be invoked to extend the pool.
+ *              All objects in the pool are chained in a single linked list.
+ * Parameters :
+ *   num      : Number of FKB objects to be allocated.
+ *   object   : Object type to locate pool
+ * Returns    : Number of FKB objects allocated.
+ *------------------------------------------------------------------------------
+ */
+static uint32_t fkb_extend(uint32_t num, FkbObject_t object)
+{
+    register int i;
+    register FkBuff_t  * list_p, * fkb_p, * fkbNext_p;
+    register FkbPool_t * pool_p;
+
+    fkb_assertr( (object < FkbMaxPools_e), 0 );
+
+    pool_p = &fkb_pool_g[object];       /* select free pool */
+
+    list_p = (FkBuff_t *) kmalloc( num * pool_p->object_size, GFP_ATOMIC);
+
+    fkb_print( "fkb_extend %u FKB %s objects <%p> .. <%p>",
+               num, pool_p->name, list_p,
+               (FkBuff_t*)((uint32_t)list_p + ((num-1) * pool_p->object_size)));
+
+    if ( unlikely(list_p == FKB_NULL) )   /* may fail if in_interrupt or oom */
+    {
+        FKB_STATS( pool_p->cnt_fails++; );
+        fkb_print( "WARNING: Failure to initialize %d FKB %s objects",
+                    num, pool_p->name );
+        return 0;
+    }
+    pool_p->extends++;
+
+    /* memset( (void *)list, 0, ( num * pool_p->object_size ) ); */
+
+    /* Link all allocated objects together */
+    fkb_p = FKB_NULL;
+    fkbNext_p = list_p;
+    for ( i = 0; i < num; i++ )
+    {
+        fkb_p = fkbNext_p;
+        fkbNext_p = (FkBuff_t *)( (uint32_t)fkb_p + pool_p->object_size );
+
+        if ( object == FkbClonedPool_e )
+        {
+            fkb_p->recycle_hook = (RecycleFuncP)NULL;
+            fkb_p->recycle_context = (uint32_t) &fkb_pool_g[FkbClonedPool_e];
+        }
+        else
+        {
+            // fkb_set_ref(fkb_p, 0);   ... see fkb_p->list
+            fkb_p->recycle_hook = (RecycleFuncP)fkbM_recycle;
+            fkb_p->recycle_context = (uint32_t) &fkb_pool_g[FkbMasterPool_e];
+        }
+
+        fkb_p->list = fkbNext_p;        /* link each FkBuff */
+    }
+
+    FKB_STATS( pool_p->cnt_free += num; );
+
+    /* link allocated list into FKB free pool */
+    fkb_p->list = pool_p->freelist_p;  /* chain last FKB object */
+    pool_p->freelist_p = list_p;       /* head of allocated list */
+
+    return num;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : fkb_construct
+ * Description  : Incarnates the FKB system pools during kernel/module init
+ *------------------------------------------------------------------------------
+ */
+int fkb_construct(int fkb_in_skb_offset)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    unsigned long lock_flags;
+#endif
+#undef FKBOFFSETOF
+#define FKBOFFSETOF(member)   ((int)&((struct fkbuff*)0)->member)
+    if ( fkb_in_skb_test(fkb_in_skb_offset,
+                FKBOFFSETOF(list), FKBOFFSETOF(blog_p), FKBOFFSETOF(data),
+                FKBOFFSETOF(len_word), FKBOFFSETOF(mark), FKBOFFSETOF(priority),
+                FKBOFFSETOF(recycle_hook), FKBOFFSETOF(recycle_context)) == 0 )
+        return -1;
+    else
+        FKB_DBG( printk(CLRb "FKB compatible with SKB" CLRN); );
+
+    FKB_POOL_LOCK();
+
+    /* Prepare a free pool for Cloned FkBuffs */
+    fkb_extend( fkb_pool_g[FkbClonedPool_e].pool_size, FkbClonedPool_e );
+
+    /* Prepare a free pool for Master FkBuffs */
+    fkb_extend( fkb_pool_g[FkbMasterPool_e].pool_size, FkbMasterPool_e );
+
+    FKB_POOL_UNLOCK();
+
+    FKB_AUDIT_RUN();
+
+    FKB_DBG( printk(CLRb "NBUFF nbuff_dbg<0x%08x> = %d\n"
+                         "\t Pool FkBuff %s size<%u> num<%u>\n"
+                         "\t Pool FkBuff %s size<%u> num<%u>" CLRN,
+                         (int)&nbuff_dbg, nbuff_dbg,
+                         fkb_pool_g[FkbClonedPool_e].name,
+                         fkb_pool_g[FkbClonedPool_e].object_size,
+                         fkb_pool_g[FkbClonedPool_e].pool_size,
+                         fkb_pool_g[FkbMasterPool_e].name,
+                         fkb_pool_g[FkbMasterPool_e].object_size,
+                         fkb_pool_g[FkbMasterPool_e].pool_size );
+           );
+
+    printk( CLRb "NBUFF %s Initialized" CLRN, NBUFF_VERSION );
+
+    return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : fkb_alloc
+ * Description  : Allocate an FKB from one of the pools
+ *  object      : Type of FkbObject, to identify Free Pool
+ * Returns      : Pointer to an FKB, or NULL on pool depletion.
+ *------------------------------------------------------------------------------
+ */
+FkBuff_t * fkb_alloc( FkbObject_t object )
+{
+    register FkBuff_t  * fkb_p;
+    register FkbPool_t * pool_p;
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    unsigned long lock_flags;
+#endif
+
+    fkb_assertr( (object < FkbMaxPools_e), FKB_NULL );
+
+    FKB_AUDIT(
+        if ( smp_processor_id() )
+            printk("FKB ASSERT %s not supported on CP 1\n", __FUNCTION__); );
+
+    FKB_AUDIT_RUN();
+
+    pool_p = &fkb_pool_g[object];
+
+    fkb_dbg(2, "%s freelist_p<0x%08x>", pool_p->name,(int)pool_p->freelist_p);
+
+    FKB_POOL_LOCK();    /* DO NOT USE fkb_assertr() until FKB_POOL_UNLOCK() */
+
+    if ( unlikely(pool_p->freelist_p == FKB_NULL) )
+    {
+#ifdef SUPPORT_FKB_EXTEND
+        /* Try extending free pool */
+        if ( (pool_p->extends >= pool_p->extend_max)
+          || (fkb_extend( pool_p->extend_size, object ) != pool_p->extend_size))
+        {
+            fkb_print( "WARNING: FKB Pool %s exhausted", pool_p->name );
+        }
+#else
+        if ( fkb_extend( pool_p->extend_size, object ) == 0 )
+        {
+            fkb_print( "WARNING: FKB Pool %s out of memory", pool_p->name );
+        }
+#endif
+        if (pool_p->freelist_p == FKB_NULL)
+        {
+            fkb_p = FKB_NULL;
+            goto fkb_alloc_return;
+        }
+    }
+
+    FKB_STATS(
+        pool_p->cnt_free--;
+        if ( ++pool_p->cnt_used > pool_p->cnt_hwm )
+            pool_p->cnt_hwm = pool_p->cnt_used;
+        );
+
+    /* Delete an FkBuff from the pool */
+    fkb_p = pool_p->freelist_p;
+    pool_p->freelist_p = pool_p->freelist_p->list;
+
+    // fkb_set_ref(fkb_p, 0);
+    fkb_p->list = FKB_NULL;   /* resets list, master_p to NULL , users to 0 */
+
+fkb_alloc_return:
+
+    FKB_POOL_UNLOCK();  /* May use fkb_assertr() from now onwards */
+
+    FKB_AUDIT_RUN();
+
+    fkb_dbg(1, "fkb_p<0x%08x>", (int)fkb_p);
+
+    return fkb_p;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : fkb_free
+ * Description  : Free an FKB and associated buffer if reference count of the
+ *                buffer is 0. 
+ *                All cloned fkb's are freed to the global Cloned free pool.
+ *                Master FKBs will be recycled into the appropriate network
+ *                device driver's rx pool or the global Master FKB pool.
+ * Parameters   :
+ *   fkb_p      : Pointer to a FKB to be freed.
+ *------------------------------------------------------------------------------
+ */
+void fkb_free(FkBuff_t * fkb_p)
+{
+    register FkBuff_t  * fkbM_p;
+
+    FKB_AUDIT_RUN();
+
+    fkb_assertv( (fkb_p!=FKB_NULL) );
+    fkb_dbg(1, "fkb_p<0x%08x>", (int)fkb_p);
+
+    /* FKB should never point to a Blog, so no need to free fkb_p->blog_p */
+    fkb_assertv( (fkb_p!=FKB_NULL) );
+
+    /* Implementation Note: list_p, master_p and users union.
+       If it is a cloned fkb, then fkb_p->master_p is a KPTR. If a double free
+       is invoked on the same fkb_p, then list_p will also be a KPTR! */
+
+    if ( _is_fkb_cloned_pool_(fkb_p) )
+    {
+        fkbM_p = fkb_p->master_p;
+        fkbC_recycle(fkb_p);
+    }
+    else
+        fkbM_p = fkb_p;
+
+    fkb_assertv( (_get_master_users_(fkbM_p) > 0) );
+
+    /* API atomic_dec_and_test: After decrement, return true if result is 0 */
+    if ( likely(atomic_dec_and_test(&fkbM_p->users)) )
+    {
+        /* No fkbs are referring to master, so master and buffer recyclable */
+        fkbM_p->recycle_hook(FKBUFF_2_PNBUFF(fkbM_p),
+                             fkbM_p->recycle_context, 0);
+    }
+
+    FKB_AUDIT_RUN();
+
+    fkb_dbg(2, "fkbM_p<0x%08x>", (int)fkbM_p);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkb_unshare
+ * Description: Returns a pointer to a Master FKB with a single reference to the
+ *              packet. A Cloned FKB with a single reference will result in the
+ *              Clone's Master FKB being returned, and the Cloned FKB Object is
+ *              recycled.
+ *------------------------------------------------------------------------------
+ */
+FkBuff_t * fkb_unshare(FkBuff_t * fkb_p)
+{
+    register FkBuff_t * fkbM_p;
+    uint8_t * dirty_p;
+
+    FKB_AUDIT(
+        if ( smp_processor_id() )
+            printk("FKB ASSERT %s not supported on CP 1\n", __FUNCTION__); );
+
+    FKB_AUDIT_RUN();
+
+    if ( unlikely(_is_fkb_cloned_pool_(fkb_p)) )     /* Cloned FKB */
+    {
+        /* If master is also referenced by other FkBuffs */
+        if ( _get_master_users_(fkb_p->master_p) > 1 )
+        {
+            /* Real unsharing, by allocating new master FkBuff */
+            fkbM_p = fkb_alloc( FkbMasterPool_e );
+            if (fkbM_p == FKB_NULL)
+            {
+                fkb_dbg(1, "fkb_unshare Cloned FKB fkb_alloc failure");
+                return FKB_NULL;
+            }
+            fkb_set_ref(fkbM_p, 1);
+
+            /* Setup FkBuff context */
+            fkbM_p->data = (uint8_t*)(fkbM_p)
+                         + ((uint32_t)fkb_p->data - (uint32_t)fkb_p->master_p);
+            fkbM_p->len_word = fkb_p->len_word;
+            fkbM_p->mark = fkb_p->mark;
+            fkbM_p->priority = fkb_p->priority;
+
+            fkbM_p->dirty_p = _to_dptr_from_kptr_(fkbM_p->data + fkbM_p->len);
+
+            /* Copy from original clone FkBuff */
+            memcpy(fkbM_p->data, fkb_p->data, fkb_p->len);
+
+            dirty_p = _to_dptr_from_kptr_(fkb_p->data  + fkb_p->len);
+            if ( fkb_p->master_p->dirty_p < dirty_p )
+                fkb_p->master_p->dirty_p = dirty_p;
+
+            fkb_dec_ref(fkb_p->master_p); /* decrement masters user count */
+            fkb_dbg(1, "cloned fkb_p with multiple ref master");
+        }
+        else
+        {
+            fkb_dbg(1, "cloned fkb_p with single ref master");
+            fkbM_p = fkb_p->master_p;
+
+            // Move clone context to master and return master
+            fkbM_p->data = fkb_p->data;
+            fkbM_p->len_word = fkb_p->len_word;
+            fkbM_p->mark = fkb_p->mark;
+            fkbM_p->priority = fkb_p->priority;
+
+            if ( fkbM_p->dirty_p < fkb_p->dirty_p )
+                fkbM_p->dirty_p = fkb_p->dirty_p;
+        }
+
+        fkb_dbg(2, "fkbM_p<0x%08x> fkbM_data<0x%08x> dirty_p<0x%08x> len<%d>",
+            (int)fkbM_p, (int)fkbM_p->data, (int)fkbM_p->dirty_p, fkbM_p->len);
+        fkb_dbg(2, "fkb_p<0x%08x> fkb_data<0x%08x> dirty_p<0x%08x> len<%d>",
+            (int)fkb_p, (int)fkb_p->data, (int)fkb_p->dirty_p, fkb_p->len);
+
+        fkbC_recycle(fkb_p);    /* always recycle original clone fkb */
+
+        return fkbM_p;  /* return newly allocated master FkBuff */
+    }
+    else    /* Original is a Master */
+    {
+        /* Single reference, no need to unshare */
+        if ( _get_master_users_(fkb_p) == 1 )
+        {
+            fkb_dbg(1, "master fkb_p with single ref ");
+            fkb_dbg(2, "fkb_p<0x%08x> fkb_data<0x%08x> dirty_p<0x%08x> len<%d>",
+                (int)fkb_p, (int)fkb_p->data, (int)fkb_p->dirty_p, fkb_p->len);
+            return fkb_p;
+        }
+
+        /* Allocate a master FkBuff with associated data buffer */
+        fkbM_p = fkb_alloc( FkbMasterPool_e );
+        if (fkbM_p == FKB_NULL)
+        {
+            fkb_dbg(1, "fkb_unshare Master Fkb fkb_alloc failure");
+            return FKB_NULL;
+        }
+        fkb_set_ref(fkbM_p, 1);
+
+        /* Setup FkBuff context */
+        fkbM_p->data = (uint8_t*)(fkbM_p)
+                       + ((uint32_t)fkb_p->data - (uint32_t)fkb_p);
+        fkbM_p->len_word = fkb_p->len_word;
+        fkbM_p->mark = fkb_p->mark;
+        fkbM_p->priority = fkb_p->priority;
+
+        fkbM_p->dirty_p = _to_dptr_from_kptr_(fkbM_p->data + fkbM_p->len);
+
+        /* Copy original FkBuff's data into new allocated master FkBuff */
+        memcpy(fkbM_p->data, fkb_p->data, fkb_p->len);
+
+        dirty_p = _to_dptr_from_kptr_(fkb_p->data  + fkb_p->len);
+        if ( fkb_p->dirty_p < dirty_p )
+            fkb_p->dirty_p = dirty_p;
+
+        /* unshare by decrementing reference count */
+        fkb_dec_ref(fkb_p);
+
+        fkb_dbg(1, "master fkb_p with multiple ref");
+        fkb_dbg(2, "fkbM_p<0x%08x> fkbM_data<0x%08x> dirty_p<0x%08x> len<%d>",
+            (int)fkbM_p, (int)fkbM_p->data, (int)fkbM_p->dirty_p, fkbM_p->len);
+        fkb_dbg(2, "fkb_p<0x%08x> fkb_data<0x%08x> dirty_p<0x%08x> len<%d>",
+            (int)fkb_p, (int)fkb_p->data, (int)fkb_p->dirty_p, fkb_p->len);
+        return fkbM_p;  /* return newly allocated master FkBuff */
+    }
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkbM_borrow
+ * Description: Fetch a Master FKB from the Master FKB pool. A Master FKB Object
+ *              Pool can serve as a network device driver's preallocated buffer
+ *              pool overflow.
+ *------------------------------------------------------------------------------
+ */
+FkBuff_t * fkbM_borrow(void)
+{
+    FkBuff_t * fkbM_p;
+
+    fkbM_p = fkb_alloc( FkbMasterPool_e );
+
+    fkb_dbg(1, "fkbM_p<0x%08x>", (int)fkbM_p);
+    return fkbM_p;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkbM_return
+ * Description: Return a Master FKB to the global Master FKB pool. It is not
+ *              necessary that a returned Master FKB was originally allocated
+ *              from the global Master FKB pool.
+ *------------------------------------------------------------------------------
+ */
+void fkbM_return(FkBuff_t * fkbM_p)
+{
+    register FkbPool_t * pool_p;
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+    unsigned long lock_flags;
+#endif
+ 
+    fkb_assertv( (fkbM_p != FKB_NULL) );
+    fkb_dbg(1, "fkbM_p<0x%08x>", (int)fkbM_p);
+
+    FKB_AUDIT_RUN();
+
+    FKB_POOL_LOCK();    /* DO NOT USE fkb_assertr() until FKB_POOL_UNLOCK() */
+
+    pool_p = &fkb_pool_g[FkbMasterPool_e];
+
+    /* Setup FKB Master Pool recycling feature */
+    fkbM_p->recycle_hook = (RecycleFuncP)fkbM_recycle;
+    fkbM_p->recycle_context = (uint32_t)pool_p;
+
+    FKB_STATS( pool_p->cnt_used--; pool_p->cnt_free++; );
+
+    fkbM_p->list = pool_p->freelist_p;  /* resets fkbM_p->users */
+    pool_p->freelist_p = fkbM_p;        /* link into Master free pool */
+
+    FKB_POOL_UNLOCK();  /* May use fkb_assertr() from now onwards */
+
+    FKB_AUDIT_RUN();
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function   : fkb_xlate
+ * Description: Translates an FKB to an SKB allocated from kernel SKB cache.
+ *              If the FKB is referring to a packet with multiple FKB references
+ *              to it then it will be first unshared, before it is translated
+ *              to a SKB. Unsharing is done by allocating a Master FKB from the
+ *              Master FKB Pool.
+ *------------------------------------------------------------------------------
+ */
+extern struct sk_buff * skb_xlate_dp(FkBuff_t * fkb_p, uint8_t *dirty_p);
+
+struct sk_buff * fkb_xlate(FkBuff_t * fkb_p)
+{
+    struct sk_buff * skb_p;
+    uint8_t *dirty_p;
+
+    FKB_AUDIT(
+        if ( smp_processor_id() )
+            printk("FKB ASSERT %s not supported on CP 1\n", __FUNCTION__); );
+
+    FKB_AUDIT_RUN();
+
+    if ( unlikely(fkb_p == FKB_NULL) )
+        return (struct sk_buff *)NULL;
+
+    fkb_assertr( (!_IS_BPTR_(fkb_p->ptr)), (struct sk_buff *)NULL );
+
+        /* Ensure that only a single reference exists to the FKB */
+    fkb_p = fkb_unshare(fkb_p);
+    if ( unlikely(fkb_p == FKB_NULL) )
+        goto clone_fail;
+
+    /* carry the dirty_p to the skb */
+    dirty_p = (is_dptr_tag_(fkb_p->dirty_p)) ?
+               _to_kptr_from_dptr_(fkb_p->dirty_p) : NULL;
+
+        /* Now translate the fkb_p to a skb_p */
+    skb_p = skb_xlate_dp(fkb_p, dirty_p);
+
+    if ( unlikely(skb_p == (struct sk_buff *)NULL) )
+        goto clone_fail;
+
+        /* pNBuff may not be used henceforth */
+    return skb_p;
+
+clone_fail:
+    return (struct sk_buff *)NULL;
+}
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : nbuff_align_data
+ * Description  : Aligns NBUFF data to a byte boundary defined by alignMask
+ *                This function can be called ONLY by driver Transmit functions
+ *------------------------------------------------------------------------------
+ */
+pNBuff_t nbuff_align_data(pNBuff_t pNBuff, uint8_t **data_pp,
+                          uint32_t len, uint32_t alignMask)
+{
+    fkb_dbg(1, "pNBuff<0x%08x>", (int)pNBuff);
+
+    FKB_AUDIT(
+        if ( smp_processor_id() )
+            printk("FKB ASSERT %s not supported on CP 1\n", __FUNCTION__); );
+
+    FKB_AUDIT_RUN();
+
+    if ( IS_SKBUFF_PTR(pNBuff) )
+    {
+        struct sk_buff * skb_p = PNBUFF_2_SKBUFF(pNBuff);
+        uint32_t headroom;
+        uint8_t *skb_data_p;
+
+        headroom = (uint32_t)(skb_p->data) & alignMask;
+
+        if(headroom == 0)
+        {
+            /* data is already aligned */
+            goto out;
+        }
+
+        if(skb_cow(skb_p, headroom) < 0)
+        {
+            kfree_skb(skb_p);
+
+            pNBuff = NULL;
+            goto out;
+        }
+
+        skb_data_p = (uint8_t *)((uint32_t)(skb_p->data) & ~alignMask);
+
+        memcpy(skb_data_p, skb_p->data, len);
+
+        skb_p->data = skb_data_p;
+        *data_pp = skb_data_p;
+    }
+    /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */
+    else
+    {
+        FkBuff_t * fkb_p = (FkBuff_t *)PNBUFF_2_PBUF(pNBuff);
+        FkBuff_t * fkb2_p;
+        uint32_t headroom;
+        uint8_t *fkb_data_p;
+
+        headroom = (uint32_t)(fkb_p->data) & alignMask;
+
+        if(headroom == 0)
+        {
+            /* data is already aligned */
+            goto out;
+        }
+
+        if(fkb_headroom(fkb_p) < headroom)
+        {
+            fkb_dbg(1, "FKB has no headroom  "
+                       "(fkb_p<0x%08x>, fkb_p->data<0x%08x>)",
+                       (uint32_t)fkb_p, (uint32_t)fkb_p->data);
+
+            goto out;
+        }
+
+        fkb2_p = fkb_unshare(fkb_p);
+        if (fkb2_p == FKB_NULL)
+        {
+            fkb_free(fkb_p);
+            pNBuff = NULL;
+            goto out;
+        }
+        pNBuff = FKBUFF_2_PNBUFF(fkb2_p);
+
+        fkb_data_p = (uint8_t *)((uint32_t)(fkb2_p->data) & ~alignMask);
+
+        memcpy(fkb_data_p, fkb2_p->data, len);
+
+        fkb2_p->data = fkb_data_p;
+        *data_pp = fkb_data_p;
+
+#if defined(CC_NBUFF_FLUSH_OPTIMIZATION)
+        {
+            uint8_t * tail_p = fkb2_p->data + len; 
+            fkb2_p->dirty_p = _to_dptr_from_kptr_(tail_p);
+        }
+#endif
+    }
+
+    fkb_dbg(2, "<<");
+
+out:
+    FKB_AUDIT_RUN();
+
+    return pNBuff;
+}
+
+
+EXPORT_SYMBOL(nbuff_dbg);
+
+EXPORT_SYMBOL(fkb_in_skb_test);
+EXPORT_SYMBOL(fkb_construct);
+EXPORT_SYMBOL(fkb_stats);
+
+EXPORT_SYMBOL(fkb_alloc);
+EXPORT_SYMBOL(fkb_free);
+
+EXPORT_SYMBOL(fkb_unshare);
+
+EXPORT_SYMBOL(fkbM_borrow);
+EXPORT_SYMBOL(fkbM_return);
+
+EXPORT_SYMBOL(fkb_xlate);
+EXPORT_SYMBOL(nbuff_align_data);
+
+#endif /* CONFIG_BCM_KF_NBUFF */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 73b90351df5cc82d0909ef307e249bd156ab76ea..84572ddbdfaee1cb985fe2a34783b8717b6f83d7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -792,33 +792,33 @@ static void neigh_periodic_work(struct work_struct *work)
 
 		while ((n = rcu_dereference_protected(*np,
 				lockdep_is_held(&tbl->lock))) != NULL) {
-			unsigned int state;
+		unsigned int state;
 
-			write_lock(&n->lock);
+		write_lock(&n->lock);
 
-			state = n->nud_state;
-			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
-				write_unlock(&n->lock);
-				goto next_elt;
-			}
+		state = n->nud_state;
+		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+			write_unlock(&n->lock);
+			goto next_elt;
+		}
 
-			if (time_before(n->used, n->confirmed))
-				n->used = n->confirmed;
+		if (time_before(n->used, n->confirmed))
+			n->used = n->confirmed;
 
-			if (atomic_read(&n->refcnt) == 1 &&
-			    (state == NUD_FAILED ||
+		if (atomic_read(&n->refcnt) == 1 &&
+		    (state == NUD_FAILED ||
 			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
-				*np = n->next;
-				n->dead = 1;
-				write_unlock(&n->lock);
-				neigh_cleanup_and_release(n);
-				continue;
-			}
+			*np = n->next;
+			n->dead = 1;
 			write_unlock(&n->lock);
+			neigh_cleanup_and_release(n);
+			continue;
+		}
+		write_unlock(&n->lock);
 
 next_elt:
-			np = &n->next;
-		}
+		np = &n->next;
+	}
 		/*
 		 * It's fine to release lock here, even if hash table
 		 * grows while we are preempted.
@@ -947,7 +947,7 @@ static void neigh_timer_handler(unsigned long arg)
 		neigh->nud_state = NUD_FAILED;
 		notify = 1;
 		neigh_invalidate(neigh);
-	}
+		}
 
 	if (neigh->nud_state & NUD_IN_TIMER) {
 		if (time_before(next, jiffies + HZ/2))
@@ -1171,6 +1171,15 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 	}
 
 	if (lladdr != neigh->ha) {
+#if defined(CONFIG_BCM_KF_BLOG)
+		/* Do not raise an ARP BINDING Change event when the ARP is
+			resolved for the first time. Raise this event only when
+			there is a real MAC address change. */
+		if (neigh->ha[0] != 0 || neigh->ha[1] != 0 || neigh->ha[2] != 0 || 
+			neigh->ha[3] != 0 || neigh->ha[4] != 0 || neigh->ha[5] != 0) {
+			call_netevent_notifiers(NETEVENT_ARP_BINDING_CHANGE, neigh);
+		}
+#endif
 		write_seqlock(&neigh->ha_lock);
 		memcpy(&neigh->ha, lladdr, dev->addr_len);
 		write_sequnlock(&neigh->ha_lock);
@@ -1253,7 +1262,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
 		dev->header_ops->cache(n, hh, prot);
 
 	write_unlock_bh(&n->lock);
-}
+	}
 
 /* This function can be used in contexts, where only old dev_queue_xmit
  * worked, f.e. if you want to override normal output path (eql, shaper),
@@ -1330,8 +1339,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
 
 	do {
 		seq = read_seqbegin(&neigh->ha_lock);
-		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-				      neigh->ha, NULL, skb->len);
+	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+			      neigh->ha, NULL, skb->len);
 	} while (read_seqretry(&neigh->ha_lock, seq));
 
 	if (err >= 0)
@@ -2146,9 +2155,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 
 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
-			read_unlock_bh(&neigh->lock);
-			goto nla_put_failure;
-		}
+		read_unlock_bh(&neigh->lock);
+		goto nla_put_failure;
+	}
 	}
 
 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
@@ -2205,7 +2214,7 @@ static void neigh_update_notify(struct neighbour *neigh)
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 			    struct netlink_callback *cb)
 {
-	struct net *net = sock_net(skb->sk);
+	struct net * net = sock_net(skb->sk);
 	struct neighbour *n;
 	int rc, h, s_h = cb->args[1];
 	int idx, s_idx = idx = cb->args[2];
@@ -2408,7 +2417,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 				break;
 			if (n->nud_state & ~NUD_NOARP)
 				break;
-next:
+		next:
 			n = rcu_dereference_bh(n->next);
 		}
 
@@ -2450,7 +2459,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 
 			if (n->nud_state & ~NUD_NOARP)
 				break;
-next:
+		next:
 			n = rcu_dereference_bh(n->next);
 		}
 
@@ -2513,7 +2522,7 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
 	struct neigh_table *tbl = state->tbl;
 
 	do {
-		pn = pn->next;
+	pn = pn->next;
 	} while (pn && !net_eq(pneigh_net(pn), net));
 
 	while (!pn) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e99aedd9c496a3b315a321a0e3675ea447dede04..b3c83a9f050cee4c2154da9a03f33ee2d9e88fae 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -70,9 +70,146 @@
 
 #include "kmap_skb.h"
 
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+
+#if defined(CONFIG_MIPS)
+#include <asm/r4kcache.h>
+#endif
+extern int fi_bl_drv_bpm_free_buffer(int xi_source_port, unsigned int xi_bn);
+
+#define BL_MIN_SIZE_ALLOC_SKB	128
+#define BL_MIN_SIZE_COPY_SKB	64
+
+/* flags for bl_alloc in the sk_Buff */
+#define SKB_STANDARD			0
+#define SKB_BL_RELEASE_DATA		1
+#define SKB_BL_NO_RELEASE_DATA	2
+
+#define MIPS_CACHABLE_MASK		(0x20000000)
+
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+#include <linux/nbuff.h>
+#include <linux/blog.h>
+
+/* Returns size of struct sk_buff */
+size_t skb_size(void)
+{
+    return sizeof(struct sk_buff);
+}
+EXPORT_SYMBOL(skb_size);
+
+size_t skb_aligned_size(void)
+{
+    return ((sizeof(struct sk_buff) + 0x0f) & ~0x0f);
+}
+EXPORT_SYMBOL(skb_aligned_size);
+
+int skb_layout_test(int head_offset, int tail_offset, int end_offset)
+{
+#undef SKBOFFSETOF
+#define SKBOFFSETOF(member)    ((int)&((struct sk_buff*)0)->member)
+    if ( (SKBOFFSETOF(head) == head_offset) &&
+         (SKBOFFSETOF(tail) == tail_offset) &&
+         (SKBOFFSETOF(end)  == end_offset) )
+        return 1;
+    return 0;
+}
+EXPORT_SYMBOL(skb_layout_test);
+
+int skb_avail_headroom(const struct sk_buff *skb)
+{
+#if defined(CONFIG_BCM_USBNET_ACCELERATION)
+	if(skb->clone_fc_head)
+	{
+		/* In this case it's unlikely but possible for
+		 * the value of skb->data - skb->clone_fc_head to be negative;
+		 * the caller should check for a negative value
+		 */
+		return skb->data - skb->clone_fc_head;
+	}
+	else
+#endif
+		return skb->data - skb->head;
+}
+EXPORT_SYMBOL(skb_avail_headroom);
+
+/**
+ *  
+ *	skb_headerinit  -   initialize a socket buffer header
+ *	@headroom: reserved headroom size
+ *	@datalen: data buffer size, data buffer is allocated by caller
+ *	@skb: skb allocated by caller
+ *	@data: data buffer allocated by caller
+ *	@recycle_hook: callback function to free data buffer and skb
+ *	@recycle_context: context value passed to recycle_hook, param1
+ *	@blog_p: pass a blog to a skb for logging
+ *
+ *	Initializes the socket buffer and assigns the data buffer to it.
+ *	Both the sk_buff and the pointed data buffer are pre-allocated.
+ *
+ */
+void skb_headerinit(unsigned int headroom, unsigned int datalen,
+					struct sk_buff *skb, unsigned char *data,
+					RecycleFuncP recycle_hook, unsigned int recycle_context,
+					struct blog_t * blog_p)		/* defined(CONFIG_BLOG) */
+{
+	memset(skb, 0, offsetof(struct sk_buff, truesize));
+
+	skb->truesize = datalen + sizeof(struct sk_buff);
+	atomic_set(&skb->users, 1);
+	skb->head = data - headroom;
+	skb->data = data;
+	skb->tail = data + datalen;
+	skb->end  = (unsigned char *) (((unsigned)data + datalen + 0x0f) & ~0x0f);
+	skb->len = datalen;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	skb->blog_p = blog_p;
+	if ( blog_p ) blog_p->skb_p = skb;
+    skb->tunl = NULL;
+#endif
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	skb->vlan_count = 0;
+#endif
+	skb->recycle_hook = recycle_hook;
+	skb->recycle_context = recycle_context;
+	skb->recycle_flags = SKB_RECYCLE | SKB_DATA_RECYCLE;
+
+	atomic_set(&(skb_shinfo(skb)->dataref), 1);
+	skb_shinfo(skb)->nr_frags = 0;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_segs = 0;
+	skb_shinfo(skb)->gso_type = 0;
+	skb_shinfo(skb)->ip6_frag_id = 0;
+	skb_shinfo(skb)->tx_flags = 0;
+	skb_shinfo(skb)->frag_list = NULL;
+	memset(&(skb_shinfo(skb)->hwtstamps), 0,
+	                                    sizeof(skb_shinfo(skb)->hwtstamps));
+
+	skb_shinfo(skb)->dirty_p=NULL;
+}
+EXPORT_SYMBOL(skb_headerinit);
+
+#endif  /* CONFIG_BCM_KF_NBUFF */
+
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+struct sk_buff * skb_header_alloc(void)
+{
+	return  kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(skb_header_alloc);
+
+#endif  /* CONFIG_BCM_KF_NBUFF */
+
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
@@ -183,6 +320,29 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	if (size < ENET_MIN_MTU_SIZE_EXT_SWITCH)
+	{
+		/* Add enough tailroom so that small packets can be padded
+		with 0s to meet the Ethernet minimum pkt size (60 bytes +
+		EXT_SW TAG; total: 64 bytes).
+		This will help improve performance with NAS test scenarios
+		where the TCP ACK is usually less than 60 bytes 
+		WARNING: Note that the macro SKB_WITH_OVERHEAD() does not 
+		take into account this additional tailroom overhead. If 
+		the original size passed to this function uses the SKB_WITH_OVERHEAD 
+		macro to calculate the alloc length then this function will allocate
+		more than what is expected and this can cause length (calculated 
+		using SKB_WITH_OVERHEAD) based operations to fail. 
+		We found few instances of skb allocations using the macro 
+		SKB_WITH_OVERHEAD (for ex: allocations using NLMSG_DEFAULT_SIZE in 
+		netlink.h) However, all those allocations allocate large skbs 
+		(page size 4k/8k) and will not enter this additional tailroom 
+		logic */ 
+		size = ENET_MIN_MTU_SIZE_EXT_SWITCH;
+	}
+#endif
+
 	/* We do our best to align skb_shared_info on a separate cache
 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
@@ -200,12 +360,22 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+     /*
+      * Clearing all fields -- fields that were not cleared before
+      * were moved to earlier locations in the structure, so just
+      * zeroing them out (OK, since we overwrite them shortly).
+      */
+	 memset(skb, 0, offsetof(struct sk_buff, truesize));
+#else
 	/*
 	 * Only clear those fields we need to clear, not those that we will
 	 * actually initialise below. Hence, don't put any more fields after
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+#endif
+
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
@@ -220,6 +390,10 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+#if defined(CONFIG_BCM_KF_NBUFF)
+	shinfo->dirty_p = NULL;
+#endif
+
 	atomic_set(&shinfo->dataref, 1);
 	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
@@ -243,6 +417,86 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(__alloc_skb);
 
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+
+struct sk_buff *__bl_alloc_skb(struct bl_buffer_info* buff_info)
+{
+	struct kmem_cache *cache;
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	u8 *data;
+	u8* cached_buffer;
+
+	cache = skbuff_head_cache;
+
+	/* Get the HEAD */
+	skb = kmem_cache_alloc_node(cache, GFP_ATOMIC & ~__GFP_DMA, -1);
+	if (!skb)
+		goto out;
+
+	cached_buffer = (u8*)((ulong)buff_info->buffer ^ MIPS_CACHABLE_MASK);
+#if defined(CONFIG_MIPS)
+	blast_inv_dcache_range((ulong)cached_buffer, (ulong)cached_buffer + buff_info->buffer_len);
+#else
+    /* FIXME */
+#endif
+	data = cached_buffer;
+
+	/*
+	 * Only clear those fields we need to clear, not those that we will
+	 * actually initialise below. Hence, don't put any more fields after
+	 * the tail pointer in struct sk_buff!
+	 */
+	memset(skb, 0, offsetof(struct sk_buff, truesize));
+	skb->truesize = buff_info->buffer_len + sizeof(struct sk_buff);
+	atomic_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb_reset_tail_pointer(skb);
+	skb->end = skb->tail + buff_info->buffer_len - sizeof(struct skb_shared_info);
+	skb->bl_alloc = SKB_BL_RELEASE_DATA;
+	skb->bl_buffer_number = buff_info->buffer_number;
+	skb->bl_port = buff_info->port;
+
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	atomic_set(&shinfo->dataref, 1);
+	shinfo->nr_frags  = 0;
+	shinfo->gso_size = 0;
+	shinfo->gso_segs = 0;
+	shinfo->gso_type = 0;
+	shinfo->ip6_frag_id = 0;
+	shinfo->tx_flags = 0;
+	shinfo->frag_list = NULL;
+	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
+	shinfo->dirty_p = NULL;
+
+	skb_reserve( skb, ((unsigned int)buff_info->packet-(unsigned int)buff_info->buffer) );
+	skb_put( skb, buff_info->packet_len );
+
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(__bl_alloc_skb);
+
+
+struct sk_buff* bl_dev_alloc_skb(struct bl_buffer_info* buff_info)
+{
+	/*
+	 * There is more code here than it seems:
+	 * __dev_alloc_skb is an inline
+	 */
+	return __bl_alloc_skb(buff_info);
+}
+EXPORT_SYMBOL(bl_dev_alloc_skb);
+
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
 /**
  * build_skb - build a network buffer
  * @data: data buffer provided by caller
@@ -402,7 +656,32 @@ static void skb_release_data(struct sk_buff *skb)
 
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
-
+		
+#if defined(CONFIG_BCM_KF_NBUFF)
+		/*
+		 * If the data buffer came from a preallocated pool, recycle it.
+		 * Recycling may only be performed when no references exist to it.
+	 	 */
+		if (skb->recycle_hook && (skb->recycle_flags & SKB_DATA_RECYCLE)) {
+			(*skb->recycle_hook)(skb, skb->recycle_context, SKB_DATA_RECYCLE);
+			skb->recycle_flags &= SKB_DATA_NO_RECYCLE;	/* mask out */
+		}
+                else 
+#endif
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+		if (skb->bl_alloc)
+		{
+			unsigned long flags;
+			unsigned char* non_cached_area;
+			non_cached_area = (unsigned char*)((ulong)skb->head | MIPS_CACHABLE_MASK);
+			local_irq_save(flags);
+			fi_bl_drv_bpm_free_buffer(skb->bl_port, skb->bl_buffer_number);
+			local_irq_restore(flags);
+		}
+		else
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 		kfree(skb->head);
 	}
 }
@@ -415,10 +694,25 @@ static void kfree_skbmem(struct sk_buff *skb)
 	struct sk_buff *other;
 	atomic_t *fclone_ref;
 
-	switch (skb->fclone) {
-	case SKB_FCLONE_UNAVAILABLE:
-		kmem_cache_free(skbuff_head_cache, skb);
-		break;
+#if defined(CONFIG_BCM_KF_NBUFF)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_free(skb);		/* CONFIG_BLOG: Frees associated blog object */
+#endif
+
+	/* If the skb came from a preallocated pool, pass it to recycler hook */
+	if (skb->recycle_hook && (skb->recycle_flags & SKB_RECYCLE)) 
+        {
+		(*skb->recycle_hook)(skb, skb->recycle_context, SKB_RECYCLE);
+	} 
+        else 
+        {
+#endif /* CONFIG_BCM_KF_NBUFF */
+
+
+		switch (skb->fclone) {
+		case SKB_FCLONE_UNAVAILABLE:
+			kmem_cache_free(skbuff_head_cache, skb);
+			break;
 
 	case SKB_FCLONE_ORIG:
 		fclone_ref = (atomic_t *) (skb + 2);
@@ -438,7 +732,10 @@ static void kfree_skbmem(struct sk_buff *skb)
 		if (atomic_dec_and_test(fclone_ref))
 			kmem_cache_free(skbuff_fclone_cache, other);
 		break;
+		}
+#if defined(CONFIG_BCM_KF_NBUFF)
 	}
+#endif	/* CONFIG_BCM_KF_NBUFF */
 }
 
 static void skb_release_head_state(struct sk_buff *skb)
@@ -461,6 +758,9 @@ static void skb_release_head_state(struct sk_buff *skb)
 	nf_bridge_put(skb->nf_bridge);
 #endif
 /* XXX: IS this still necessary? - JHS */
+#if defined(CONFIG_BCM_KF_NBUFF)
+	skb->tc_word = 0;
+#endif	/* CONFIG_BCM_KF_NBUFF */
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
 #ifdef CONFIG_NET_CLS_ACT
@@ -473,6 +773,11 @@ static void skb_release_head_state(struct sk_buff *skb)
 static void skb_release_all(struct sk_buff *skb)
 {
 	skb_release_head_state(skb);
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	if ( (skb->bl_alloc == SKB_STANDARD) || (skb->bl_alloc == SKB_BL_RELEASE_DATA) )
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 	skb_release_data(skb);
 }
 
@@ -487,6 +792,13 @@ static void skb_release_all(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_NBUFF)
+	if (skb->recycle_hook && (skb->recycle_flags & SKB_RECYCLE_NOFREE))
+	{
+		(*skb->recycle_hook)(skb, skb->recycle_context, SKB_RECYCLE_NOFREE);
+		return;
+	}
+#endif
 	skb_release_all(skb);
 	kfree_skbmem(skb);
 }
@@ -512,6 +824,531 @@ void kfree_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(kfree_skb);
 
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+
+/* This function cleans and frees an skb.
+   The function does not release the data buffer */
+void bl_kfree_skb_structure (struct sk_buff *skb)
+{
+        if (unlikely(!skb))
+	       return;
+        if ( unlikely(!skb->bl_alloc) )
+            return;
+        skb->bl_alloc = SKB_BL_NO_RELEASE_DATA;
+        dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(bl_kfree_skb_structure);
+
+
+/* This function cleans and frees an skb.
+   The function does not release the data buffer.
+   This function is called from interrupt context */
+void bl_kfree_skb_structure_irq (struct sk_buff *skb)
+{
+    if (unlikely(!skb))
+        return;
+    if ( unlikely(!skb->bl_alloc) )
+        return;
+
+    skb->bl_alloc = SKB_BL_NO_RELEASE_DATA;
+
+    dev_kfree_skb_irq(skb);
+}
+EXPORT_SYMBOL(bl_kfree_skb_structure_irq);
+
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+/*
+ * Translate a fkb to a skb, by allocating a skb from the skbuff_head_cache.
+ * PS. skb->dev is not set during initialization.
+ *
+ * Caller verifies whether the fkb is unshared:
+ *  if fkb_p==NULL||IS_FKB_CLONE(fkb_p)||fkb_p->users>1 and return NULL skb.
+ *
+ * skb_xlate is deprecated.  New code should call skb_xlate_dp directly.
+ */
+struct sk_buff * skb_xlate(struct fkbuff * fkb_p)
+{
+	return (skb_xlate_dp(fkb_p, NULL));
+}
+
+struct sk_buff * skb_xlate_dp(struct fkbuff * fkb_p, uint8_t *dirty_p)
+{
+	struct sk_buff * skb_p;
+	unsigned int datalen;
+
+	/* Optimization: use preallocated pool of skb with SKB_POOL_RECYCLE flag */
+	skb_p = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if ( !skb_p )
+		return skb_p;
+	skb_p->fclone = SKB_FCLONE_UNAVAILABLE;
+
+	memset(skb_p, 0, offsetof(struct sk_buff, truesize));
+
+	datalen = SKB_DATA_ALIGN(fkb_p->len + BCM_SKB_TAILROOM);
+
+	skb_p->data = fkb_p->data;
+	skb_p->head = (unsigned char *)(fkb_p + 1 );
+	skb_p->tail = fkb_p->data + fkb_p->len;
+	skb_p->end  = (unsigned char *)		/* align to skb cacheline */
+                  (((unsigned)skb_p->data + datalen + 0x0f) & ~0x0f);
+
+#define F2S(x) skb_p->x = fkb_p->x
+	F2S(len);
+	F2S(mark);
+	F2S(priority);
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	if (_IS_BPTR_(fkb_p->blog_p)) {	/* should not happen */
+		F2S(blog_p);
+		fkb_p->blog_p->skb_p = skb_p;
+	}
+#endif
+	F2S(recycle_hook);
+	F2S(recycle_context);
+	skb_p->recycle_flags = SKB_DATA_RECYCLE;
+
+	fkb_dec_ref(fkb_p);	/* redundant: fkb_p must not be used henceforth */
+
+	atomic_set(&skb_p->users, 1);
+	skb_p->truesize = datalen + sizeof(struct sk_buff);
+
+	/* any change to skb_shinfo initialization in __alloc_skb must be ported
+	 * to this block. */
+	atomic_set(&(skb_shinfo(skb_p)->dataref), 1);
+	skb_shinfo(skb_p)->nr_frags = 0;
+	skb_shinfo(skb_p)->gso_size = 0;
+	skb_shinfo(skb_p)->gso_segs = 0;
+	skb_shinfo(skb_p)->gso_type = 0;
+	skb_shinfo(skb_p)->ip6_frag_id = 0;
+	skb_shinfo(skb_p)->tx_flags = 0;
+	skb_shinfo(skb_p)->frag_list = NULL;
+	memset(&(skb_shinfo(skb_p)->hwtstamps), 0,
+	                                 sizeof(skb_shinfo(skb_p)->hwtstamps));
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	skb_p->bl_alloc = SKB_STANDARD;
+#endif /* (CONFIG_BCM_RDPA) || (CONFIG_BCM_RDPA_MODULE) */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_PKTRUNNER_CSUM_OFFLOAD)
+	if (fkb_p->rx_csum_verified)
+		skb_p->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+	/*
+	 * When fkb is xlated to skb, preserve the dirty_p info.
+	 * This allows receiving driver to shorten its cache flush and also
+	 * can shorten the cache flush when the buffer is recycled.  Improves
+	 * wlan perf by 10%.
+	 */
+	skb_shinfo(skb_p)->dirty_p = dirty_p;
+
+	return skb_p;
+}
+
+EXPORT_SYMBOL(skb_xlate);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+#define NETDEV_XMIT(_dev, _buff)	\
+		_dev->netdev_ops->ndo_start_xmit(_buff, _dev)
+#else
+#define NETDEV_XMIT(_dev, _buff)	\
+		_dev->hard_start_xmit(_buff, _dev)
+#endif
+
+/*
+ *This function fragments the skb into multiple skbs and xmits them.
+ *This function is a substitute for ip_fragment when the IP stack is skipped
+ *for packet acceleration (fcache, CMF).
+ *
+ *Currently only IPv4 is supported
+ *
+ */
+
+void skb_frag_xmit4(struct sk_buff *origskb, struct net_device *txdev,
+                     uint32_t is_pppoe, uint32_t minMtu, void *ipp)
+{
+
+#if 0
+#define DEBUG_SKBFRAG(args) printk args
+#else
+#define DEBUG_SKBFRAG(args) 
+#endif
+
+#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
+#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
+#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
+
+	struct iphdr *iph;
+	int datapos, offset;
+	unsigned int max_dlen, hlen, hdrslen, left, len;
+	uint16_t not_last_frag;
+	struct sk_buff *fraglisthead;
+	struct sk_buff *fraglisttail;
+	struct sk_buff *skb2;
+
+	DEBUG_SKBFRAG(("skb_frag_xmit4:enter origskb=%p,netdev=%p,is_pppoe=%d,\
+				minMtu=%d ipp=%p\n",origskb, txdev, is_pppoe,
+				minMtu, ipp));
+
+	if (likely(origskb->len <= minMtu)) {
+		/* xmit packet */
+		NETDEV_XMIT(txdev, (void *)CAST_REAL_TO_VIRT_PNBUFF(origskb,
+					SKBUFF_PTR));
+		return;
+	}
+
+	fraglisthead = NULL;
+	fraglisttail = NULL;
+	skb2 = NULL;
+
+	DEBUG_SKBFRAG(("skb_frag_xmit4: checking for DF\n"));
+	iph = (struct iphdr *)ipp;
+	/* DROP the packet if DF flag is set */
+	if (unlikely((iph->frag_off & htons(IP_DF)) && !(origskb->local_df))) {
+		/*----TODO: update error stats, send icmp error message ?--- */
+		kfree_skb(origskb);
+		return;
+	}
+
+	hlen = iph->ihl * 4;
+
+	DEBUG_SKBFRAG(("skb_frag_xmit4: calculating hdrs len\n"));
+	/* calculate space for data,(ip payload) */
+	hdrslen = ((int)ipp - (int)(origskb->data)) + hlen; 
+
+	left = origskb->len - hdrslen;	/* Size of ip payload */
+	datapos = hdrslen;/* Where to start from */
+	max_dlen =  minMtu - hdrslen;	/* ip payload per frame */
+
+	DEBUG_SKBFRAG(("skb_frag_xmit4: computed hdrslen=%d, left=%d\n",
+			hdrslen, left));
+
+	/* frag_offset is represented in 8 byte blocks */
+	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
+	not_last_frag = iph->frag_off & htons(IP_MF);
+
+	/* copy the excess data (>MTU size) from orig fkb to new fkb's */
+	fraglisthead = origskb;
+
+	while (left > 0) {
+		DEBUG_SKBFRAG(("skb_frag_xmit4: making fragments\n"));
+		len = left;
+		/* IF: it doesn't fit, use 'max_dlen' - the data space left */
+		if (len > max_dlen)
+			len = max_dlen;
+		/* IF: we are not sending upto and including the packet end
+			then align the next start on an eight byte boundary */
+		if (len < left)
+			len &= ~7;
+
+		if (datapos == hdrslen) {
+			/*reuse the orig skb for 1st fragment */
+			skb2 = origskb;
+			DEBUG_SKBFRAG(("skb_frag_xmit4: reusing skb\n"));
+			skb2->next = NULL;
+			fraglisttail = skb2;
+			skb2->len = hdrslen+len;
+			skb2->tail = skb2->data + (hdrslen+len);
+		} else {
+
+			DEBUG_SKBFRAG(("skb_frag_xmit4: genrating new skb\n"));
+			/* Allocate a new skb */
+			if ((skb2 = alloc_skb(len+hdrslen, GFP_ATOMIC)) == NULL) {
+				printk(KERN_INFO "no memory for new fragment!\n");
+				goto fail;
+			}
+
+			/* copy skb metadata */       
+			skb2->mark = origskb->mark;
+			skb2->priority = origskb->priority;
+			skb2->dev = origskb->dev;
+
+			dst_release(skb_dst(skb2));
+			skb_dst_set(skb2, dst_clone(skb_dst(origskb)));
+#ifdef CONFIG_NET_SCHED
+			skb2->tc_index = origskb->tc_index;
+#endif
+
+			skb_put(skb2, len + hdrslen);
+
+			DEBUG_SKBFRAG(("skb_frag_xmit4: copying headerto new skb\n"));
+
+			/* copy the l2 header &l3 header to new fkb from orig fkb */
+			memcpy(skb2->data, origskb->data, hdrslen);
+
+			DEBUG_SKBFRAG(("skb_frag_xmit4: copying data to new skb\n"));
+			/*
+			 *	Copy a block of the IP datagram.
+			 */
+			memcpy(skb2->data + hdrslen, origskb->data + datapos,
+					len);
+
+			skb2->next = NULL;
+			fraglisttail->next = skb2;
+			fraglisttail = skb2;
+		}
+		/*
+		 *	Fill in the new header fields.
+		 */
+		DEBUG_SKBFRAG(("skb_frag_xmit4: adjusting ipheader\n"));
+		iph = (struct iphdr *)(skb2->data + (hdrslen- hlen));
+		iph->frag_off = htons((offset >> 3));
+		iph->tot_len = htons(len + hlen);
+
+		/* fix pppoelen */ 
+		if (is_pppoe)
+			*((uint16_t*)iph - 2) = htons(len + hlen +
+						sizeof(uint16_t));
+
+		left -= len;
+		datapos += len;
+		offset += len;
+
+		/*
+		 *	If we are fragmenting a fragment that's not the
+		 *	 last fragment then keep MF on each fragment 
+		 */
+		if (left > 0 || not_last_frag)
+			iph->frag_off |= htons(IP_MF);
+		//else
+		//iph->frag_off &= ~htons(IP_MF);/*make sure MF is cleared */
+
+
+		DEBUG_SKBFRAG(("skb_frag_xmit4: computing ipcsum\n"));
+		/* fix ip checksum */
+		iph->check = 0;
+		/* TODO replace with our own csum_calc */
+		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+
+		DEBUG_SKBFRAG(("skb_frag_xmit4: loop done\n"));
+	}
+
+	/* xmit skb's */
+	while (fraglisthead) {
+		DEBUG_SKBFRAG(("skb_frag_xmit4: sending skb fragment \n"));
+		skb2 = fraglisthead;
+		fraglisthead = fraglisthead->next;
+		NETDEV_XMIT(txdev, (void *)CAST_REAL_TO_VIRT_PNBUFF(skb2,
+					SKBUFF_PTR));
+	}
+	return;
+
+fail:
+	DEBUG_SKBFRAG(("skb_frag_xmit4: ENTERED FAIL CASE\n"));
+	while (fraglisthead) {
+		skb2 = fraglisthead;
+		fraglisthead = fraglisthead->next;
+		kfree_skb(skb2);
+	}
+	return;
+
+}
+EXPORT_SYMBOL(skb_frag_xmit4);
+
+/*
+ * This function fragments the skb into multiple skbs and xmits them.
+ * This function is a substitute for ip6_fragment when the IPv6 stack is skipped
+ * for packet acceleration.
+ *
+ * Assumption: there should be no extension header in IPv6 header while
+ *             learning the tunnel traffic
+ *
+ * Currently only IPv6 is supported
+ *
+ */
+
+void skb_frag_xmit6(struct sk_buff *origskb, struct net_device *txdev,
+                     uint32_t is_pppoe, uint32_t minMtu, void *ipp)
+{
+#ifdef CONFIG_IPV6
+	struct ipv6hdr *iph;
+	int datapos, offset;
+	struct frag_hdr *fh;
+	__be32 frag_id=0;
+	u8 nexthdr;
+	unsigned int max_dlen, hlen, hdrslen, left, len, frag_hdrs_len;
+	struct sk_buff *fraglisthead;
+	struct sk_buff *fraglisttail;
+	struct sk_buff *skb2;
+
+	DEBUG_SKBFRAG(("skb_frag_xmit6:enter origskb=%p,netdev=%p,is_pppoe=%d,\
+			minMtu=%d ipp=%p\n",origskb, txdev, is_pppoe, minMtu, ipp));
+
+	if (likely(origskb->len <= minMtu)) {
+		/* xmit packet */
+		NETDEV_XMIT(txdev, (void *)CAST_REAL_TO_VIRT_PNBUFF(origskb,
+					SKBUFF_PTR));
+		return;
+	}
+
+	fraglisthead = NULL;
+	fraglisttail = NULL;
+	skb2 = NULL;
+
+	iph = (struct ipv6hdr *)ipp;
+	hlen = sizeof(struct ipv6hdr);
+
+	DEBUG_SKBFRAG(("skb_frag_xmit6: calculating hdrs len\n"));
+	/* calculate space for data,(ip payload) */
+	hdrslen = ((int)ipp - (int)(origskb->data)) + hlen;
+
+	left = origskb->len - hdrslen;	/* Size of remaining ip payload */
+	datapos = hdrslen;/* Where to start from */
+	/* hdrlens including frag_hdr of packets after fragmented */
+	frag_hdrs_len = hdrslen + sizeof(struct frag_hdr);
+	/* calculate max ip payload len per frame */
+	max_dlen =  minMtu - frag_hdrs_len;
+	nexthdr = iph->nexthdr;
+
+	DEBUG_SKBFRAG(("skb_frag_xmit6: computed hdrslen=%d, left=%d, max=%d\n",
+			hdrslen, left, max_dlen));
+
+	offset = 0;
+	/* copy the excess data (>MTU size) from orig fkb to new fkb's */
+	fraglisthead = origskb;
+
+	/* len represents length of payload! */
+	while (left > 0) {
+		DEBUG_SKBFRAG(("skb_frag_xmit6: making fragments\n"));
+		len = left;
+		/* IF: it doesn't fit, use 'max_dlen' - the data space left */
+		if (len > max_dlen)
+			len = max_dlen;
+		/* IF: we are not sending upto and including the packet end
+			then align the next start on an eight byte boundary */
+		if (len < left)
+			len &= ~7;
+
+		/* 
+		* Create new skbs to fragment the packet. Instead of reusing the
+		* original skb, a new skb is allocated to insert the frag header
+		*/
+		DEBUG_SKBFRAG(("skb_frag_xmit6: genrating new skb\n"));
+		/* Allocate a new skb */
+		if ((skb2 = alloc_skb(len+frag_hdrs_len, GFP_ATOMIC)) == NULL) {
+				printk(KERN_INFO "no memory for new fragment!\n");
+				goto fail;
+		}
+
+		/* copy skb metadata */       
+		skb2->mark = origskb->mark;
+		skb2->priority = origskb->priority;
+		skb2->dev = origskb->dev;
+
+		dst_release(skb_dst(skb2));
+		skb_dst_set(skb2, dst_clone(skb_dst(origskb)));
+#ifdef CONFIG_NET_SCHED
+		skb2->tc_index = origskb->tc_index;
+#endif
+		skb_put(skb2, len + frag_hdrs_len);
+
+		DEBUG_SKBFRAG(("skb_frag_xmit6: copying headerto new skb\n"));
+
+		/* copy the l2 header & l3 header to new fkb from orig fkb */
+		memcpy(skb2->data, origskb->data, hdrslen);
+
+		DEBUG_SKBFRAG(("skb_frag_xmit6: copying data to new skb\n"));
+		/*
+		* Copy a block of the IP datagram.
+		*/
+		memcpy(skb2->data+frag_hdrs_len, origskb->data+datapos, len);
+
+		skb2->next = NULL;
+
+		/* first fragment, setup fraglist */
+		if (datapos == hdrslen) {
+			fraglisthead = skb2;
+			fraglisttail = skb2;
+		} else {
+			fraglisttail->next = skb2;
+			fraglisttail = skb2;
+		}
+
+		/*
+		 * Fill in the new header fields.
+		 */
+		DEBUG_SKBFRAG(("skb_frag_xmit6: adjusting IPv6 header\n"));
+		iph = (struct ipv6hdr *)(skb2->data + (hdrslen - hlen));
+		iph->payload_len = htons(len + sizeof(struct frag_hdr));
+		iph->nexthdr = NEXTHDR_FRAGMENT;
+
+		/* insert fragmentation header */
+		fh = (struct frag_hdr *)(iph + 1);
+		fh->nexthdr = nexthdr;
+		fh->reserved = 0;
+		if (!frag_id) {
+			ipv6_select_ident(fh, NULL);
+			frag_id = fh->identification;
+		} else
+			fh->identification = frag_id;
+		fh->frag_off = htons(offset);
+
+		/* fix pppoelen */ 
+		if (is_pppoe)
+			*((uint16_t*)iph - 2) = htons(len +
+					sizeof(struct frag_hdr) +
+					sizeof(struct ipv6hdr) +
+					sizeof(uint16_t));
+		left -= len;
+		datapos += len;
+		offset += len;
+
+		/*
+		 *	If we are fragmenting a fragment that's not the
+		 *	 last fragment then keep MF on each fragment 
+		 */
+		if (left > 0)
+			fh->frag_off |= htons(IP6_MF);
+
+		DEBUG_SKBFRAG(("skb_frag_xmit6: loop done\n"));
+	}
+
+	/* xmit skb's */
+	while (fraglisthead) {
+		DEBUG_SKBFRAG(("skb_frag_xmit6: sending skb fragment \n"));
+		skb2 = fraglisthead;
+		fraglisthead = fraglisthead->next;
+		NETDEV_XMIT(txdev, (void *)CAST_REAL_TO_VIRT_PNBUFF(skb2,
+					SKBUFF_PTR));
+	}
+
+	/* free the original skb */
+	kfree_skb(origskb);
+
+	return;
+
+fail:
+	DEBUG_SKBFRAG(("skb_frag_xmit6: ENTERED FAIL CASE\n"));
+	while (fraglisthead) {
+		skb2 = fraglisthead;
+		fraglisthead = fraglisthead->next;
+		kfree_skb(skb2);
+	}
+
+	/* free the original skb */
+	kfree_skb(origskb);
+
+	return;
+
+#else  /* !CONFIG_IPV6 */
+	DEBUG_SKBFRAG(("skb_frag_xmit6: called while IPv6 is disabled in kernel?\n"));
+	kfree_skb(origskb);
+	return;
+#endif
+}
+EXPORT_SYMBOL(skb_frag_xmit6);
+
+#endif  /* defined(CONFIG_BCM_KF_NBUFF) */
+
+
+
 /**
  *	consume_skb - free an skbuff
  *	@skb: buffer to free
@@ -551,7 +1388,11 @@ void skb_recycle(struct sk_buff *skb)
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	memset(skb, 0, offsetof(struct sk_buff, truesize));
+#else
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+#endif
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 }
@@ -582,6 +1423,14 @@ EXPORT_SYMBOL(skb_recycle_check);
 
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	int i;
+#endif
+
+#if defined(CONFIG_BCM_KF_WL)
+	memset(new->pktc_cb, 0, sizeof(new->pktc_cb));
+#endif
+
 	new->tstamp		= old->tstamp;
 	new->dev		= old->dev;
 	new->transport_header	= old->transport_header;
@@ -604,6 +1453,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->priority		= old->priority;
 #if IS_ENABLED(CONFIG_IP_VS)
 	new->ipvs_property	= old->ipvs_property;
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	new->tunl		= old->tunl;
 #endif
 	new->protocol		= old->protocol;
 	new->mark		= old->mark;
@@ -612,14 +1464,47 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 	new->nf_trace		= old->nf_trace;
 #endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_xfer(new, old);	/* CONFIG_BLOG: transfers blog ownership */
+#endif
+
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+	if(old->clone_fc_head)
+	{
+		/* here we expect old->data > old->clone_fc_head, if for some reason this is not
+		 * true we still need to set new->clone_fc_head. 
+		 * skb_avail_headroom() will check for this error
+		 */
+		new->clone_fc_head = new->data -  (int)(old->data - old->clone_fc_head);
+	}
+
+#endif
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+	new->vtag_word = old->vtag_word;
+	new->tc_word = old->tc_word;
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    new->vlan_count = old->vlan_count;
+    new->vlan_tpid = old->vlan_tpid;
+    for (i=0; i<SKB_VLAN_MAX_TAGS; i++) {
+        new->vlan_header[i] = old->vlan_header[i];
+    }
+    new->rxdev = old->rxdev;
+#endif /* BCM_VLAN */
+#else  /* CONFIG_BCM_KF_NBUFF */
 #ifdef CONFIG_NET_SCHED
 	new->tc_index		= old->tc_index;
 #ifdef CONFIG_NET_CLS_ACT
 	new->tc_verd		= old->tc_verd;
 #endif
 #endif
+#endif /* CONFIG_BCM_KF_NBUFF */
 	new->vlan_tci		= old->vlan_tci;
 
+#if defined(CONFIG_BCM_KF_WL)
+	new->pktc_flags		= old->pktc_flags;
+#endif
+
 	skb_copy_secmark(new, old);
 }
 
@@ -629,6 +1514,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  */
 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	int i;
+#endif
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
@@ -642,11 +1530,44 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->cloned = 1;
 	n->nohdr = 0;
 	n->destructor = NULL;
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	C(bl_alloc);
+	C(bl_buffer_number);
+	C(bl_port);
+#endif
+#endif
 	C(tail);
 	C(end);
 	C(head);
 	C(data);
 	C(truesize);
+    
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+	n->clone_wr_head = NULL;
+	skb->clone_wr_head = NULL;
+	n->clone_fc_head = skb->clone_fc_head;
+#endif
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+	C(recycle_hook);
+	C(recycle_context);
+	n->recycle_flags = skb->recycle_flags & SKB_NO_RECYCLE;
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+    n->vlan_count = skb->vlan_count;
+    n->vlan_tpid = skb->vlan_tpid;
+    for (i=0; i<SKB_VLAN_MAX_TAGS; i++) {
+        n->vlan_header[i] = skb->vlan_header[i];
+    }
+    n->rxdev = skb->rxdev;
+#endif /* BCM_VLAN */
+#endif /* CONFIG_BCM_KF_NBUFF */
+
+
+#if defined(CONFIG_BCM_KF_80211) && (defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE))
+	C(do_not_encrypt);
+	C(requeue);
+#endif
 	atomic_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -668,8 +1589,31 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
  */
 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 {
+#if defined(CONFIG_BCM_KF_NBUFF)
+	struct sk_buff *skb;
+	unsigned int recycle_flags; 
+	unsigned int recycle_context;
+	RecycleFuncP recycle_hook;
+
+	skb_release_all(dst);
+
+	/* Need to retain the recycle flags, context & hook of dst to free it into 
+	 * proper pool.    
+	 */
+	recycle_flags = dst->recycle_flags & SKB_RECYCLE;
+	recycle_hook  = dst->recycle_hook;
+	recycle_context  = dst->recycle_context;
+	
+	skb = __skb_clone(dst, src);
+
+	dst->recycle_flags |= recycle_flags;
+	dst->recycle_hook  = recycle_hook;
+	dst->recycle_context  = recycle_context;
+	return skb;
+#else /* CONFIG_BCM_KF_NBUFF */
 	skb_release_all(dst);
 	return __skb_clone(dst, src);
+#endif /* CONFIG_BCM_KF_NBUFF */
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
@@ -792,7 +1736,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->transport_header += offset;
 	new->network_header   += offset;
 	if (skb_mac_header_was_set(new))
-		new->mac_header	      += offset;
+	new->mac_header	      += offset;
 #endif
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
@@ -923,6 +1867,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
 	long off;
 	bool fastpath;
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+	int clone_fc_len = 0;
+#endif
 
 	BUG_ON(nhead < 0);
 
@@ -931,6 +1878,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
 	size = SKB_DATA_ALIGN(size);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/* the fastpath optimizations are problematic for preallocated buffers */
+	fastpath = false;
+#else
 	/* Check if we can avoid taking references on fragments if we own
 	 * the last reference on skb->head. (see skb_release_data())
 	 */
@@ -940,7 +1891,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 	}
-
+#endif
 	if (fastpath &&
 	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
 		memmove(skb->head + size, skb_shinfo(skb),
@@ -968,6 +1919,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
 	if (fastpath) {
+#if defined(CONFIG_BCM_KF_NBUFF)
+		if (skb->recycle_hook && (skb->recycle_flags & SKB_DATA_RECYCLE)) {
+			(*skb->recycle_hook)(skb, skb->recycle_context, SKB_DATA_RECYCLE);
+			skb->recycle_flags &= SKB_DATA_NO_RECYCLE;	/* mask out */
+		}
+		else
+#endif
 		kfree(skb->head);
 	} else {
 		/* copy this zero copy skb frags */
@@ -982,12 +1940,33 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 			skb_clone_fraglist(skb);
 
 		skb_release_data(skb);
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+		skb->bl_alloc = SKB_STANDARD;
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 	}
 	off = (data + nhead) - skb->head;
 
 	skb->head     = data;
 adjust_others:
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+	if(skb->clone_fc_head)
+	{
+		clone_fc_len = skb->data - skb->clone_fc_head; 
+	}
+#endif
+
 	skb->data    += off;
+
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+	if(skb->clone_fc_head)
+	{
+		skb->clone_fc_head = skb->data - clone_fc_len; 
+	}
+#endif
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	skb->end      = size;
 	off           = nhead;
@@ -999,13 +1978,28 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	skb->transport_header += off;
 	skb->network_header   += off;
 	if (skb_mac_header_was_set(skb))
-		skb->mac_header += off;
+	skb->mac_header	      += off;
 	/* Only adjust this if it actually is csum_start rather than csum */
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		skb->csum_start += nhead;
+	skb->csum_start       += nhead;
 	skb->cloned   = 0;
 	skb->hdr_len  = 0;
 	skb->nohdr    = 0;
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+
+    /* Clear Data recycle as this buffer was allocated via kmalloc
+	   Note that skb_release_data might have already cleared it but it is
+	   not guaranteed. If the buffer is cloned, then skb_release_data
+	   does not clear the buffer. The original data buffer will be freed
+	   when the cloned skb is freed */
+	skb->recycle_flags &= SKB_DATA_NO_RECYCLE;
+	/* The data buffer of this skb is not pre-allocated any more
+	 * even though the skb itself is pre-allocated,
+	  dirty_p pertains to previous buffer so clear it */
+	skb_shinfo(skb)->dirty_p = NULL;
+#endif
+
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
 	return 0;
 
@@ -1092,12 +2086,12 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
 	off                  = newheadroom - oldheadroom;
 	if (n->ip_summed == CHECKSUM_PARTIAL)
-		n->csum_start += off;
+	n->csum_start       += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header   += off;
 	if (skb_mac_header_was_set(skb))
-		n->mac_header += off;
+	n->mac_header	    += off;
 #endif
 
 	return n;
@@ -1402,7 +2396,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 					insp = list;
 				}
 				if (!pskb_pull(list, eat)) {
-					kfree_skb(clone);
+						kfree_skb(clone);
 					return NULL;
 				}
 				break;
@@ -1474,6 +2468,14 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	if (offset > (int)skb->len - len)
 		goto fault;
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/*
+	 * since we are touching data in src skb (pulling it into the cache),
+	 * disable CACHE_SMARTFLUSH optimization in this skb.
+	 */
+	skb_shinfo(skb)->dirty_p = NULL;
+#endif
+
 	/* Copy header. */
 	if ((copy = start - offset) > 0) {
 		if (copy > len)
@@ -1512,23 +2514,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+			int end;
 
-		WARN_ON(start > offset + len);
+			WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
-		if ((copy = end - offset) > 0) {
-			if (copy > len)
-				copy = len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
-				goto fault;
-			if ((len -= copy) == 0)
-				return 0;
-			offset += copy;
-			to     += copy;
+					goto fault;
+				if ((len -= copy) == 0)
+					return 0;
+				offset += copy;
+				to     += copy;
+			}
+			start = end;
 		}
-		start = end;
-	}
 
 	if (!len)
 		return 0;
@@ -1557,8 +2559,8 @@ static inline struct page *linear_to_page(struct page *page, unsigned int *len,
 	if (!p) {
 new_page:
 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
-		if (!p)
-			return NULL;
+	if (!p)
+		return NULL;
 
 		off = sk->sk_sndmsg_off = 0;
 		/* hold one ref to this page until it's full */
@@ -1740,8 +2742,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		if (!tlen)
 			break;
 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
-			break;
-	}
+				break;
+		}
 
 done:
 	if (spd.nr_pages) {
@@ -1821,24 +2823,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+			int end;
 
-		WARN_ON(start > offset + len);
+			WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
-		if ((copy = end - offset) > 0) {
-			if (copy > len)
-				copy = len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
 			if (skb_store_bits(frag_iter, offset - start,
-					   from, copy))
-				goto fault;
-			if ((len -= copy) == 0)
-				return 0;
-			offset += copy;
-			from += copy;
+						   from, copy))
+					goto fault;
+				if ((len -= copy) == 0)
+					return 0;
+				offset += copy;
+				from += copy;
+			}
+			start = end;
 		}
-		start = end;
-	}
 	if (!len)
 		return 0;
 
@@ -1895,25 +2897,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+			int end;
 
-		WARN_ON(start > offset + len);
+			WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
-		if ((copy = end - offset) > 0) {
-			__wsum csum2;
-			if (copy > len)
-				copy = len;
+			if ((copy = end - offset) > 0) {
+				__wsum csum2;
+				if (copy > len)
+					copy = len;
 			csum2 = skb_checksum(frag_iter, offset - start,
-					     copy, 0);
-			csum = csum_block_add(csum, csum2, pos);
-			if ((len -= copy) == 0)
-				return csum;
-			offset += copy;
-			pos    += copy;
+						     copy, 0);
+				csum = csum_block_add(csum, csum2, pos);
+				if ((len -= copy) == 0)
+					return csum;
+				offset += copy;
+				pos    += copy;
+			}
+			start = end;
 		}
-		start = end;
-	}
 	BUG_ON(len);
 
 	return csum;
@@ -1973,27 +2975,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		__wsum csum2;
-		int end;
+			__wsum csum2;
+			int end;
 
-		WARN_ON(start > offset + len);
+			WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
-		if ((copy = end - offset) > 0) {
-			if (copy > len)
-				copy = len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
 			csum2 = skb_copy_and_csum_bits(frag_iter,
-						       offset - start,
-						       to, copy, 0);
-			csum = csum_block_add(csum, csum2, pos);
-			if ((len -= copy) == 0)
-				return csum;
-			offset += copy;
-			to     += copy;
-			pos    += copy;
+							       offset - start,
+							       to, copy, 0);
+				csum = csum_block_add(csum, csum2, pos);
+				if ((len -= copy) == 0)
+					return csum;
+				offset += copy;
+				to     += copy;
+				pos    += copy;
+			}
+			start = end;
 		}
-		start = end;
-	}
 	BUG_ON(len);
 	return csum;
 }
@@ -3012,22 +4014,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+			int end;
 
-		WARN_ON(start > offset + len);
+			WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
-		if ((copy = end - offset) > 0) {
-			if (copy > len)
-				copy = len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
-					      copy);
-			if ((len -= copy) == 0)
-				return elt;
-			offset += copy;
+						      copy);
+				if ((len -= copy) == 0)
+					return elt;
+				offset += copy;
+			}
+			start = end;
 		}
-		start = end;
-	}
 	BUG_ON(len);
 	return elt;
 }
@@ -3281,3 +4283,4 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 			   " while LRO is enabled\n", skb->dev->name);
 }
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b9868e1fd62cc0a03ab4b449e44612cf8ff8c5b0..60b5b4ea2c2e339be8f1f4cc5c5e3b8f89cf5821 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -126,6 +126,12 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (nlmsg_len(nlh) < sizeof(*req))
 		return -EINVAL;
 
+	#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2013-1763*/
+	if (req->sdiag_family >= AF_MAX)
+		return -EINVAL;
+	#endif
+
 	hndl = sock_diag_lock_handler(req->sdiag_family);
 	if (hndl == NULL)
 		err = -ENOENT;
diff --git a/net/core/urlinfo.c b/net/core/urlinfo.c
new file mode 100644
index 0000000000000000000000000000000000000000..4b002b25dd2d154ed39a8825cc27edcb3351764e
--- /dev/null
+++ b/net/core/urlinfo.c
@@ -0,0 +1,454 @@
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard 
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/urlinfo.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/bcm_colors.h>
+
+typedef struct {
+    UrlInfo_t      * htable[ URLINFO_HTABLE_SIZE ];
+    UrlInfo_t        etable[ URLINFO_MAX_ENTRIES ];
+
+    Dll_t         usedlist;         /* List of used urlinfo entries */
+    Dll_t         frlist;           /* List of free urlinfo entries */
+} __attribute__((aligned(16))) HttpInfo_t;
+
+HttpInfo_t httpInfo;    /* Global URL info context */
+
+#if defined(CC_URLINFO_SUPPORT_DEBUG)
+#define urlinfo_print(fmt, arg...)                                           \
+    if ( urlinfo_dbg )                                                       \
+        printk( CLRc "URLINFO %s :" fmt CLRnl, __FUNCTION__, ##arg )
+#define urlinfo_assertv(cond)                                                \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "URLINFO ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return;                                                              \
+    }
+#define urlinfo_assertr(cond, rtn)                                           \
+    if ( !cond ) {                                                           \
+        printk( CLRerr "URLINFO ASSERT %s : " #cond CLRnl, __FUNCTION__ );   \
+        return rtn;                                                          \
+    }
+#define URLINFO_DBG(debug_code)    do { debug_code } while(0)
+#else
+#define urlinfo_print(fmt, arg...) URLINFO_NULL_STMT
+#define urlinfo_assertv(cond) URLINFO_NULL_STMT
+#define urlinfo_assertr(cond, rtn) URLINFO_NULL_STMT
+#define URLINFO_DBG(debug_code) URLINFO_NULL_STMT
+#endif
+
+int urlinfo_dbg = 0;
+static struct proc_dir_entry *url_info_entry = NULL;
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_alloc
+ * Description  : Allocate a URL info entry
+ *------------------------------------------------------------------------------
+ */
+static UrlInfo_t * urlinfo_alloc( void )
+{
+    UrlInfo_t * ptr = URLINFO_NULL;
+
+    if (unlikely(dll_empty(&httpInfo.frlist)))
+    {
+        urlinfo_print("no free entry! No collect now");
+        return ptr;
+    }
+
+    if (likely(!dll_empty(&httpInfo.frlist)))
+    {
+        ptr = (UrlInfo_t*)dll_head_p(&httpInfo.frlist);
+        dll_delete(&ptr->node);
+    }
+
+    urlinfo_print("idx<%u>", ptr->entry.idx);
+
+    return ptr;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _hash
+ * Description  : Computes a simple hash from a 32bit value.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _hash( uint32_t hash_val )
+{
+    hash_val ^= ( hash_val >> 16 );
+    hash_val ^= ( hash_val >>  8 );
+    hash_val ^= ( hash_val >>  3 );
+
+    return ( hash_val );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _urlinfo_hash
+ * Description  : Compute the hash of a URL
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _urlinfo_hash( const UrlInfoEntry_t *url )
+{
+    uint32_t hashix;
+
+    /* 
+     * if url length > 8, take first 8 characters with length for hash
+     * otherwise, take first 4 characters with length for hash
+     */
+    if (url->hostlen > 8)
+    {
+        hashix = _hash( (*((uint32_t *) (&(url->host[0])))) +
+                        (*((uint32_t *) (&(url->host[4])))) +
+                        url->hostlen );
+    }
+    else
+    {
+        hashix = _hash( (*((uint32_t *) (&(url->host[0])))) +
+                        url->hostlen );
+    }
+
+    return hashix % URLINFO_HTABLE_SIZE;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : _urlinfo_match
+ * Description  : Checks whether the URL matches.
+ *------------------------------------------------------------------------------
+ */
+static inline uint32_t _urlinfo_match( const UrlInfo_t *ptr,
+                                       const UrlInfoEntry_t *url )
+{
+    return ( (ptr->entry.hostlen == url->hostlen) && 
+             !memcmp(ptr->entry.host, url->host, url->hostlen) );
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_hashin
+ * Description  : Insert a new entry into the urlinfo at a given hash index.
+ *------------------------------------------------------------------------------
+ */
+static void urlinfo_hashin( UrlInfo_t * ptr, uint32_t hashix )
+{
+    urlinfo_print("enter");
+
+    dll_prepend(&httpInfo.usedlist, &ptr->node);
+    ptr->chain_p = httpInfo.htable[ hashix ];  /* Insert into hash table */
+    httpInfo.htable[ hashix ] = ptr;
+}
+
+static uint32_t urlinfo_new( const UrlInfoEntry_t *url, uint32_t hashix )
+{
+    UrlInfo_t * ptr;
+
+    urlinfo_print("enter");
+
+    ptr = urlinfo_alloc();
+    if ( unlikely(ptr == URLINFO_NULL) )
+    {
+        urlinfo_print("failed urlinfo_alloc");
+        return URLINFO_IX_INVALID;              /* Element table depletion */
+    }
+
+    ptr->entry.hostlen = url->hostlen;
+    strncpy(ptr->entry.host, url->host, url->hostlen);
+
+    urlinfo_hashin(ptr, hashix);              /* Insert into hash table */
+
+    urlinfo_print("idx<%u>", ptr->entry.idx);
+
+    return ptr->entry.idx;
+}
+
+#if 0
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_free
+ * Description  : Free a device info entry
+ *------------------------------------------------------------------------------
+ */
+void urlinfo_free( UrlInfo_t * dev_p )
+{
+    dev_p->entry.flags = 0;
+    dev_p->entry.vendor_id = 0;
+    dev_p->entry.os_id = 0;
+    dev_p->entry.class_id = 0;
+    dev_p->entry.type_id = 0;
+
+    memset(dev_p->mac, 0, ETH_ALEN);
+
+    dll_prepend(&httpInfo.frlist, &dev_p->node);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_unhash
+ * Description  : Remove a urlinfo entry from the URL info table at a given hash index.
+ *------------------------------------------------------------------------------
+ */
+static void urlinfo_unhash(UrlInfo_t * dev_p, uint32_t hashix)
+{
+    register UrlInfo_t * hDev_p = httpInfo.htable[hashix];
+
+    if ( unlikely(hDev_p == URLINFO_NULL) )
+    {
+        urlinfo_print( "ERROR: httpInfo.htable[%u] is NULL", hashix );
+        goto urlinfo_notfound;
+    }
+
+    if ( likely(hDev_p == dev_p) )                /* At head */
+    {
+        httpInfo.htable[ hashix ] = dev_p->chain_p;  /* Delete at head */
+    }
+    else
+    {
+        uint32_t found = 0;
+
+        /* Traverse the single linked hash collision chain */
+        for ( hDev_p = httpInfo.htable[ hashix ];
+              likely(hDev_p->chain_p != URLINFO_NULL);
+              hDev_p = hDev_p->chain_p )
+        {
+            if ( hDev_p->chain_p == dev_p )
+            {
+                hDev_p->chain_p = dev_p->chain_p;
+                found = 1;
+                break;
+            }
+        }
+
+        if ( unlikely(found == 0) )
+        {
+            urlinfo_print( "ERROR:httpInfo.htable[%u] find failure", hashix );
+            goto urlinfo_notfound;
+        }
+    }
+
+    return; /* SUCCESS */
+
+urlinfo_notfound:
+    urlinfo_print( "not found: hash<%u>", hashix );
+}
+#endif
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_lookup
+ * Description  : Given a URL, look up its URL info entry (allocating one if not found).
+ *------------------------------------------------------------------------------
+ */
+uint16_t urlinfo_lookup( const UrlInfoEntry_t *url )
+{
+    UrlInfo_t * ptr;
+    uint16_t idx;
+    uint32_t hashix;
+
+    hashix = _urlinfo_hash(url);
+
+    urlinfo_print("hashix<%u> url<%s>", hashix, url->host);
+
+    for ( ptr = httpInfo.htable[ hashix ]; ptr != URLINFO_NULL;
+          ptr = ptr->chain_p)
+    {
+        urlinfo_print("elem: idx<%u> URL<%s>",
+                      ptr->entry.idx, ptr->entry.host);
+
+        if (likely( _urlinfo_match(ptr, url) ))
+        {
+            urlinfo_print("idx<%u>", ptr->entry.idx);
+            return ptr->entry.idx;
+        }
+    }
+
+    /* New URL found, alloc an entry */
+    idx = urlinfo_new(url, hashix);
+
+    urlinfo_print("idx<%u>", idx);
+
+    return idx;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_get
+ * Description  : Given urlinfo index, return the UrlInfoEntry_t.
+ *------------------------------------------------------------------------------
+ */
+void urlinfo_get( uint16_t idx, UrlInfoEntry_t *entry )
+{
+    UrlInfo_t * ptr;
+
+    memset(entry, 0, sizeof(UrlInfoEntry_t));
+
+    ptr = &httpInfo.etable[idx];
+    entry->idx = ptr->entry.idx;
+    strncpy(entry->host, ptr->entry.host, ptr->entry.hostlen);
+
+    urlinfo_print("idx<%u> host<%s>", entry->idx, entry->host);
+
+    return;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : urlinfo_set
+ * Description  : Given urlinfo index, set the urlinfo_entry.
+ *------------------------------------------------------------------------------
+ */
+void urlinfo_set( const UrlInfoEntry_t *entry )
+{
+    UrlInfo_t * ptr;
+
+    urlinfo_print("idx<%u> host<%s>", entry->idx, entry->host);
+
+    ptr = &httpInfo.etable[entry->idx];
+    ptr->entry.hostlen = entry->hostlen;
+    strncpy(ptr->entry.host, entry->host, entry->hostlen);
+
+    return;
+}
+
+
+static void *url_seq_start(struct seq_file *seq, loff_t *pos)
+{
+    static unsigned long counter = 0;
+
+    rcu_read_lock();
+    if (*pos == 0)
+        return &counter;
+    else
+    {
+        *pos = 0;
+        return NULL;
+    }
+}
+
+static void *url_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return NULL;
+}
+
+static int url_seq_show(struct seq_file *seq, void *v)
+{
+    Dll_t  *tmp_p;
+    Dll_t  *list_p;
+    UrlInfo_t *elem_p;
+	int ret = 0;
+
+    urlinfo_print("enter");
+
+   	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq, "URL list\n");
+		return ret;
+	}
+
+    list_p = &httpInfo.usedlist;
+
+    if (!dll_empty(list_p))
+    {
+        dll_for_each(tmp_p, list_p) 
+        {
+            elem_p = (UrlInfo_t *)tmp_p;
+            seq_printf(seq, "%s\n", elem_p->entry.host);
+        }
+    }
+
+	return 0;
+}
+
+static void url_seq_stop(struct seq_file *seq, void *v)
+{
+	rcu_read_unlock();
+}
+
+static struct seq_operations url_seq_ops = {
+	.start = url_seq_start,
+	.next  = url_seq_next,
+	.stop  = url_seq_stop,
+	.show  = url_seq_show,
+};
+
+static int url_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &url_seq_ops);
+}
+
+static struct file_operations url_info_proc_fops = {
+	.owner = THIS_MODULE,
+	.open  = url_seq_open,
+	.read  = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+
+int urlinfo_init( void )
+{
+    register int id;
+    UrlInfo_t * ptr;
+
+    memset( (void*)&httpInfo, 0, sizeof(HttpInfo_t) );
+
+    /* Initialize list */
+    dll_init( &httpInfo.usedlist );
+    dll_init( &httpInfo.frlist );
+
+    /* Initialize each urlinfo entry and insert into free list */
+    for ( id=URLINFO_IX_INVALID; id < URLINFO_MAX_ENTRIES; id++ )
+    {
+        ptr = &httpInfo.etable[id];
+        ptr->entry.idx = id;
+
+        if ( unlikely(id == URLINFO_IX_INVALID) )
+            continue;           /* Exclude this entry from the free list */
+
+        dll_append(&httpInfo.frlist, &ptr->node);/* Insert into free list */
+    }
+
+    url_info_entry = proc_create("url_info", 0, init_net.proc_net,
+			   &url_info_proc_fops);
+
+    URLINFO_DBG( printk( "URLINFO urlinfo_dbg<0x%08x> = %d\n"
+                         "%d Available entries\n",
+                         (int)&urlinfo_dbg, urlinfo_dbg,
+                         URLINFO_MAX_ENTRIES-1 ); );
+    
+    return 0;
+}
+
+EXPORT_SYMBOL(urlinfo_init);
+EXPORT_SYMBOL(urlinfo_lookup);
+EXPORT_SYMBOL(urlinfo_get);
+EXPORT_SYMBOL(urlinfo_set);
+#endif /* if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI) */
diff --git a/net/core/vlanctl_bind.c b/net/core/vlanctl_bind.c
new file mode 100644
index 0000000000000000000000000000000000000000..5d7f453a3d6a70c2cbfe2b77e9cc2295aa76269d
--- /dev/null
+++ b/net/core/vlanctl_bind.c
@@ -0,0 +1,176 @@
+#if defined(CONFIG_BCM_KF_VLANCTL_BIND) && defined(CONFIG_BLOG)
+
+/*
+*    Copyright (c) 2003-2014 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2014:DUAL/GPL:standard 
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+#include "bcm_OS_Deps.h"
+#include <linux/bcm_log.h>
+#include <linux/blog.h>
+
+#include <linux/kernel.h>
+#include <linux/vlanctl_bind.h>
+
+static vlanctl_bind_SnHook_t vlanctl_bind_sn_hook_g[VLANCTL_BIND_CLIENT_MAX] = { (vlanctl_bind_SnHook_t)NULL };
+static vlanctl_bind_ScHook_t vlanctl_bind_sc_hook_g[VLANCTL_BIND_CLIENT_MAX] = { (vlanctl_bind_ScHook_t)NULL };
+static vlanctl_bind_SdHook_t vlanctl_bind_sd_hook_g[VLANCTL_BIND_CLIENT_MAX] = { (vlanctl_bind_SdHook_t)NULL };
+
+#if defined(CC_VLANCTL_BIND_SUPPORT_DEBUG)
+#define vlanctl_assertr(cond, rtn)                                              \
+    if ( !cond ) {                                                              \
+        printk( CLRerr "VLANCTL_BIND ASSERT %s : " #cond CLRnl, __FUNCTION__ ); \
+        return rtn;                                                             \
+    }
+#else
+#define vlanctl_assertr(cond, rtn) NULL_STMT
+#endif
+
+/*------------------------------------------------------------------------------
+ *  Function    : vlanctl_bind_config
+ *  Description : Override default config and deconf hook.
+ *  vlanctl_bind_sc : Config hook invoked from vlanctl_activate()
+ *  client      : configuration client
+ *------------------------------------------------------------------------------
+ */                      
+void vlanctl_bind_config(vlanctl_bind_ScHook_t vlanctl_bind_sc, 
+	                     vlanctl_bind_SdHook_t vlanctl_bind_sd,  
+	                     vlanctl_bind_SnHook_t vlanctl_bind_sn,  
+	                     vlanctl_bind_client_t client, 
+                         vlanctl_bind_t bind)
+{
+    BCM_LOG_DEBUG(BCM_LOG_ID_VLAN,  "vlanctl Bind Sc[<%08x>] Sd[<%08x>] Sn[<%08x>] Client[<%u>] bind[<%u>]",
+                (int)vlanctl_bind_sc, (int)vlanctl_bind_sd, (int)vlanctl_bind_sn, client, (uint8_t)bind.hook_info);
+
+    if ( bind.bmap.SC_HOOK )
+        vlanctl_bind_sc_hook_g[client] = vlanctl_bind_sc;   /* config hook */
+    if ( bind.bmap.SD_HOOK )
+        vlanctl_bind_sd_hook_g[client] = vlanctl_bind_sd;   /* deconf hook */
+    if ( bind.bmap.SN_HOOK )
+        vlanctl_bind_sn_hook_g[client] = vlanctl_bind_sn;   /* notify hook */
+}
+
+
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : vlanctl_activate
+ * Description  : This function invokes vlanctl configuration hook
+ * Parameters   :
+ *  blog_p      : pointer to a blog with configuration information
+ *  client      : configuration client
+ *
+ * Returns      :
+ *  ActivateKey : If the configuration is successful, a key is returned.
+ *                Otherwise, BLOG_KEY_INVALID is returned
+ *------------------------------------------------------------------------------
+ */
+uint32_t vlanctl_activate( Blog_t * blog_p, vlanctl_bind_client_t client )
+{
+    uint32_t     key;
+
+    key = BLOG_KEY_INVALID;
+    
+    if ( blog_p == BLOG_NULL || client >= VLANCTL_BIND_CLIENT_MAX )
+    {
+        vlanctl_assertr((blog_p != BLOG_NULL), key);
+        goto bypass;
+    }
+
+    if (unlikely(vlanctl_bind_sc_hook_g[client] == (vlanctl_bind_ScHook_t)NULL))
+        goto bypass;
+
+
+    BLOG_LOCK_BH();
+    key = vlanctl_bind_sc_hook_g[client](blog_p, BlogTraffic_Layer2_Flow);
+    BLOG_UNLOCK_BH();
+
+bypass:
+    return key;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Function     : vlanctl_deactivate
+ * Description  : This function invokes a deconfiguration hook
+ * Parameters   :
+ *  key         : blog key information
+ *  client      : configuration client
+ *
+ * Returns      :
+ *  blog_p      : If the deconfiguration is successful, the associated blog 
+ *                pointer is returned to the caller
+ *------------------------------------------------------------------------------
+ */
+Blog_t * vlanctl_deactivate( uint32_t key, vlanctl_bind_client_t client )
+{
+    Blog_t * blog_p = NULL;
+
+    if ( key == BLOG_KEY_INVALID || client >= VLANCTL_BIND_CLIENT_MAX )
+    {
+        vlanctl_assertr( (key != BLOG_KEY_INVALID), blog_p );
+        goto bypass;
+    }
+
+    if ( unlikely(vlanctl_bind_sd_hook_g[client] == (vlanctl_bind_SdHook_t)NULL) )
+        goto bypass;
+
+    BLOG_LOCK_BH();
+    blog_p = vlanctl_bind_sd_hook_g[client](key, BlogTraffic_Layer2_Flow);
+    BLOG_UNLOCK_BH();
+
+bypass:
+    return blog_p;
+}
+
+
+int	vlanctl_notify(vlanctl_bind_Notify_t event, void *ptr, vlanctl_bind_client_t client)
+{
+
+   if (client >= VLANCTL_BIND_CLIENT_MAX)
+       goto bypass;
+
+    BCM_LOG_DEBUG(BCM_LOG_ID_VLAN, "client<%u>" "event<%u>", client, event);
+
+    if (unlikely(vlanctl_bind_sn_hook_g[client] == (vlanctl_bind_SnHook_t)NULL))
+        goto bypass;
+
+	BLOG_LOCK_BH();
+    vlanctl_bind_sn_hook_g[client](event, ptr);
+    BLOG_UNLOCK_BH();
+
+bypass:
+    return 0;
+}
+
+
+EXPORT_SYMBOL(vlanctl_bind_config); 
+EXPORT_SYMBOL(vlanctl_activate); 
+EXPORT_SYMBOL(vlanctl_deactivate); 
+EXPORT_SYMBOL(vlanctl_notify);
+
+
+#endif /* defined(CONFIG_BCM_KF_VLANCTL_BIND) && defined(CONFIG_BLOG) */
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6a4cee0bfbda1747f592b0b6e91b48..0f3e3920216887304666ce7564a35c028dcfcdae 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -2,6 +2,11 @@
 # Makefile for the Linux TCP/IP (INET) layer.
 #
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_BLOG)
+EXTRA_CFLAGS	+= -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD)
+EXTRA_CFLAGS	+= -I$(INC_BRCMSHARED_PUB_PATH)/bcm963xx
+endif # BCM_KF
+
 obj-y     := route.o inetpeer.o protocol.o \
 	     ip_input.o ip_fragment.o ip_forward.o ip_options.o \
 	     ip_output.o ip_sockglue.o inet_hashtables.o \
@@ -14,6 +19,11 @@ obj-y     := route.o inetpeer.o protocol.o \
 	     inet_fragment.o ping.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index fd508b526014209eba345ddb1605117a897afdf4..bd9b99d53b9c8ed0638b47266a9eddab42d7c280 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -13,6 +13,10 @@
 #include <net/icmp.h>
 #include <net/protocol.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 struct ah_skb_cb {
 	struct xfrm_skb_cb xfrm;
 	void *tmp;
@@ -156,6 +160,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct ip_auth_hdr *ah;
 	struct ah_data *ahp;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
 	ahp = x->data;
 	ahash = ahp->ahash;
 
@@ -218,6 +226,22 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	AH_SKB_CB(skb)->tmp = iph;
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 20))
+	{
+		req->alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->alloc_buff_spu = 0;
+	}
+
+	/* not used for output */   
+	req->headerLen = 0;
+#endif
+
 	err = crypto_ahash_digest(req);
 	if (err) {
 		if (err == -EINPROGRESS)
@@ -292,6 +316,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct ah_data *ahp;
 	int err = -ENOMEM;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
 	if (!pskb_may_pull(skb, sizeof(*ah)))
 		goto out;
 
@@ -323,7 +351,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
@@ -366,6 +393,22 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	AH_SKB_CB(skb)->tmp = work_iph;
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 20))
+	{
+		req->alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->alloc_buff_spu = 0;
+	}
+
+	/* offset to icv */
+	req->headerLen = &ah->auth_data[0] - skb->data;
+#endif
+
 	err = crypto_ahash_digest(req);
 	if (err) {
 		if (err == -EINPROGRESS)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cb982a61536fade811908a18e6119f513914741e..c51c5cfc1c330739835568741f9f4e104d4f91ab 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -18,6 +18,10 @@
 #include <net/protocol.h>
 #include <net/udp.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 struct esp_skb_cb {
 	struct xfrm_skb_cb xfrm;
 	void *tmp;
@@ -136,6 +140,13 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int sglists;
 	int seqhilen;
 	__be32 *seqhi;
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	u8 next_hdr;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
 
 	/* skb is pure payload to encrypt */
 
@@ -198,6 +209,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	} while (0);
 	tail[plen - 2] = plen - 2;
 	tail[plen - 1] = *skb_mac_header(skb);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	next_hdr = *skb_mac_header(skb);
+#endif
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
@@ -263,6 +277,25 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			      XFRM_SKB_CB(skb)->seq.output.low);
 
 	ESP_SKB_CB(skb)->tmp = tmp;
+
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->areq.data_offset = (unsigned char *)esph - skb->data;
+	req->areq.next_hdr    = next_hdr;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 16))
+	{
+		req->areq.alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->areq.alloc_buff_spu = 0;
+	}
+	req->areq.headerLen = esph->enc_data + crypto_aead_ivsize(aead) - skb->data;
+#endif
+#endif
 	err = crypto_aead_givencrypt(req);
 	if (err == -EINPROGRESS)
 		goto error;
@@ -388,6 +421,13 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct scatterlist *sg;
 	struct scatterlist *asg;
 	int err = -EINVAL;
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	int macLen;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
 		goto out;
@@ -444,6 +484,27 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	aead_request_set_crypt(req, sg, sg, elen, iv);
 	aead_request_set_assoc(req, asg, assoclen);
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->data_offset = 0;
+	req->next_hdr    = 0;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if ( (skb->data >= skb_mac_header(skb)) &&
+	     (skb_headroom(skb) >= ((skb->data - skb_mac_header(skb)) + 12)) &&
+	     (skb_tailroom(skb) >= 16))
+	{
+		macLen = skb->data - skb_mac_header(skb);
+		req->alloc_buff_spu = 0;
+	}
+	else
+	{
+		macLen = 0;
+		req->alloc_buff_spu = 1;
+	}
+	req->headerLen = sizeof(*esph) + crypto_aead_ivsize(aead) + macLen;
+#endif
+#endif
 	err = crypto_aead_decrypt(req);
 	if (err == -EINPROGRESS)
 		goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cbe3a68507cf89809cfc94dd1770afef1f223ac5..c25bc177df499a5be94b15ccc3bebebd6a16dec4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -215,6 +215,12 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
 		/* Ignore rp_filter for packets protected by IPsec. */
 		rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
 
+#if defined(CONFIG_BCM_KF_MCAST_RP_FILTER)
+		/* ignore rp_filter for multicast traffic */
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			rpf = 0;
+		}
+#endif
 		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
 		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
 	}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5dfecfd7d5e9fb9cfe154ad21eb93de60a3fd152..d60b84770bcbf0f60fe95c54170a10ce3fd01fce 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -608,6 +608,9 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 	/* change recs */
 	for_each_pmc_rcu(in_dev, pmc) {
 		spin_lock_bh(&pmc->lock);
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+		if ( pmc->osfmode == pmc->sfmode ) {
+#endif
 		if (pmc->sfcount[MCAST_EXCLUDE]) {
 			type = IGMPV3_BLOCK_OLD_SOURCES;
 			dtype = IGMPV3_ALLOW_NEW_SOURCES;
@@ -617,15 +620,29 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 		}
 		skb = add_grec(skb, pmc, type, 0, 0);
 		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+		}
+#endif
 
 		/* filter mode changes */
 		if (pmc->crcount) {
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+			if ( pmc->osfmode != pmc->sfmode ) {
+#endif
 			if (pmc->sfmode == MCAST_EXCLUDE)
 				type = IGMPV3_CHANGE_TO_EXCLUDE;
 			else
 				type = IGMPV3_CHANGE_TO_INCLUDE;
 			skb = add_grec(skb, pmc, type, 0, 0);
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+			}
+#endif
 			pmc->crcount--;
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+			if ( pmc->crcount == 0 ) {
+				pmc->osfmode = pmc->sfmode;
+			}
+#endif
 		}
 		spin_unlock_bh(&pmc->lock);
 	}
@@ -1140,6 +1157,10 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_IGMP)
+#define IGMP_RIP_ROUTER htonl(0xE0000009L)
+#endif
+
 static void igmp_group_dropped(struct ip_mc_list *im)
 {
 	struct in_device *in_dev = im->interface;
@@ -1156,6 +1177,11 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
 
+#if defined(CONFIG_BCM_KF_IGMP)
+	if (im->multiaddr == IGMP_RIP_ROUTER)
+		return;
+#endif
+
 	reporter = im->reporter;
 	igmp_stop_timer(im);
 
@@ -1188,6 +1214,11 @@ static void igmp_group_added(struct ip_mc_list *im)
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
 
+#if defined(CONFIG_BCM_KF_IGMP)
+	if (im->multiaddr == IGMP_RIP_ROUTER)
+		return;
+#endif
+
 	if (in_dev->dead)
 		return;
 	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
@@ -1238,6 +1269,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 	im->multiaddr = addr;
 	/* initial mode is (EX, empty) */
 	im->sfmode = MCAST_EXCLUDE;
+#if defined(CONFIG_BCM_KF_IGMP) && defined(CC_BRCM_KF_MULTI_IGMP_GR_SUPPRESSION)
+	im->osfmode = MCAST_INCLUDE;
+#endif
 	im->sfcount[MCAST_EXCLUDE] = 1;
 	atomic_set(&im->refcnt, 1);
 	spin_lock_init(&im->lock);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5ff2a51b6d0c7d5eb1b3ba73f1f516dab86204ee..c5b8df1b27b9c694ce325d658dc0e3087755d3cf 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -75,6 +75,10 @@ void inet_frags_init_net(struct netns_frags *nf)
 	nf->nqueues = 0;
 	atomic_set(&nf->mem, 0);
 	INIT_LIST_HEAD(&nf->lru_list);
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+/*CVE-2014-0100*/
+	spin_lock_init(&nf->lru_lock);
+#endif
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
@@ -98,9 +102,15 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	write_lock(&f->lock);
 	hlist_del(&fq->list);
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	list_del(&fq->lru_list);
+#endif
 	fq->net->nqueues--;
 	write_unlock(&f->lock);
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-0100*/
+	inet_frag_lru_del(fq);
+#endif
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -165,16 +175,32 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
 
 	work = atomic_read(&nf->mem) - nf->low_thresh;
 	while (work > 0) {
+	
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 		read_lock(&f->lock);
+#else
+		/*CVE-2014-0100*/
+		spin_lock(&nf->lru_lock);
+#endif
 		if (list_empty(&nf->lru_list)) {
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 			read_unlock(&f->lock);
+#else
+		/*CVE-2014-0100*/
+			spin_unlock(&nf->lru_lock);
+#endif
 			break;
 		}
 
 		q = list_first_entry(&nf->lru_list,
 				struct inet_frag_queue, lru_list);
 		atomic_inc(&q->refcnt);
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 		read_unlock(&f->lock);
+#else
+		/*CVE-2014-0100*/
+		spin_unlock(&nf->lru_lock);
+#endif
 
 		spin_lock(&q->lock);
 		if (!(q->last_in & INET_FRAG_COMPLETE))
@@ -228,9 +254,15 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 
 	atomic_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &f->hash[hash]);
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	list_add_tail(&qp->lru_list, &nf->lru_list);
+#endif
 	nf->nqueues++;
 	write_unlock(&f->lock);
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-0100*/
+	inet_frag_lru_add(nf, qp);
+#endif
 	return qp;
 }
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6c7168f7369b13e25d8c96011c6118ec56..59ae865762b67bfc32a14ce83889456e549b8316 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -112,6 +112,13 @@ int ip_forward(struct sk_buff *skb)
 
 	skb->priority = rt_tos2priority(iph->tos);
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* Never forward a packet from one WAN intf to another WAN intf */
+	if( (skb->dev) && (rt->dst.dev) && 
+		((skb->dev->priv_flags & rt->dst.dev->priv_flags) & IFF_WANDEV) )
+		goto drop;
+#endif
+
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
 		       rt->dst.dev, ip_forward_finish);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3727e234c88444cfa420845d3f899a62b57312a0..e86aacfff3e323d1509df620dac9500217eeb8b5 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -522,10 +522,14 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    qp->q.meat == qp->q.len)
 		return ip_frag_reasm(qp, prev, dev);
-
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	write_lock(&ip4_frags.lock);
 	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
 	write_unlock(&ip4_frags.lock);
+#else
+	/*CVE-2014-0100*/
+	inet_frag_lru_move(&qp->q);
+#endif
 	return -EINPROGRESS;
 
 err:
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d4742c7cccc95a05dd641bfc47875b4717..ef35bdefa7001eb7be7abbcaac2a50884e0509d4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -54,6 +54,11 @@
 #include <net/ip6_route.h>
 #endif
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/nbuff.h>
+#include <linux/blog.h>
+#endif
+
 /*
    Problems & solutions
    --------------------
@@ -125,6 +130,21 @@ static int ipgre_tunnel_init(struct net_device *dev);
 static void ipgre_tunnel_setup(struct net_device *dev);
 static int ipgre_tunnel_bind_dev(struct net_device *dev);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+static inline 
+int __gre_rcv_check(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len, uint32_t *pkt_seqno);
+
+int gre_rcv_check(struct net_device *dev, struct iphdr *iph,
+	uint16_t len, void **tunl, uint32_t *pkt_seqno);
+
+static inline 
+void __gre_xmit_update(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len);
+void gre_xmit_update(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len);
+#endif
+
 /* Fallback tunnel: no source, no destination, no key, no options */
 
 #define HASH_SIZE  16
@@ -615,6 +635,14 @@ static int ipgre_rcv(struct sk_buff *skb)
 					  gre_proto))) {
 		struct pcpu_tstats *tstats;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		blog_lock();
+		blog_link(IF_DEVICE, blog_ptr(skb), (void*)tunnel->dev, DIR_RX, 
+			skb->len);
+		blog_link(GRE_TUNL, blog_ptr(skb), (void*)tunnel, 0, 0);
+		blog_link(TOS_MODE, blog_ptr(skb), tunnel, DIR_RX, BLOG_TOS_FIXED);
+		blog_unlock();
+#endif   
 		secpath_reset(skb);
 
 		skb->protocol = gre_proto;
@@ -648,6 +676,15 @@ static int ipgre_rcv(struct sk_buff *skb)
 			tunnel->dev->stats.rx_errors++;
 			goto drop;
 		}
+
+#if (defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG))
+		if (!blog_gre_tunnel_accelerated())
+		{
+			uint32_t pkt_seqno;
+			__gre_rcv_check(tunnel, (struct iphdr *)iph, 
+				(skb->len - (iph->ihl<<2)), &pkt_seqno);
+		}
+#else
 		if (tunnel->parms.i_flags&GRE_SEQ) {
 			if (!(flags&GRE_SEQ) ||
 			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
@@ -657,7 +694,7 @@ static int ipgre_rcv(struct sk_buff *skb)
 			}
 			tunnel->i_seqno = seqno + 1;
 		}
-
+#endif
 		/* Warning: All skb pointers will be invalidated! */
 		if (tunnel->dev->type == ARPHRD_ETHER) {
 			if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -671,6 +708,15 @@ static int ipgre_rcv(struct sk_buff *skb)
 			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 		}
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		if ((skb->protocol != htons(ETH_P_IP))
+#if IS_ENABLED(CONFIG_IPV6)
+			&& (skb->protocol != htons(ETH_P_IPV6)) 
+#endif
+		) {
+			blog_skip(skb);                         /* No blogging */
+		}
+#endif
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
@@ -711,6 +757,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	__be32 dst;
 	int    mtu;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(IF_DEVICE, blog_ptr(skb), (void*)dev, DIR_TX, skb->len);
+	blog_link(GRE_TUNL, blog_ptr(skb), (void*)tunnel, 0, 0);
+	blog_unlock();
+#endif   
+
 	if (dev->type == ARPHRD_ETHER)
 		IPCB(skb)->flags = 0;
 
@@ -832,6 +885,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 #endif
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		Blog_t * blog_p = blog_ptr(skb);
+
+		if (blog_p && blog_p->minMtu > mtu)
+			blog_p->minMtu = mtu;
+	}
+#endif
+
 	if (tunnel->err_count > 0) {
 		if (time_before(jiffies,
 				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
@@ -884,6 +946,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->daddr		=	fl4.daddr;
 	iph->saddr		=	fl4.saddr;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(TOS_MODE, blog_ptr(skb), tunnel, DIR_TX, tiph->tos);
+	blog_unlock();
+#endif   
+
 	if ((iph->ttl = tiph->ttl) == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
@@ -903,7 +971,11 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
 
 		if (tunnel->parms.o_flags&GRE_SEQ) {
+#if (defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG))
+		if (!blog_gre_tunnel_accelerated()) ++tunnel->o_seqno;
+#else
 			++tunnel->o_seqno;
+#endif
 			*ptr = htonl(tunnel->o_seqno);
 			ptr--;
 		}
@@ -916,6 +988,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
 		}
 	}
+#if (defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG))
+	skb->tunl = tunnel;
+#endif
 
 	nf_reset(skb);
 	tstats = this_cpu_ptr(dev->tstats);
@@ -1623,6 +1698,134 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+static inline 
+int __gre_rcv_check(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len, uint32_t *pkt_seqno)
+{
+	int ret = BLOG_GRE_RCV_NO_SEQNO;
+	int grehlen = 4;
+	int iph_len = iph->ihl<<2;
+	__be16 *p = (__be16*)((uint8_t *)iph+iph_len);
+	__be16 flags;
+
+	flags = p[0];
+
+	if (tunnel->parms.i_flags&GRE_CSUM) {
+		uint16_t csum;
+
+		grehlen += 4;
+		csum = *(((__be16 *)p) + 2);
+
+		if (!csum)
+			goto no_csum;
+
+		csum = ip_compute_csum((void*)(iph+1), len - iph_len);
+
+		if (csum) {
+			tunnel->dev->stats.rx_crc_errors++;
+			tunnel->dev->stats.rx_errors++;
+			ret = BLOG_GRE_RCV_CHKSUM_ERR;
+			goto rcv_done;
+		}
+	}
+
+no_csum:
+	if ((tunnel->parms.i_flags&GRE_KEY) && (flags&GRE_KEY))
+		grehlen += 4;
+
+	if (tunnel->parms.i_flags&GRE_SEQ) {
+		uint32_t seqno = *(((__be32 *)p) + (grehlen / 4));
+		*pkt_seqno = seqno;
+		if (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) == 0) {
+			tunnel->i_seqno = seqno + 1;
+			ret = BLOG_GRE_RCV_IN_SEQ;
+		} else if (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0) {
+			tunnel->dev->stats.rx_fifo_errors++;
+			tunnel->dev->stats.rx_errors++;
+			ret = BLOG_GRE_RCV_OOS_LT;
+		} else {
+			tunnel->i_seqno = seqno + 1;
+			ret = BLOG_GRE_RCV_OOS_GT;
+		}
+	}
+
+rcv_done:
+	return ret;
+}
+
+int gre_rcv_check(struct net_device *dev, struct iphdr *iph, 
+	uint16_t len, void **tunl, uint32_t *pkt_seqno)
+{
+	int ret = BLOG_GRE_RCV_NO_TUNNEL;
+	int grehlen = 4;
+	int iph_len = iph->ihl<<2;
+	struct ip_tunnel *t;
+	__be16 *p = (__be16*)((uint8_t *)iph+iph_len);
+	__be16 flags;
+
+	flags = p[0];
+
+	if (flags&GRE_CSUM)
+		grehlen += 4;
+
+	t = ipgre_tunnel_lookup(dev, iph->saddr, iph->daddr,
+		flags & GRE_KEY ? *(((__be32 *)p) + (grehlen / 4)) : 0, p[1]);
+
+	if (t) {
+		if (t->parms.i_flags == flags) {
+			rcu_read_lock();
+			ret =  __gre_rcv_check(t, iph, len, pkt_seqno);
+			rcu_read_unlock();
+		}
+		else
+			ret = BLOG_GRE_RCV_FLAGS_MISSMATCH;
+	}
+
+	*tunl = (void *) t;	
+	return ret;
+}
+EXPORT_SYMBOL(gre_rcv_check);
+
+/* Adds the TX seqno, Key and updates the GRE checksum */
+static inline 
+void __gre_xmit_update(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len)
+{
+	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
+		int iph_len = iph->ihl<<2;
+		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
+
+		if (tunnel->parms.o_flags&GRE_SEQ) {
+			++tunnel->o_seqno;
+			*ptr = htonl(tunnel->o_seqno);
+			ptr--;
+		}
+
+		if (tunnel->parms.o_flags&GRE_KEY) {
+			*ptr = tunnel->parms.o_key;
+			ptr--;
+		}
+
+		if (tunnel->parms.o_flags&GRE_CSUM) {
+			*ptr = 0;
+			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), len - iph_len);
+		}
+		cache_flush_len(ptr, tunnel->hlen);
+	}
+}
+
+/* Adds the oseqno and updates the GRE checksum */
+void gre_xmit_update(struct ip_tunnel *tunnel, struct iphdr *iph, 
+	uint16_t len)
+{
+	rcu_read_lock();
+	__gre_xmit_update(tunnel, iph, len);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gre_xmit_update);
+#endif
+
 static size_t ipgre_get_size(const struct net_device *dev)
 {
 	return
@@ -1738,6 +1941,11 @@ static int __init ipgre_init(void)
 	if (err < 0)
 		goto tap_ops_failed;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_gre_rcv_check_fn = (blog_gre_rcv_check_t) gre_rcv_check;
+	blog_gre_xmit_update_fn = (blog_gre_xmit_upd_t) gre_xmit_update;
+#endif
+
 out:
 	return err;
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26eccc5bab1c28e81d170fa0c147c9fb33a499e8..4f6f707ce4ee030489aceba16df96fd2cc26c09e 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -377,6 +377,10 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 {
 	const struct iphdr *iph;
 	u32 len;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	__u8 iph_ihl, iph_version;
+#endif
+
 
 	/* When the interface is in promisc. mode, drop all the crap
 	 * that it receives, do not try to analyse it.
@@ -408,15 +412,30 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	 *	4.	Doesn't have a bogus length
 	 */
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	iph_ihl = *(__u8 *)iph & 0xf;
+	iph_version = *(__u8 *)iph >> 4;
+
+	if (iph_ihl < 5 || iph_version != 4)
+#else
 	if (iph->ihl < 5 || iph->version != 4)
+#endif
 		goto inhdr_error;
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	if (!pskb_may_pull(skb, iph_ihl*4))
+#else
 	if (!pskb_may_pull(skb, iph->ihl*4))
+#endif
 		goto inhdr_error;
 
 	iph = ip_hdr(skb);
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	if (unlikely(ip_fast_csum((u8 *)iph, iph_ihl)))
+#else
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+#endif
 		goto inhdr_error;
 
 	len = ntohs(iph->tot_len);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4910176d24ed5d8b4a8a5430a3bc3e33d8c1ba91..dc678d7f8da8d5b1d11c7479c7539c6304e563fc 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -80,6 +80,10 @@
 #include <linux/netlink.h>
 #include <linux/tcp.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
@@ -230,6 +234,14 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb)
 
 static int ip_finish_output(struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    uint32_t mtu = dst_mtu(skb_dst(skb));
+    Blog_t * blog_p = blog_ptr(skb);
+
+    if (blog_p && blog_p->minMtu > mtu)
+        blog_p->minMtu = mtu;
+#endif
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
 	if (skb_dst(skb)->xfrm != NULL) {
@@ -278,10 +290,18 @@ int ip_mc_output(struct sk_buff *skb)
 #endif
 		   ) {
 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+
 			if (newskb)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+            {
+                blog_clone(skb, blog_ptr(newskb));
+#endif
 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 					newskb, NULL, newskb->dev,
 					ip_dev_loopback_xmit);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+            }
+#endif
 		}
 
 		/* Multicasts with ttl 0 must not go beyond the host */
@@ -484,6 +504,17 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		return -EMSGSIZE;
 	}
 
+#if defined(CONFIG_BCM_KF_IP)
+   /* 
+    * Do not fragment the packets going to 4in6 tunnel:
+    * RFC2473 sec 7.2: fragmentation should happen in tunnel
+    */
+    if (strstr(dev->name, "ip6tnl"))
+    {
+        return output(skb);
+    }
+#endif    
+
 	/*
 	 *	Setup starting values.
 	 */
@@ -672,6 +703,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 			BUG();
 		left -= len;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        blog_xfer(skb2, skb);
+#endif
 		/*
 		 *	Fill in the new header fields.
 		 */
@@ -846,7 +880,12 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2013-4470*/
+	if (((length > mtu) || (skb && skb_shinfo(skb)->nr_frags)) &&
+#else
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+#endif
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 960fbfc3e976f5c8c65a2a95c8d3bd5946c0ec33..ae398876d74eeeb27f05f37891b0f838ac9c6309 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -794,6 +794,34 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_IGMP)
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
+                                         __be32 origin, 
+                                         __be32 mcastgrp,
+                                         vifi_t vifi)
+{
+	int line = MFC_HASH(mcastgrp, origin);
+	struct mfc_cache *c;
+	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
+		if ((c->mfc_origin == origin) && 
+		    (c->mfc_mcastgrp == mcastgrp) &&
+		    (c->mfc_parent == vifi))
+			return c;
+	}
+
+	/* for ASM multicast source does not matter so need to check
+	   for an entry with NULL origin */
+	line = MFC_HASH(mcastgrp, 0x0);
+	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
+		if ((c->mfc_origin == 0x0) && 
+		    (c->mfc_mcastgrp == mcastgrp) &&
+		    (c->mfc_parent == vifi))
+		return c;
+	}
+
+	return NULL;
+}
+#else
 /* called with rcu_read_lock() */
 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 					 __be32 origin,
@@ -808,6 +836,7 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 	}
 	return NULL;
 }
+#endif
 
 /*
  *	Allocate a multicast cache entry
@@ -1042,8 +1071,14 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
 	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
+#if defined(CONFIG_BCM_KF_IGMP)
+		if ((c->mfc_origin == mfc->mfcc_origin.s_addr) &&
+		    (c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) &&
+		    (c->mfc_parent == mfc->mfcc_parent)) {
+#else
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+#endif
 			list_del_rcu(&c->list);
 
 			ipmr_cache_free(c);
@@ -1066,8 +1101,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
 	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+#if defined(CONFIG_BCM_KF_IGMP)
+		if ((c->mfc_origin == mfc->mfcc_origin.s_addr) &&
+		    (c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) &&
+		    (c->mfc_parent == mfc->mfcc_parent)) {
+#else
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+#endif
 			found = true;
 			break;
 		}
@@ -1418,7 +1459,11 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		rcu_read_lock();
+#if defined(CONFIG_BCM_KF_IGMP)
+      c = NULL;
+#else
 		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+#endif
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1492,7 +1537,11 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 			return -EFAULT;
 
 		rcu_read_lock();
+#if defined(CONFIG_BCM_KF_IGMP)
+		c = NULL;
+#else
 		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+#endif
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1763,8 +1812,15 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
 				if (skb2)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                                {
+					blog_clone(skb, blog_ptr(skb2));
+#endif
 					ipmr_queue_xmit(net, mrt, skb2, cache,
 							psend);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                                }
+#endif
 			}
 			psend = ct;
 		}
@@ -1853,9 +1909,23 @@ int ip_mr_input(struct sk_buff *skb)
 		    }
 	}
 
+#if defined(CONFIG_BCM_KF_IGMP)
+	/* mroute should not apply to IGMP traffic
+	   in addition it does not make sense for TCP protocol to be used
+	   for multicast so just check for UDP */
+	if( ip_hdr(skb)->protocol == IPPROTO_UDP )
+	{
+		vifi_t vifi = ipmr_find_vif(mrt, skb->dev);
+		cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, vifi);
+	}
+	else
+	{
+		cache = NULL;
+	}
+#else	
 	/* already under rcu_read_lock() */
 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
-
+#endif
 	/*
 	 *	No usable cache entry
 	 */
@@ -1888,13 +1958,28 @@ int ip_mr_input(struct sk_buff *skb)
 	read_unlock(&mrt_lock);
 
 	if (local)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        {
+		/* free blog if present */
+		blog_free(skb);
+#endif
 		return ip_local_deliver(skb);
-
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	}
+#endif
 	return 0;
 
 dont_forward:
 	if (local)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		/* free blog if present */
+		blog_free(skb);
+#endif
 		return ip_local_deliver(skb);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	}
+#endif
 	kfree_skb(skb);
 	return 0;
 }
@@ -2039,9 +2124,15 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	return -EMSGSIZE;
 }
 
+#if defined(CONFIG_BCM_KF_IGMP)
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+		   __be32 saddr, __be32 daddr,
+		   struct rtmsg *rtm, int nowait, unsigned short ifIndex)
+#else
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
 		   struct rtmsg *rtm, int nowait)
+#endif
 {
 	struct mfc_cache *cache;
 	struct mr_table *mrt;
@@ -2052,7 +2143,25 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		return -ENOENT;
 
 	rcu_read_lock();
+
+#if defined(CONFIG_BCM_KF_IGMP)
+	/* mroute should not apply to IGMP traffic
+	   in addition it does not make sense for TCP protocol to be used
+	   for multicast so just check for UDP */
+	if ((NULL == skb->dev) || (ip_hdr(skb) == NULL) ||
+	    (ip_hdr(skb)->protocol == IPPROTO_UDP))
+	{
+		struct net_device *dev = dev_get_by_index(net, ifIndex);
+		vifi_t vifi = ipmr_find_vif(mrt, dev);
+		cache = ipmr_cache_find(mrt, saddr, daddr, vifi);
+	}
+	else
+	{
+		cache = NULL;
+	}
+#else	
 	cache = ipmr_cache_find(mrt, saddr, daddr);
+#endif
 
 	if (cache == NULL) {
 		struct sk_buff *skb2;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fcc543cd987a3f22b45ea143c9c754019fa8bacd..9a201add07604e85ce2f5a0b7f514f81c0f2eaa6 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -123,6 +123,19 @@ config IP_NF_TARGET_REJECT
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP_NF_TARGET_REJECT
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+          This option enables turning a "--reject-with icmp*" into a matching
+          socket error also.
+	  The REJECT target normally allows sending an ICMP message. But it
+          leaves the local socket unaware of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP_NF_TARGET_ULOG
 	tristate "ULOG target support"
 	default m if NETFILTER_ADVANCED=n
@@ -238,6 +251,13 @@ config NF_NAT_PROTO_SCTP
 	default NF_NAT && NF_CT_PROTO_SCTP
 	depends on NF_NAT && NF_CT_PROTO_SCTP
 	select LIBCRC32C
+	
+#BCM_KF 
+config NF_NAT_PROTO_ESP
+	tristate
+	depends on BCM_KF_NETFILTER && NF_NAT && NF_CT_PROTO_ESP
+	default NF_NAT && NF_CT_PROTO_ESP
+#BCM_KF END
 
 config NF_NAT_FTP
 	tristate
@@ -275,6 +295,24 @@ config NF_NAT_SIP
 	depends on NF_CONNTRACK && NF_NAT
 	default NF_NAT && NF_CONNTRACK_SIP
 
+config NF_NAT_IPSEC
+	tristate
+	depends on  BCM_KF_NETFILTER && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_IPSEC
+
+config NF_NAT_PT
+	tristate "Port Triggering support"
+	depends on NF_NAT && BCM_KF_NETFILTER
+	help
+	  Port Triggering support
+
+	  To compile it as a module, choose M here.  If unsure, say Y.
+
+config NF_NAT_RTSP
+	tristate
+	depends on BCM_KF_NETFILTER && IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_RTSP
+
 # mangle + specific targets
 config IP_NF_MANGLE
 	tristate "Packet mangling"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 240b68469a7a178f990f07559f65ad46b2ae43f0..53520f18738aaa337d52d41ff07633e26e09174b 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -28,14 +28,25 @@ obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
 obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
 obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NF_NAT_IPSEC) += nf_nat_ipsec.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
 obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NF_NAT_PT) += nf_nat_pt.o
+obj-$(CONFIG_NF_NAT_RTSP) += nf_nat_rtsp.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+
 # NAT protocols (nf_nat)
 obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
 obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NF_NAT_PROTO_ESP) += nf_nat_proto_esp.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
 
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3ba97fe633525c10e63d4bcb767b34..a03eb2d1c07967b8efb5e78197656fa154440112 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -30,6 +30,12 @@
 #include <net/netfilter/nf_log.h>
 #include "../../netfilter/xt_repldata.h"
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#include <net/bl_ops.h>
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv4 packet filter");
@@ -71,7 +77,13 @@ EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+ip_packet_match(struct sk_buff *skb,
+                const struct iphdr *ip,
+#else
 ip_packet_match(const struct iphdr *ip,
+#endif
 		const char *indev,
 		const char *outdev,
 		const struct ipt_ip *ipinfo,
@@ -81,10 +93,17 @@ ip_packet_match(const struct iphdr *ip,
 
 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	if ( ipinfo->proto == IPPROTO_TCP )
+		skb->ipt_check |= IPT_MATCH_TCP;
+	else if ( ipinfo->proto == IPPROTO_UDP )
+		skb->ipt_check |= IPT_MATCH_UDP;
+#endif
+
 	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
 		  IPT_INV_SRCIP) ||
 	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
-		  IPT_INV_DSTIP)) {
+		     IPT_INV_DSTIP)) {
 		dprintf("Source or dest mismatch.\n");
 
 		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
@@ -343,9 +362,19 @@ ipt_do_table(struct sk_buff *skb,
 		const struct xt_entry_match *ematch;
 
 		IP_NF_ASSERT(e);
-		if (!ip_packet_match(ip, indev, outdev,
-		    &e->ip, acpar.fragoff)) {
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+        skb->ipt_check = 0;
+        if (!ip_packet_match(skb, ip, indev, outdev,
+            &e->ip, acpar.fragoff)) {
+#else
+        if (!ip_packet_match(ip, indev, outdev,
+            &e->ip, acpar.fragoff)) {
+#endif
+
+#if !defined(CONFIG_BCM_KF_BLOG) || !defined(CONFIG_BLOG_FEATURE)
  no_match:
+#endif
 			e = ipt_next_entry(e);
 			continue;
 		}
@@ -353,8 +382,13 @@ ipt_do_table(struct sk_buff *skb,
 		xt_ematch_foreach(ematch, e) {
 			acpar.match     = ematch->u.kernel.match;
 			acpar.matchinfo = ematch->data;
+
 			if (!acpar.match->match(skb, &acpar))
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+				skb->ipt_check |= IPT_TARGET_CHECK;
+#else
 				goto no_match;
+#endif
 		}
 
 		ADD_COUNTER(e->counters, skb->len, 1);
@@ -367,7 +401,7 @@ ipt_do_table(struct sk_buff *skb,
 		/* The packet is traced: log it */
 		if (unlikely(skb->nf_trace))
 			trace_packet(skb, hook, in, out,
-				     table->name, private, e);
+			             table->name, private, e);
 #endif
 		/* Standard target? */
 		if (!t->u.kernel.target->target) {
@@ -376,48 +410,96 @@ ipt_do_table(struct sk_buff *skb,
 			v = ((struct xt_standard_target *)t)->verdict;
 			if (v < 0) {
 				/* Pop from stack? */
-				if (v != XT_RETURN) {
+			    if (v != XT_RETURN) {
 					verdict = (unsigned)(-v) - 1;
 					break;
 				}
-				if (*stackptr <= origptr) {
-					e = get_entry(table_base,
-					    private->underflow[hook]);
-					pr_debug("Underflow (this is normal) "
-						 "to %p\n", e);
-				} else {
-					e = jumpstack[--*stackptr];
-					pr_debug("Pulled %p out from pos %u\n",
-						 e, *stackptr);
-					e = ipt_next_entry(e);
-				}
+			    if (*stackptr <= origptr) {
+				    e = get_entry(table_base,
+				        private->underflow[hook]);
+				    pr_debug("Underflow (this is normal) "
+					         "to %p\n", e);
+			    } else {
+				    e = jumpstack[--*stackptr];
+				    pr_debug("Pulled %p out from pos %u\n",
+					          e, *stackptr);
+				    e = ipt_next_entry(e);
+			    }
 				continue;
 			}
-			if (table_base + v != ipt_next_entry(e) &&
-			    !(e->ip.flags & IPT_F_GOTO)) {
-				if (*stackptr >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
-				jumpstack[(*stackptr)++] = e;
-				pr_debug("Pushed %p into pos %u\n",
-					 e, *stackptr - 1);
-			}
+
+		    if (table_base + v != ipt_next_entry(e) &&
+		        !(e->ip.flags & IPT_F_GOTO)) {
+			    if (*stackptr >= private->stacksize) {
+				    verdict = NF_DROP;
+				    break;
+			    }
+			    jumpstack[(*stackptr)++] = e;
+			    pr_debug("Pushed %p into pos %u\n",
+				     e, *stackptr - 1);
+		    }
 
 			e = get_entry(table_base, v);
-			continue;
+		    continue;
 		}
 
+
 		acpar.target   = t->u.kernel.target;
 		acpar.targinfo = t->data;
 
 		verdict = t->u.kernel.target->target(skb, &acpar);
-		/* Target might have changed stuff. */
-		ip = ip_hdr(skb);
+				/* Target might have changed stuff. */
+				ip = ip_hdr(skb);
+		        
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+		if ( skb->ipt_check & IPT_TARGET_CHECK )
+			verdict = NF_DROP;
+
+		if ( skb->blog_p ) {
+			if ( (skb->ipt_check & IPT_MATCH_LENGTH) &&
+				 (skb->ipt_check & IPT_MATCH_TCP) &&
+				 (skb->ipt_check & IPT_TARGET_MARK) &&
+				 !(skb->blog_p->isWan) ) {
+				skb->blog_p->preMod = 1;
+				skb->blog_p->postMod = 1;
+				skb->blog_p->preHook = &blog_pre_mod_hook;
+				skb->blog_p->postHook = &blog_post_mod_hook;
+			}
+			if ( (skb->ipt_check & IPT_MATCH_LENGTH) &&
+				 (skb->ipt_check & IPT_TARGET_MARK) ) {
+				blog_set_len_tbl(skb->ipt_log.u32);
+				skb->blog_p->preMod = 1;
+				skb->blog_p->postMod = 1;
+				skb->blog_p->lenPrior = 1;
+				skb->blog_p->preHook = &blog_pre_mod_hook;
+				skb->blog_p->postHook = &blog_post_mod_hook;
+			}
+			if ( (skb->ipt_check & IPT_MATCH_DSCP) &&
+				 (skb->ipt_check & IPT_TARGET_DSCP) ) {
+				blog_set_dscp_tbl(skb->ipt_log.u8[BLOG_MATCH_DSCP_INDEX], skb->ipt_log.u8[BLOG_TARGET_DSCP_INDEX]);
+				skb->blog_p->preMod = 1;
+				skb->blog_p->postMod = 1;
+				skb->blog_p->dscpMangl = 1;
+				skb->blog_p->preHook = &blog_pre_mod_hook;
+				skb->blog_p->postHook = &blog_post_mod_hook;
+			}
+			if ( (skb->ipt_check & IPT_MATCH_TOS) &&
+				 (skb->ipt_check & IPT_TARGET_TOS) ) {
+				blog_set_tos_tbl(skb->ipt_log.u8[BLOG_MATCH_TOS_INDEX], skb->ipt_log.u8[BLOG_TARGET_DSCP_INDEX]);
+				skb->blog_p->preMod = 1;
+				skb->blog_p->postMod = 1;
+				skb->blog_p->tosMangl = 1;
+				skb->blog_p->preHook = &blog_pre_mod_hook;
+				skb->blog_p->postHook = &blog_post_mod_hook;
+			}
+		}
+#endif
+
 		if (verdict == XT_CONTINUE)
 			e = ipt_next_entry(e);
 		else
-			/* Verdict */
+                        /* Verdict */
 			break;
 	} while (!acpar.hotdrop);
 	pr_debug("Exiting %s; resetting sp from %u to %u\n",
@@ -596,6 +678,13 @@ check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 		duprintf("check failed for `%s'.\n", par->match->name);
 		return ret;
 	}
+
+    
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_ipv4_netfilter_ip_tables_check_match(m, par, ip));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 	return 0;
 }
 
@@ -670,8 +759,8 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	mtpar.family    = NFPROTO_IPV4;
 	xt_ematch_foreach(ematch, e) {
 		ret = find_check_match(ematch, &mtpar);
-		if (ret != 0)
-			goto cleanup_matches;
+	if (ret != 0)
+		goto cleanup_matches;
 		++j;
 	}
 
@@ -753,7 +842,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
 				return -EINVAL;
 			}
 			newinfo->underflow[h] = underflows[h];
-		}
+	}
 	}
 
 	/* Clear counters and comefrom */
@@ -811,8 +900,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 						 repl->hook_entry,
 						 repl->underflow,
 						 repl->valid_hooks);
-		if (ret != 0)
-			return ret;
+	if (ret != 0)
+		return ret;
 		++i;
 		if (strcmp(ipt_get_target(iter)->u.user.name,
 		    XT_ERROR_TARGET) == 0)
@@ -883,7 +972,7 @@ get_counters(const struct xt_table_info *t,
 	for_each_possible_cpu(cpu) {
 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
-		i = 0;
+	i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			u64 bcnt, pcnt;
 			unsigned int start;
@@ -1217,6 +1306,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
 
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+        BL_OPS(net_ipv4_netfilter_ip_tables___do_replace(oldinfo, newinfo));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 	/* Get the old counters, and synchronize with replace */
 	get_counters(oldinfo, counters);
 
@@ -1271,6 +1367,13 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 		goto free_newinfo;
 	}
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+		BL_OPS(net_ipv4_netfilter_ip_tables_do_replace(&tmp));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;
@@ -1507,8 +1610,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
 	xt_ematch_foreach(ematch, e) {
 		ret = compat_find_calc_match(ematch, name,
 					     &e->ip, e->comefrom, &off);
-		if (ret != 0)
-			goto release_matches;
+	if (ret != 0)
+		goto release_matches;
 		++j;
 	}
 
@@ -1577,7 +1680,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
 	xt_ematch_foreach(ematch, e) {
 		ret = xt_compat_match_from_user(ematch, dstptr, size);
 		if (ret != 0)
-			return ret;
+		return ret;
 	}
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = compat_ipt_get_target(e);
@@ -1611,7 +1714,7 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
 	xt_ematch_foreach(ematch, e) {
 		ret = check_match(ematch, &mtpar);
 		if (ret != 0)
-			goto cleanup_matches;
+		goto cleanup_matches;
 		++j;
 	}
 
@@ -1667,12 +1770,12 @@ translate_compat_table(struct net *net,
 	xt_entry_foreach(iter0, entry0, total_size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
 							entry0,
-							entry0 + total_size,
+				       entry0 + total_size,
 							hook_entries,
 							underflows,
 							name);
-		if (ret != 0)
-			goto out_unlock;
+	if (ret != 0)
+		goto out_unlock;
 		++j;
 	}
 
@@ -2162,17 +2265,17 @@ static int icmp_checkentry(const struct xt_mtchk_param *par)
 static struct xt_target ipt_builtin_tg[] __read_mostly = {
 	{
 		.name             = XT_STANDARD_TARGET,
-		.targetsize       = sizeof(int),
+	.targetsize	= sizeof(int),
 		.family           = NFPROTO_IPV4,
 #ifdef CONFIG_COMPAT
-		.compatsize       = sizeof(compat_int_t),
-		.compat_from_user = compat_standard_from_user,
-		.compat_to_user   = compat_standard_to_user,
+	.compatsize	= sizeof(compat_int_t),
+	.compat_from_user = compat_standard_from_user,
+	.compat_to_user	= compat_standard_to_user,
 #endif
 	},
 	{
 		.name             = XT_ERROR_TARGET,
-		.target           = ipt_error,
+	.target		= ipt_error,
 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
 		.family           = NFPROTO_IPV4,
 	},
@@ -2197,11 +2300,11 @@ static struct nf_sockopt_ops ipt_sockopts = {
 
 static struct xt_match ipt_builtin_mt[] __read_mostly = {
 	{
-		.name       = "icmp",
-		.match      = icmp_match,
-		.matchsize  = sizeof(struct ipt_icmp),
-		.checkentry = icmp_checkentry,
-		.proto      = IPPROTO_ICMP,
+	.name		= "icmp",
+	.match		= icmp_match,
+	.matchsize	= sizeof(struct ipt_icmp),
+	.checkentry	= icmp_checkentry,
+	.proto		= IPPROTO_ICMP,
 		.family     = NFPROTO_IPV4,
 	},
 };
@@ -2229,7 +2332,7 @@ static int __init ip_tables_init(void)
 	if (ret < 0)
 		goto err1;
 
-	/* No one else will be downing sem now, so we won't sleep */
+	/* Noone else will be downing sem now, so we won't sleep */
 	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
 	if (ret < 0)
 		goto err2;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 2f210c79dc876c069228e07dda15655e372e5468..ec82dae54e9b06607e4d16f7843eef19fc513a68 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -20,13 +20,153 @@
 #include <net/checksum.h>
 #include <net/route.h>
 #include <net/netfilter/nf_nat_rule.h>
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <net/netfilter/nf_conntrack_zones.h>
+#endif
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_helper.h>
+#endif
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/****************************************************************************/
+static void bcm_nat_expect(struct nf_conn *ct,
+			   struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_ipv4_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where new ct comes from */
+	range.flags = NF_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+	 
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip = exp->saved_ip;
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+/****************************************************************************/
+static int bcm_nat_help(struct sk_buff *skb, unsigned int protoff,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *exp;
+	
+	if (dir != IP_CT_DIR_ORIGINAL ||
+	    help->expecting[NF_CT_EXPECT_CLASS_DEFAULT])
+		return NF_ACCEPT;
+
+	pr_debug("bcm_nat: packet[%d bytes] ", skb->len);
+	nf_ct_dump_tuple(&ct->tuplehash[dir].tuple);
+	pr_debug("reply: ");
+	nf_ct_dump_tuple(&ct->tuplehash[!dir].tuple);
+	
+	/* Create expect */
+	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
+		return NF_ACCEPT;
+
+	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, AF_INET, NULL,
+			  &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_UDP,
+			  NULL, &ct->tuplehash[!dir].tuple.dst.u.udp.port);
+	exp->flags = NF_CT_EXPECT_PERMANENT;
+	exp->saved_ip = ct->tuplehash[dir].tuple.src.u3.ip;
+	exp->saved_proto.udp.port = ct->tuplehash[dir].tuple.src.u.udp.port;
+	exp->dir = !dir;
+	exp->expectfn = bcm_nat_expect;
+
+	/* Setup expect */
+	nf_ct_expect_related(exp);
+	nf_ct_expect_put(exp);
+	pr_debug("bcm_nat: expect setup\n");
+
+	return NF_ACCEPT;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_expect_policy bcm_nat_exp_policy __read_mostly = {
+	.max_expected 	= 1000,
+	.timeout	= 240,
+};
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_bcm_nat __read_mostly = {
+	.name = "BCM-NAT",
+	.me = THIS_MODULE,
+	.tuple.src.l3num = AF_INET,
+	.tuple.dst.protonum = IPPROTO_UDP,
+	.expect_policy = &bcm_nat_exp_policy,
+	.expect_class_max = 1,
+	.help = bcm_nat_help,
+};
+
+/****************************************************************************/
+static inline int find_exp(__be32 ip, __be16 port, struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_expect *i = NULL;
+
+	
+	memset(&tuple, 0, sizeof(tuple));
+	tuple.src.l3num = AF_INET;
+	tuple.dst.protonum = IPPROTO_UDP;
+	tuple.dst.u3.ip = ip;
+	tuple.dst.u.udp.port = port;
+
+	rcu_read_lock();
+	i = __nf_ct_expect_find(nf_ct_net(ct), nf_ct_zone(ct), &tuple);
+	rcu_read_unlock();
+
+	return i != NULL;
+}
+
+/****************************************************************************/
+static inline struct nf_conntrack_expect *find_fullcone_exp(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple * tp =
+		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_expect * exp = NULL;
+	struct nf_conntrack_expect * i;
+	struct hlist_node *n;
+	unsigned int h;
+
+	rcu_read_lock();
+	for (h = 0; h < nf_ct_expect_hsize; h++) {
+		hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
+			if (i->saved_ip == tp->src.u3.ip &&
+		    	    i->saved_proto.all == tp->src.u.all &&
+		    	    i->tuple.dst.protonum == tp->dst.protonum &&
+		    	    i->tuple.src.u3.ip == 0 &&
+		    	    i->tuple.src.u.udp.port == 0) {
+				exp = i;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return exp;
+}
+#endif /* CONFIG_KF_NETFILTER */
+
 /* FIXME: Multiple targets. --RR */
 static int masquerade_tg_check(const struct xt_tgchk_param *par)
 {
@@ -78,6 +218,71 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
 
 	nat->masq_index = par->out->ifindex;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	if (mr->range[0].min_ip != 0 /* nat_mode == full cone */
+	    && (nfct_help(ct) == NULL || nfct_help(ct)->helper == NULL)
+	    && nf_ct_protonum(ct) == IPPROTO_UDP) {
+		unsigned int ret;
+		u_int16_t minport;
+		u_int16_t maxport;
+		struct nf_conntrack_expect *exp;
+
+		pr_debug("bcm_nat: need full cone NAT\n");
+
+		/* Choose port */
+		spin_lock_bh(&nf_conntrack_lock);
+		/* Look for existing expectation */
+		exp = find_fullcone_exp(ct);
+		if (exp) {
+			minport = maxport = exp->tuple.dst.u.udp.port;
+			pr_debug("bcm_nat: existing mapped port = %hu\n",
+			       	 ntohs(minport));
+		} else { /* no previous expect */
+			u_int16_t newport, tmpport;
+			
+			minport = mr->range[0].min.all == 0? 
+				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.
+				u.udp.port : mr->range[0].min.all;
+			maxport = mr->range[0].max.all == 0? 
+				htons(65535) : mr->range[0].max.all;
+			for (newport = ntohs(minport),tmpport = ntohs(maxport); 
+			     newport <= tmpport; newport++) {
+			     	if (!find_exp(newsrc, htons(newport), ct)) {
+					pr_debug("bcm_nat: new mapped port = "
+					       	 "%hu\n", newport);
+					minport = maxport = htons(newport);
+					break;
+				}
+			}
+		}
+		spin_unlock_bh(&nf_conntrack_lock);
+
+		/*
+		newrange = ((struct nf_nat_range)
+			{ mr->range[0].flags | IP_NAT_RANGE_MAP_IPS |
+			  IP_NAT_RANGE_PROTO_SPECIFIED, newsrc, newsrc,
+		  	  mr->range[0].min, mr->range[0].max });
+		*/
+		newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS |
+			NF_NAT_RANGE_PROTO_SPECIFIED;
+		newrange.max_ip = newrange.min_ip = newsrc;
+		newrange.min.udp.port = newrange.max.udp.port = minport;
+	
+		/* Set ct helper */
+		ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+		if (ret == NF_ACCEPT) {
+			struct nf_conn_help *help = nfct_help(ct);
+			if (help == NULL)
+				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+			if (help != NULL) {
+				help->helper = &nf_conntrack_helper_bcm_nat;
+				pr_debug("bcm_nat: helper set\n");
+			}
+		}
+		return ret;
+	}
+#endif /* CONFIG_KF_NETFILTER */
+
 	/* Transfer from original range. */
 	newrange = ((struct nf_nat_ipv4_range)
 		{ mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
@@ -164,6 +369,9 @@ static int __init masquerade_tg_init(void)
 
 static void __exit masquerade_tg_exit(void)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_bcm_nat);
+#endif
 	xt_unregister_target(&masquerade_tg_reg);
 	unregister_netdevice_notifier(&masq_dev_notifier);
 	unregister_inetaddr_notifier(&masq_inet_notifier);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index cf73cc70ed2d2e1bfe1a5c837bc9993358e9904e..4518e3adf4d3c14e0480fd7c42a68a4e57787b10 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -44,8 +44,13 @@ static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
 	if (ap == NULL)
 		return false;
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&tuple->src.u3.ip, &ap[0], sizeof(__be32));
+	memcpy(&tuple->dst.u3.ip, &ap[1], sizeof(__be32));
+#else
 	tuple->src.u3.ip = ap[0];
 	tuple->dst.u3.ip = ap[1];
+#endif
 
 	return true;
 }
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7cbe9cb261c29d44907760b7d51f0d95a9d64b27..9c273027165e5883b4a1179bff3f3c67be459274 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -125,6 +125,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 		 enum ip_conntrack_info *ctinfo,
 		 unsigned int hooknum)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	struct inside {
+		struct icmphdr icmp;
+		struct iphdr ip;
+	} __attribute__((packed));
+	struct inside _in, *pIn; 
+#endif
 	struct nf_conntrack_tuple innertuple, origtuple;
 	const struct nf_conntrack_l4proto *innerproto;
 	const struct nf_conntrack_tuple_hash *h;
@@ -132,6 +139,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 
 	NF_CT_ASSERT(skb->nfct == NULL);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* Not enough header? */
+	pIn = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in);
+	if (pIn == NULL)
+		return -NF_ACCEPT;
+#endif
+
 	/* Are they talking about one of our connections? */
 	if (!nf_ct_get_tuplepr(skb,
 			       skb_network_offset(skb) + ip_hdrlen(skb)
@@ -142,7 +156,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 	}
 
 	/* rcu_read_lock()ed by nf_hook_slow */
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	innerproto = __nf_ct_l4proto_find(PF_INET, pIn->ip.protocol);
+	origtuple.src.u3.ip = pIn->ip.saddr;
+	origtuple.dst.u3.ip = pIn->ip.daddr;
+#else
 	innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum);
+#endif
 
 	/* Ordinarily, we'd expect the inverted tupleproto, but it's
 	   been preserved inside the ICMP. */
diff --git a/net/ipv4/netfilter/nf_nat_ipsec.c b/net/ipv4/netfilter/nf_nat_ipsec.c
new file mode 100644
index 0000000000000000000000000000000000000000..5c461010e22f6ad54bc949c141e85e4cbf0fb0fb
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_ipsec.c
@@ -0,0 +1,93 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+
+#include <linux/module.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_ipsec.h>
+
+MODULE_AUTHOR("Pavan Kumar <pavank@broadcom.com>");
+MODULE_DESCRIPTION("Netfilter connection tracking module for ipsec");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf_nat_ipsec");
+
+/* outbound packets == from LAN to WAN */
+/*
+ * Force the UDP source port of an outbound IKE packet back to the
+ * well-known IPsec/IKE port (IPSEC_PORT, i.e. 500) after SNAT, so
+ * the remote peer still sees IKE traffic originating from port 500.
+ * The UDP checksum is set to 0, which for UDP over IPv4 means
+ * "no checksum", so it does not need to be recomputed after the
+ * port rewrite.  Always returns NF_ACCEPT; 'ct' and 'ctinfo' are
+ * not used by this handler.
+ */
+static int
+ipsec_outbound_pkt(struct sk_buff *skb,
+                   struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+
+{
+   struct iphdr *iph = ip_hdr(skb);
+   /* UDP header sits right after the (ihl * 4)-byte IP header */
+   struct udphdr *udph = (void *)iph + iph->ihl * 4;
+
+   /* make sure source port is 500 */
+   udph->source = htons(IPSEC_PORT);
+   udph->check = 0;
+   
+   return NF_ACCEPT;
+}
+
+
+/* inbound packets == from WAN to LAN */
+/*
+ * DNAT an inbound IPsec/IKE packet to the LAN host 'lan_ip':
+ * rewrite the IP destination address, clear the UDP checksum
+ * (0 == "no checksum" for UDP over IPv4), then recompute the IP
+ * header checksum with ip_fast_csum() (second argument is the
+ * header length in 32-bit words, i.e. iph->ihl).  Always returns
+ * NF_ACCEPT; 'ct' and 'ctinfo' are not used by this handler.
+ */
+static int
+ipsec_inbound_pkt(struct sk_buff *skb, struct nf_conn *ct,
+                  enum ip_conntrack_info ctinfo, __be32 lan_ip)
+{
+   struct iphdr *iph = ip_hdr(skb);
+   struct udphdr *udph = (void *)iph + iph->ihl * 4;
+
+   iph->daddr = lan_ip;
+   udph->check = 0;
+   /* zero the stale checksum before recomputing over the header */
+   iph->check = 0;
+   iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+   
+   return NF_ACCEPT;
+}
+
+/*
+ * Module init: publish the outbound/inbound packet handlers through
+ * the nf_nat_ipsec_hook_* pointers consumed by the IPsec conntrack
+ * helper.  BUG_ON() guards against double registration.
+ * NOTE(review): the hooks are published with RCU_INIT_POINTER();
+ * rcu_assign_pointer() is the usual publish primitive for a pointer
+ * that concurrent readers may dereference -- confirm intentional.
+ */
+static int __init nf_nat_helper_ipsec_init(void)
+{
+   BUG_ON(nf_nat_ipsec_hook_outbound != NULL);
+   RCU_INIT_POINTER(nf_nat_ipsec_hook_outbound, ipsec_outbound_pkt);
+
+   BUG_ON(nf_nat_ipsec_hook_inbound != NULL);
+   RCU_INIT_POINTER(nf_nat_ipsec_hook_inbound, ipsec_inbound_pkt);
+
+   return 0;
+}
+
+/*
+ * Module exit: unpublish both hooks, then wait for in-flight RCU
+ * readers to drain before the handler code can be unloaded.
+ */
+static void __exit nf_nat_helper_ipsec_fini(void)
+{
+	RCU_INIT_POINTER(nf_nat_ipsec_hook_inbound, NULL);
+	RCU_INIT_POINTER(nf_nat_ipsec_hook_outbound, NULL);
+
+	synchronize_rcu();
+}
+
+module_init(nf_nat_helper_ipsec_init);
+module_exit(nf_nat_helper_ipsec_fini);
diff --git a/net/ipv4/netfilter/nf_nat_proto_esp.c b/net/ipv4/netfilter/nf_nat_proto_esp.c
new file mode 100644
index 0000000000000000000000000000000000000000..5fbded8efb695cc4a50836c8b6fd0b73514b9f76
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_esp.c
@@ -0,0 +1,114 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+/******************************************************************************
+Filename:       nf_nat_proto_esp.c
+Author:         Pavan Kumar
+Creation Date:  05/27/04
+
+Description:
+    Implements the ESP ALG connectiontracking.
+    Migrated to kernel 2.6.21.5 on April 16, 2008 by Dan-Han Tsai.
+    Migrated to kernel 3.4.11 on Jan 21, 2013 by Kirill Tsym
+*****************************************************************************/
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <linux/netfilter/nf_conntrack_proto_esp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT protocol helper module for ESP");
+
+/* is spi in given range between min and max */
+/*
+ * Always reports the tuple as in range: no SPI range check is
+ * enforced for ESP, so every tuple is accepted as-is.
+ */
+static bool
+esp_in_range(const struct nf_conntrack_tuple *tuple,
+	     enum nf_nat_manip_type maniptype,
+	     const union nf_conntrack_man_proto *min,
+	     const union nf_conntrack_man_proto *max)
+{
+   return true;
+}
+
+/* generate unique tuple ... */
+/*
+ * Deliberate no-op: the tuple is left exactly as passed in; no
+ * port-like field of an ESP tuple is varied to make it unique.
+ */
+static void
+esp_unique_tuple(struct nf_conntrack_tuple *tuple,
+				const struct nf_nat_ipv4_range *range,
+                 enum nf_nat_manip_type maniptype,
+                 const struct nf_conn *conntrack)
+{
+   return;
+}
+
+/* manipulate a ESP packet according to maniptype */
+/*
+ * Ensure the skb is writable up to the ESP header, then pick the
+ * old/new address pair for the requested manipulation direction.
+ * Returns false only if the skb cannot be made writable.
+ * NOTE(review): 'esph', 'oldip' and 'newip' are computed but never
+ * written back into the packet here -- the actual address rewrite
+ * appears to happen elsewhere; confirm this is intentional.
+ */
+static bool
+esp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
+              const struct nf_conntrack_tuple *tuple,
+              enum nf_nat_manip_type maniptype)
+{
+   struct esphdr *esph;
+   struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
+   /* offset of the ESP header: IP header start + (ihl * 4) bytes */
+   unsigned int hdroff = iphdroff + iph->ihl * 4;
+   __be32 oldip, newip;
+
+   if (!skb_make_writable(skb, hdroff + sizeof(*esph)))
+      return false;
+
+   if (maniptype == NF_NAT_MANIP_SRC)
+   {
+      /* Get rid of src ip and src pt */
+      oldip = iph->saddr;
+      newip = tuple->src.u3.ip;
+   } 
+   else 
+   {
+      /* Get rid of dst ip and dst pt */
+      oldip = iph->daddr;
+      newip = tuple->dst.u3.ip;
+   }
+
+   return true;
+}
+
+/* NAT protocol descriptor wiring the ESP callbacks to IPPROTO_ESP. */
+const struct nf_nat_protocol esp __read_mostly = {
+   .protonum = IPPROTO_ESP,
+   .manip_pkt = esp_manip_pkt,
+   .in_range = esp_in_range,
+   .unique_tuple = esp_unique_tuple,
+};
+
+/* Register the ESP protocol handler with the NAT core. */
+int __init nf_nat_proto_esp_init(void)
+{
+   return nf_nat_protocol_register(&esp);
+}
+
+/* Unregister the ESP protocol handler on module unload. */
+void __exit nf_nat_proto_esp_fini(void)
+{
+   nf_nat_protocol_unregister(&esp);
+}
diff --git a/net/ipv4/netfilter/nf_nat_pt.c b/net/ipv4/netfilter/nf_nat_pt.c
new file mode 100644
index 0000000000000000000000000000000000000000..d2dc7709082791aa533f06cd8359b49e477e17c2
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_pt.c
@@ -0,0 +1,421 @@
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+/* PT for IP connection tracking. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ctype.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_pt.h>
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#include <net/bl_ops.h>
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#define PT_PROTO_TCP 	1
+#define PT_PROTO_UDP 	2
+#define PT_PROTO_ALL 	(PT_PROTO_TCP|PT_PROTO_UDP)
+
+/*
+** Parameters passed from insmod.
+** outport[25]={proto,a1,a2,proto,b1,b2,proto,c1,c2.............}
+** inport[25]={proto,a1,a2,proto,b1,b2,proto,c1,c2.............}
+** iface[25]={interface1, interface2, interface3................}
+** insmod pt.o outport=0,100,200,1,6000,7000 
+**             inport=0,300,400,2,800,900
+**             iface=eth0,ppp0
+**	       entries=2
+**	       timeout=180	
+** where number tells us how many entries user entered
+** where 1 means tcp
+** where 2 means udp 
+** where 0 means both
+*/
+static unsigned short outport[PT_MAX_ENTRIES*3];
+static unsigned short inport[PT_MAX_ENTRIES*3];
+static char *iface[PT_MAX_ENTRIES];
+static int entries;
+static unsigned timeout = PT_TIMEOUT;
+static unsigned outport_c;
+static unsigned inport_c;
+static unsigned iface_c;
+unsigned short invalid_config = 0;
+
+module_param_array(outport, ushort, &outport_c, 0);
+module_param_array(inport, ushort, &inport_c, 0);
+module_param_array(iface, charp, &iface_c, 0);
+module_param(entries, int, 0);
+module_param(timeout, uint, 0);
+
+/*
+ * Install port-trigger expectations for table entry 'idx' on
+ * connection 'ct'.  For every port in the entry's inport range an
+ * expectation towards the reply-direction destination
+ * (ct->tuplehash[!dir].tuple.dst.u3) is created; when the entry's
+ * protocol is neither PT_PROTO_TCP nor PT_PROTO_UDP, both a TCP
+ * (exp2) and a UDP (exp) expectation are set up for the same port.
+ * All expectations are PERMANENT, use nf_nat_follow_master as the
+ * NAT expectfn, and save the triggered port in saved_proto.all.
+ * Allocation failures are logged via pr_debug() and abort the rest
+ * of the range.  Entry layout: inport[idx*3] = proto,
+ * inport[idx*3+1..2] = inclusive port range.
+ */
+static void trigger_ports(struct nf_conn *ct, int dir, int idx)
+{
+	__be16 port;
+	unsigned short iport, iproto;
+	struct nf_conntrack_expect *exp;
+    	struct nf_conntrack_expect *exp2;
+
+	/* Setup expectations */
+	for (iport = inport[idx*3+1]; iport <= inport[idx*3+2]; iport++) {
+		port = htons(iport);
+		if ((exp = nf_ct_expect_alloc(ct)) == NULL) {
+			pr_debug("nf_nat_pt: nf_ct_expect_alloc() error\n");
+			return;
+		}
+		if (inport[idx*3] == PT_PROTO_TCP)
+			iproto = IPPROTO_TCP;
+		else if (inport[idx*3] == PT_PROTO_UDP)
+			iproto = IPPROTO_UDP;
+		else {
+            		if ((exp2 = nf_ct_expect_alloc(ct)) == NULL) {
+                		pr_debug("nf_nat_pt: "
+					 "nf_ct_expect_alloc() error\n");
+                		return;
+            		}	
+            		iproto = IPPROTO_TCP;
+			nf_ct_expect_init(exp2, NF_CT_EXPECT_CLASS_DEFAULT,
+					  AF_INET, NULL,
+					  &ct->tuplehash[!dir].tuple.dst.u3,
+					  iproto, NULL, &port);
+            		exp2->expectfn = nf_nat_follow_master;
+            		exp2->flags = NF_CT_EXPECT_PERMANENT;
+            		exp2->saved_proto.all = port;
+            		exp2->dir = !dir;
+            		if(nf_ct_expect_related(exp2) == 0) {
+                		pr_debug("nf_nat_pt: expect incoming "
+					 "connection to %pI4:%hu %s\n",
+					 &exp2->tuple.dst.u3.ip, iport,
+                       	 		 iproto == IPPROTO_TCP? "tcp" : "udp");
+            		} else {
+                		pr_debug("nf_nat_pt: failed to expect incoming "
+					 "connection to %pI4:%hu %s\n",
+					 &exp2->tuple.dst.u3.ip, iport,
+                       	 		 iproto == IPPROTO_TCP? "tcp" : "udp");
+            		}
+            		nf_ct_expect_put(exp2);
+            
+            		iproto = IPPROTO_UDP;
+        	}
+
+		nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
+				  AF_INET, NULL,
+				  &ct->tuplehash[!dir].tuple.dst.u3,
+				  iproto, NULL, &port);
+       		exp->expectfn = nf_nat_follow_master;
+       		exp->flags = NF_CT_EXPECT_PERMANENT;
+            	exp->saved_proto.all = port;
+            	exp->dir = !dir;
+		if(nf_ct_expect_related(exp) == 0) {
+			pr_debug("nf_nat_pt: expect incoming connection to "
+			       	 "%pI4:%hu %s\n", &exp->tuple.dst.u3.ip, iport,
+			       	 iproto == IPPROTO_TCP? "tcp" : "udp");
+		} else {
+			pr_debug("nf_nat_pt: failed to expect incoming "
+				 "connection to %pI4:%hu %s\n",
+			       	 &exp->tuple.dst.u3.ip, iport,
+			       	 iproto == IPPROTO_TCP? "tcp" : "udp");
+		}
+		nf_ct_expect_put(exp);
+
+	}
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RUNNER_RG) || defined(CONFIG_BCM_RUNNER_RG_MODULE)
+	/* mirror the trigger into the Broadcom Runner accelerator */
+	BL_OPS(net_netfilter_xt_PORTTRIG_trigger_new (ct, ntohl(ct->tuplehash[!dir].tuple.src.u3.ip), ntohl(ct->tuplehash[!dir].tuple.dst.u3.ip),
+		inport[idx*3+1], inport[idx*3+2], inport[idx*3]));
+#endif /* CONFIG_BCM_RUNNER_RG || CONFIG_BCM_RUNNER_RG_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+}
+
+/* FIXME: This should be in userspace.  Later. */
+/*
+ * Conntrack helper callback: decide whether an outgoing packet on
+ * 'ct' should trigger the configured inbound port ranges.  Skips
+ * connections that already have pending expectations, packets that
+ * are not source-NATed, and (for TCP) connections not yet fully
+ * established.  The connection's outgoing destination port and
+ * protocol are matched against each configured outport range and,
+ * when an interface name was given, against the output device; on
+ * a match trigger_ports() installs the expectations.  Always
+ * returns NF_ACCEPT -- this helper never drops traffic.
+ */
+static int help(struct sk_buff *skb, unsigned int protoff,
+		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	unsigned short oport, oproto;
+	int i;
+
+	if ((nfct_help(ct))->expecting[NF_CT_EXPECT_CLASS_DEFAULT]) {
+		/* Already triggered */
+		return NF_ACCEPT;
+	}
+
+	/* We care only NATed outgoing packets */
+	if (!(ct->status & IPS_SRC_NAT))
+		return NF_ACCEPT;
+	
+	/* Get out protocol and port */
+	if (nf_ct_protonum(ct) == IPPROTO_TCP) {
+		/* Don't do anything until TCP connection is established */
+		if (ctinfo != IP_CT_ESTABLISHED &&
+		    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+		    	return NF_ACCEPT;
+		oproto = PT_PROTO_TCP;
+		oport = ntohs(ct->tuplehash[dir].tuple.dst.u.tcp.port);
+	} else if(ct->tuplehash[dir].tuple.dst.protonum == IPPROTO_UDP) {
+		oproto = PT_PROTO_UDP;
+		oport = ntohs(ct->tuplehash[dir].tuple.dst.u.udp.port);
+	} else /* Care only TCP and UDP */
+		return NF_ACCEPT;
+	
+	for (i = 0; i < entries; i++) {
+		/* Look for matched port range */
+		if (!(oproto & outport[i*3]) || (oport < outport[i*3+1]) ||
+		    (oport > outport[i*3+2]))
+		    	continue;
+
+		/* If interface specified, they must match */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
+		if (iface[i] && strcmp(iface[i], skb->dst->dev->name))
+#else
+		if (iface[i] && strcmp(iface[i], skb_dst(skb)->dev->name))
+#endif
+			continue;
+
+		trigger_ports(ct, dir, i);
+	}
+
+        return NF_ACCEPT;
+}
+
+/* Array of registered helpers (one slot per port/proto), allocated
+ * in init(); 'valid_ports' is the number of slots counted by
+ * count_outport(). */
+static struct nf_conntrack_helper *pt;
+static int valid_ports;
+
+/* Not __exit: called from init() */
+/*
+ * Unregister every helper registered so far.  The walk stops at the
+ * first slot whose hnode is still unhashed, i.e. a slot that was
+ * allocated but never registered (partial-failure cleanup path).
+ */
+static void fini(void)
+{
+	int i;
+	struct nf_conntrack_helper *h;
+
+	for (i = 0, h = pt; i < valid_ports; i++, h++) {
+		if (hlist_unhashed(&h->hnode))  /* Not registered */
+			break;
+		pr_debug("nf_nat_pt: unregister helper for port %hu\n",
+		       	 ntohs(pt[i].tuple.src.u.all));
+		nf_conntrack_helper_unregister(h);
+	}
+}
+
+/*
+** Be careful with ports that are registered already.
+** ftp:21
+** irc:6667
+** tftp:69
+** snmp: 161,162
+** talk: 517,518
+** h323: 1720
+** sip: 5060
+** pptp: 1723
+** http: 80
+*/
+/*
+ * Return 1 if 'port' is already claimed by another conntrack helper
+ * module for the given protocol mask (see the list above: ftp, irc,
+ * h323, pptp, http on TCP; tftp, snmp, talk, sip on UDP), so the PT
+ * module must not register a helper for it; 0 otherwise.
+ */
+static int check_port(unsigned short port, unsigned short proto)
+{
+	if(proto & PT_PROTO_TCP) {
+		if (port == 21 || port == 6667 || port == 1720 ||
+		    port == 1723 || port == 80)
+			return 1;
+	}
+	
+	if(proto & PT_PROTO_UDP) {
+		if (port == 69 || port == 161 || port == 162 || port == 517 ||
+		    port == 518 || port == 5060)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Count how many helper slots the configured outport ranges need and
+ * store the result in 'valid_ports' (also returned).  Ports already
+ * owned by other helper modules are skipped with a printk.  A
+ * single-protocol entry costs one slot per port; a "both protocols"
+ * entry costs two (one TCP + one UDP helper).  If the total exceeds
+ * PT_MAX_PORTS, 'invalid_config' is set so init() can bail out.
+ */
+static int count_outport(void)
+{
+	int i;
+	unsigned short port;
+
+	for (i = valid_ports = 0; i < entries; i++) {
+		for (port = outport[i*3+1]; port <= outport[i*3+2]; port++ ) {
+			/* Don't register known ports */
+			if (check_port(port, outport[i*3])) {
+	    	    		printk("nf_nat_pt: cannot register port %hu "
+				       "(already registered by other module)\n",
+				       port);
+				continue;
+			}
+            		if(outport[i*3] == PT_PROTO_TCP ||
+			   outport[i*3] == PT_PROTO_UDP)
+                		valid_ports++;
+            		else
+                		valid_ports+=2;
+		}
+	}
+
+	if (valid_ports > PT_MAX_PORTS) {
+		printk("nf_nat_pt: Conntrack port forwarding table is full. "
+		       "Remaining entries are not processed.\n" );
+		invalid_config = 1;
+	}
+
+	return valid_ports;
+}
+
+/*
+ * Validate that every configured inport range spans at most
+ * PT_MAX_EXPECTED ports; otherwise log the problem and set
+ * 'invalid_config' so init() rejects the configuration.  Always
+ * returns 1 (the return value is not checked by the caller).
+ */
+static int count_inport(void)
+{
+   	int i;
+
+   	for( i=0; i<entries; i++) 
+   	{
+      		if( (inport[i*3+2] - inport[i*3+1] + 1) > PT_MAX_EXPECTED ) 
+      		{
+         		printk("nf_nat_pt: inport range is greater than "
+			       "maximum number %d remaining ports are not "
+			       "processed.\n", PT_MAX_EXPECTED);
+			invalid_config = 1;
+      		}
+   	}
+
+   	return 1;
+}
+
+/* Shared expectation policy for all PT helpers; .timeout is
+ * overwritten from the 'timeout' module parameter in init(). */
+static struct nf_conntrack_expect_policy pt_exp_policy = {
+	.max_expected	= PT_MAX_EXPECTED,
+	.timeout	= PT_TIMEOUT,
+};
+
+/*
+ * Module init: validate the outport/inport/iface module parameters,
+ * size the helper table via count_outport()/count_inport(), allocate
+ * the 'pt' helper array, then register one conntrack helper per
+ * (outgoing port, protocol).  Entries configured with protocol 0
+ * ("both") are converted to PT_PROTO_ALL and get two helpers: the
+ * TCP one is registered inside the else-branch, the UDP one by the
+ * common tail of the loop.  On any registration failure, fini()
+ * unregisters what was registered so far and the error is returned.
+ */
+static int __init init(void)
+{
+	int i, ret=0;
+	unsigned short port;
+	struct nf_conntrack_helper *h;
+
+	/* Validate parameters */
+	if ((outport_c != inport_c) ||
+	    (outport_c < entries * 3) ||
+	    (inport_c < entries * 3)) {
+	    	printk("nf_nat_pt: parameter numbers don't match\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory for helpers */
+	if (!count_outport()) {
+		printk("nf_nat_pt: no ports specified\n");
+		return -EINVAL;
+	}
+
+   	/* make sure inport range is less than or equal to PT_MAX_EXPECTED */
+   	count_inport();
+
+	if (invalid_config)
+	{
+		printk("nf_nat_pt: cannot port range larger than %d\n",
+		       PT_MAX_PORTS);
+		return -EINVAL;
+	}
+
+	if ((pt = kzalloc(valid_ports * sizeof(*h), GFP_KERNEL)) == NULL) {
+		printk("nf_nat_pt: OOM\n");
+		return -ENOMEM;
+	}
+	h = pt;
+
+	/* honour the 'timeout' module parameter for all expectations */
+	pt_exp_policy.timeout = timeout;
+	for (i = 0; i < entries; i++) {
+		for (port = outport[i*3+1]; port <= outport[i*3+2]; port++ ) {
+			/* Don't register known ports */
+			if (check_port(port, outport[i*3]))
+				continue;
+
+			h->name = "pt";
+			h->me = THIS_MODULE;
+			h->expect_policy = &pt_exp_policy;
+			h->expect_class_max = 1;
+			if (outport[i*3] == PT_PROTO_TCP) {
+				h->tuple.dst.protonum = IPPROTO_TCP;
+			} else if ( outport[i*3] == PT_PROTO_UDP) {
+				h->tuple.dst.protonum = IPPROTO_UDP;
+			} else {
+				/* To keep backward compatibility, we still use
+				 * 0 as all protocol for input parameters. Here
+				 * we convert it to internal value */
+				outport[i*3] = PT_PROTO_ALL;
+                		h->tuple.dst.protonum = IPPROTO_TCP;
+                		h->tuple.src.u.all = htons(port);
+                		h->tuple.src.l3num = AF_INET;
+                		h->help = help;
+               	 		pr_debug("nf_nat_pt: register helper for "
+					 "port %hu for incoming ports "
+					 "%hu-%hu\n",
+                       	 	 	 port, inport[i*3+1], inport[i*3+2]);
+                		if ((ret = nf_conntrack_helper_register(h))
+				    < 0) {
+                    			printk("nf_nat_pt: register helper "
+					       "error\n");
+                    			fini();
+                    			return ret;
+                		}
+                		h++;
+
+				/* second slot of a PT_PROTO_ALL entry: UDP */
+                		h->name = "pt";
+                		h->me = THIS_MODULE;
+				h->expect_policy = &pt_exp_policy;
+				h->expect_class_max = 1;
+                		h->tuple.dst.protonum = IPPROTO_UDP;
+			}
+
+			h->tuple.src.u.all = htons(port);
+			h->tuple.src.l3num = AF_INET;
+			h->help = help;
+
+			pr_debug("nf_nat_pt: register helper for port %hu for "
+			       	 "incoming ports %hu-%hu\n",
+			       	 port, inport[i*3+1], inport[i*3+2]);
+
+            		if ((ret = nf_conntrack_helper_register(h)) < 0) {
+           	    		printk("nf_nat_pt: register helper error\n");
+                		fini();
+                		return ret;
+			}
+			h++;
+		}
+	}
+
+	return 0;
+}
+
+MODULE_AUTHOR("Eddie Shi <eddieshi@broadcom.com>");
+MODULE_DESCRIPTION("Netfilter Conntrack helper for PT");
+MODULE_LICENSE("GPL");
+
+module_init(init);
+module_exit(fini);
diff --git a/net/ipv4/netfilter/nf_nat_rtsp.c b/net/ipv4/netfilter/nf_nat_rtsp.c
new file mode 100644
index 0000000000000000000000000000000000000000..8755ff1d3187716ae0eb0ef106ab588b59305f6c
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_rtsp.c
@@ -0,0 +1,382 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/*
+ * RTSP extension for NAT alteration.
+ *
+ * Copyright (c) 2008 Broadcom Corporation.
+ *
+ * <:label-BRCM:2011:DUAL/GPL:standard
+ * 
+ * Unless you and Broadcom execute a separate written software license 
+ * agreement governing use of this software, this software is licensed 
+ * to you under the terms of the GNU General Public License version 2 
+ * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, 
+ * with the following added to such license:
+ * 
+ *    As a special exception, the copyright holders of this software give 
+ *    you permission to link this software with independent modules, and 
+ *    to copy and distribute the resulting executable under terms of your 
+ *    choice, provided that you also meet, for each linked independent 
+ *    module, the terms and conditions of the license of that module. 
+ *    An independent module is a module which is not derived from this
+ *    software.  The special exception does not apply to any modifications 
+ *    of the software.  
+ * 
+ * Not withstanding the above, under no circumstances may you combine 
+ * this software in any way with any other Broadcom software provided 
+ * under a license other than the GPL, without Broadcom's express prior 
+ * written consent. 
+ * 
+ * :>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <linux/netfilter/nf_conntrack_rtsp.h>
+
+/****************************************************************************/
+/*
+ * Rewrite a port number (or a "rtp-rtcp" dashed pair when 'dash' is
+ * non-zero) in the RTSP message at [matchoff, matchoff+matchlen)
+ * using nf_nat_mangle_tcp_packet().  The change in payload length is
+ * stored in *delta so the caller can track offset shifts.  Returns
+ * 0 on success, -1 if the packet could not be mangled.
+ */
+static int modify_ports(struct sk_buff *skb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			int matchoff, int matchlen,
+			u_int16_t rtpport, u_int16_t rtcpport,
+			char dash, int *delta)
+{
+	char buf[sizeof("65535-65535")];
+	int len;
+
+	if (dash)
+		len = sprintf(buf, "%hu%c%hu", rtpport, dash, rtcpport);
+	else
+		len = sprintf(buf, "%hu", rtpport);
+	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen,
+				      buf, len)) {
+		if (net_ratelimit())
+			printk("nf_nat_rtsp: nf_nat_mangle_tcp_packet error\n");
+		return -1;
+	}
+	*delta = len - matchlen;
+	return 0;
+}
+
+/* Setup NAT on this expected conntrack so it follows master but expect the src ip. */
+/* If we fail to get a free NAT slot, we'll get dropped on confirm */
+/*
+ * Variant of nf_nat_follow_master that performs only the DST
+ * manipulation: the expected connection's destination is mapped to
+ * the master's reply-direction source address and the port saved in
+ * exp->saved_proto, while the source address is left untouched.
+ */
+static void nf_nat_follow_master_nosrc(struct nf_conn *ct,
+                          struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_ipv4_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+/****************************************************************************/
+/* One data channel */
+/*
+ * NAT a single RTP data channel announced in an RTSP message.  The
+ * original destination UDP port is saved in the expectation; if an
+ * expectation with the same saved port already exists (looked up
+ * under nf_conntrack_lock) its NATed port is reused, otherwise
+ * increasing port numbers are probed starting from the original one
+ * until nf_ct_expect_related() succeeds.  The chosen port is then
+ * written back into the message via modify_ports().  Returns 0 on
+ * success, -1 if the message could not be mangled.
+ * NOTE(review): when the port search wraps to 0 ("out of UDP
+ * ports") the function still returns 0 -- confirm callers treat
+ * that as acceptable.
+ */
+static int nat_rtsp_channel (struct sk_buff *skb, struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo,
+			     unsigned int matchoff, unsigned int matchlen,
+			     struct nf_conntrack_expect *rtp_exp, int *delta)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *exp;
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port = 0;
+	struct hlist_node *n;
+	int exp_exist = 0;
+
+	/* Set expectations for NAT */
+	rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
+	rtp_exp->expectfn = nf_nat_follow_master_nosrc;
+	rtp_exp->dir = !dir;
+
+	/* Lookup existing expects */
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_for_each_entry(exp, n, &help->expectations, lnode) {
+		if (exp->saved_proto.udp.port == rtp_exp->saved_proto.udp.port){
+			/* Expectation already exists */ 
+			rtp_exp->tuple.dst.u.udp.port = 
+				exp->tuple.dst.u.udp.port;
+			nated_port = ntohs(exp->tuple.dst.u.udp.port);
+			exp_exist = 1;
+			break;
+		}
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	if (exp_exist) {
+		nf_ct_expect_related(rtp_exp);
+		goto modify_message;
+	}
+
+	/* Try to get a port. */
+	for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
+	     nated_port != 0; nated_port++) {
+		rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
+		if (nf_ct_expect_related(rtp_exp) == 0)
+			break;
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_rtsp: out of UDP ports\n");
+		return 0;
+	}
+
+modify_message:
+	/* Modify message */
+	if (modify_ports(skb, ct, ctinfo, matchoff, matchlen,
+			 nated_port, 0, 0, delta) < 0) {
+		nf_ct_unexpect_related(rtp_exp);
+		return -1;
+	}
+
+	/* Success */
+	pr_debug("nf_nat_rtsp: expect RTP ");
+	nf_ct_dump_tuple(&rtp_exp->tuple);
+
+	return 0;
+}
+
+/****************************************************************************/
+/* A pair of data channels (RTP/RTCP) */
+/*
+ * Like nat_rtsp_channel() but for an RTP/RTCP port pair: the search
+ * starts from the original RTP port rounded down to even, and probes
+ * in steps of two so RTP gets the even port and RTCP gets the odd
+ * port one above it.  Both expectations must register successfully
+ * before the pair is accepted; otherwise the RTP expectation is
+ * withdrawn and the search continues.  The pair is then written back
+ * into the message ("rtp-rtcp" with the given dash character).
+ * Returns 0 on success, -1 if the message could not be mangled.
+ */
+static int nat_rtsp_channel2 (struct sk_buff *skb, struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo,
+			      unsigned int matchoff, unsigned int matchlen,
+			      struct nf_conntrack_expect *rtp_exp,
+			      struct nf_conntrack_expect *rtcp_exp,
+			      char dash, int *delta)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *exp;
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port = 0;
+	struct hlist_node *n;
+	int exp_exist = 0;
+
+	/* Set expectations for NAT */
+	rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
+	rtp_exp->expectfn = nf_nat_follow_master_nosrc;
+	rtp_exp->dir = !dir;
+	rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
+	rtcp_exp->expectfn = nf_nat_follow_master_nosrc;
+	rtcp_exp->dir = !dir;
+
+	/* Lookup existing expects */
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_for_each_entry(exp, n, &help->expectations, lnode) {
+		if (exp->saved_proto.udp.port == rtp_exp->saved_proto.udp.port){
+			/* Expectation already exists */ 
+			rtp_exp->tuple.dst.u.udp.port = 
+				exp->tuple.dst.u.udp.port;
+			rtcp_exp->tuple.dst.u.udp.port = 
+				htons(ntohs(exp->tuple.dst.u.udp.port) + 1);
+			nated_port = ntohs(exp->tuple.dst.u.udp.port);
+			exp_exist = 1;
+			break;
+		}
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	if (exp_exist) {
+		nf_ct_expect_related(rtp_exp);
+		nf_ct_expect_related(rtcp_exp);
+		goto modify_message;
+	}
+
+	/* Try to get a pair of ports. */
+	for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port) & (~1);
+	     nated_port != 0; nated_port += 2) {
+		rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
+		if (nf_ct_expect_related(rtp_exp) == 0) {
+			rtcp_exp->tuple.dst.u.udp.port =
+			    htons(nated_port + 1);
+			if (nf_ct_expect_related(rtcp_exp) == 0)
+				break;
+			nf_ct_unexpect_related(rtp_exp);
+		}
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_rtsp: out of RTP/RTCP ports\n");
+		return 0;
+	}
+
+modify_message:
+	/* Modify message */
+	if (modify_ports(skb, ct, ctinfo, matchoff, matchlen,
+			 nated_port, nated_port + 1, dash, delta) < 0) {
+		nf_ct_unexpect_related(rtp_exp);
+		nf_ct_unexpect_related(rtcp_exp);
+		return -1;
+	}
+
+	/* Success */
+	pr_debug("nf_nat_rtsp: expect RTP ");
+	nf_ct_dump_tuple(&rtp_exp->tuple);
+	pr_debug("nf_nat_rtsp: expect RTCP ");
+	nf_ct_dump_tuple(&rtcp_exp->tuple);
+
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Map a NATed (external) UDP port back to the original client port:
+ * first scan the connection's pending expectations for a matching
+ * tuple port (returning its saved_proto), then scan already-derived
+ * child connections for a matching reply-direction port.  Returns
+ * htons(0) when no mapping is found.
+ * NOTE(review): unlike nat_rtsp_channel(), this walk over
+ * help->expectations is done without taking nf_conntrack_lock --
+ * confirm the caller's context makes that safe.
+ */
+static __be16 lookup_mapping_port(struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo,
+				  __be16 port)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *exp;
+	struct nf_conn *child;
+	struct hlist_node *n;
+
+	/* Lookup existing expects */
+	pr_debug("nf_nat_rtsp: looking up existing expectations...\n");
+	hlist_for_each_entry(exp, n, &help->expectations, lnode) {
+		if (exp->tuple.dst.u.udp.port == port) {
+			pr_debug("nf_nat_rtsp: found port %hu mapped from "
+				 "%hu\n",
+			       	 ntohs(exp->tuple.dst.u.udp.port),
+			       	 ntohs(exp->saved_proto.all));
+			return exp->saved_proto.all;
+		}
+	}
+
+	/* Lookup existing connections */
+	pr_debug("nf_nat_rtsp: looking up existing connections...\n");
+	list_for_each_entry(child, &ct->derived_connections, derived_list) {
+		if (child->tuplehash[dir].tuple.dst.u.udp.port == port) {
+			pr_debug("nf_nat_rtsp: found port %hu mapped from "
+				 "%hu\n",
+			       	 ntohs(child->tuplehash[dir].
+			       	 tuple.dst.u.udp.port),
+			       	 ntohs(child->tuplehash[!dir].
+			       	 tuple.src.u.udp.port));
+			return child->tuplehash[!dir].tuple.src.u.udp.port;
+		}
+	}
+
+	return htons(0);
+}
+
+/****************************************************************************/
+/*
+ * Rewrite a single client_port value in an RTSP reply back to the
+ * original (pre-NAT) port found via lookup_mapping_port().  When no
+ * mapping exists the message is left unchanged and *delta is 0.
+ * Returns 0 on success (including "no mapping"), -1 on mangle error.
+ */
+static int nat_rtsp_modify_port (struct sk_buff *skb, struct nf_conn *ct,
+			      	 enum ip_conntrack_info ctinfo,
+				 unsigned int matchoff, unsigned int matchlen,
+			      	 __be16 rtpport, int *delta)
+{
+	__be16 orig_port;
+
+	orig_port = lookup_mapping_port(ct, ctinfo, rtpport);
+	if (orig_port == htons(0)) {
+		*delta = 0;
+		return 0;
+	}
+	if (modify_ports(skb, ct, ctinfo, matchoff, matchlen,
+			 ntohs(orig_port), 0, 0, delta) < 0)
+		return -1;
+	pr_debug("nf_nat_rtsp: Modified client_port from %hu to %hu\n",
+	       	 ntohs(rtpport), ntohs(orig_port));
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Pair variant of nat_rtsp_modify_port(): rewrites a dashed
+ * "rtp-rtcp" client_port range, using the looked-up original port
+ * and original+1 for the RTCP side.  When no mapping exists the
+ * message is left unchanged and *delta is 0.  Returns 0 on success,
+ * -1 on mangle error.
+ */
+static int nat_rtsp_modify_port2 (struct sk_buff *skb, struct nf_conn *ct,
+			       	  enum ip_conntrack_info ctinfo,
+				  unsigned int matchoff, unsigned int matchlen,
+			       	  __be16 rtpport, __be16 rtcpport,
+				  char dash, int *delta)
+{
+	__be16 orig_port;
+
+	orig_port = lookup_mapping_port(ct, ctinfo, rtpport);
+	if (orig_port == htons(0)) {
+		*delta = 0;
+		return 0;
+	}
+	if (modify_ports(skb, ct, ctinfo, matchoff, matchlen,
+			 ntohs(orig_port), ntohs(orig_port)+1, dash, delta) < 0)
+		return -1;
+	pr_debug("nf_nat_rtsp: Modified client_port from %hu to %hu\n",
+	       	 ntohs(rtpport), ntohs(orig_port));
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Replace an address string in the RTSP message with the firewall's
+ * WAN IP, taken from the reply-direction destination of the control
+ * connection, formatted as dotted quad via %pI4.  *delta receives
+ * the resulting change in payload length.  Returns 0 on success,
+ * -1 if the packet could not be mangled.
+ */
+static int nat_rtsp_modify_addr(struct sk_buff *skb, struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				int matchoff, int matchlen, int *delta)
+{
+	char buf[sizeof("255.255.255.255")];
+	int dir = CTINFO2DIR(ctinfo);
+	int len;
+
+	/* Change the destination address to FW's WAN IP address */
+
+	len = sprintf(buf, "%pI4",
+		       &ct->tuplehash[!dir].tuple.dst.u3.ip);
+	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen,
+				      buf, len)) {
+		if (net_ratelimit())
+			printk("nf_nat_rtsp: nf_nat_mangle_tcp_packet error\n");
+		return -1;
+	}
+	*delta = len - matchlen;
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Module init: publish the five RTSP NAT callbacks through the
+ * nat_rtsp_*_hook pointers consumed by the RTSP conntrack helper.
+ * BUG_ON() guards against double registration.
+ */
+static int __init init(void)
+{
+	BUG_ON(rcu_dereference(nat_rtsp_channel_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtsp_channel2_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtsp_modify_port_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtsp_modify_port2_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtsp_modify_addr_hook) != NULL);
+	rcu_assign_pointer(nat_rtsp_channel_hook, nat_rtsp_channel);
+	rcu_assign_pointer(nat_rtsp_channel2_hook, nat_rtsp_channel2);
+	rcu_assign_pointer(nat_rtsp_modify_port_hook, nat_rtsp_modify_port);
+	rcu_assign_pointer(nat_rtsp_modify_port2_hook, nat_rtsp_modify_port2);
+	rcu_assign_pointer(nat_rtsp_modify_addr_hook, nat_rtsp_modify_addr);
+
+	pr_debug("nf_nat_rtsp: init success\n");
+	return 0;
+}
+
+/****************************************************************************/
+/*
+ * Module exit: clear all hooks and wait for in-flight RCU readers
+ * to drain before the callbacks can be unloaded.
+ */
+static void __exit fini(void)
+{
+	rcu_assign_pointer(nat_rtsp_channel_hook, NULL);
+	rcu_assign_pointer(nat_rtsp_channel2_hook, NULL);
+	rcu_assign_pointer(nat_rtsp_modify_port_hook, NULL);
+	rcu_assign_pointer(nat_rtsp_modify_port2_hook, NULL);
+	rcu_assign_pointer(nat_rtsp_modify_addr_hook, NULL);
+	synchronize_rcu();
+}
+
+/****************************************************************************/
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("RTSP NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_rtsp");
+
+#endif // defined(CONFIG_BCM_KF_NETFILTER)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 57932c43960ec27d32c55f6986ee28c74b138482..fdebfe45c3f01814de9808345a29de0e795809cc 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -24,12 +24,454 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_sip.h>
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#include <net/bl_ops.h>
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/iqos.h>
+#endif
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
 MODULE_DESCRIPTION("SIP NAT helper");
 MODULE_ALIAS("ip_nat_sip");
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/* Expectation callback for a freshly created derived (media) connection:
+ * SNAT its source to the address the connection actually arrived from,
+ * and DNAT the destination to the endpoint saved in the expectation
+ * (exp->saved_ip / exp->saved_proto).  Finally both UDP destination
+ * ports (original and reply direction) are registered with the ingress
+ * QoS classifier as dynamic high-priority entries.
+ * Must only run on a connection that has not been NATed yet (BUG_ON).
+ */
+static void nf_nat_redirect(struct nf_conn *new,
+			    struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_ipv4_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(new->status & IPS_NAT_DONE_MASK);
+
+	pr_debug("nf_nat_redirect: new ct ");
+	nf_ct_dump_tuple(&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+
+	/* Change src to where new ct comes from */
+	range.flags = NF_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip =
+		new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
+	pr_debug("nf_nat_redirect: setup POSTROUTING map %pI4\n",
+	       	 &range.min_ip);
+
+	/* For DST manip, map ip:port here to where it's expected. */
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip = exp->saved_ip;
+	pr_debug("nf_nat_redirect: setup PREROUTING map %pI4:%hu\n",
+	       	 &range.min_ip, ntohs(range.min.udp.port));
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
+
+	/* register the SIP Data RTP/RTCP ports with ingress QoS classifier */
+	pr_debug("adding iqos from %pI4:%hu->%pI4:%hu\n",
+		&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip, ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all),
+		&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip, ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all));
+
+	iqos_add_L4port( IPPROTO_UDP, new->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port,
+		IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+	iqos_add_L4port( IPPROTO_UDP, new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port,
+		IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+
+}
+
+/* Expectation callback for SNAT-style re-expectations (see
+ * reexpect_snat_rtp() / nf_nat_snat()): source-NAT the new derived
+ * connection to the ip:port saved in the expectation, then register the
+ * flow's UDP destination ports with the ingress QoS classifier.
+ * Must only run on a connection that has not been NATed yet (BUG_ON).
+ */
+static void nf_nat_snat_expect(struct nf_conn *new,
+			       struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_ipv4_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(new->status & IPS_NAT_DONE_MASK);
+
+	pr_debug("nf_nat_snat_expect: new ct ");
+	nf_ct_dump_tuple(&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+
+	/* Change src to previously NATed address */
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip = exp->saved_ip;
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
+	pr_debug("nf_nat_snat_expect: setup POSTROUTING map %pI4:%hu\n",
+	       	 &range.min_ip, ntohs(range.min.udp.port));
+
+	/* register the SIP Data RTP/RTCP ports with ingress QoS classifier */
+	pr_debug("adding iqos from %pI4:%hu->%pI4:%hu\n",
+		&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip, ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all),
+		&new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip, ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all));
+
+	iqos_add_L4port( IPPROTO_UDP, new->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port,
+		IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+	iqos_add_L4port( IPPROTO_UDP, new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port,
+		IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+
+}
+
+/* Rewrite an "addr", "port" or "addr:port" token inside a SIP payload
+ * with the values taken from @addr (IP printed as dotted quad, port in
+ * host order).  After a successful mangle the payload pointer/length and
+ * the caller's token cursor are refreshed, because the skb data may have
+ * been reallocated and the payload length changed.
+ * Returns NF_ACCEPT on success, NF_DROP if @addr is empty or the mangle
+ * fails.
+ */
+static int nf_nat_addr(struct sk_buff *skb, unsigned int protoff,
+		       struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       char **dptr, int *dlen, char **addr_begin,
+		       int *addr_len, struct nf_conntrack_man *addr)
+{
+	unsigned int matchoff = *addr_begin - *dptr;
+	unsigned int matchlen = (unsigned int)*addr_len;
+	char new_addr[32];
+	unsigned int new_len = 0;
+
+	/* Build "ip", "port" or "ip:port" depending on what @addr carries */
+	if (addr->u3.ip)
+		new_len = sprintf(new_addr, "%pI4", &addr->u3.ip);
+	if (addr->u.all) {
+		if (new_len)
+			new_addr[new_len++] = ':';
+		new_len += sprintf(&new_addr[new_len], "%hu",
+				   ntohs(addr->u.all));
+	}
+	if (new_len == 0)
+		return NF_DROP;
+
+	if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
+				      matchoff, matchlen, new_addr, new_len))
+		return NF_DROP;
+	/* skb may have been reallocated/resized by the mangle: recompute */
+	*dptr = skb->data + protoff + sizeof(struct udphdr);
+	*dlen += new_len - matchlen;
+	*addr_begin = *dptr + matchoff;
+	*addr_len = new_len;
+	return NF_ACCEPT;
+}
+
+/* Check whether the endpoint this expectation targets (exp->saved_ip /
+ * exp->saved_proto) is already served by an existing derived connection
+ * of the master @ct, or by a pending expectation with the same mapped
+ * destination.  If so, reuse that NATed port by copying it into
+ * exp->tuple.dst.u.all and return 1; otherwise return 0 so the caller
+ * allocates a fresh port.
+ */
+static int lookup_existing_port(struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				struct nf_conntrack_expect *exp)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *i;
+	struct hlist_node *n;
+	int found = 0;
+
+	/* Lookup existing connections */
+	pr_debug("nf_nat_sip: looking up existing connections...\n");
+	if (!list_empty(&ct->derived_connections)) {
+		struct nf_conn *child;
+
+		list_for_each_entry(child, &ct->derived_connections,
+				    derived_list) {
+			if (child->tuplehash[dir].tuple.src.u3.ip ==
+			    exp->saved_ip &&
+			    child->tuplehash[dir].tuple.src.u.all ==
+			    exp->saved_proto.all) {
+				pr_debug("nf_nat_sip: found existing "
+				       	 "connection in same direction.\n");
+			    	exp->tuple.dst.u.all =
+					child->tuplehash[!dir].tuple.dst.u.all;
+				return 1;
+			}
+			else if (child->tuplehash[!dir].tuple.src.u3.ip ==
+				 exp->saved_ip &&
+				 child->tuplehash[!dir].tuple.src.u.all ==
+				 exp->saved_proto.all) {
+				pr_debug("nf_nat_sip: found existing "
+				       	 "connection in reverse direction.\n");
+			    	exp->tuple.dst.u.all =
+					child->tuplehash[dir].tuple.dst.u.all;
+				return 1;
+			}
+		}
+	}
+
+	/* Lookup existing expects */
+	pr_debug("nf_nat_sip: looking up existing expectations...\n");
+	hlist_for_each_entry(i, n, &help->expectations, lnode) {
+		if (!memcmp(&i->tuple.dst.u3, &exp->tuple.dst.u3,
+		    	    sizeof(i->tuple.dst.u3)) &&
+		    i->saved_ip == exp->saved_ip &&
+		    i->saved_proto.all == exp->saved_proto.all)  {
+			exp->tuple.dst.u.all = i->tuple.dst.u.all;
+			pr_debug("nf_nat_sip: found existing expectations.\n");
+			found = 1;
+			break;
+		}
+	}
+	return found;
+}
+
+/* Lookup existing expects that belong to the same master. If they have
+ * the same tuple but different saved address, they conflict.
+ * A candidate port equal to the master connection's own reply-side
+ * destination port also counts as a conflict (early return 1).
+ * Returns 1 on conflict, 0 if the port is free to use.
+ */
+static int find_conflicting_expect(struct nf_conn *ct,
+				   enum ip_conntrack_info ctinfo,
+				   struct nf_conntrack_expect *exp)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *i;
+	struct hlist_node *n;
+	int found = 0;
+
+	/* Would collide with the signalling connection itself */
+	if (exp->tuple.dst.u.all == ct->tuplehash[!dir].tuple.dst.u.all)
+		return 1;
+
+	hlist_for_each_entry(i, n, &help->expectations, lnode) {
+		if (nf_ct_tuple_equal(&i->tuple, &exp->tuple) &&
+		    (i->saved_ip != exp->saved_ip ||
+		     i->saved_proto.all != exp->saved_proto.all))  {
+			pr_debug("nf_nat_sip: found conflicting "
+				 "expectation.\n");
+			found = 1;
+			break;
+		}
+	}
+
+	return found;
+}
+
+/* After an RTP expectation was (re)allocated a NATed port, find the
+ * matching reverse-direction expectation of the same class and replace
+ * it with a fresh one keyed on the newly saved endpoint, so that media
+ * initiated from the far side is source-NATed consistently (the new
+ * expectation inherits flags/timeout/helper from the old one).
+ * Returns 1 if a reverse expectation was found and re-registered
+ * (registration failure is only logged), 0 otherwise.
+ */
+static int reexpect_snat_rtp(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     struct nf_conntrack_expect *exp)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *old_exp = NULL;
+	struct nf_conntrack_expect *new_exp;
+	union nf_inet_addr addr;
+	struct hlist_node *n;
+	int found = 0;
+
+	/* Look for reverse expectation */
+	hlist_for_each_entry(old_exp, n, &help->expectations, lnode) {
+		if (old_exp->class == exp->class &&
+		    old_exp->dir == dir) {
+			pr_debug("nf_nat_sip: found reverse expectation.\n");
+			found = 1;
+			break;
+		}
+	}
 
+	/* Not found */
+	if (!found) {
+		pr_debug("nf_nat_sip: not found reverse expectation.\n");
+		return 0;
+	}
+
+	if ((new_exp = nf_ct_expect_alloc(ct)) == NULL) {
+		pr_debug("nf_nat_sip: nf_ct_expect_alloc failed\n");
+		return 0;
+	}
+	addr.ip = exp->saved_ip;
+	nf_ct_expect_init(new_exp, old_exp->class, old_exp->tuple.src.l3num,
+			  &addr, &old_exp->tuple.dst.u3,
+			  old_exp->tuple.dst.protonum, 
+			  &exp->saved_proto.all, &old_exp->tuple.dst.u.all);
+	new_exp->saved_ip = exp->tuple.dst.u3.ip;
+	new_exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
+	new_exp->flags = old_exp->flags;
+	new_exp->derived_timeout = old_exp->derived_timeout;
+	new_exp->helper = old_exp->helper;
+	pr_debug("nf_nat_sip: reexpect SNAT RTP %pI4:%hu->%pI4:%hu->%pI4:%hu\n",
+	       	 &new_exp->tuple.src.u3.ip,
+		 ntohs(new_exp->tuple.src.u.udp.port),
+	       	 &new_exp->saved_ip,
+	       	 ntohs(new_exp->saved_proto.udp.port),
+	       	 &new_exp->tuple.dst.u3.ip,
+	       	 ntohs(new_exp->tuple.dst.u.udp.port));
+
+	/* Swap: the old expectation is dropped before the new one goes in */
+	nf_ct_unexpect_related(old_exp);
+	if (nf_ct_expect_related(new_exp) != 0) {
+		pr_debug("nf_nat_sip: nf_ct_expect_related failed\n");
+	}
+	nf_ct_expect_put(new_exp);
+	
+	return 1;
+}
+
+/* NAT an RTP port announced in a SIP/SDP payload.  Sets up @exp so the
+ * expected media flow is redirected to this box (nf_nat_redirect),
+ * reusing an already-mapped port when the same endpoint was seen before,
+ * otherwise probing even ports >= 1024 in steps of 2 (ports are kept
+ * even; presumably RTCP takes the following odd port — confirm).
+ * On success the port text at *port_begin is rewritten in the packet and
+ * the caller's payload/token cursors are refreshed.
+ * Returns NF_ACCEPT on success, NF_DROP when no port is available or the
+ * packet cannot be mangled.
+ */
+static int nf_nat_rtp(struct sk_buff *skb, unsigned int protoff,
+		      struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		      char **dptr, int *dlen, struct nf_conntrack_expect *exp,
+		      char **port_begin, int *port_len)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port, port_limit;
+	unsigned int matchoff = *port_begin - *dptr;
+	unsigned int matchlen = (unsigned int)*port_len;
+	char new_port[32];
+	unsigned int new_len;
+
+	/* Set expectations for NAT */
+	exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
+	exp->saved_ip = exp->tuple.dst.u3.ip;
+	exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+	exp->expectfn = nf_nat_redirect;
+	exp->dir = !dir;
+
+	if (lookup_existing_port(ct, ctinfo, exp))
+		goto found;
+
+	/* Try to get a RTP ports. Start from the announced port rounded
+	 * down to even, wrap around at 64K until we are back at the start. */
+	nated_port = ntohs(exp->tuple.dst.u.udp.port) & (~1);
+	if (nated_port < 1024)
+		nated_port = 1024;
+	port_limit = nated_port;
+	do {
+		exp->tuple.dst.u.udp.port = htons(nated_port);
+		if (!find_conflicting_expect(ct, ctinfo, exp)) {
+			if (nf_ct_expect_related(exp) == 0) {
+				reexpect_snat_rtp(ct, ctinfo, exp);
+				goto found;
+			}
+		}
+		nated_port += 2;
+		if (nated_port < 1024)
+			nated_port = 1024;
+	}while(nated_port != port_limit);
+
+	/* No port available */
+	if (net_ratelimit())
+		printk("nf_nat_sip: out of RTP ports\n");
+	return NF_DROP;
+
+found:
+	/* Modify signal */
+	new_len = sprintf(new_port, "%hu", ntohs(exp->tuple.dst.u.udp.port));
+	if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
+				      matchoff, matchlen, new_port, new_len)){
+		nf_ct_unexpect_related(exp);
+		return NF_DROP;
+	}
+	/* skb may have been reallocated/resized by the mangle: recompute */
+	*dptr = skb->data + protoff + sizeof(struct udphdr);
+	*dlen += new_len - matchlen;
+	*port_begin = *dptr + matchoff;
+	*port_len = new_len;
+
+	/* Success */
+	pr_debug("nf_nat_sip: expect RTP %pI4:%hu->%pI4:%hu->%pI4:%hu\n",
+	       	 &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.udp.port),
+	       	 &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.udp.port),
+	       	 &exp->saved_ip, ntohs(exp->saved_proto.udp.port));
+
+	return NF_ACCEPT;
+}
+
+/* Pair a freshly parsed media expectation with the reverse-direction
+ * expectation registered earlier for the same class: narrow @exp's source
+ * to the peer's saved ip:port, remember where the reply must be mapped
+ * (saved_ip/saved_proto), and wire it to nf_nat_snat_expect() so the
+ * derived connection is source-NATed when it materialises.
+ *
+ * Always returns NF_ACCEPT; a failure to register the expectation is
+ * only logged and the packet still passes unmodified.
+ */
+static int nf_nat_snat(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       struct nf_conntrack_expect *exp)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *i;
+	struct hlist_node *n;
+
+	hlist_for_each_entry(i, n, &help->expectations, lnode) {
+		if (i->class == exp->class && i->dir == dir) {
+			pr_debug("nf_nat_sip: found reverse expectation.\n");
+			exp->tuple.src.u3.ip = i->saved_ip;
+			exp->tuple.src.u.udp.port = i->saved_proto.all;
+			exp->mask.src.u3.ip = 0xFFFFFFFF;
+			exp->mask.src.u.udp.port = 0xFFFF;
+			exp->saved_ip = i->tuple.dst.u3.ip;
+			exp->saved_proto.udp.port = i->tuple.dst.u.udp.port;
+			exp->expectfn = nf_nat_snat_expect;
+			exp->dir = !dir;
+		}
+	}
+	pr_debug("nf_nat_sip: expect SNAT RTP %pI4:%hu->%pI4:%hu->%pI4:%hu\n",
+ 	       	 &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.udp.port),
+	       	 &exp->saved_ip, ntohs(exp->saved_proto.udp.port),
+	       	 &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.udp.port));
+	/* Bug fix: nf_ct_expect_related() returns 0 on success.  The
+	 * original tested "== 0" and therefore logged "failed" on the
+	 * success path while staying silent on real failures; use "!= 0"
+	 * as reexpect_snat_rtp() already does.
+	 */
+	if (nf_ct_expect_related(exp) != 0) {
+		pr_debug("nf_nat_sip: nf_ct_expect_related failed\n");
+	}
+
+	return NF_ACCEPT;
+}
+
+/* NAT a SIP signalling address announced in the payload.  Sets up @exp
+ * so the expected signalling flow is redirected to this box
+ * (nf_nat_redirect), reusing an existing mapping when possible and
+ * otherwise probing UDP ports >= 1024 one by one (wrapping at 64K).
+ * On success the "ip:port" text at *addr_begin is rewritten in the
+ * packet and the caller's payload/token cursors are refreshed.
+ * Returns NF_ACCEPT on success, NF_DROP when no port is available or the
+ * packet cannot be mangled.
+ */
+static int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
+		      struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		      char **dptr, int *dlen, struct nf_conntrack_expect *exp,
+		      char **addr_begin, int *addr_len)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port, port_limit;
+	unsigned int matchoff = *addr_begin - *dptr;
+	unsigned int matchlen = (unsigned int)*addr_len;
+	char new_addr[32];
+	unsigned int new_len;
+
+	/* Set expectations for NAT */
+	exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
+	exp->saved_ip = exp->tuple.dst.u3.ip;
+	exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+	exp->expectfn = nf_nat_redirect;
+	exp->dir = !dir;
+
+	if (lookup_existing_port(ct, ctinfo, exp))
+		goto found;
+
+	/* Try to get a UDP ports. */
+	nated_port = ntohs(exp->tuple.dst.u.udp.port);
+	if (nated_port < 1024)
+		nated_port = 1024;
+	port_limit = nated_port;
+	do {
+		exp->tuple.dst.u.udp.port = htons(nated_port);
+		if (nf_ct_expect_related(exp) == 0)
+			goto found;
+		nated_port++;
+		if (nated_port < 1024)
+			nated_port = 1024;
+	}while(nated_port != port_limit);
+
+	/* No port available */
+	if (net_ratelimit())
+		printk("nf_nat_sip: out of UDP ports\n");
+	return NF_DROP;
+
+found:
+	/* Modify signal */
+	new_len = sprintf(new_addr, "%pI4:%hu",
+			  &exp->tuple.dst.u3.ip,
+			  ntohs(exp->tuple.dst.u.udp.port));
+	if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
+				      matchoff, matchlen, new_addr, new_len)){
+		nf_ct_unexpect_related(exp);
+		return NF_DROP;
+	}
+	/* skb may have been reallocated/resized by the mangle: recompute */
+	*dptr = skb->data + protoff + sizeof(struct udphdr);
+	*dlen += new_len - matchlen;
+	*addr_begin = *dptr + matchoff;
+	*addr_len = new_len;
+
+	/* Success */
+	pr_debug("nf_nat_sip: expect SIP %pI4:%hu->%pI4:%hu\n",
+	       	 &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.udp.port),
+	       	 &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.udp.port));
+
+	return NF_ACCEPT;
+}
+
+/* Module exit: unpublish the SIP NAT hooks, then wait for in-flight RCU
+ * readers before the module text can be freed.
+ */
+static void __exit nf_nat_sip_fini(void)
+{
+	rcu_assign_pointer(nf_nat_sip_hook, NULL);
+	rcu_assign_pointer(nf_nat_rtp_hook, NULL);
+	rcu_assign_pointer(nf_nat_snat_hook, NULL);
+	rcu_assign_pointer(nf_nat_addr_hook, NULL);
+	synchronize_rcu();
+}
+
+/* Module init: publish the SIP NAT callbacks through the RCU-protected
+ * hook pointers; the BUG_ON()s assert that no other module has already
+ * claimed them.
+ */
+static int __init nf_nat_sip_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_sip_hook));
+	BUG_ON(rcu_dereference(nf_nat_rtp_hook));
+	BUG_ON(rcu_dereference(nf_nat_snat_hook));
+	BUG_ON(rcu_dereference(nf_nat_addr_hook));
+	rcu_assign_pointer(nf_nat_sip_hook, nf_nat_sip);
+	rcu_assign_pointer(nf_nat_rtp_hook, nf_nat_rtp);
+	rcu_assign_pointer(nf_nat_snat_hook, nf_nat_snat);
+	rcu_assign_pointer(nf_nat_addr_hook, nf_nat_addr);
+	return 0;
+}
+
+module_init(nf_nat_sip_init);
+module_exit(nf_nat_sip_fini);
+#else /* CONFIG_BCM_KF_NETFILTER */
 static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
 				  const char **dptr, unsigned int *datalen,
 				  unsigned int matchoff, unsigned int matchlen,
@@ -55,8 +497,8 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
 
 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
 					      matchoff, matchlen,
-					      buffer, buflen))
-			return 0;
+				      buffer, buflen))
+		return 0;
 	}
 
 	/* Reload data pointer and adjust datalen value */
@@ -502,7 +944,7 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 		if (ret == 0)
 			break;
 		else if (ret != -EBUSY) {
-			nf_ct_unexpect_related(rtp_exp);
+		nf_ct_unexpect_related(rtp_exp);
 			port = 0;
 			break;
 		}
@@ -517,6 +959,12 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 			     mediaoff, medialen, port))
 		goto err2;
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_ipv4_netfilter_nf_nat_sip(ct, port, dir));   
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 	return NF_ACCEPT;
 
 err2:
@@ -566,3 +1014,4 @@ static int __init nf_nat_sip_init(void)
 
 module_init(nf_nat_sip_init);
 module_exit(nf_nat_sip_fini);
+#endif /* CONFIG_BCM_KF_NETFILTER */
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 50009c787bcdc3ae79291adf6e688dad2d612331..1c33d226d9f6a345eca4cf8aebb136165b3b6958 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -200,6 +200,37 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
 
 static int ping_init_sock(struct sock *sk)
 {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-2851*/
+	struct net *net = sock_net(sk);
+	gid_t group = current_egid();
+	gid_t range[2];
+		struct group_info *group_info;
+		int i, j, count;
+		int ret = 0;
+
+	inet_get_ping_group_range_net(net, range, range+1);
+	if (range[0] <= group && group <= range[1])
+		return 0;
+
+		group_info = get_current_groups();
+		count = group_info->ngroups;
+	for (i = 0; i < group_info->nblocks; i++) {
+		int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
+		for (j = 0; j < cp_count; j++) {
+			group = group_info->blocks[i][j];
+			if (range[0] <= group && group <= range[1])
+				goto out_release_group;
+		}
+		count -= cp_count;
+	}
+	ret = -EACCES;
+
+out_release_group:
+	put_group_info(group_info);
+	return ret;
+	/*CVE-2014-2851*/
+#else
 	struct net *net = sock_net(sk);
 	gid_t group = current_egid();
 	gid_t range[2];
@@ -223,6 +254,8 @@ static int ping_init_sock(struct sock *sk)
 	}
 
 	return -EACCES;
+
+#endif
 }
 
 static void ping_close(struct sock *sk, long timeout)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index eea5d9ec2977c746806cdfc7800f5e06af794a8f..bfc87d1ba7d513d20d6db8282698767861d9aec8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2438,6 +2438,12 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	int iif = dev->ifindex;
 	struct net *net;
 	int res;
+	__be32 newsaddr = saddr;
+
+	if (ipv4_is_multicast(daddr)) {
+		if(strchr(dev->name, '.') && saddr == 0xc0a80101)
+			newsaddr = 0xc6336401;
+	}
 
 	net = dev_net(dev);
 
@@ -2447,12 +2453,12 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		goto skip_cache;
 
 	tos &= IPTOS_RT_MASK;
-	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
+	hash = rt_hash(daddr, newsaddr, iif, rt_genid(net));
 
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
 	     rth = rcu_dereference(rth->dst.rt_next)) {
 		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
-		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
+		     ((__force u32)rth->rt_key_src ^ (__force u32)newsaddr) |
 		     (rth->rt_route_iif ^ iif) |
 		     (rth->rt_key_tos ^ tos)) == 0 &&
 		    rth->rt_mark == skb->mark &&
@@ -2489,7 +2495,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		struct in_device *in_dev = __in_dev_get_rcu(dev);
 
 		if (in_dev) {
-			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
+			int our = ip_check_mc_rcu(in_dev, daddr, newsaddr,
 						  ip_hdr(skb)->protocol);
 			if (our
 #ifdef CONFIG_IP_MROUTE
@@ -2498,7 +2504,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			     IN_DEV_MFORWARD(in_dev))
 #endif
 			   ) {
-				int res = ip_route_input_mc(skb, daddr, saddr,
+				int res = ip_route_input_mc(skb, daddr, newsaddr,
 							    tos, dev, our);
 				rcu_read_unlock();
 				return res;
@@ -2507,7 +2513,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		rcu_read_unlock();
 		return -EINVAL;
 	}
-	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
+	res = ip_route_input_slow(skb, daddr, newsaddr, tos, dev);
 	rcu_read_unlock();
 	return res;
 }
@@ -3029,9 +3035,15 @@ static int rt_fill_info(struct net *net,
 
 		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+#if defined(CONFIG_BCM_KF_IGMP)
+			int err = ipmr_get_route(net, skb,
+						 rt->rt_src, rt->rt_dst,
+						 r, nowait, rt->rt_iif);
+#else
 			int err = ipmr_get_route(net, skb,
 						 rt->rt_src, rt->rt_dst,
 						 r, nowait);
+#endif
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d6feb1ef4f2aa16533481aed8ae8bd0e73a8c2eb..8302b8a444694ebc15b394d3f1516dbac31d93d9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -278,6 +278,10 @@
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+     && (defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)))
+#include <linux/bcm_m2mdma.h>
+#endif
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
@@ -1418,6 +1422,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int copied_early = 0;
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+	unsigned int dma_cookie=0;
+#endif
 
 	lock_sock(sk);
 
@@ -1464,8 +1471,28 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	do {
 		u32 offset;
 
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+
+		if (flags & MSG_NOCATCHSIG) {
+			if (signal_pending(current)) {
+				if (sigismember(&current->pending.signal, SIGQUIT) || 
+				    sigismember(&current->pending.signal, SIGABRT) ||
+				    sigismember(&current->pending.signal, SIGKILL) ||
+				    sigismember(&current->pending.signal, SIGTERM) ||
+				    sigismember(&current->pending.signal, SIGSTOP)) {
+
+					if (copied)
+						break;
+					copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
+					break;
+				}
+			}
+		}
+		else if (tp->urg_data && tp->urg_seq == *seq) {
+#else
 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
 		if (tp->urg_data && tp->urg_seq == *seq) {
+#endif
 			if (copied)
 				break;
 			if (signal_pending(current)) {
@@ -1693,8 +1720,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			} else
 #endif
 			{
-				err = skb_copy_datagram_iovec(skb, offset,
-						msg->msg_iov, used);
+#if defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)
+				if(msg->msg_flags & MSG_KERNSPACE)
+					err = skb_copy_datagram_to_kernel_iovec(skb,
+							offset, msg->msg_iov, used, &dma_cookie);
+				else
+#endif
+					err = skb_copy_datagram_iovec(skb, offset,
+							msg->msg_iov, used);
 				if (err) {
 					/* Exception. Bailout! */
 					if (!copied)
@@ -1773,6 +1806,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	tcp_cleanup_rbuf(sk, copied);
 
 	release_sock(sk);
+
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+     && (defined(CONFIG_BCM_KF_M2M_DMA) && defined(CONFIG_BCM_M2M_DMA)))
+	bcm_m2m_wait_for_complete(dma_cookie);
+#endif
 	return copied;
 
 out:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 56a9c8d0bef1300aa5dd204b0b23a21374dbd865..e984f8dd9d7a5632f8e67fe19b46933fb82d2a69 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -75,6 +75,11 @@
 #include <asm/unaligned.h>
 #include <net/netdma.h>
 
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+     && (defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)))
+#include <linux/nbuff.h>
+#endif
+
 int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
@@ -4499,9 +4504,23 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
 			NET_INC_STATS_BH(sock_net(sk),
 					 LINUX_MIB_TCPRCVCOALESCE);
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+		&& (defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)))
+			/* cache flush is needed for M2M DMA & for optimization of
+			 * cache invalidate during recycle using skb->dirty_p
+			 */ 
+			{
+				unsigned char *data = skb_put(skb1, skb->len);
+
+				BUG_ON(skb_copy_bits(skb, 0, data, skb->len));
+
+				cache_flush_len(data, skb->len);
+			}
+#else
 			BUG_ON(skb_copy_bits(skb, 0,
 					     skb_put(skb1, skb->len),
 					     skb->len));
+#endif
 			TCP_SKB_CB(skb1)->end_seq = end_seq;
 			TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
 			__kfree_skb(skb);
@@ -4803,6 +4822,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		skb_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
+
 		while (copy > 0) {
 			int offset = start - TCP_SKB_CB(skb)->seq;
 			int size = TCP_SKB_CB(skb)->end_seq - start;
@@ -4810,8 +4830,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			BUG_ON(offset < 0);
 			if (size > 0) {
 				size = min(copy, size);
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+		&& (defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)))
+				/*cache flush is needed for M2M DMA & for optimization of
+				 * cache invalidate during recycle using skb->dirty_p
+				 */ 
+				{
+					unsigned char *data = skb_put(nskb, size);
+
+					if (skb_copy_bits(skb, offset, data, size))
+						BUG();
+
+					cache_flush_len(data, size);
+				}
+#else
 				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
 					BUG();
+#endif
 				TCP_SKB_CB(nskb)->end_seq += size;
 				copy -= size;
 				start += size;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0cb86ceb652ff66432ba584fedef8231d94decc6..bc9ef71d0bcc819b43724c0a6de5de710431c21d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -85,6 +85,10 @@
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/nbuff.h>
+#endif
+
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
@@ -1657,6 +1661,147 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
 
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+
+/* Stub: look up an skb being aggregated for @flowid.  Always returns
+ * NULL for now (no coalescing); the TODO marks it as a placeholder.
+ */
+inline struct sk_buff *bcm_find_skb_by_flow_id(uint32_t flowid)
+{
+	/* TODO add this function later,needed for coalescing */
+	return NULL;
+}
+
+/* Translate a fast-path FkBuff into an skb ready for local TCP receive:
+ * attach @dev and the dst cached in the fkb, set header offsets for an
+ * Ethernet frame with a plain IPv4 header (14 = MAC header length,
+ * 34 = MAC + IPv4 header; explicitly assumes no IP options), and advance
+ * skb->data to the start of the TCP header.  Consumes the fkb on
+ * translation failure and returns NULL in that case.
+ */
+inline struct sk_buff *bcm_fkb_to_skb_tcp(FkBuff_t *fkb, struct net_device *dev)
+{
+	struct sk_buff *skb;
+
+	/* find the skb for flowid or allocate a new skb */
+	skb = bcm_find_skb_by_flow_id(fkb->flowid);
+
+	if(!skb)
+	{
+		skb = skb_xlate_dp(fkb, NULL);
+		if(!skb)
+		{
+			nbuff_free(fkb);
+			return NULL;
+		}
+
+		skb->dev = dev;
+
+		skb->mark=0;
+		skb->priority=0;
+
+		/*TODO check if we can use skb_dst_set_noref as blog holds reference*/
+		dst_hold(fkb->dst_entry);
+		skb_dst_set(skb, fkb->dst_entry);
+
+		/*initialize ip & tcp header related fields in skb */
+		skb_set_mac_header(skb, 0); 
+		skb_set_network_header(skb, 14);
+		skb_set_transport_header(skb, 34);/*assumes no ip options*/
+
+		/*set the data pointer to start of TCP header */
+		skb->data += 34;
+		skb->len -= 34;
+
+		skb->pkt_type = PACKET_HOST;
+
+#if ((defined(CONFIG_BCM_KF_RECVFILE) && defined(CONFIG_BCM_RECVFILE)) \
+     && (defined(CONFIG_BCM963138) || defined(CONFIG_BCM963148)))
+		/* on 63138 & 63148 pkt is invalidated on RX, so we can optimize/reduce
+		 * invaldiation during recycle by setting dirtyp.
+		 * the assumption here is pkt is not modified(not dirty in cache)
+		 * after standard tcp header
+		 */
+		{
+			/*for now just use this for samba ports */
+			uint16_t dport=ntohs(tcp_hdr(skb)->dest);
+
+			/* 139/445: NetBIOS-over-TCP and SMB; 20 = minimal TCP header */
+			if((dport == 139) ||  (dport == 445))
+				skb_shinfo(skb)->dirty_p = skb->data +20;
+		}
+#endif
+	}
+
+	return skb;
+}
+
+
+/* inject the packet into ipv4_tcp_stack directly from the network driver.
+ * Pseudo-device xmit handler for bcm_tcp4_netdev: normalises the incoming
+ * nbuff (either an FkBuff or an skb carrying one) into an skb positioned
+ * at the TCP header (same 14/34 offsets as bcm_fkb_to_skb_tcp, i.e. no
+ * IP options assumed), then feeds it to tcp_v4_rcv().  Always returns 0;
+ * an untranslatable fkb is silently dropped.
+ */
+static int bcm_tcp_v4_recv(pNBuff_t pNBuff, struct net_device *dev)
+{
+
+	struct sk_buff *skb;
+
+	if(IS_FKBUFF_PTR(pNBuff))
+	{
+		/* Translate the fkb to skb */
+		skb = bcm_fkb_to_skb_tcp(PNBUFF_2_FKBUFF(pNBuff), dev); 
+	}
+	else
+	{
+		FkBuff_t * fkb;
+		skb = PNBUFF_2_SKBUFF(pNBuff);
+
+		fkb = (FkBuff_t *)&skb->fkbInSkb;
+
+		skb->dev = dev;
+
+		/*TODO check if we can use skb_dst_set_noref as blog holds reference*/
+		dst_hold(fkb->dst_entry);
+		skb_dst_set(skb, fkb->dst_entry);
+
+		/*initialize ip & tcp header related fields in skb */
+		skb_set_mac_header(skb, 0); 
+		skb_set_network_header(skb, 14);
+		skb_set_transport_header(skb, 34);/*assumes no ip options*/
+
+		/*set the data pointer to start of TCP header */
+		skb->data += 34;
+		skb->len -= 34;
+
+		skb->pkt_type = PACKET_HOST;
+	}
+
+	/* calling tcp_v4_rcv with blog lock can cause deadlock issue
+	 * if a xmit is trigged by tcp_v4_rcv
+	 *
+	 * For now release blog lock, as there is nothing to protect with blog 
+	 * lock from this point
+	 *
+	 * bh_disable is needed to prevent deadlock on sock_lock when TCP timers 
+	 * are executed
+	 */
+	if(skb)
+	{
+		local_bh_disable();
+		blog_unlock();
+		tcp_v4_rcv(skb);
+		blog_lock();
+		local_bh_enable();
+	}
+	return 0;
+}
+
+/* Ops table for the TCP-injection pseudo-device: only start_xmit is
+ * implemented — "transmitting" to this device delivers the packet into
+ * the local IPv4 TCP stack via bcm_tcp_v4_recv().
+ */
+static const struct net_device_ops bcm_tcp4_netdev_ops = {
+	.ndo_open   = NULL,
+	.ndo_stop   = NULL,
+	.ndo_start_xmit  = (HardStartXmitFuncP)bcm_tcp_v4_recv,
+	.ndo_set_mac_address  = NULL,
+	.ndo_do_ioctl   = NULL,
+	.ndo_tx_timeout   = NULL,
+	.ndo_get_stats      = NULL,
+	.ndo_change_mtu     = NULL 
+};
+
+/* Statically defined pseudo-device used as the blog_emit() target in
+ * tcp_v4_rcv(); never registered as a real interface.
+ */
+struct net_device  bcm_tcp4_netdev = {
+	.name = "tcp4_netdev",
+	.mtu  = 64*1024,/*set it to 64K incase we aggregate pkts in HW in future */
+	.netdev_ops = &bcm_tcp4_netdev_ops
+};
+
+#endif
+
 /*
  *	From tcp_input.c
  */
@@ -1710,6 +1855,26 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	/*TODO can we move this deeper into TCP stack*/
+
+	if((sk->sk_state == TCP_ESTABLISHED) && skb->blog_p
+			&& !(skb->dev->priv_flags & IFF_WANDEV))
+	{
+		/*retain the orignal netdev in skb */
+		struct net_device *tmpdev;
+
+		tmpdev = skb->dev;
+		skb->dev = &bcm_tcp4_netdev;
+		skb->data = skb_mac_header(skb);
+		skb->len += 34;
+		blog_emit(skb, tmpdev, TYPE_ETH, 0, BLOG_TCP4_LOCALPHY);
+		skb->dev= tmpdev;
+		skb->data = skb_transport_header(skb);
+		skb->len -= 34;
+	}
+#endif
+
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 686934acfac18eac215a17b8d4d01ea5ad03c860..c7c00ce418645b5b950da86beca7941551321dd2 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -10,6 +10,12 @@ ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
 		raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
 		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+ipv6-objs += ping.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+
 ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7d5cb975cc6f8e6c22581daaa638b6b3df5a52d2..5e6d91f19a7f80fea0d8ec0fe621eb3d1b1576d3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -162,6 +162,10 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 			       struct net_device *dev);
 
+#if defined(CONFIG_BCM_KF_IP)
+static struct inet6_dev * ipv6_find_idev(struct net_device *dev);
+#endif
+
 static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
 
 static struct ipv6_devconf ipv6_devconf __read_mostly = {
@@ -196,7 +200,11 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
+#if defined(CONFIG_BCM_KF_IP)
+	.accept_dad		= 2,
+#else
 	.accept_dad		= 1,
+#endif
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -230,7 +238,11 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
+#if defined(CONFIG_BCM_KF_IP)
+	.accept_dad		= 2,
+#else
 	.accept_dad		= 1,
+#endif
 };
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
@@ -360,6 +372,19 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
 	ndev->cnf.mtu6 = dev->mtu;
 	ndev->cnf.sysctl = NULL;
+
+#if defined(CONFIG_BCM_KF_IP)
+	/* 
+	* At bootup time, there is no interfaces attached to brX. Therefore, DAD of
+	* brX cannot take any effect and we cannot pass IPv6 ReadyLogo. We here
+	* increase DAD period of brX to 4 sec which should be long enough for our
+	* system to attach all interfaces to brX. Thus, DAD of brX can send/receive
+	* packets through attached interfaces.
+	*/
+	if ( !strncmp(dev->name, "br", 2) )
+		ndev->cnf.dad_transmits = 4;
+#endif
+
 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
 	if (ndev->nd_parms == NULL) {
 		kfree(ndev);
@@ -434,9 +459,18 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 	/* Join all-node multicast group */
 	ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* Join all-router multicast group if forwarding is set */
+	if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST) &&
+	   !(dev->priv_flags & IFF_WANDEV))
+	{
+		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+	}
+#else
 	/* Join all-router multicast group if forwarding is set */
 	if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
 		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+#endif 
 
 	return ndev;
 }
@@ -2493,6 +2527,53 @@ static void addrconf_sit_config(struct net_device *dev)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_IP)
+static int addrconf_update_lladdr(struct net_device *dev)
+{
+	struct inet6_dev *idev;
+	struct inet6_ifaddr *ifladdr = NULL;
+	struct inet6_ifaddr *ifp;
+	struct in6_addr addr6;
+	int err = -EADDRNOTAVAIL;
+
+	ASSERT_RTNL();
+
+	idev = __in6_dev_get(dev);
+	if (idev != NULL)
+	{
+		read_lock_bh(&idev->lock);
+        list_for_each_entry(ifp, &idev->addr_list, if_list) {
+			if (IFA_LINK == ifp->scope)
+			{
+				ifladdr = ifp;
+				in6_ifa_hold(ifp);
+				break;
+			}
+		}
+		read_unlock_bh(&idev->lock);
+
+		if ( ifladdr )
+		{
+			/* delete the address */
+			ipv6_del_addr(ifladdr);
+
+			/* add new LLA */ 
+			memset(&addr6, 0, sizeof(struct in6_addr));
+			addr6.s6_addr32[0] = htonl(0xFE800000);
+
+			if (0 == ipv6_generate_eui64(addr6.s6_addr + 8, dev))
+			{
+				addrconf_add_linklocal(idev, &addr6);
+				err = 0;
+			}
+		}
+	}
+
+	return err;
+
+}
+#endif
+
 #if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
 static void addrconf_gre_config(struct net_device *dev)
 {
@@ -2712,6 +2793,12 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		}
 		break;
 
+#if defined(CONFIG_BCM_KF_IP)
+	case NETDEV_CHANGEADDR:
+		addrconf_update_lladdr(dev);
+		break;
+#endif
+
 	case NETDEV_PRE_TYPE_CHANGE:
 	case NETDEV_POST_TYPE_CHANGE:
 		addrconf_type_change(dev, event);
@@ -2869,7 +2956,16 @@ static void addrconf_rs_timer(unsigned long data)
 	if (idev->dead || !(idev->if_flags & IF_READY))
 		goto out;
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* WAN interface needs to act as a host. */
+	if (idev->cnf.forwarding && 
+            (!(idev->dev->priv_flags & IFF_WANDEV) ||
+            ((idev->dev->priv_flags & IFF_WANDEV) && 
+            netdev_path_is_root(idev->dev))
+        ))
+#else
 	if (idev->cnf.forwarding)
+#endif
 		goto out;
 
 	/* Announcement received after solicitation was sent */
@@ -3032,8 +3128,15 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 	   router advertisements, start sending router solicitations.
 	 */
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* WAN interface needs to act as a host. */
+	if (( (ifp->idev->cnf.forwarding == 0) || 
+		((ifp->idev->dev->priv_flags & IFF_WANDEV) && 
+		!netdev_path_is_root(ifp->idev->dev)) ) &&
+#else
 	if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
 	     ifp->idev->cnf.accept_ra == 2) &&
+#endif
 	    ifp->idev->cnf.rtr_solicits > 0 &&
 	    (dev->flags&IFF_LOOPBACK) == 0 &&
 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
@@ -3099,7 +3202,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 			state->offset++;
 			if (net_eq(dev_net(ifa->idev->dev), net))
 				return ifa;
-		}
+	}
 
 		/* prepare for next bucket */
 		state->offset = 0;
@@ -4231,11 +4334,21 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 		 */
 		if (!(ifp->rt->rt6i_node))
 			ip6_ins_rt(ifp->rt);
+#if defined(CONFIG_BCM_KF_WANDEV)
+		if (ifp->idev->cnf.forwarding && 
+			!(ifp->idev->dev->priv_flags & IFF_WANDEV))
+#else
 		if (ifp->idev->cnf.forwarding)
+#endif
 			addrconf_join_anycast(ifp);
 		break;
 	case RTM_DELADDR:
+#if defined(CONFIG_BCM_KF_WANDEV)
+		if (ifp->idev->cnf.forwarding && 
+			!(ifp->idev->dev->priv_flags & IFF_WANDEV))
+#else
 		if (ifp->idev->cnf.forwarding)
+#endif
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
 		dst_hold(&ifp->rt->dst);
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e595d7cb74c9018009305710077d30827a..acf24a7ab3e09167659f2370e5aee72ee9226b3b 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -28,7 +28,11 @@ int __ipv6_addr_type(const struct in6_addr *addr)
 {
 	__be32 st;
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&st, &addr->s6_addr[0], sizeof(__be32));
+#else
 	st = addr->s6_addr32[0];
+#endif
 
 	/* Consider all addresses with the first three bits different of
 	   000 and 111 as unicasts.
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2ae79dbeec2feb356f1d91f10a9853cc5f1b5706..91c55e7b10c66468d1d0458b3c3834c033398c6f 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -38,6 +38,10 @@
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 #define IPV6HDR_BASELEN 8
 
 struct tmp_ext {
@@ -345,6 +349,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct ah_data *ahp;
 	struct tmp_ext *iph_ext;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
 	ahp = x->data;
 	ahash = ahp->ahash;
 
@@ -417,6 +425,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	AH_SKB_CB(skb)->tmp = iph_base;
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 20))
+	{
+		req->alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->alloc_buff_spu = 0;
+	}
+
+	/* not used for output */   
+	req->headerLen = 0;
+#endif
+
 	err = crypto_ahash_digest(req);
 	if (err) {
 		if (err == -EINPROGRESS)
@@ -510,6 +534,10 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	int nfrags;
 	int err = -ENOMEM;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
 	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
 		goto out;
 
@@ -576,6 +604,22 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	AH_SKB_CB(skb)->tmp = work_iph;
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 20))
+	{
+		req->alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->alloc_buff_spu = 0;
+	}
+
+	/* offset to icv */
+	req->headerLen = &ah->auth_data[0] - skb->data;
+#endif
+
 	err = crypto_ahash_digest(req);
 	if (err) {
 		if (err == -EINPROGRESS)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 65dd5433f08b5e57095e47097920fc5c4ac02c35..b3e6ffa276430029d8e5e4ee85eae57c06fdf7d9 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -42,6 +42,10 @@
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 struct esp_skb_cb {
 	struct xfrm_skb_cb xfrm;
 	void *tmp;
@@ -162,6 +166,14 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	u8 *tail;
 	__be32 *seqhi;
 	struct esp_data *esp = x->data;
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	u8 next_hdr;
+#endif
+
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
 
 	/* skb is pure payload to encrypt */
 	err = -ENOMEM;
@@ -222,6 +234,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	} while (0);
 	tail[plen - 2] = plen - 2;
 	tail[plen - 1] = *skb_mac_header(skb);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	next_hdr = *skb_mac_header(skb);
+#endif
+
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
@@ -252,6 +268,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			      XFRM_SKB_CB(skb)->seq.output.low);
 
 	ESP_SKB_CB(skb)->tmp = tmp;
+
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->areq.data_offset = (unsigned char *)esph - skb->data;
+	req->areq.next_hdr    = next_hdr;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if((skb_headroom(skb) < 12) ||
+	   (skb_tailroom(skb) < 16))
+	{
+		req->areq.alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->areq.alloc_buff_spu = 0;
+	}
+	req->areq.headerLen = esph->enc_data + crypto_aead_ivsize(aead) - skb->data;
+#endif
+#endif
+
 	err = crypto_aead_givencrypt(req);
 	if (err == -EINPROGRESS)
 		goto error;
@@ -334,6 +370,13 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 	u8 *iv;
 	struct scatterlist *sg;
 	struct scatterlist *asg;
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	int macLen;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
 		ret = -EINVAL;
@@ -396,6 +439,28 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 	aead_request_set_crypt(req, sg, sg, elen, iv);
 	aead_request_set_assoc(req, asg, assoclen);
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->data_offset = 0;
+	req->next_hdr    = 0;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if ( (skb->data >= skb_mac_header(skb)) &&
+	     (skb_headroom(skb) >= ((skb->data - skb_mac_header(skb)) + 12)) &&
+	     (skb_tailroom(skb) >= 16))
+	{
+		macLen = skb->data - skb_mac_header(skb);
+		req->alloc_buff_spu = 0;
+	}
+	else
+	{
+		macLen = 0;
+		req->alloc_buff_spu = 1;
+	}
+	req->headerLen = sizeof(*esph) + crypto_aead_ivsize(aead) + macLen;
+#endif
+#endif
+
 	ret = crypto_aead_decrypt(req);
 	if (ret == -EINPROGRESS)
 		goto out;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a6342927e5365af6616af82815e603619e..f5bf5eaa0de5b62f61a3a687e9f20cc9ad94aaf2 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -428,7 +428,11 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	 *	and anycast addresses will be checked later.
 	 */
 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
+#if defined(CONFIG_BCM_KF_FAP)	
+		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source type %d\n", type);
+#else
 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
+#endif				
 		return;
 	}
 
@@ -646,7 +650,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	struct inet6_dev *idev = __in6_dev_get(dev);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr saddr, daddr;
+#else
 	const struct in6_addr *saddr, *daddr;
+#endif
 	const struct ipv6hdr *orig_hdr;
 	struct icmp6hdr *hdr;
 	u8 type;
@@ -673,22 +681,43 @@ static int icmpv6_rcv(struct sk_buff *skb)
 
 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(struct in6_addr));
+	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(struct in6_addr));
+#else
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = &ipv6_hdr(skb)->daddr;
+#endif
 
 	/* Perform checksum. */
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		if (!csum_ipv6_magic(&saddr, &daddr, skb->len, IPPROTO_ICMPV6,
+				     skb->csum))
+			break;
+#else
 		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
 				     skb->csum))
 			break;
+#endif
 		/* fall through */
 	case CHECKSUM_NONE:
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&saddr, &daddr, skb->len,
+					     IPPROTO_ICMPV6, 0));
+#else
 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
 					     IPPROTO_ICMPV6, 0));
+#endif
 		if (__skb_checksum_complete(skb)) {
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
+				       &saddr, &daddr);
+#else
 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
 				       saddr, daddr);
+#endif
 			goto discard_it;
 		}
 	}
@@ -721,8 +750,15 @@ static int icmpv6_rcv(struct sk_buff *skb)
 			goto discard_it;
 		hdr = icmp6_hdr(skb);
 		orig_hdr = (struct ipv6hdr *) (hdr + 1);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		memcpy(&saddr, &orig_hdr->saddr, sizeof(struct in6_addr));
+		memcpy(&daddr, &orig_hdr->daddr, sizeof(struct in6_addr));
+		rt6_pmtu_discovery(&daddr, &saddr, dev,
+				   ntohl(hdr->icmp6_mtu));
+#else
 		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
 				   ntohl(hdr->icmp6_mtu));
+#endif
 
 		/*
 		 *	Drop through to notify
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 1ca5d45a12e8973408ececb5ebd4e1daa3e391f9..42022e8e8da976448d2feca36da7fc6890e5af55 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,8 +45,6 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 
-
-
 inline int ip6_rcv_finish( struct sk_buff *skb)
 {
 	if (skb_dst(skb) == NULL)
@@ -195,7 +193,10 @@ static int ip6_input_finish(struct sk_buff *skb)
 
 		if (ipprot->flags & INET6_PROTO_FINAL) {
 			const struct ipv6hdr *hdr;
-
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+			struct in6_addr srcAddr;
+			struct in6_addr dstAddr;
+#endif
 			/* Free reference early: we don't need it any more,
 			   and it may hold ip_conntrack module loaded
 			   indefinitely. */
@@ -204,11 +205,21 @@ static int ip6_input_finish(struct sk_buff *skb)
 			skb_postpull_rcsum(skb, skb_network_header(skb),
 					   skb_network_header_len(skb));
 			hdr = ipv6_hdr(skb);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+			memcpy(&srcAddr, &hdr->saddr, sizeof(struct in6_addr));
+			memcpy(&dstAddr, &hdr->daddr, sizeof(struct in6_addr));
+			if (ipv6_addr_is_multicast(&dstAddr) &&
+			    !ipv6_chk_mcast_addr(skb->dev, &dstAddr,
+			    &srcAddr) &&
+			    !ipv6_is_mld(skb, nexthdr))
+				goto discard;
+#else
 			if (ipv6_addr_is_multicast(&hdr->daddr) &&
 			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
 			    &hdr->saddr) &&
 			    !ipv6_is_mld(skb, nexthdr))
 				goto discard;
+#endif
 		}
 		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
 		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -252,20 +263,32 @@ int ip6_mc_input(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
 	int deliver;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr dAddr;
+#endif
 
 	IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
 			 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
 			 skb->len);
 
 	hdr = ipv6_hdr(skb);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&dAddr, &hdr->daddr, sizeof(struct in6_addr));
+	deliver = ipv6_chk_mcast_addr(skb->dev, &dAddr, NULL);
+#else
 	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
+#endif
 
 #ifdef CONFIG_IPV6_MROUTE
 	/*
 	 *      IPv6 multicast router mode is now supported ;)
 	 */
 	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	    !(ipv6_addr_type(&dAddr) & IPV6_ADDR_LINKLOCAL) &&
+#else
 	    !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
+#endif
 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
 		/*
 		 * Okay, we try to forward - split and duplicate
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 13e5399b1cd95a4cba1dd639c53efcf6dc4e0198..c95d3c8e10a13f4713fa4cc8ea36e8fc8be2d4ac 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -163,6 +163,12 @@ int ip6_output(struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	uint32_t mtu = ip6_skb_dst_mtu(skb);
+	Blog_t * blog_p = blog_ptr(skb);
+	if (blog_p && blog_p->minMtu > mtu)
+		blog_p->minMtu = mtu;
+#endif
 	if (unlikely(idev->cnf.disable_ipv6)) {
 		IP6_INC_STATS(dev_net(dev), idev,
 			      IPSTATS_MIB_OUTDISCARDS);
@@ -382,6 +388,35 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
 	return dst_output(skb);
 }
 
+#if defined(CONFIG_BCM_KF_IP)
+static inline int isULA(const struct in6_addr *addr)
+{
+	__be32 st;
+
+	st = addr->s6_addr32[0];
+
+	/* RFC 4193 */
+	if ((st & htonl(0xFE000000)) == htonl(0xFC000000))
+		return	1;
+	else
+		return	0;
+}
+
+static inline int isSpecialAddr(const struct in6_addr *addr)
+{
+	__be32 st;
+
+	st = addr->s6_addr32[0];
+
+	/* RFC 5156 */
+	if (((st & htonl(0xFFFFFFFF)) == htonl(0x20010db8)) ||
+		((st & htonl(0xFFFFFFF0)) == htonl(0x20010010)))
+		return	1;
+	else
+		return	0;
+}
+#endif
+
 int ip6_forward(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -439,6 +474,14 @@ int ip6_forward(struct sk_buff *skb)
 		return -ETIMEDOUT;
 	}
 
+#if defined(CONFIG_BCM_KF_IP)
+    /* No traffic with ULA address should be forwarded at WAN intf */
+	if ( isULA(&hdr->daddr) || isULA(&hdr->saddr) )
+		if ((skb->dev->priv_flags & IFF_WANDEV) || 
+			(dst->dev->priv_flags & IFF_WANDEV) )
+			goto drop;
+#endif
+
 	/* XXX: idev->cnf.proxy_ndp? */
 	if (net->ipv6.devconf_all->proxy_ndp &&
 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
@@ -490,7 +533,19 @@ int ip6_forward(struct sk_buff *skb)
 
 		/* This check is security critical. */
 		if (addrtype == IPV6_ADDR_ANY ||
+#if defined(CONFIG_BCM_KF_IP)
+			/* 
+			 * RFC 5156: IPv4 mapped addr and IPv4-compatible addr
+			 * should not appear on the Internet. In addition,
+			 * 2001:db8::/32 and 2001:10::/28 should not appear either.
+			 */
+			(addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK | 
+				IPV6_ADDR_COMPATv4 | IPV6_ADDR_MAPPED | 
+				IPV6_ADDR_SITELOCAL)) ||
+			isSpecialAddr(&hdr->saddr))
+#else
 		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
+#endif
 			goto error;
 		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
@@ -526,6 +581,13 @@ int ip6_forward(struct sk_buff *skb)
 
 	hdr->hop_limit--;
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* Never forward a packet from a WAN intf to the other WAN intf */
+	if( (skb->dev) && (dst->dev) && 
+		((skb->dev->priv_flags & dst->dev->priv_flags) & IFF_WANDEV) )
+		goto drop;
+#endif
+
 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
@@ -898,6 +960,9 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	kfree_skb(skb);
 	return err;
 }
+#if defined(CONFIG_BCM_KF_IP)
+EXPORT_SYMBOL_GPL(ip6_fragment);
+#endif
 
 static inline int ip6_rt_check(const struct rt6key *rt_key,
 			       const struct in6_addr *fl_addr,
@@ -1122,6 +1187,11 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 	 * udp datagram
 	 */
 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)		
+		/*CVE-2013-4387*/
+		struct frag_hdr fhdr;
+#endif
+
 		skb = sock_alloc_send_skb(sk,
 			hh_len + fragheaderlen + transhdrlen + 20,
 			(flags & MSG_DONTWAIT), &err);
@@ -1142,12 +1212,16 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum = 0;
+
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	}
 
 	err = skb_append_datato_frags(sk,skb, getfrag, from,
 				      (length - transhdrlen));
 	if (!err) {
 		struct frag_hdr fhdr;
+#endif
+		
 
 		/* Specify the length of each IPv6 datagram fragment.
 		 * It has to be a multiple of 8.
@@ -1158,15 +1232,22 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		ipv6_select_ident(&fhdr, rt);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
-
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 		return 0;
+#endif
 	}
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	/* There is not enough support do UPD LSO,
 	 * so follow normal path
 	 */
 	kfree_skb(skb);
 
 	return err;
+#else
+	/*CVE-2013-4387*/
+	return skb_append_datato_frags(sk, skb, getfrag, from,
+				       (length - transhdrlen));
+#endif
 }
 
 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1337,7 +1418,31 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	 * --yoshfuji
 	 */
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2013-4387*/
+	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
+					   sk->sk_protocol == IPPROTO_RAW)) {
+		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+		return -EMSGSIZE;
+	}
+
+	skb = skb_peek_tail(&sk->sk_write_queue);
+#endif
 	cork->length += length;
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)	
+	/*CVE-2013-4387*/
+	if (((length > mtu) ||
+		/*CVE-2013-4470*/
+	     (skb && skb_shinfo(skb)->nr_frags)) &&
+	    (sk->sk_protocol == IPPROTO_UDP) &&
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
+		err = ip6_ufo_append_data(sk, getfrag, from, length,
+					  hh_len, fragheaderlen,
+					  transhdrlen, mtu, flags, rt);
+		if (err)
+			goto error;
+		return 0;
+#else
 	if (length > mtu) {
 		int proto = sk->sk_protocol;
 		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
@@ -1355,9 +1460,15 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 				goto error;
 			return 0;
 		}
+#endif
 	}
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)	
+	/*CVE-2013-4387*/
+	if (!skb)
+#else
 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+#endif
 		goto alloc_new_skb;
 
 	while (length > 0) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6a09cd66bc2cad80cb227d8514e5466658..bcd8f6a6ede2bad2a8da3d76fe2ddd7a2a6fc796 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -54,6 +54,10 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
@@ -76,6 +80,9 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
 
 static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
+#if defined(CONFIG_BCM_KF_IP)
+extern int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+#endif
 
 static int ip6_tnl_net_id __read_mostly;
 struct ip6_tnl_net {
@@ -763,6 +770,13 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		blog_lock();
+		blog_link(TOS_MODE, blog_ptr(skb), NULL, DIR_RX, 
+			(t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ?
+				BLOG_TOS_INHERIT : BLOG_TOS_FIXED);
+		blog_unlock();
+#endif
 		__skb_tunnel_rx(skb, t->dev);
 
 		dscp_ecn_decapsulate(t, ipv6h, skb);
@@ -898,7 +912,10 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	u8 proto;
 	int err = -1;
 	int pkt_len;
-
+#if defined(CONFIG_BCM_KF_IP)
+	u8 needFrag = 0;
+#endif
+   
 	if (!fl6->flowi6_mark)
 		dst = ip6_tnl_dst_check(t);
 	if (!dst) {
@@ -936,8 +953,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 	if (skb->len > mtu) {
 		*pmtu = mtu;
+#if defined(CONFIG_BCM_KF_IP)
+		needFrag = 1;
+#else      
 		err = -EMSGSIZE;
 		goto tx_err_dst_release;
+#endif
 	}
 
 	/*
@@ -983,7 +1004,17 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	ipv6h->daddr = fl6->daddr;
 	nf_reset(skb);
 	pkt_len = skb->len;
+#if defined(CONFIG_BCM_KF_IP)
+	if (needFrag) {
+		skb->local_df = 1;
+		ip6_fragment(skb, ip6_local_out);
+	}
+	else {
+		err = ip6_local_out(skb);
+	}
+#else
 	err = ip6_local_out(skb);
+#endif            
 
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
@@ -1034,6 +1065,14 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(TOS_MODE, blog_ptr(skb), NULL, DIR_TX,
+		(t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) ?
+			BLOG_TOS_INHERIT : BLOG_TOS_FIXED);
+	blog_unlock();
+#endif
+
 	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
 		/* XXX: send ICMP error even if DF is not set. */
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362e0af558a9ff77824bc78c1b3677369855..130f86c7788b01dac3e511c981b79216265f4850 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -990,6 +990,38 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_IGMP)
+static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
+					   struct in6_addr *origin,
+					   struct in6_addr *mcastgrp,
+					   mifi_t mifi)
+{
+	int line = MFC6_HASH(mcastgrp, origin);
+	struct mfc6_cache *c = NULL;
+	struct in6_addr nullOrigin;
+
+	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
+		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp) &&
+		    (c->mf6c_parent == mifi))
+			return c;
+	}
+
+	/* for ASM multicast source does not matter so need to check
+	   for an entry with NULL origin as well */
+	memset(&nullOrigin, 0, sizeof(struct in6_addr));
+	line = MFC6_HASH(mcastgrp, &nullOrigin);
+
+	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
+		if (ipv6_addr_equal(&c->mf6c_origin, &nullOrigin) &&
+			ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp) &&
+		    (c->mf6c_parent == mifi))
+			return c;
+	}
+   
+	return NULL;
+}
+#else
 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
 					   const struct in6_addr *origin,
 					   const struct in6_addr *mcastgrp)
@@ -1004,6 +1036,7 @@ static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
 	}
 	return NULL;
 }
+#endif
 
 /*
  *	Allocate a multicast cache entry
@@ -1243,8 +1276,14 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
 	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
+#if defined(CONFIG_BCM_KF_IGMP)
+		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr) &&
+		    (c->mf6c_parent == mfc->mf6cc_parent)) {
+#else
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+#endif
 			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
@@ -1399,8 +1438,14 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
+#if defined(CONFIG_BCM_KF_IGMP)
+		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr) &&
+		    (c->mf6c_parent == mfc->mf6cc_parent)) {
+#else
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+#endif
 			found = true;
 			break;
 		}
@@ -1790,7 +1835,11 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
+#if defined(CONFIG_BCM_KF_IGMP)
+		c = NULL;
+#else
 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+#endif
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1945,7 +1994,12 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 	 * result in receiving multiple packets.
 	 */
 	dev = vif->dev;
+#if defined(CONFIG_BCM_KF_IGMP)
+   /* skb->dev is the soruce device. It should not be 
+      set to the destination device */
+#else
 	skb->dev = dev;
+#endif
 	vif->pkt_out++;
 	vif->bytes_out += skb->len;
 
@@ -2024,8 +2078,15 @@ static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                                {
+					blog_clone(skb, blog_ptr(skb2));
+#endif
 					ip6mr_forward2(net, mrt, skb2, cache, psend);
-			}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                                }
+#endif
+                        }
 			psend = ct;
 		}
 	}
@@ -2062,8 +2123,26 @@ int ip6_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
+
+
+#if defined(CONFIG_BCM_KF_IGMP)
+	/* mroute6 should not apply to MLD traffic
+	   in addition it does not make sense for TCP protocol to be used
+	   for multicast so just check for UDP */
+	if( ipv6_hdr(skb)->nexthdr == IPPROTO_UDP )
+	{
+		mifi_t mifi = ip6mr_find_vif(mrt, skb->dev);
+		cache = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, 
+		                         &ipv6_hdr(skb)->daddr, mifi);
+	}
+	else
+	{
+		cache = NULL;
+	}
+#else
 	cache = ip6mr_cache_find(mrt,
 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
+#endif
 
 	/*
 	 *	No usable cache entry
@@ -2129,8 +2208,14 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	return -EMSGSIZE;
 }
 
+#if defined(CONFIG_BCM_KF_IGMP)
+int ip6mr_get_route(struct net *net,
+		    struct sk_buff *skb, struct rtmsg *rtm, int nowait,
+		    unsigned short ifIndex)
+#else
 int ip6mr_get_route(struct net *net,
 		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+#endif
 {
 	int err;
 	struct mr6_table *mrt;
@@ -2142,7 +2227,25 @@ int ip6mr_get_route(struct net *net,
 		return -ENOENT;
 
 	read_lock(&mrt_lock);
+#if defined(CONFIG_BCM_KF_IGMP)
+	/* mroute6 should not apply to MLD traffic.
+	   In addition, it does not make sense for the TCP protocol to be
+	   used for multicast, so just check for UDP. */
+	if( (skb->dev == NULL) || (ipv6_hdr(skb) == NULL) ||
+	    (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) )
+	{
+		struct net_device *dev = dev_get_by_index(net, ifIndex);
+		mifi_t mifi = ip6mr_find_vif(mrt, dev);
+		cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, 
+		                         &rt->rt6i_dst.addr, mifi);
+	}
+	else
+	{
+		cache = NULL;
+	}
+#else
 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+#endif
 
 	if (!cache) {
 		struct sk_buff *skb2;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index b2869cab2092ae2d08e6b090c98fbe6aece35e75..c54a8d84d7eb4896c891755b62999ebd0040bb4d 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -866,6 +866,9 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 	spin_lock_init(&mc->mca_lock);
 
 	/* initial mode is (EX, empty) */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+	mc->mca_osfmode = MCAST_INCLUDE;
+#endif
 	mc->mca_sfmode = MCAST_EXCLUDE;
 	mc->mca_sfcount[MCAST_EXCLUDE] = 1;
 
@@ -1681,24 +1684,41 @@ static void mld_send_cr(struct inet6_dev *idev)
 	/* change recs */
 	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 		spin_lock_bh(&pmc->mca_lock);
-		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
-			type = MLD2_BLOCK_OLD_SOURCES;
-			dtype = MLD2_ALLOW_NEW_SOURCES;
-		} else {
-			type = MLD2_ALLOW_NEW_SOURCES;
-			dtype = MLD2_BLOCK_OLD_SOURCES;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+		if ( pmc->mca_osfmode == pmc->mca_sfmode ) {
+#endif
+			if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
+				type = MLD2_BLOCK_OLD_SOURCES;
+				dtype = MLD2_ALLOW_NEW_SOURCES;
+			} else {
+				type = MLD2_ALLOW_NEW_SOURCES;
+				dtype = MLD2_BLOCK_OLD_SOURCES;
+			}
+			skb = add_grec(skb, pmc, type, 0, 0);
+			skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
 		}
-		skb = add_grec(skb, pmc, type, 0, 0);
-		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */
+#endif
 
 		/* filter mode changes */
 		if (pmc->mca_crcount) {
-			if (pmc->mca_sfmode == MCAST_EXCLUDE)
-				type = MLD2_CHANGE_TO_EXCLUDE;
-			else
-				type = MLD2_CHANGE_TO_INCLUDE;
-			skb = add_grec(skb, pmc, type, 0, 0);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+			if ( pmc->mca_osfmode != pmc->mca_sfmode ) {
+#endif
+				if (pmc->mca_sfmode == MCAST_EXCLUDE)
+					type = MLD2_CHANGE_TO_EXCLUDE;
+				else
+					type = MLD2_CHANGE_TO_INCLUDE;
+				skb = add_grec(skb, pmc, type, 0, 0);
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+			}
+#endif
 			pmc->mca_crcount--;
+#if defined(CONFIG_BCM_KF_MLD) && defined(CC_BRCM_KF_MULTI_MLD_GR_SUPPRESSION)
+			if ( pmc->mca_crcount == 0 ) {
+				pmc->mca_osfmode = pmc->mca_sfmode;
+			}
+#endif
 		}
 		spin_unlock_bh(&pmc->mca_lock);
 	}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 176b469322acd0b63b6e8d7ad0f095129b165868..d8c2847a4cf8b70de5f882366633e958c88d27df 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1030,8 +1030,12 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 		return;
 	}
 
+#if defined(CONFIG_BCM_KF_IP)
+	if (!idev->cnf.forwarding  || (idev->dev->priv_flags & IFF_WANDEV))
+#else
 	/* Don't accept RS if we're not in router mode */
 	if (!idev->cnf.forwarding)
+#endif
 		goto out;
 
 	/*
@@ -1115,11 +1119,20 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
 
 static inline int accept_ra(struct inet6_dev *in6_dev)
 {
+#if defined(CONFIG_BCM_KF_IP)
+	/* WAN interface needs to act like a host. */
+	if (((in6_dev->cnf.forwarding) && 
+		(!(in6_dev->dev->priv_flags & IFF_WANDEV) || 
+		((in6_dev->dev->priv_flags & IFF_WANDEV) && 
+		netdev_path_is_root(in6_dev->dev))))
+		&& (in6_dev->cnf.accept_ra < 2))
+#else
 	/*
 	 * If forwarding is enabled, RA are not accepted unless the special
 	 * hybrid mode (accept_ra=2) is enabled.
 	 */
 	if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
+#endif
 		return 0;
 
 	return in6_dev->cnf.accept_ra;
@@ -1479,7 +1492,13 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 	in6_dev = __in6_dev_get(skb->dev);
 	if (!in6_dev)
 		return;
+#if defined(CONFIG_BCM_KF_IP)
+	/* WAN interface needs to act like a host. */
+	if (((in6_dev->cnf.forwarding) && !(in6_dev->dev->priv_flags & IFF_WANDEV))
+		|| (!in6_dev->cnf.accept_redirects))
+#else
 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+#endif
 		return;
 
 	/* RFC2461 8.1:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index db31561cc8df31afbf7f18fd4dd849811be5a8dd..97f2186292fdfff84a596bfcfa8fa2e41efeab6b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -118,11 +118,28 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	__sum16 csum = 0;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr sAddr;
+	struct in6_addr dAddr;
+
+	memcpy(&sAddr, &ip6h->saddr, sizeof(struct in6_addr));
+	memcpy(&dAddr, &ip6h->daddr, sizeof(struct in6_addr));
+#endif
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
 		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
 			break;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		if (!csum_ipv6_magic(&sAddr, &dAddr,
+				     skb->len - dataoff, protocol,
+				     csum_sub(skb->csum,
+					      skb_checksum(skb, 0,
+							   dataoff, 0)))) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+#else
 		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 				     skb->len - dataoff, protocol,
 				     csum_sub(skb->csum,
@@ -131,8 +148,18 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
+#endif
 		/* fall through */
 	case CHECKSUM_NONE:
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		skb->csum = ~csum_unfold(
+				csum_ipv6_magic(&sAddr, &dAddr,
+					     skb->len - dataoff,
+					     protocol,
+					     csum_sub(0,
+						      skb_checksum(skb, 0,
+								   dataoff, 0))));
+#else
 		skb->csum = ~csum_unfold(
 				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 					     skb->len - dataoff,
@@ -140,6 +167,7 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 					     csum_sub(0,
 						      skb_checksum(skb, 0,
 								   dataoff, 0))));
+#endif
 		csum = __skb_checksum_complete(skb);
 	}
 	return csum;
@@ -153,6 +181,13 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	__wsum hsum;
 	__sum16 csum = 0;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr sAddr;
+	struct in6_addr dAddr;
+
+	memcpy(&sAddr, &ip6h->saddr, sizeof(struct in6_addr));
+	memcpy(&dAddr, &ip6h->daddr, sizeof(struct in6_addr));
+#endif
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
@@ -161,11 +196,19 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 		/* fall through */
 	case CHECKSUM_NONE:
 		hsum = skb_checksum(skb, 0, dataoff, 0);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&sAddr,
+							 &dAddr,
+							 skb->len - dataoff,
+							 protocol,
+							 csum_sub(0, hsum)));
+#else
 		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
 							 &ip6h->daddr,
 							 skb->len - dataoff,
 							 protocol,
 							 csum_sub(0, hsum)));
+#endif
 		skb->ip_summed = CHECKSUM_NONE;
 		return __skb_checksum_complete_head(skb, dataoff + len);
 	}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index d33cddd16fbb6a720217a9db1f1eef353f990fcb..f047ae5ad60e2c0dfa7401072261fd5476320d2e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -175,6 +175,19 @@ config IP6_NF_TARGET_REJECT
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP6_NF_TARGET_REJECT
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  This option enables turning a "--reject-with icmp*" into a matching
+	  socket error also.
+	  The REJECT target normally allows sending an ICMP message. But it
+	  leaves the local socket unaware of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP6_NF_MANGLE
 	tristate "Packet mangling"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0298d3f53327f2e35447e8ab97a1568e83..7e1a78ea60d629c1673de76332fdfc77c7c46894 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -316,9 +316,14 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 		fq->nhoffset = nhoff;
 		fq->q.last_in |= INET_FRAG_FIRST_IN;
 	}
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	write_lock(&nf_frags.lock);
 	list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
 	write_unlock(&nf_frags.lock);
+#else
+	/*CVE-2014-0100*/
+	inet_frag_lru_move(&fq->q);
+#endif
 	return 0;
 
 discard_fq:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5bddea778840721b01d2aea417ed96475518f918..ef6ce73e07011b70327b32094a3bb1baed964596 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -155,15 +155,25 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
  */
 static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 {
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr saddr;
+	struct in6_addr daddr;
+#else
 	const struct in6_addr *saddr;
 	const struct in6_addr *daddr;
+#endif
 	struct sock *sk;
 	int delivered = 0;
 	__u8 hash;
 	struct net *net;
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(struct in6_addr));
+	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(struct in6_addr));
+#else
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = saddr + 1;
+#endif
 
 	hash = nexthdr & (MAX_INET_PROTOS - 1);
 
@@ -174,8 +184,11 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 		goto out;
 
 	net = dev_net(skb->dev);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	sk = __raw_v6_lookup(net, sk, nexthdr, &daddr, &saddr, IP6CB(skb)->iif);
+#else
 	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
-
+#endif
 	while (sk) {
 		int filtered;
 
@@ -217,8 +230,13 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 				rawv6_rcv(sk, clone);
 			}
 		}
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, &daddr, &saddr,
+				     IP6CB(skb)->iif);
+#else
 		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
 				     IP6CB(skb)->iif);
+#endif
 	}
 out:
 	read_unlock(&raw_v6_hashinfo.lock);
@@ -403,6 +421,10 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct raw6_sock *rp = raw6_sk(sk);
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct in6_addr dstAddr;
+	struct in6_addr srcAddr;
+#endif
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 		atomic_inc(&sk->sk_drops);
@@ -413,19 +435,37 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 	if (!rp->checksum)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	memcpy(&srcAddr, &ipv6_hdr(skb)->saddr, sizeof(struct in6_addr));
+	memcpy(&dstAddr, &ipv6_hdr(skb)->daddr, sizeof(struct in6_addr));
+#endif
+
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		skb_postpull_rcsum(skb, skb_network_header(skb),
 				   skb_network_header_len(skb));
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		if (!csum_ipv6_magic(&srcAddr, &dstAddr,
+				     skb->len, inet->inet_num, skb->csum))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+#else
 		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 				     &ipv6_hdr(skb)->daddr,
 				     skb->len, inet->inet_num, skb->csum))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
 	}
 	if (!skb_csum_unnecessary(skb))
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&srcAddr,
+							 &dstAddr,
+							 skb->len,
+							 inet->inet_num, 0));
+#else
 		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 							 &ipv6_hdr(skb)->daddr,
 							 skb->len,
 							 inet->inet_num, 0));
+#endif
 
 	if (inet->hdrincl) {
 		if (skb_checksum_complete(skb)) {
@@ -965,6 +1005,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 
 	switch (optname) {
 	case IPV6_CHECKSUM:
+#if !defined(CONFIG_BCM_KF_IP)
 		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
 		    level == IPPROTO_IPV6) {
 			/*
@@ -977,6 +1018,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 			 */
 			return -EINVAL;
 		}
+#endif
 
 		/* You may get strange result with a positive odd offset;
 		   RFC2292bis agrees with me. */
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 9447bd69873af6b7bb8979547eab9bd7624b6bd4..145b8583dc317dc8ad58c246519e006951969309 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -383,10 +383,14 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    fq->q.meat == fq->q.len)
 		return ip6_frag_reasm(fq, prev, dev);
-
+#if !defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
 	write_lock(&ip6_frags.lock);
 	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
 	write_unlock(&ip6_frags.lock);
+#else
+	/*CVE-2014-0100*/
+	inet_frag_lru_move(&fq->q);
+#endif
 	return -1;
 
 discard_fq:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4920ca83f5fd8b5bbd9fb15d3ea8fa30889fe35..361ceaf4530509a6b67227ab025a8049c243e6c6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -908,6 +908,17 @@ void ip6_route_input(struct sk_buff *skb)
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct net *net = dev_net(skb->dev);
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
+#if defined(CONFIG_MIPS_BCM963XX) && defined(CONFIG_BCM_KF_UNALIGNED_EXCEPTION)
+	struct flowi6 fl6;
+
+	fl6.flowi6_iif  = skb->dev->ifindex;
+	fl6.flowi6_mark = skb->mark;
+	fl6.flowi6_proto = iph->nexthdr;
+	fl6.daddr = iph->daddr;
+	fl6.saddr = iph->saddr;
+	memcpy(&fl6.flowlabel, iph, sizeof(__be32));
+	fl6.flowlabel &= IPV6_FLOWINFO_MASK;
+#else
 	struct flowi6 fl6 = {
 		.flowi6_iif = skb->dev->ifindex,
 		.daddr = iph->daddr,
@@ -916,6 +927,7 @@ void ip6_route_input(struct sk_buff *skb)
 		.flowi6_mark = skb->mark,
 		.flowi6_proto = iph->nexthdr,
 	};
+#endif
 
 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
@@ -1292,7 +1304,12 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (!table)
 		goto out;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	/*CVE-2014-2309*/
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
+#else	
 	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
+#endif
 
 	if (!rt) {
 		err = -ENOMEM;
@@ -2467,7 +2484,11 @@ static int rt6_fill_node(struct net *net,
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+#if defined(CONFIG_BCM_KF_MLD)
+			int err = ip6mr_get_route(net, skb, rtm, nowait, iif);
+#else
 			int err = ip6mr_get_route(net, skb, rtm, nowait);
+#endif
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c4ffd174352895630c878729cb7ce001ae9c9bf3..e94fd457f44b1e17577bd9b483f4cff7ef5c0ddc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -54,6 +54,10 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 /*
    This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
 
@@ -66,6 +70,10 @@
 static int ipip6_tunnel_init(struct net_device *dev);
 static void ipip6_tunnel_setup(struct net_device *dev);
 static void ipip6_dev_free(struct net_device *dev);
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
+                      __be32 *v4dst);
+#endif
 
 static int sit_net_id __read_mostly;
 struct sit_net {
@@ -556,6 +564,22 @@ static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff
 		IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
+                                  const struct in6_addr *v6addr)
+{
+	__be32 v4embed = 0;
+	if (check_6rd(tunnel, v6addr, &v4embed)) {
+		if (v4addr != v4embed)
+			return true;
+	} else {
+		if (v4addr != tunnel->ip6rd.br_addr)
+			return true;
+	}
+	return false;
+}
+#endif
+
 static int ipip6_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph;
@@ -586,11 +610,29 @@ static int ipip6_rcv(struct sk_buff *skb)
 			kfree_skb(skb);
 			return 0;
 		}
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+		else {
+			if (is_spoofed_6rd(tunnel, iph->saddr,
+					&ipv6_hdr(skb)->saddr) ||
+			    is_spoofed_6rd(tunnel, iph->daddr,
+					&ipv6_hdr(skb)->daddr)) {
+				tunnel->dev->stats.rx_errors++;
+				rcu_read_unlock();
+				kfree_skb(skb);
+				return 0;
+			}
+		}
+#endif
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		blog_lock();
+		blog_link(TOS_MODE, blog_ptr(skb), tunnel, DIR_RX, BLOG_TOS_FIXED);
+		blog_unlock();
+#endif
 		__skb_tunnel_rx(skb, tunnel->dev);
 
 		ipip6_ecn_decapsulate(iph, skb);
@@ -609,6 +651,46 @@ static int ipip6_rcv(struct sk_buff *skb)
 	return 0;
 }
 
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+/*
+ * If the IPv6 address comes from 6rd / 6to4 (RFC 3056) addr space this function
+ * stores the embedded IPv4 address in v4dst and returns true.
+ */
+static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
+                      __be32 *v4dst)
+{
+#ifdef CONFIG_IPV6_SIT_6RD
+	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
+                              tunnel->ip6rd.prefixlen)) {
+		unsigned int pbw0, pbi0;
+		int pbi1;
+		u32 d;
+
+		pbw0 = tunnel->ip6rd.prefixlen >> 5;
+		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
+
+		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+			tunnel->ip6rd.relay_prefixlen;
+
+		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
+		if (pbi1 > 0)
+			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
+				(32 - pbi1);
+
+		*v4dst = tunnel->ip6rd.relay_prefix | htonl(d);
+		return true;
+	}
+#else
+	if (v6dst->s6_addr16[0] == htons(0x2002)) {
+		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
+		memcpy(v4dst, &v6dst->s6_addr16[1], 4);
+		return true;
+	}
+#endif
+	return false;
+}
+#endif
+
 /*
  * Returns the embedded IPv4 address if the IPv6 address
  * comes from 6rd / 6to4 (RFC 3056) addr space.
@@ -830,12 +912,36 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	iph 			=	ip_hdr(skb);
 	iph->version		=	4;
 	iph->ihl		=	sizeof(struct iphdr)>>2;
+#if defined(CONFIG_BCM_KF_IP)
+	/*
+	 *	cd-router #1329: DF flag should not be set
+	 *	RFC 3056 sec 4: DF flag should not be set
+	 *	RFC 4213 sec 3.2.1: DF flag MUST NOT be set for static MTU cases.
+	 *	RFC 4213 sec 3.2.2: For dynamic MTU cases, the algorithm should be:
+	 *	if ( (v4MTU-20) < 1280 ) {
+	 *	    if ( v6Pkt > 1280 ) send ICMPv6 "TooBig" with MTU=1280;
+	 *	    else encapsulate to v4 packet and DF flag MUST NOT be set
+	 *	}
+	 *	else {
+	 *	    if ( v6Pkt > (v4MTU-20) ) send ICMPv6 "TooBig" with MTU=(v4MTU-20);
+	 *	    else encapsulate to v4 packet and DF flag MUST be set
+	 *	}
+	 */
+	iph->frag_off		=	0;
+#else
 	iph->frag_off		=	df;
+#endif
 	iph->protocol		=	IPPROTO_IPV6;
 	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 	iph->daddr		=	fl4.daddr;
 	iph->saddr		=	fl4.saddr;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	blog_link(TOS_MODE, blog_ptr(skb), tunnel, DIR_TX, tunnel->parms.iph.tos);
+	blog_unlock();
+#endif
+
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl	=	iph6->hop_limit;
 
@@ -1098,6 +1204,9 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 			t->ip6rd.relay_prefix = relay_prefix;
 			t->ip6rd.prefixlen = ip6rd.prefixlen;
 			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
+#if defined(CONFIG_BCM_KF_IPV6RD_SECURITY)
+			t->ip6rd.br_addr = ip6rd.br_addr;
+#endif
 		} else
 			ipip6_tunnel_clone_6rd(dev, sitn);
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7e5d927b576f79b8163b765ff5dcb6f6e0c4be43..3fea6b44c05b08d6b440159e15ea00d864b060c7 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2358,6 +2358,10 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 
 out:
 	xfrm_pol_put(xp);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	if (err == 0)
+		xfrm_garbage_collect(net);
+#endif
 	return err;
 }
 
@@ -2607,6 +2611,10 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
 out:
 	xfrm_pol_put(xp);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	if (delete && err == 0)
+		xfrm_garbage_collect(net);
+#endif
 	return err;
 }
 
@@ -2702,6 +2710,10 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 	audit_info.sessionid = audit_get_sessionid(current);
 	audit_info.secid = 0;
 	err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	if (err == 0)
+		xfrm_garbage_collect(net);
+#endif
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - old silent behavior */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c67943e8af26efe1bac80b306e297921ebd..3cd78094ae6c01dab069ec87c627c4f254f0ea81 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -59,6 +59,11 @@
 
 #include "l2tp_core.h"
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/nbuff.h>
+#include <linux/blog.h>
+#endif
+
 #define L2TP_DRV_VERSION	"V2.0"
 
 /* L2TP header constants */
@@ -1649,6 +1654,30 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 }
 EXPORT_SYMBOL_GPL(l2tp_session_create);
 
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+int l2tp_rcv_check(struct net_device *dev, uint16_t tunnel_id, uint16_t session_id)
+{
+    struct net *net = dev_net(dev);
+    struct l2tp_tunnel *tunnel;
+    struct l2tp_session *session = NULL;
+    int ret = BLOG_L2TP_RCV_NO_TUNNEL;
+    
+    tunnel = l2tp_tunnel_find(net, tunnel_id);
+    if (tunnel)
+    {   //printk("*** l2tp tunnel found!!!\n"); 
+        session = l2tp_session_find(net, tunnel, session_id);
+    }   
+    if (session)
+    {   
+        //printk("*** l2tp session found!!!\n");    
+        ret = BLOG_L2TP_RCV_TUNNEL_FOUND;
+    }   
+return ret; 
+}
+EXPORT_SYMBOL(l2tp_rcv_check);
+#endif
+
 /*****************************************************************************
  * Init and cleanup
  *****************************************************************************/
@@ -1683,6 +1712,11 @@ static int __init l2tp_init(void)
 	if (rc)
 		goto out;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    printk(KERN_INFO "L2TP core: blog_l2tp_rcv_check \n" );
+    blog_l2tp_rcv_check_fn = (blog_l2tp_rcv_check_t) l2tp_rcv_check;
+#endif 
+
 	printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
 
 out:
diff --git a/net/mhi/Kconfig b/net/mhi/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..f2da44c10bc96a554afc954e2359d5f0a72efb81
--- /dev/null
+++ b/net/mhi/Kconfig
@@ -0,0 +1,91 @@
+if (BCM_KF_MHI)
+#
+# MHI protocol family and drivers
+#
+
+config MHI
+	bool "Modem-Host Interface"
+	default n
+	help
+	  The Modem-Host Interface (MHI) is a packet-oriented transport protocol
+	  developed by Renesas Mobile for use with their modems.
+
+	  If unsure, say N.
+
+
+if MHI
+
+config MHI_L2MUX
+	tristate "L2 MUX Protocol Layer for MHI"
+	default y
+	help
+	  L2 MUX is a protocol layer in the MHI stack. It is required
+	  by the MHI L3 components.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called l2mux. If unsure, say Y.
+
+config MHI_L3MHI
+	tristate "L3 MHI Protocol Family (AF_MHI)"
+	select MHI_L2MUX
+	default y
+	help
+	  AF_MHI provides datagram access to L2 channels in MHI,
+	  developed by Renesas Mobile for use with their modems.
+
+	  To compile this driver as a module, choose M here: the modules
+	  will be called l3mhi and af_mhi. If unsure, say Y.
+
+config MHI_L3PHONET
+	tristate "L3 PHONET Protocol bridge (AF_PHONET)"
+	select MHI_L2MUX
+	select PHONET
+	default y
+	help
+	  L3 PHONET protocol for MHI protocol family,
+	  developed by Renesas Mobile for use with their modems.
+
+	  This driver is a bridge between MHI L3 Phonet and Phonet Protocol Family.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called l3phonet. If unsure, say Y.
+
+config MHI_L3MHDP
+	tristate "L3 MHDP IP Tunneling Protocol"
+	select MHI_L2MUX
+	select NET_SCH_MHI
+	select INET_TUNNEL
+	default y
+	help
+	  Tunneling means encapsulating data of one protocol type within
+	  another protocol and sending it over a channel that understands the
+	  encapsulating protocol. This particular tunneling driver implements
+	  encapsulation of IP within MHDP (Modem Host Data Protocol), which
+	  is used for communication between the APE and the Modem.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called l3mhdp. If unsure, say Y.
+
+config MHI_DEBUG
+	bool "MHI Debugging"
+	default n
+	help
+	  Generate lots of debugging messages in the MHI stack.
+	  This option is useful when developing MHI. Otherwise it should be off.
+
+	  If unsure, say N.
+
+config MHI_DUMP_FRAMES
+	bool "Dump MHI frames on L2 layer"
+	default n
+	help
+	  Print out every frame passed through L2MUX into kernel log.
+	  This option is useful when developing MHI. Otherwise it should be off.
+
+	  If unsure, say N.
+
+config MHDP_BONDING_SUPPORT
+	bool "use mhdp as a bonding slave"
+	default n
+endif
+endif # BCM_KF_MHI
diff --git a/net/mhi/Makefile b/net/mhi/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..eea5bda04532c078907c7582af5c6dd0dafc6dbc
--- /dev/null
+++ b/net/mhi/Makefile
@@ -0,0 +1,11 @@
+
+obj-$(CONFIG_MHI_L3MHI)      += af_mhi.o
+
+af_mhi-objs		     := mhi_proto.o mhi_socket.o mhi_dgram.o mhi_raw.o
+
+obj-$(CONFIG_MHI_L2MUX)      += l2mux.o
+obj-$(CONFIG_MHI_L3MHI)      += l3mhi.o
+obj-$(CONFIG_MHI_L3MHDP)     += l3mhdp.o
+obj-$(CONFIG_MHI_L3PHONET)   += l3phonet.o
+subdir-ccflags-y	     += -Werror
+
diff --git a/net/mhi/l2mux.c b/net/mhi/l2mux.c
new file mode 100644
index 0000000000000000000000000000000000000000..7ac188099a0deae23085bdd1ede30562a7e9e750
--- /dev/null
+++ b/net/mhi/l2mux.c
@@ -0,0 +1,777 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: l2mux.c
+ *
+ * Modem-Host Interface (MHI) L2MUX layer
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/if_mhi.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+#ifdef ACTIVATE_L2MUX_STAT
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#endif /* ACTIVATE_L2MUX_STAT */
+
+#include <net/af_mhi.h>
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("MHI/L2MUX: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+#ifdef ACTIVATE_L2MUX_STAT
+#define MAX_COOKIE_LENGTH       PAGE_SIZE
+
+/* MAX_COOKIE_LENGTH/sizeof(struct l2muxstat) */
+#define MAX_DEBUG_MESSAGES      5000
+
+#define list_l2mux_first_entry_safe(head, type, member) \
+					(list_empty(head) ? NULL : \
+					list_first_entry(head, type, member))
+static DEFINE_RWLOCK(l2mux_stat_lock);
+
+static struct l2mux_stat_info l2mux_sinf;
+
+#endif
+
+/* Handle ONLY Non DIX types 0x00-0xff */
+#define ETH_NON_DIX_NPROTO   0x0100
+
+/* L2MUX master lock */
+static DEFINE_SPINLOCK(l2mux_lock);
+
+/* L3 ID -> RX function table */
+static l2mux_skb_fn *l2mux_id2rx_tab[MHI_L3_NPROTO] __read_mostly;
+
+/* Packet Type -> TX function table */
+static l2mux_skb_fn *l2mux_pt2tx_tab[ETH_NON_DIX_NPROTO] __read_mostly;
+
+/* audio RX/TX fn table */
+static l2mux_audio_fn *l2mux_audio_rx_fn __read_mostly;
+static int l2mux_audio_rx_handle __read_mostly;
+
+static l2mux_audio_fn *l2mux_audio_tx_tab[L2MUX_AUDIO_DEV_MAX] __read_mostly;
+static uint8_t l2mux_audio_tx_pn_map[L2MUX_AUDIO_DEV_MAX] __read_mostly;
+
+#ifdef ACTIVATE_L2MUX_STAT
+
+static void l2mux_write_stat(unsigned l3pid, unsigned l3len,
+			     enum l2mux_direction dir, struct net_device *dev);
+static ssize_t store_l2mux_traces_state(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count);
+static ssize_t show_l2mux_traces_state(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf);
+
+static struct device_attribute l2mux_dev_attrs[] = {
+	__ATTR(l2mux_trace_status,
+	       S_IRUGO | S_IWUSR,
+	       show_l2mux_traces_state,
+	       store_l2mux_traces_state),
+	__ATTR_NULL,
+};
+
+void l2mux_stat_dowork(struct work_struct *work)
+{
+	int err;
+	struct l2mux_stat_info *info =
+	    container_of(work, struct l2mux_stat_info, l2mux_stat_work);
+
+	struct net_device *dev = info->dev;
+
+	if (l2mux_sinf.l2mux_traces_activation_done != 1) {
+
+		err = device_create_file(&dev->dev, &l2mux_dev_attrs[0]);
+
+		if (err == 0)
+			l2mux_sinf.l2mux_traces_activation_done = 1;
+		else
+			pr_err("L2MUX cannot create device file\n");
+	}
+}
+
+/*call this function to update the l2mux write statistic*/
+static void
+l2mux_write_stat(unsigned l3pid,
+		 unsigned l3len,
+		 enum l2mux_direction dir, struct net_device *dev)
+{
+
+	struct l2muxstat *tmp_stat;
+	struct l2muxstat *old_stat;
+
+	l2mux_sinf.l2mux_total_stat_counter++;
+
+	if ((dev != NULL) && (l2mux_sinf.l2mux_traces_activation_done == 0)) {
+		l2mux_sinf.dev = dev;
+		schedule_work(&l2mux_sinf.l2mux_stat_work);
+		return;
+
+	} else {
+
+		if ((ON == l2mux_sinf.l2mux_traces_state) ||
+		    (KERNEL == l2mux_sinf.l2mux_traces_state)) {
+
+			if (write_trylock(&l2mux_stat_lock)) {
+
+				tmp_stat = kmalloc(sizeof(struct l2muxstat),
+						   GFP_ATOMIC);
+				if (NULL == tmp_stat) {
+					write_unlock(&l2mux_stat_lock);
+					return;
+				}
+
+				tmp_stat->l3pid = l3pid;
+				tmp_stat->l3len = l3len;
+				tmp_stat->dir = dir;
+				do_gettimeofday(&(tmp_stat->time_val));
+				tmp_stat->stat_counter =
+				    l2mux_sinf.l2mux_total_stat_counter;
+
+				if (l2mux_sinf.l2mux_stat_id < 0)
+					l2mux_sinf.l2mux_stat_id = 0;
+
+				l2mux_sinf.l2mux_stat_id++;
+
+				if (l2mux_sinf.l2mux_stat_id >=
+				    MAX_DEBUG_MESSAGES) {
+
+					old_stat =
+					    list_l2mux_first_entry_safe
+					    (&l2mux_sinf.l2muxstat_tab.list,
+					     struct l2muxstat, list);
+					if (old_stat != NULL) {
+						list_del(&old_stat->list);
+						kfree(old_stat);
+						l2mux_sinf.l2mux_stat_id =
+						    MAX_DEBUG_MESSAGES;
+					}
+				}
+
+				list_add_tail(&(tmp_stat->list),
+					      &(l2mux_sinf.l2muxstat_tab.list));
+
+				write_unlock(&l2mux_stat_lock);
+			}
+		}
+	}
+	/*in the case lock is taken, information is missed */
+}
+
+/* start() method */
+static void *l2mux_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	void *ret = NULL;
+
+	if (l2mux_sinf.l2mux_traces_state == OFF) {
+		pr_err("L2MUX traces are off. activation -echo on > /sys/class/net/my_modem_net_device/l2mux_trace_status -sizeof(l2muxstat) = %zu\n",
+		       sizeof(struct l2muxstat));
+	} else {
+		if (write_trylock(&l2mux_stat_lock)) {
+			ret =
+			    list_l2mux_first_entry_safe(&l2mux_sinf.
+							l2muxstat_tab.list,
+							struct l2muxstat, list);
+			write_unlock(&l2mux_stat_lock);
+		}
+	}
+
+	return ret;
+}
+
+static void *l2mux_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;	/* seq_file contract: .next must advance *pos */
+	return list_l2mux_first_entry_safe(&l2mux_sinf.l2muxstat_tab.list,
+					   struct l2muxstat, list);
+}
+
+/* show() method */
+static int l2mux_seq_show(struct seq_file *seq, void *v)
+{
+	struct l2muxstat *tmp_stat = v;
+	char temp_string[100];
+
+	if (write_trylock(&l2mux_stat_lock)) {
+
+		while (l2mux_sinf.previous_stat_counter !=
+		       (tmp_stat->stat_counter - 1)) {
+
+			sprintf(temp_string,
+				"L2MHI_%d : missed : NA : NA : NA : NA\n",
+				l2mux_sinf.previous_stat_counter + 1);
+
+			/* Interpret the iterator, 'v' */
+			seq_puts(seq, temp_string);
+
+			l2mux_sinf.previous_stat_counter++;
+		}
+
+		l2mux_sinf.previous_stat_counter = tmp_stat->stat_counter;
+
+		sprintf(temp_string, "L2MHI_%d : %d : %d : %x : %d : %d\n",
+			tmp_stat->stat_counter, tmp_stat->dir,
+			tmp_stat->l3pid, tmp_stat->l3len,
+			(unsigned int)tmp_stat->time_val.tv_sec,
+			(unsigned int)tmp_stat->time_val.tv_usec);
+
+		/* Interpret the iterator, 'v' */
+		seq_puts(seq, temp_string);
+
+		if (l2mux_sinf.l2mux_traces_state == KERNEL)
+			pr_err("%s", temp_string);
+
+		list_del(&tmp_stat->list);
+		kfree(tmp_stat);
+		tmp_stat = NULL;
+		l2mux_sinf.l2mux_stat_id--;
+
+		write_unlock(&l2mux_stat_lock);
+	}
+
+	return 0;
+}
+
+/* stop() method */
+static void l2mux_seq_stop(struct seq_file *seq, void *v)
+{
+	/* No cleanup needed */
+}
+
+/* Define iterator operations */
+static const struct seq_operations l2mux_seq_ops = {
+	.start = l2mux_seq_start,
+	.next = l2mux_seq_next,
+	.stop = l2mux_seq_stop,
+	.show = l2mux_seq_show,
+};
+
+static int l2mux_seq_open(struct inode *inode, struct file *file)
+{
+	/* Register the operators */
+	return seq_open(file, &l2mux_seq_ops);
+}
+
+static const struct file_operations l2mux_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = l2mux_seq_open,	/* User supplied */
+	.read = seq_read,	/* Built-in helper function */
+	.llseek = seq_lseek,	/* Built-in helper function */
+	.release = seq_release,	/* Built-in helper function */
+};
+
+/*call this function to init the l2mux write statistic*/
+void init_l2mux_stat(void)
+{
+	l2mux_sinf.proc_entry =
+	    proc_create("l2mux_mhi", 0644, NULL, &l2mux_proc_fops);
+
+	if (l2mux_sinf.proc_entry == NULL)
+		DPRINTK("cannot create proc file l2mux_mhi\n");
+	else {
+
+		l2mux_sinf.l2mux_stat_id = 0;
+		l2mux_sinf.previous_stat_counter = 0;
+		l2mux_sinf.l2mux_total_stat_counter = 0;
+		l2mux_sinf.l2mux_traces_state = OFF;
+		l2mux_sinf.l2mux_traces_activation_done = 0;
+		INIT_LIST_HEAD(&l2mux_sinf.l2muxstat_tab.list);
+		INIT_WORK(&l2mux_sinf.l2mux_stat_work, l2mux_stat_dowork);
+	}
+}
+
+/* Remove /proc/l2mux_mhi; 2nd arg is the PARENT dir (NULL, as at creation) */
+void exit_l2mux_stat(void)
+{
+	remove_proc_entry("l2mux_mhi", NULL);
+}
+
+/**
+ * store_l2mux_traces_state - store the l2mux traces status
+ * @dev: Device to be created
+ * @attr: attribute of sysfs
+ * @buf: output stringwait
+ */
+static ssize_t
+store_l2mux_traces_state(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int retval = count;
+
+	if (sysfs_streq(buf, "on")) {
+		l2mux_sinf.l2mux_traces_state = ON;
+		pr_err("L2MUX traces activated and available in proc fs\n");
+	} else if (sysfs_streq(buf, "off")) {
+		l2mux_sinf.l2mux_traces_state = OFF;
+	} else if (sysfs_streq(buf, "kernel")) {
+		l2mux_sinf.l2mux_traces_state = KERNEL;
+	} else {
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/**
+ * show_l2mux_traces_state - show l2mux traces state
+ * @dev: Funnel device
+ * @attr: attribute of sysfs
+ * @buf: string written to sysfs file
+ */
+static ssize_t
+show_l2mux_traces_state(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	/*
+	 * Report the current trace mode ("on", "off" or "kernel").
+	 * Every switch arm returns directly, so the old 'retval'
+	 * accumulator and its unreachable trailing return are gone.
+	 */
+	switch (l2mux_sinf.l2mux_traces_state) {
+	case ON:
+		return sprintf(buf, "on\n");
+	case OFF:
+		return sprintf(buf, "off\n");
+	case KERNEL:
+		return sprintf(buf, "kernel\n");
+	default:
+		return -ENODEV;
+	}
+}
+#endif /* ACTIVATE_L2MUX_STAT */
+
+int l2mux_netif_rx_register(int l3, l2mux_skb_fn *fn)
+{
+	int err = 0;
+
+	DPRINTK("l2mux_netif_rx_register(l3:%d, fn:%p)\n", l3, fn);
+
+	if (l3 < 0 || l3 >= MHI_L3_NPROTO)
+		return -EINVAL;
+
+	if (!fn)
+		return -EINVAL;
+
+	spin_lock(&l2mux_lock);
+	{
+		if (l2mux_id2rx_tab[l3] == NULL)
+			l2mux_id2rx_tab[l3] = fn;
+		else
+			err = -EBUSY;
+	}
+	spin_unlock(&l2mux_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(l2mux_netif_rx_register);
+
+int l2mux_netif_rx_unregister(int l3)
+{
+	int err = 0;
+
+	DPRINTK("l2mux_netif_rx_unregister(l3:%d)\n", l3);
+
+	if (l3 < 0 || l3 >= MHI_L3_NPROTO)
+		return -EINVAL;
+
+	spin_lock(&l2mux_lock);
+	{
+		if (l2mux_id2rx_tab[l3])
+			l2mux_id2rx_tab[l3] = NULL;
+		else
+			err = -EPROTONOSUPPORT;
+	}
+	spin_unlock(&l2mux_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(l2mux_netif_rx_unregister);
+
+int l2mux_netif_tx_register(int pt, l2mux_skb_fn *fn)
+{
+	int err = 0;
+
+	DPRINTK("l2mux_netif_tx_register(pt:%d, fn:%p)\n", pt, fn);
+
+	if (pt <= 0 || pt >= ETH_NON_DIX_NPROTO)
+		return -EINVAL;
+
+	if (!fn)
+		return -EINVAL;
+
+	spin_lock(&l2mux_lock);
+	{
+		if (l2mux_pt2tx_tab[pt] == NULL)
+			l2mux_pt2tx_tab[pt] = fn;
+		else
+			err = -EBUSY;
+	}
+	spin_unlock(&l2mux_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(l2mux_netif_tx_register);
+
+int l2mux_netif_tx_unregister(int pt)
+{
+	int err = 0;
+
+	DPRINTK("l2mux_netif_tx_unregister(pt:%d)\n", pt);
+
+	if (pt <= 0 || pt >= ETH_NON_DIX_NPROTO)
+		return -EINVAL;
+
+	spin_lock(&l2mux_lock);
+	{
+		if (l2mux_pt2tx_tab[pt])
+			l2mux_pt2tx_tab[pt] = NULL;
+		else
+			err = -EPROTONOSUPPORT;
+	}
+	spin_unlock(&l2mux_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(l2mux_netif_tx_unregister);
+
+int l2mux_skb_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct l2muxhdr *l2hdr;
+	unsigned l3pid;
+	unsigned l3len;
+	l2mux_skb_fn *rxfn;
+
+	/* Set the device in the skb */
+	skb->dev = dev;
+
+	/* Set MAC header here */
+	skb_reset_mac_header(skb);
+
+	/* L2MUX header */
+	l2hdr = l2mux_hdr(skb);
+
+	/* proto id and length in L2 header */
+	l3pid = l2mux_get_proto(l2hdr);
+	l3len = l2mux_get_length(l2hdr);
+
+#ifdef ACTIVATE_L2MUX_STAT
+	l2mux_write_stat(l3pid, l3len, DOWNLINK_DIR, dev);
+#endif /* ACTIVATE_L2MUX_STAT */
+
+#ifdef CONFIG_MHI_DUMP_FRAMES
+	{
+		u8 *ptr = skb->data;
+		int len = skb_headlen(skb);
+		int i;
+
+		pr_debug("L2MUX: RX dev:%d skb_len:%d l3_len:%d l3_pid:%d\n",
+		       dev->ifindex, skb->len, l3len, l3pid);
+
+		for (i = 0; i < len; i++) {
+			if (i % 8 == 0)
+				pr_debug("L2MUX: RX [%04X] ", i);
+			pr_debug(" 0x%02X", ptr[i]);
+			if (i % 8 == 7 || i == len - 1)
+				pr_debug("\n");
+		}
+	}
+#endif
+	/* check that the advertised length is correct */
+	if (l3len != skb->len - L2MUX_HDR_SIZE) {
+		pr_warn("L2MUX: l2mux_skb_rx: L3_id:%d - skb length mismatch L3:%d (+4) <> SKB:%d",
+		       l3pid, l3len, skb->len);
+		goto drop;
+	}
+
+	/* get RX function */
+	rxfn = l2mux_id2rx_tab[l3pid];
+
+	/* Not registered */
+	if (!rxfn)
+		goto drop;
+
+	/* Update RX statistics */
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+
+	/* Call the receiver function */
+	return rxfn(skb, dev);
+
+drop:
+	dev->stats.rx_dropped++;
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+EXPORT_SYMBOL(l2mux_skb_rx);
+
+int l2mux_skb_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	l2mux_skb_fn *txfn;
+	unsigned type;
+	int err = 0;
+#ifdef ACTIVATE_L2MUX_STAT
+	struct l2muxhdr *l2hdr;
+	unsigned l3pid;
+	unsigned l3len;
+#endif /* ACTIVATE_L2MUX_STAT */
+
+	if (unlikely(!skb)) {
+		pr_err("L2MUX TX skb invalid\n");
+		return -EINVAL;
+	}
+
+	/* Packet type ETH_P_XXX */
+	type = ntohs(skb->protocol);
+
+#ifdef CONFIG_MHI_DUMP_FRAMES
+	{
+		u8 *ptr = skb->data;
+		int len = skb_headlen(skb);
+		int i;
+
+		pr_debug("L2MUX: TX dev:%d skb_len:%d ETH_P:%d\n",
+		       dev->ifindex, skb->len, type);
+
+		for (i = 0; i < len; i++) {
+			if (i % 8 == 0)
+				pr_debug("L2MUX: TX [%04X] ", i);
+			pr_debug(" 0x%02X", ptr[i]);
+			if (i % 8 == 7 || i == len - 1)
+				pr_debug("\n");
+		}
+	}
+#endif
+	/* Only handling non DIX types */
+	if (type <= 0 || type >= ETH_NON_DIX_NPROTO)
+		return -EINVAL;
+
+	/* TX function for this packet type */
+	txfn = l2mux_pt2tx_tab[type];
+
+	if (txfn)
+		err = txfn(skb, dev);
+
+#ifdef ACTIVATE_L2MUX_STAT
+
+	if (0 == err) {
+		/* L2MUX header */
+		l2hdr = l2mux_hdr(skb);
+		/* proto id and length in L2 header */
+		l3pid = l2mux_get_proto(l2hdr);
+		l3len = l2mux_get_length(l2hdr);
+
+		l2mux_write_stat(l3pid, l3len, UPLINK_DIR, dev);
+	} else {
+		pr_err("L2MUX TX skb invalid\n");
+	}
+
+#endif /* ACTIVATE_L2MUX_STAT */
+
+	return err;
+}
+EXPORT_SYMBOL(l2mux_skb_tx);
+
+int l2mux_audio_rx_register(l2mux_audio_fn *fn)
+{
+	int err = -EBUSY, handle;
+
+	/* via uintptr_t: (int)fn truncates the pointer on 64-bit builds */
+	handle = ((int)(uintptr_t)fn & 0x00ffffff) | L2MUX_AUDIO_DEV_TYPE_RX;
+	spin_lock(&l2mux_lock);
+	if (l2mux_audio_rx_fn == NULL) {
+		l2mux_audio_rx_handle = handle;
+		l2mux_audio_rx_fn = fn;
+		err = handle;
+	}
+	spin_unlock(&l2mux_lock);
+	return err;
+}
+EXPORT_SYMBOL(l2mux_audio_rx_register);
+
+int l2mux_audio_rx_unregister(int handle)
+{
+	int err = -EPROTONOSUPPORT;
+
+	spin_lock(&l2mux_lock);
+	if (l2mux_audio_rx_handle == handle) {
+		l2mux_audio_rx_handle = 0;
+		l2mux_audio_rx_fn = NULL;
+		err = 0;
+	}
+	spin_unlock(&l2mux_lock);
+	return err;
+}
+EXPORT_SYMBOL(l2mux_audio_rx_unregister);
+
+int l2mux_audio_tx_register(uint8_t phonet_dev_id, l2mux_audio_fn *fn)
+{
+	int i;
+
+	spin_lock(&l2mux_lock);
+	for (i = 0; i < L2MUX_AUDIO_DEV_MAX; i++) {
+		if (l2mux_audio_tx_tab[i] == NULL) {
+			l2mux_audio_tx_tab[i] = fn;
+			l2mux_audio_tx_pn_map[i] = phonet_dev_id;
+			spin_unlock(&l2mux_lock);
+			return (i | L2MUX_AUDIO_DEV_TYPE_TX);
+		}
+	}
+	spin_unlock(&l2mux_lock);
+	return -EBUSY;
+}
+EXPORT_SYMBOL(l2mux_audio_tx_register);
+
+int l2mux_audio_tx_unregister(int handle)
+{
+	int err = -EPROTONOSUPPORT;
+	int internal_dev_id = handle & (~L2MUX_AUDIO_DEV_TYPE_TX);
+
+	if ((internal_dev_id < 0) || (internal_dev_id >= L2MUX_AUDIO_DEV_MAX))
+		return err;
+
+	spin_lock(&l2mux_lock);
+	if (l2mux_audio_tx_tab[internal_dev_id] != NULL) {
+		l2mux_audio_tx_tab[internal_dev_id] = NULL;
+		l2mux_audio_tx_pn_map[internal_dev_id] = 0;
+		err = 0;
+	}
+	spin_unlock(&l2mux_lock);
+	return err;
+}
+EXPORT_SYMBOL(l2mux_audio_tx_unregister);
+
+int l2mux_audio_rx(unsigned char *buffer, uint8_t pn_dev_id)
+{
+	struct l2muxhdr *l2hdr = (struct l2muxhdr *)buffer;
+	unsigned l3len;
+
+	/* proto id and length in L2 header */
+	if (l2mux_get_proto(l2hdr) != MHI_L3_CELLULAR_AUDIO)
+		return -EINVAL;
+
+	l3len = l2mux_get_length(l2hdr);
+
+	if (l2mux_audio_rx_fn == NULL)
+		return -EINVAL;
+
+	return l2mux_audio_rx_fn(buffer + L2MUX_HDR_SIZE, l3len, pn_dev_id);
+}
+EXPORT_SYMBOL(l2mux_audio_rx);
+
+int l2mux_audio_tx(unsigned char *buffer, size_t size, uint8_t pn_dev_id)
+{
+	int i;
+	l2mux_audio_fn *txfn = NULL;
+	struct l2muxhdr *l2hdr;
+
+	/* TODO for the future!!
+	 * since we don't support multiple devices in RIL yet, we do not
+	 * want to create confusion here, so we simply search for the
+	 * first available txfn registered.  (supposedly, there should
+	 * only be one.) */
+#if 0
+	for (i = 0; i < L2MUX_AUDIO_DEV_MAX; i++) {
+		if (l2mux_audio_tx_pn_map[i] == pn_dev_id) {
+			txfn = l2mux_audio_tx_tab[i];
+			break;
+		}
+	}
+#else
+	for (i = 0; i < L2MUX_AUDIO_DEV_MAX; i++) {
+		if (l2mux_audio_tx_tab[i] != NULL) {
+			txfn = l2mux_audio_tx_tab[i];
+			break;
+		}
+	}
+#endif
+
+	/* didn't find txfn for the pn_dev_id */
+	if (txfn == NULL)
+		return -EINVAL;
+
+	/* caller reserved headroom; use the named header size, not magic 4 */
+	l2hdr = (struct l2muxhdr *)(buffer - L2MUX_HDR_SIZE);
+	l2mux_set_proto(l2hdr, MHI_L3_CELLULAR_AUDIO);
+	l2mux_set_length(l2hdr, size);
+
+	/* we don't care about return value from txfn */
+	return txfn((unsigned char *)l2hdr, size + L2MUX_HDR_SIZE, pn_dev_id);
+}
+EXPORT_SYMBOL(l2mux_audio_tx);
+
+static int __init l2mux_init(void)
+{
+	int i;
+
+	DPRINTK("l2mux_init\n");
+
+	for (i = 0; i < MHI_L3_NPROTO; i++)
+		l2mux_id2rx_tab[i] = NULL;
+
+	for (i = 0; i < ETH_NON_DIX_NPROTO; i++)
+		l2mux_pt2tx_tab[i] = NULL;
+
+	l2mux_audio_rx_fn = NULL;
+	l2mux_audio_rx_handle = 0;
+
+	for (i = 0; i < L2MUX_AUDIO_DEV_MAX; i++) {
+		l2mux_audio_tx_tab[i] = NULL;
+		l2mux_audio_tx_pn_map[i] = 0;
+	}
+
+#ifdef ACTIVATE_L2MUX_STAT
+	init_l2mux_stat();
+
+#endif /* ACTIVATE_L2MUX_STAT */
+
+	return 0;
+}
+
+static void __exit l2mux_exit(void)
+{
+#ifdef ACTIVATE_L2MUX_STAT
+	exit_l2mux_stat();
+#endif /* ACTIVATE_L2MUX_STAT */
+	DPRINTK("l2mux_exit\n");
+}
+
+module_init(l2mux_init);
+module_exit(l2mux_exit);
+
+MODULE_DESCRIPTION("L2MUX for MHI Protocol Stack");
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/l3mhdp.c b/net/mhi/l3mhdp.c
new file mode 100644
index 0000000000000000000000000000000000000000..0a8e9cc36c6ab4d12fbe0359c051fc39085e5dca
--- /dev/null
+++ b/net/mhi/l3mhdp.c
@@ -0,0 +1,1516 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: l3mhdp.c
+ *
+ * MHDP - Modem Host Data Protocol for MHI protocol family.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/interrupt.h>
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/mhi_l2mux.h>
+#include <linux/etherdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/udp.h>
+#include <net/mhi/sock.h>
+#include <linux/skbuff.h>
+
+/* wake_lock prevents the system from entering suspend or other
+ * low power states when active.  This seems to be used in an
+ * Android system, but it is not present in Vanilla Kernel */
+//#define SUPPORT_WAKE_LOCK	1
+
+#ifdef SUPPORT_WAKE_LOCK
+#include <linux/wakelock.h>
+#endif
+
+#ifdef CONFIG_MHDP_BONDING_SUPPORT
+#define MHDP_BONDING_SUPPORT
+#endif
+
+//#define MHDP_USE_NAPI
+
+#ifdef MHDP_BONDING_SUPPORT
+#include <linux/etherdevice.h>
+#endif /*MHDP_BONDING_SUPPORT */
+
+#include <net/netns/generic.h>
+#include <net/mhi/mhdp.h>
+
+/* MHDP device MTU limits */
+#define MHDP_MTU_MAX		0x2400
+#define MHDP_MTU_MIN		0x44
+#define MAX_MHDP_FRAME_SIZE	16000
+
+/* MHDP device names */
+#define MHDP_IFNAME			"rmnet%d"
+#define MHDP_CTL_IFNAME		"rmnetctl"
+
+/* Print every MHDP SKB content */
+/* #define MHDP_DEBUG_SKB */
+
+/* #define CONFIG_MHI_DEBUG */
+
+#define UDP_PROT_TYPE	17
+
+#define EPRINTK(...)    pr_debug("MHI/MHDP: " __VA_ARGS__)
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("MHI/MHDP: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+#ifdef MHDP_DEBUG_SKB
+# define SKBPRINT(a, b)    __print_skb_content(a, b)
+#else
+# define SKBPRINT(a, b)
+#endif
+
+/* IPv6 support */
+#define VER_IPv4 0x04
+#define VER_IPv6 0x06
+#define ETH_IP_TYPE(x) (((0x00|(x>>4)) == VER_IPv4) ? ETH_P_IP : ETH_P_IPV6)
+
+/*** Type definitions ***/
+
+#define MAX_MHDPHDR_SIZE MAX_SKB_FRAGS
+
+#ifdef MHDP_USE_NAPI
+#define NAPI_WEIGHT 64
+#endif /*MHDP_USE_NAPI */
+
+
+struct mhdp_tunnel {
+	struct mhdp_tunnel *next;
+	struct net_device *dev;
+	struct net_device *master_dev;
+	struct sk_buff *skb;
+	int sim_id;
+	int pdn_id;
+	int free_pdn;
+	struct hrtimer tx_timer;
+	struct tasklet_struct taskl;
+	struct sk_buff *skb_to_free[MAX_MHDPHDR_SIZE];
+	spinlock_t timer_lock;
+};
+
+struct mhdp_net {
+	struct mhdp_tunnel *tunnels;
+	struct net_device *ctl_dev;
+	struct mhdp_udp_filter udp_filter;
+	spinlock_t udp_lock;
+#ifdef MHDP_USE_NAPI
+	struct net_device *dev;
+	struct napi_struct napi;
+	struct sk_buff_head skb_list;
+#endif				/*#ifdef MHDP_USE_NAPI */
+#ifdef SUPPORT_WAKE_LOCK
+	int wake_lock_time;
+	struct wake_lock wakelock;
+	spinlock_t wl_lock;
+#endif
+};
+
+struct packet_info {
+	uint32_t pdn_id;
+	uint32_t packet_offset;
+	uint32_t packet_length;
+};
+
+struct mhdp_hdr {
+	uint32_t packet_count;
+	struct packet_info info[MAX_MHDPHDR_SIZE];
+};
+
+/*** Prototypes ***/
+
+static void mhdp_netdev_setup(struct net_device *dev);
+
+static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send);
+
+static int mhdp_netdev_event(struct notifier_block *this,
+			     unsigned long event, void *ptr);
+
+static enum hrtimer_restart tx_timer_timeout(struct hrtimer *timer);
+static void tx_timer_timeout_tasklet(unsigned long arg);
+
+#ifdef SUPPORT_WAKE_LOCK
+static ssize_t mhdp_write_wakelock_value(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count);
+static ssize_t mhdp_read_wakelock_value(struct device *dev,
+					struct device_attribute *attr,
+					char *buf);
+#endif
+
+#ifdef MHDP_USE_NAPI
+
+static int mhdp_poll(struct napi_struct *napi, int budget);
+
+#endif /*MHDP_USE_NAPI */
+
+/*** Global Variables ***/
+
+static int mhdp_net_id __read_mostly;
+
+static struct notifier_block mhdp_netdev_notifier = {
+	.notifier_call = mhdp_netdev_event,
+};
+
+#ifdef SUPPORT_WAKE_LOCK
+static struct device_attribute mhdpwl_dev_attrs[] = {
+	__ATTR(mhdp_wakelock_time,
+	       S_IRUGO | S_IWUSR,
+	       mhdp_read_wakelock_value,
+	       mhdp_write_wakelock_value),
+	__ATTR_NULL,
+};
+#endif
+
+/*** Functions ***/
+
+#ifdef MHDP_DEBUG_SKB
+static void __print_skb_content(struct sk_buff *skb, const char *tag)
+{
+	struct page *page;
+	skb_frag_t *frag;
+	int len;
+	int i, j;
+	u8 *ptr;
+
+	/* Main SKB buffer */
+	ptr = (u8 *)skb->data;
+	len = skb_headlen(skb);
+
+	pr_debug("MHDP: SKB buffer length %02u\n", len);
+	for (i = 0; i < len; i++) {
+		if (i % 8 == 0)
+			pr_debug("%s DATA: ", tag);
+		pr_debug(" 0x%02X", ptr[i]);
+		if (i % 8 == 7 || i == len - 1)
+			pr_debug("\n");
+	}
+
+	/* SKB fragments */
+	for (i = 0; i < (skb_shinfo(skb)->nr_frags); i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		page = skb_frag_page(frag);
+
+		ptr = page_address(page);
+
+		for (j = 0; j < frag->size; j++) {
+			if (j % 8 == 0)
+				pr_debug("%s FRAG[%d]: ", tag, i);
+			pr_debug(" 0x%02X", ptr[frag->page_offset + j]);
+			if (j % 8 == 7 || j == frag->size - 1)
+				pr_debug("\n");
+		}
+	}
+}
+
+/**
+ * mhdp_net_dev - Get mhdp_net structure of mhdp tunnel
+ */
+static inline struct mhdp_net *mhdp_net_dev(struct net_device *dev)
+{
+	return net_generic(dev_net(dev), mhdp_net_id);
+}
+
+/**
+ * mhdp_tunnel_init - Initialize MHDP tunnel
+ */
+static void
+mhdp_tunnel_init(struct net_device *dev,
+		 struct mhdp_tunnel_parm *parms, struct net_device *master_dev)
+{
+	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
+	struct mhdp_tunnel *tunnel = netdev_priv(dev);
+
+	DPRINTK("mhdp_tunnel_init: dev:%s", dev->name);
+
+	tunnel->next = mhdpn->tunnels;
+	mhdpn->tunnels = tunnel;
+#ifdef SUPPORT_WAKE_LOCK
+	spin_lock_init(&mhdpn->wl_lock);
+#endif
+
+	tunnel->dev = dev;
+	tunnel->master_dev = master_dev;
+	tunnel->skb = NULL;
+	tunnel->sim_id = parms->sim_id;
+	tunnel->pdn_id = parms->pdn_id;
+	tunnel->free_pdn = 0;
+	netdev_path_add(dev, master_dev);
+
+	hrtimer_init(&tunnel->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	tunnel->tx_timer.function = &tx_timer_timeout;
+	tasklet_init(&tunnel->taskl,
+		     tx_timer_timeout_tasklet, (unsigned long)tunnel);
+
+	spin_lock_init(&tunnel->timer_lock);
+}
+
+/**
+ * mhdp_tunnel_destroy - Destroy MHDP tunnel
+ */
+static void mhdp_tunnel_destroy(struct net_device *dev)
+{
+	DPRINTK("mhdp_tunnel_destroy: dev:%s", dev->name);
+
+	netdev_path_remove(dev);
+	unregister_netdevice(dev);
+}
+
+/**
+ * mhdp_destroy_tunnels - Destroy all MHDP tunnels
+ */
+static void mhdp_destroy_tunnels(struct mhdp_net *mhdpn)
+{
+	struct mhdp_tunnel *tunnel;
+
+	for (tunnel = mhdpn->tunnels; (tunnel); tunnel = tunnel->next) {
+		/* quiesce timer/tasklet BEFORE unregistering the netdev
+		 * whose private area owns them */
+		if (hrtimer_active(&tunnel->tx_timer))
+			hrtimer_cancel(&tunnel->tx_timer);
+		tasklet_kill(&tunnel->taskl);
+		mhdp_tunnel_destroy(tunnel->dev);
+	}
+	mhdpn->tunnels = NULL;
+}
+
+/**
+ * mhdp_locate_tunnel - Retrieve MHDP tunnel thanks to PDN
+ */
+static inline struct mhdp_tunnel *mhdp_locate_tunnel(struct mhdp_net *mhdpn,
+					      int pdn_id)
+{
+	struct mhdp_tunnel *tunnel;
+
+	for (tunnel = mhdpn->tunnels; tunnel; tunnel = tunnel->next)
+		if (tunnel->pdn_id == pdn_id)
+			return tunnel;
+
+	return NULL;
+}
+
+/**
+ * mhdp_add_tunnel - Add MHDP tunnel
+ */
+static struct net_device *mhdp_add_tunnel(struct net *net,
+					  struct mhdp_tunnel_parm *parms)
+{
+	struct net_device *mhdp_dev, *master_dev;
+
+	DPRINTK("mhdp_add_tunnel: adding a tunnel to %s\n", parms->master);
+
+	master_dev = dev_get_by_name(net, parms->master);
+	if (!master_dev)
+		goto err_alloc_dev;
+
+	mhdp_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
+				MHDP_IFNAME, mhdp_netdev_setup);
+	if (!mhdp_dev)
+		goto err_alloc_dev;
+
+	dev_net_set(mhdp_dev, net);
+
+	if (dev_alloc_name(mhdp_dev, MHDP_IFNAME) < 0)
+		goto err_reg_dev;
+
+	strcpy(parms->name, mhdp_dev->name);
+
+#if defined(CONFIG_BCM_KF_WANDEV)
+	mhdp_dev->priv_flags |= IFF_WANDEV;
+#endif
+
+	if (register_netdevice(mhdp_dev)) {
+		pr_err("MHDP: register_netdev failed\n");
+		goto err_reg_dev;
+	}
+
+	dev_hold(mhdp_dev);
+
+	mhdp_tunnel_init(mhdp_dev, parms, master_dev);
+
+	dev_put(master_dev);
+
+	return mhdp_dev;
+
+err_reg_dev:
+	netdev_path_remove(mhdp_dev);
+	free_netdev(mhdp_dev);
+err_alloc_dev:
+	return NULL;
+}
+
+#ifdef SUPPORT_WAKE_LOCK
+/**
+ * mhdp_write_wakelock_value - store the wakelock value in mhdp
+ * @dev: Device to be created
+ * @attr: attribute of sysfs
+ * @buf: output stringwait
+ */
+static ssize_t
+mhdp_write_wakelock_value(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	int retval = count;
+	unsigned long flags;
+	struct mhdp_net *mhdpn = dev_get_drvdata(dev);
+	long int time;
+
+	if (kstrtol(buf, 10, &time)) {
+		EPRINTK("%s cannot access to wake lock time", __func__);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&mhdpn->wl_lock, flags);
+	mhdpn->wake_lock_time = (int)time;
+	spin_unlock_irqrestore(&mhdpn->wl_lock, flags);
+
+	DPRINTK("%s wake_lock_time = %d\n", __func__, mhdpn->wake_lock_time);
+
+	if ((wake_lock_active(&mhdpn->wakelock)) &&
+	    (mhdpn->wake_lock_time <= 0)) {
+
+		wake_unlock(&mhdpn->wakelock);
+
+	} else if ((wake_lock_active(&mhdpn->wakelock)) &&
+		   (mhdpn->wake_lock_time > 0)) {
+
+		wake_lock_timeout(&mhdpn->wakelock, mhdpn->wake_lock_time * HZ);
+	}
+	return retval;
+}
+
+/**
+ * mhdp_read_wakelock_value - read the wakelock value in mhdp
+ * @dev: Device to be created
+ * @attr: attribute of sysfs
+ * @buf: output stringwait
+ */
+static ssize_t
+mhdp_read_wakelock_value(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct mhdp_net *mhdpn = dev_get_drvdata(dev);
+
+	if (mhdpn)
+		return sprintf(buf, "%d\n", mhdpn->wake_lock_time);
+
+	return sprintf(buf, "%d\n", 0);
+}
+
+/**
+ * mmhdp_check_wake_lock - check the wakelock state and restart wake lock if any
+ * @dev: net Device pointer
+ */
+void mhdp_check_wake_lock(struct net_device *dev)
+{
+	unsigned long flags;
+	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
+
+	spin_lock_irqsave(&mhdpn->wl_lock, flags);
+
+	if (mhdpn->wake_lock_time != 0) {
+
+		spin_unlock_irqrestore(&mhdpn->wl_lock, flags);
+
+		wake_lock_timeout(&mhdpn->wakelock, mhdpn->wake_lock_time * HZ);
+	} else {
+		spin_unlock_irqrestore(&mhdpn->wl_lock, flags);
+	}
+}
+#endif /* SUPPORT_WAKE_LOCK */
+
+static void
+mhdp_set_udp_filter(struct mhdp_net *mhdpn, struct mhdp_udp_filter *filter)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&mhdpn->udp_lock, flags);
+	mhdpn->udp_filter.port_id = filter->port_id;
+	mhdpn->udp_filter.active = 1;
+	spin_unlock_irqrestore(&mhdpn->udp_lock, flags);
+}
+
+static void mhdp_reset_udp_filter(struct mhdp_net *mhdpn)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&mhdpn->udp_lock, flags);
+	mhdpn->udp_filter.port_id = 0;
+	mhdpn->udp_filter.active = 0;
+	spin_unlock_irqrestore(&mhdpn->udp_lock, flags);
+
+}
+
+/* mhdp_is_filtered - divert UDP packets matching the configured port filter
+ *
+ * Returns 1 (and consumes @skb) if @skb is an IPv4/IPv6 UDP packet whose
+ * destination port matches the active filter: the UDP payload is cloned and
+ * delivered to MHI sockets via mhi_sock_rcv_multicast(), and the original
+ * skb is freed.  Returns 0 if the packet should continue up the normal
+ * receive path (caller keeps ownership of @skb).
+ */
+static int mhdp_is_filtered(struct mhdp_net *mhdpn, struct sk_buff *skb)
+{
+	struct ipv6hdr *ipv6header;
+	struct iphdr *ipv4header;
+	struct udphdr *udphdr;
+	int ret = 0;
+	__be16 frag_off;
+	int offset = 0;
+	u8 next_hdr;
+	unsigned int size_of_previous_hdr;
+	struct sk_buff *newskb;
+	unsigned long flags;
+
+	/* Fast path: bail out early when no filter is active.  The flag is
+	 * sampled under udp_lock but acted on after release, so a filter set
+	 * concurrently may miss this packet — acceptable for a debug filter.
+	 */
+	spin_lock_irqsave(&mhdpn->udp_lock, flags);
+
+	if (mhdpn->udp_filter.active == 0) {
+		spin_unlock_irqrestore(&mhdpn->udp_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&mhdpn->udp_lock, flags);
+
+	/*if udp, check port number */
+	if (skb->protocol == htons(ETH_P_IP)) {
+
+		ipv4header = ip_hdr(skb);
+
+		if (ipv4header->protocol == UDP_PROT_TYPE) {
+
+			/* UDP header sits ihl 32-bit words after the IP hdr */
+			udphdr = (struct udphdr *)((unsigned int *)ipv4header +
+						   ipv4header->ihl);
+
+			/* NOTE(review): htons() on a value read from the wire;
+			 * semantically this should be ntohs(udphdr->dest)
+			 * (numerically identical byte swap) — confirm
+			 * port_id is stored in host order.
+			 */
+			if (htons(udphdr->dest) == mhdpn->udp_filter.port_id) {
+				size_of_previous_hdr = ipv4header->ihl *
+				    sizeof(unsigned int);
+				ret = 1;
+				DPRINTK("MHDP_FIL: IPv4 packet filtered out\n");
+			}
+		}
+
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+
+		ipv6header = ipv6_hdr(skb);
+		next_hdr = ipv6header->nexthdr;
+
+		/* TCP/ICMPv6 can never match a UDP port filter */
+		if ((next_hdr == NEXTHDR_TCP) || (next_hdr == NEXTHDR_ICMP))
+			goto no_filter;
+		else if (next_hdr == UDP_PROT_TYPE)
+			goto treat_udp;
+
+		if (!ipv6_ext_hdr(next_hdr)) {
+			DPRINTK("!ipv6_ext_hdr(next_hdr): %d\n", next_hdr);
+			goto no_filter;
+		}
+
+		/* Walk IPv6 extension headers to find the transport header */
+		offset = ipv6_skip_exthdr(skb,
+					  sizeof(struct ipv6hdr),
+					  &next_hdr, &frag_off);
+
+		if (offset < 0) {
+			DPRINTK("MHDP_FILTER offset < 0: %d\n", next_hdr);
+			goto no_filter;
+		}
+
+treat_udp:
+		/* offset is 0 on the direct goto (no extension headers) */
+		if (next_hdr == UDP_PROT_TYPE) {
+
+			udphdr = (struct udphdr *)((unsigned char *)ipv6header +
+						   sizeof(struct ipv6hdr) +
+						   offset);
+
+			DPRINTK("MHDP_FILTER: UDP header found\n");
+
+			if (htons(udphdr->dest) == mhdpn->udp_filter.port_id) {
+				ret = 1;
+				size_of_previous_hdr =
+				    (unsigned int)((unsigned char *)udphdr -
+						   (unsigned char *)ipv6header);
+				DPRINTK("MHDP_FIL: IPv6 packet filtered out\n");
+			} else {
+				DPRINTK("MHDP_FILTER: wrong port %d != %d\n",
+					htons(udphdr->dest),
+					mhdpn->udp_filter.port_id);
+			}
+		}
+	}
+
+	if (ret == 1) {
+
+		/* Clone so the UDP payload can be handed to the MHI socket
+		 * layer while the original skb is dropped below.
+		 */
+		newskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (unlikely(!newskb)) {
+			ret = 0;
+			goto no_filter;
+		}
+
+		/* NOTE(review): this strips the IP header(s) plus 4 bytes,
+		 * and the length below subtracts 4 from udphdr->len, but a
+		 * UDP header is 8 bytes (sizeof(struct udphdr)) — confirm
+		 * the receiver expects half the UDP header left in place.
+		 */
+		skb_pull(newskb, (size_of_previous_hdr + sizeof(unsigned int)));
+
+		newskb->len = (unsigned int)htons(udphdr->len) -
+		    sizeof(unsigned int);
+		newskb->protocol = UDP_PROT_TYPE;
+		skb_set_tail_pointer(newskb, newskb->len);
+
+		newskb->truesize = newskb->len + sizeof(struct sk_buff);
+
+		mhi_sock_rcv_multicast(newskb,
+				       MHI_L3_MHDP_UDP_FILTER, newskb->len);
+
+		/* Original packet is consumed by the filter */
+		dev_kfree_skb(skb);
+	}
+no_filter:
+
+	return ret;
+}
+
+/**
+ * mhdp_netdev_ioctl - I/O control on mhdp tunnel
+ * @dev: control net device the ioctl was issued on
+ * @ifr: ifreq whose ifr_data points at user-space parameters
+ * @cmd: one of SIOCADDPDNID / SIOCDELPDNID / SIOCRESETMHDP / SIOSETUDPFILTER
+ *
+ * Creates, recycles, frees or resets PDN tunnels and configures the UDP
+ * port filter.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): no capable(CAP_NET_ADMIN) check before mutating tunnel
+ * state — confirm the caller path already enforces privilege.
+ */
+static int mhdp_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct net *net = dev_net(dev);
+	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
+	struct mhdp_tunnel *tunnel, *pre_dev;
+	struct mhdp_tunnel_parm __user *u_parms;
+	struct mhdp_tunnel_parm k_parms;
+	struct mhdp_udp_filter __user *u_filter;
+	struct mhdp_udp_filter k_filter;
+
+	int err = 0;
+
+	DPRINTK("mhdp tunnel ioctl %X", cmd);
+
+	switch (cmd) {
+
+	case SIOCADDPDNID:
+		u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
+		if (copy_from_user(&k_parms, u_parms,
+				   sizeof(struct mhdp_tunnel_parm))) {
+			DPRINTK("Error: Failed to copy data from user space");
+			return -EFAULT;
+		}
+
+		DPRINTK("pdn_id:%d sim_id:%d master_device:%s",
+				k_parms.pdn_id,
+				k_parms.sim_id,
+				k_parms.master);
+		tunnel = mhdp_locate_tunnel(mhdpn, k_parms.pdn_id);
+
+		if (NULL == tunnel) {
+			/* No tunnel for this PDN yet: create one and copy the
+			 * resulting parameters (e.g. interface name) back.
+			 * NOTE(review): non-zero mhdp_add_tunnel() is treated
+			 * as success here — confirm its return convention.
+			 */
+			if (mhdp_add_tunnel(net, &k_parms)) {
+				if (copy_to_user(u_parms, &k_parms,
+						 sizeof(struct
+							mhdp_tunnel_parm)))
+					err = -EINVAL;
+			} else {
+				err = -EINVAL;
+			}
+
+		} else if (1 == tunnel->free_pdn) {
+
+			/* Recycle a tunnel previously released by
+			 * SIOCDELPDNID instead of allocating a new netdev.
+			 */
+			tunnel->free_pdn = 0;
+
+			tunnel->sim_id = k_parms.sim_id;
+			/* NOTE(review): unbounded strcpy — confirm
+			 * k_parms.name is at least IFNAMSIZ bytes.
+			 */
+			strcpy(k_parms.name, tunnel->dev->name);
+
+			if (copy_to_user(u_parms, &k_parms,
+					 sizeof(struct mhdp_tunnel_parm)))
+				err = -EINVAL;
+		} else {
+			err = -EBUSY;
+		}
+		break;
+
+	case SIOCDELPDNID:
+		u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
+
+		if (copy_from_user(&k_parms, u_parms,
+				   sizeof(struct mhdp_tunnel_parm))) {
+			DPRINTK("Error: Failed to copy data from user space");
+			return -EFAULT;
+		}
+
+		DPRINTK("pdn_id:%d sim_id:%d", k_parms.pdn_id, k_parms.sim_id);
+
+		/* Deletion only marks the tunnel reusable (free_pdn = 1);
+		 * the netdev itself is kept for recycling by SIOCADDPDNID.
+		 * pre_dev is maintained but never used in this loop.
+		 */
+		for (tunnel = mhdpn->tunnels, pre_dev = NULL;
+		     tunnel; pre_dev = tunnel, tunnel = tunnel->next) {
+			if (tunnel->pdn_id == k_parms.pdn_id)
+				tunnel->free_pdn = 1;
+		}
+		break;
+
+	case SIOCRESETMHDP:
+		/* Tear down every tunnel attached to this mhdp_net */
+		mhdp_destroy_tunnels(mhdpn);
+		break;
+
+	case SIOSETUDPFILTER:
+
+		u_filter = (struct mhdp_udp_filter *)ifr->ifr_data;
+
+		if (copy_from_user(&k_filter, u_filter,
+				   sizeof(struct mhdp_udp_filter))) {
+			DPRINTK("Err: cannot cp filter data from user space\n");
+			return -EFAULT;
+		}
+		if (k_filter.active == 1) {
+			DPRINTK("mhdp SIOSETUDPFILTER active on port %d\n",
+				k_filter.port_id);
+			mhdp_set_udp_filter(mhdpn, &k_filter);
+		} else {
+			DPRINTK("mhdp SIOSETUDPFILTER filter reset\n");
+			mhdp_reset_udp_filter(mhdpn);
+		}
+
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/**
+ * mhdp_netdev_change_mtu - Change mhdp tunnel MTU
+ * @dev: tunnel net device
+ * @new_mtu: requested MTU
+ *
+ * Accepts MTUs in [MHDP_MTU_MIN, MHDP_MTU_MAX]; returns -EINVAL otherwise.
+ */
+static int mhdp_netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < MHDP_MTU_MIN || new_mtu > MHDP_MTU_MAX)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+/**
+ * mhdp_netdev_uninit - Un initialize mhdp tunnel
+ * @dev: tunnel net device being unregistered
+ *
+ * Drops the reference taken with dev_hold() when the device was set up
+ * (presumably in mhdp_init_net()/tunnel creation — verify the pairing).
+ */
+static void mhdp_netdev_uninit(struct net_device *dev)
+{
+	dev_put(dev);
+}
+
+/**
+ * mhdp_submit_queued_skb - Submit packets to master netdev (IPC)
+ * @tunnel: tunnel whose pending (possibly concatenated) frame is flushed
+ * @force_send: present for symmetry with callers; not examined here —
+ *              the pending frame is always sent when one exists
+ *
+ * Prepends the L2MUX header, transmits tunnel->skb on the master device
+ * and frees the original per-packet skbs that were chained into it.
+ * Caller must hold tunnel->timer_lock.
+ */
+static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send)
+{
+	struct sk_buff *skb = tunnel->skb;
+	struct l2muxhdr *l2hdr;
+	struct mhdp_hdr *p_mhdp_hdr;
+	int i, nb_frags;
+
+	BUG_ON(!tunnel->master_dev);
+
+	if (skb) {
+
+		/* Number of MHDP sub-packets concatenated into this frame */
+		p_mhdp_hdr = (struct mhdp_hdr *)tunnel->skb->data;
+		nb_frags = p_mhdp_hdr->packet_count;
+
+		/* Frame is going out now: the coalescing timer is obsolete */
+		if (hrtimer_active(&tunnel->tx_timer))
+			hrtimer_cancel(&tunnel->tx_timer);
+
+		skb->protocol = htons(ETH_P_MHDP);
+		skb->priority = 1;
+
+		skb->dev = tunnel->master_dev;
+
+		skb_reset_network_header(skb);
+
+		/* L2MUX header: L3 protocol id (per SIM) and payload length */
+		skb_push(skb, L2MUX_HDR_SIZE);
+		skb_reset_mac_header(skb);
+
+		l2hdr = l2mux_hdr(skb);
+		l2mux_set_proto(l2hdr, (tunnel->sim_id == 1) ?
+			MHI_L3_MHDP_UL_PS2 : MHI_L3_MHDP_UL);
+		l2mux_set_length(l2hdr, skb->len - L2MUX_HDR_SIZE);
+
+		SKBPRINT(skb, "MHDP: TX");
+
+		tunnel->dev->stats.tx_packets++;
+		tunnel->dev->stats.tx_bytes += skb->len;
+		/* Detach before xmit so a concurrent xmit starts a new frame */
+		tunnel->skb = NULL;
+
+		dev_queue_xmit(skb);
+
+		/* Free the original skbs whose data pages were attached to
+		 * the concatenated frame as page fragments.
+		 * NOTE(review): entries are not NULLed after freeing —
+		 * confirm skb_to_free[] is fully rewritten before reuse.
+		 */
+		for (i = 0; i < nb_frags; i++) {
+			if (tunnel->skb_to_free[i])
+				dev_kfree_skb(tunnel->skb_to_free[i]);
+			else
+				EPRINTK("%s error no skb to free\n", __func__);
+		}
+	}
+}
+
+/**
+ * mhdp_netdev_rx - Received packets from master netdev (IPC)
+ * @skb: L2MUX frame containing one or more concatenated MHDP packets
+ * @dev: master net device the frame arrived on
+ *
+ * Strips the L2MUX header, parses the MHDP header (a packet count followed
+ * by one packet_info per sub-packet), then delivers each sub-packet to the
+ * tunnel netdev matching its PDN id.  A single-packet frame is delivered
+ * in place; multi-packet frames are split via skb_clone()/page fragments.
+ * Returns 0; always consumes @skb (except see leak note below).
+ */
+static int mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	skb_frag_t *frag = NULL;
+	struct page *page = NULL;
+	struct sk_buff *newskb = NULL;
+	struct mhdp_hdr *p_mhdp_hdr;
+	struct mhdp_hdr *p_mhdp_hdr_tmp = NULL;
+	int offset, length;
+	int err = 0, i, pdn_id;
+	int mhdp_header_len;
+	struct mhdp_tunnel *tunnel = NULL;
+#if 0
+	int start = 0;
+#endif
+	int has_frag = skb_shinfo(skb)->nr_frags;
+	uint32_t packet_count;
+	unsigned char ip_ver;
+
+#ifdef SUPPORT_WAKE_LOCK
+	/* Keep the system awake while modem data is flowing */
+	mhdp_check_wake_lock(dev);
+#endif
+
+#if 0
+	if (has_frag) {
+		frag = &skb_shinfo(skb)->frags[0];
+		page = skb_frag_page(frag);
+	}
+
+	if (skb_headlen(skb) > L2MUX_HDR_SIZE)
+		skb_pull(skb, L2MUX_HDR_SIZE);
+	else if (has_frag)
+		frag->page_offset += L2MUX_HDR_SIZE;
+#else
+	/* Drop the L2MUX header; the MHDP header starts right after it.
+	 * The disabled branch above handled headers split into fragments.
+	 */
+	skb_pull(skb, L2MUX_HDR_SIZE);
+#endif
+
+	/* First byte(s) of the MHDP header: number of sub-packets.
+	 * NOTE(review): only one byte is read although packet_count is
+	 * declared uint32_t in the header struct — confirm the wire format
+	 * is little-endian and count < 256.
+	 */
+	packet_count = *((unsigned char *)skb->data);
+
+	mhdp_header_len = sizeof(packet_count) +
+	    (packet_count * sizeof(struct packet_info));
+
+#if 0
+	if (mhdp_header_len > skb_headlen(skb)) {
+		int skbheadlen = skb_headlen(skb);
+
+		DPRINTK("mhdp header length: %d, skb_headerlen: %d",
+			mhdp_header_len, skbheadlen);
+
+		p_mhdp_hdr = kmalloc(mhdp_header_len, GFP_ATOMIC);
+
+		if (NULL == p_mhdp_hdr)
+			goto error;
+
+		p_mhdp_hdr_tmp = p_mhdp_hdr;
+
+		if ((skbheadlen == 0) && (has_frag)) {
+			memcpy((__u8 *) p_mhdp_hdr, page_address(page) +
+			       frag->page_offset, mhdp_header_len);
+
+		} else if (has_frag) {
+			memcpy((__u8 *) p_mhdp_hdr, skb->data, skbheadlen);
+
+			memcpy((__u8 *) p_mhdp_hdr + skbheadlen,
+			       page_address(page) +
+			       frag->page_offset, mhdp_header_len - skbheadlen);
+
+			start = mhdp_header_len - skbheadlen;
+		} else {
+			EPRINTK("not a valid mhdp frame");
+			goto error;
+		}
+
+		DPRINTK("page start: %d", start);
+	} else {
+		DPRINTK("skb->data has whole mhdp header");
+		p_mhdp_hdr = (struct mhdp_hdr *)(((__u8 *) skb->data));
+	}
+
+	DPRINTK("MHDP PACKET COUNT : %d", p_mhdp_hdr->packet_count);
+#else
+	/* MHDP header is assumed to be entirely in the linear area here */
+	p_mhdp_hdr = (struct mhdp_hdr *)(((__u8 *) skb->data));
+#endif
+
+	/* Fast path: a frame carrying exactly one packet is delivered
+	 * in place without cloning.
+	 */
+	if (p_mhdp_hdr->packet_count == 1) {
+		pdn_id = p_mhdp_hdr->info[0].pdn_id;
+		offset = p_mhdp_hdr->info[0].packet_offset;
+		length = p_mhdp_hdr->info[0].packet_length;
+
+		skb_pull(skb, mhdp_header_len + offset);
+		skb_trim(skb, length);
+
+		/* IP version nibble selects ETH_P_IP vs ETH_P_IPV6 */
+		ip_ver = (u8)*skb->data;
+		
+		skb_reset_network_header(skb);
+		skb->protocol = htons(ETH_IP_TYPE(ip_ver));
+		skb->ip_summed = CHECKSUM_NONE;
+		skb->pkt_type = PACKET_HOST;
+
+//		rcu_read_lock();
+//		if (!mhdp_is_filtered(mhdp_net_dev(dev), skb)) {
+			//skb_tunnel_rx(skb, dev);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			/* NOTE(review): if no tunnel matches pdn_id, skb is
+			 * neither delivered nor freed on this path — looks
+			 * like a leak; confirm.
+			 */
+			tunnel = mhdp_locate_tunnel(mhdp_net_dev(dev), pdn_id);
+			if (tunnel) {
+				struct net_device_stats *stats =
+				    &tunnel->dev->stats;
+				stats->rx_packets++;
+				stats->rx_bytes += skb->len;
+				skb->dev = tunnel->dev;
+				SKBPRINT(skb, "SKB: RX");
+
+#if 0
+{
+	/* debug purpose, dump out the packet content */
+	int i;
+	uint32_t *u32_ptr = (uint32_t *)skb->data;
+	for (i = 0; i < (skb->len >> 2); i++) {
+		printk("0x%08x: %08x\n", (uint32_t)(skb->data + (i << 2)), u32_ptr[i]);
+	}
+}
+#endif
+				netif_receive_skb(skb);
+#if 0
+#ifdef MHDP_USE_NAPI
+				netif_receive_skb(skb);
+#else
+				netif_rx(skb);
+#endif /*#ifdef MHDP_USE_NAPI */
+#endif
+			}
+//		}
+//		rcu_read_unlock();
+		/* NULL-safe: p_mhdp_hdr_tmp is only set by the disabled
+		 * copy path above.
+		 */
+		kfree(p_mhdp_hdr_tmp);
+		return 0;
+	}
+
+	/* Multi-packet frame: split each sub-packet into its own skb */
+	for (i = 0; i < p_mhdp_hdr->packet_count; i++) {
+		DPRINTK(" pkt_info[%d] - PDNID:%d, pkt_off: %d, pkt_len: %d\n",
+		     i, p_mhdp_hdr->info[i].pdn_id,
+		     p_mhdp_hdr->info[i].packet_offset,
+		     p_mhdp_hdr->info[i].packet_length);
+
+		pdn_id = p_mhdp_hdr->info[i].pdn_id;
+		offset = p_mhdp_hdr->info[i].packet_offset;
+		length = p_mhdp_hdr->info[i].packet_length;
+
+		if (skb_headlen(skb) > (mhdp_header_len + offset)) {
+
+			/* Sub-packet lives in the linear area: clone and
+			 * trim the clone down to just this packet.
+			 */
+			newskb = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!newskb))
+				goto error;
+
+			skb_pull(newskb, mhdp_header_len + offset);
+
+			skb_trim(newskb, length);
+			newskb->truesize = SKB_TRUESIZE(length);
+
+			ip_ver = (u8)*newskb->data;
+
+		} else if (has_frag) {
+
+			/* Sub-packet lives in a page fragment: attach the
+			 * page region to a fresh skb without copying.
+			 * NOTE(review): page/frag are only initialised in
+			 * the #if 0 block above, so this branch would
+			 * dereference NULL if ever taken — confirm it is
+			 * unreachable in the current configuration.
+			 */
+			newskb = netdev_alloc_skb(dev, skb_headlen(skb));
+
+			if (unlikely(!newskb))
+				goto error;
+
+			get_page(page);
+			skb_add_rx_frag(newskb,
+					skb_shinfo(newskb)->nr_frags,
+					page,
+					frag->page_offset +
+					((mhdp_header_len - skb_headlen(skb)) +
+					 offset), length, length);
+
+			ip_ver = *((unsigned char *)page_address(page) +
+				   (frag->page_offset +
+				    ((mhdp_header_len - skb_headlen(skb)) +
+				     offset)));
+			if ((ip_ver >> 4) != VER_IPv4 &&
+			    (ip_ver >> 4) != VER_IPv6)
+				goto error;
+		} else {
+			DPRINTK("Error in the data received");
+			goto error;
+		}
+
+		skb_reset_network_header(newskb);
+
+		/* IPv6 Support - Check the IP version */
+		/* and set ETH_P_IP or ETH_P_IPv6 for received packets */
+
+		newskb->protocol = htons(ETH_IP_TYPE(ip_ver));
+		newskb->ip_summed = CHECKSUM_NONE;
+		newskb->pkt_type = PACKET_HOST;
+
+		rcu_read_lock();
+		if (!mhdp_is_filtered(mhdp_net_dev(dev), newskb)) {
+
+			skb_tunnel_rx(newskb, dev);
+
+			tunnel = mhdp_locate_tunnel(mhdp_net_dev(dev), pdn_id);
+			if (tunnel) {
+				struct net_device_stats *stats =
+				    &tunnel->dev->stats;
+				stats->rx_packets++;
+				stats->rx_bytes += newskb->len;
+				newskb->dev = tunnel->dev;
+				SKBPRINT(newskb, "NEWSKB: RX");
+
+#ifdef MHDP_USE_NAPI
+				netif_receive_skb(newskb);
+#else
+				netif_rx(newskb);
+#endif /*#ifdef MHDP_USE_NAPI */
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	kfree(p_mhdp_hdr_tmp);
+
+	dev_kfree_skb(skb);
+
+	return err;
+
+error:
+	kfree(p_mhdp_hdr_tmp);
+
+	EPRINTK("%s - error detected\n", __func__);
+
+	dev_kfree_skb(skb);
+
+	if (newskb)
+		dev_kfree_skb(newskb);
+
+	return err;
+}
+
+#ifdef MHDP_USE_NAPI
+/*
+static int mhdp_poll(struct napi_struct *napi, int budget)
+function called through napi to read current ip frame received
+*/
+/* NAPI poll: drain every queued frame through mhdp_netdev_rx().
+ * NOTE(review): @budget is ignored and the return value is the last rx
+ * error rather than the number of packets processed — both deviate from
+ * the NAPI contract; confirm intended.
+ */
+static int mhdp_poll(struct napi_struct *napi, int budget)
+{
+	struct mhdp_net *mhdpn = container_of(napi, struct mhdp_net, napi);
+	int err = 0;
+	struct sk_buff *skb;
+
+	while (!skb_queue_empty(&mhdpn->skb_list)) {
+
+		skb = skb_dequeue(&mhdpn->skb_list);
+		err = mhdp_netdev_rx(skb, mhdpn->dev);
+	}
+
+	napi_complete(napi);
+
+	return err;
+}
+
+/*l2mux callback*/
+/* Queue the frame and schedule NAPI; actual processing happens in
+ * mhdp_poll().  Always returns 0 (frame is queued or dropped silently
+ * when the driver state is missing).
+ */
+static int mhdp_netdev_rx_napi(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
+
+	if (mhdpn) {
+
+		mhdpn->dev = dev;
+		skb_queue_tail(&mhdpn->skb_list, skb);
+
+		napi_schedule(&mhdpn->napi);
+
+	} else {
+		EPRINTK("mhdp_netdev_rx_napi-MHDP driver init not correct\n");
+	}
+
+	return 0;
+}
+
+#endif /*MHDP_USE_NAPI */
+
+/**
+ * tx_timer_timeout - Timer expiration function for TX packet concatenation
+ * @timer: the tunnel's tx_timer
+ *
+ * Runs in hrtimer (hard-irq) context, so the actual flush — which takes a
+ * BH spinlock — is deferred to the tunnel tasklet, which then calls
+ * mhdp_submit_queued_skb() to pass concatenated packets to IPC.
+ */
+static enum hrtimer_restart tx_timer_timeout(struct hrtimer *timer)
+{
+	struct mhdp_tunnel *tunnel = container_of(timer,
+						  struct mhdp_tunnel,
+						  tx_timer);
+
+	tasklet_hi_schedule(&tunnel->taskl);
+
+	return HRTIMER_NORESTART;
+}
+
+/* Tasklet body scheduled by tx_timer_timeout(): flush the pending
+ * concatenated TX frame under the tunnel's timer_lock.
+ */
+static void tx_timer_timeout_tasklet(unsigned long arg)
+{
+	struct mhdp_tunnel *tunnel = (struct mhdp_tunnel *)arg;
+
+	spin_lock_bh(&tunnel->timer_lock);
+
+	mhdp_submit_queued_skb(tunnel, 1);
+
+	spin_unlock_bh(&tunnel->timer_lock);
+}
+
+/* Transmit one packet without concatenation: prepend a one-entry MHDP
+ * header in the skb's headroom and flush immediately.  Used when the
+ * master device lacks scatter/gather (see mhdp_netdev_xmit()).
+ * packet_offset is left 0 by the memset, which is correct for a single
+ * packet.  Always returns NETDEV_TX_OK.
+ */
+static int mhdp_netdev_xmit_single(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhdp_hdr *p_mhdphdr;
+	struct mhdp_tunnel *tunnel = netdev_priv(dev);
+	uint32_t pkt_len = skb->len;
+
+	/* Header = 32-bit packet count + one packet_info descriptor */
+	skb_push(skb, sizeof(uint32_t) + sizeof(struct packet_info));
+	memset(skb->data, 0, sizeof(uint32_t) + sizeof(struct packet_info));
+	p_mhdphdr = (struct mhdp_hdr *)skb->data;
+	p_mhdphdr->packet_count = 1;
+	p_mhdphdr->info[0].pdn_id = tunnel->pdn_id;
+	p_mhdphdr->info[0].packet_length = pkt_len;
+	spin_lock_bh(&tunnel->timer_lock);
+	tunnel->skb = skb;
+
+	mhdp_submit_queued_skb(tunnel, 1);
+	spin_unlock_bh(&tunnel->timer_lock);
+	return NETDEV_TX_OK;
+}
+
+/* mhdp_netdev_xmit_chain
+ * if TX packet doesn't fit in max MHDP frame length, send previous
+ * MHDP frame asap else concatenate TX packet.
+ * If nb concatenated packets reach max MHDP packets, send current
+ * MHDP frame asap else start TX timer (if no further packets
+ * to be transmitted, MHDP frame will be send on timer expiry).
+ * The packet's data pages are attached to the pending frame as rx frags
+ * (zero copy); the original skb is kept in skb_to_free[] until the frame
+ * is actually transmitted.  Always returns NETDEV_TX_OK. */
+static int mhdp_netdev_xmit_chain(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhdp_hdr *p_mhdp_hdr;
+	struct mhdp_tunnel *tunnel = netdev_priv(dev);
+	struct net_device_stats *stats = &tunnel->dev->stats;
+	struct page *page = NULL;
+	int i;
+	int packet_count, offset, len;
+
+#ifdef SUPPORT_WAKE_LOCK
+	mhdp_check_wake_lock(dev);
+#endif
+
+	spin_lock_bh(&tunnel->timer_lock);
+
+	SKBPRINT(skb, "SKB: TX");
+
+#if 0
+	{
+		int i;
+		int len = skb->len;
+		u8 *ptr = skb->data;
+
+		for (i = 0; i < len; i++) {
+			if (i % 8 == 0)
+				pr_debug("MHDP mhdp_netdev_xmit:TX [%04X] ", i);
+			pr_debug(" 0x%02X", ptr[i]);
+			if (i % 8 == 7 || i == len - 1)
+				pr_debug("\n");
+		}
+	}
+#endif
+xmit_again:
+
+	/* No frame under construction: allocate one whose linear area will
+	 * hold only the MHDP header; payloads are chained as frags below.
+	 */
+	if (tunnel->skb == NULL) {
+
+		tunnel->skb = netdev_alloc_skb(dev,
+					       L2MUX_HDR_SIZE +
+					       sizeof(struct mhdp_hdr));
+
+		if (!tunnel->skb) {
+			EPRINTK("mhdp_netdev_xmit error1");
+			goto tx_error;
+		}
+
+		/* Place holder for the mhdp packet count */
+		len = skb_headroom(tunnel->skb) - L2MUX_HDR_SIZE;
+
+		skb_push(tunnel->skb, len);
+		len -= 4;
+
+		memset(tunnel->skb->data, 0, len);
+
+		/*
+		 * Need to replace following logic, with something better like
+		 * __pskb_pull_tail or pskb_may_pull(tunnel->skb, len);
+		 */
+		{
+			/* Manually shrink back so only the 4-byte packet
+			 * count remains in the linear area.
+			 */
+			tunnel->skb->tail -= len;
+			tunnel->skb->len -= len;
+		}
+
+		p_mhdp_hdr = (struct mhdp_hdr *)tunnel->skb->data;
+		p_mhdp_hdr->packet_count = 0;
+
+		/* Coalescing window: flush after ~1.7 ms if nothing else
+		 * arrives (see tx_timer_timeout()).
+		 */
+		hrtimer_start(&tunnel->tx_timer,
+			      ktime_set(0, NSEC_PER_SEC / 600),
+			      HRTIMER_MODE_REL);
+	}
+
+	/* This new frame is too big for the current mhdp frame, */
+	/* send the frame first */
+	if (tunnel->skb->len + skb->len >= MAX_MHDP_FRAME_SIZE) {
+
+		mhdp_submit_queued_skb(tunnel, 1);
+
+		goto xmit_again;
+
+	} else {
+
+		/*
+		 * skb_put cannot be called as the (data_len != 0)
+		 */
+
+		/* Extend the linear header area by one packet_info slot */
+		tunnel->skb->tail += sizeof(struct packet_info);
+		tunnel->skb->len += sizeof(struct packet_info);
+
+		DPRINTK("new - skb->tail:%lu skb->end:%lu skb->data_len:%lu",
+			(unsigned long)tunnel->skb->tail,
+			(unsigned long)tunnel->skb->end,
+			(unsigned long)tunnel->skb->data_len);
+
+		p_mhdp_hdr = (struct mhdp_hdr *)tunnel->skb->data;
+
+		/* Keep the original skb alive until the frame is sent */
+		tunnel->skb_to_free[p_mhdp_hdr->packet_count] = skb;
+
+		packet_count = p_mhdp_hdr->packet_count;
+		p_mhdp_hdr->info[packet_count].pdn_id = tunnel->pdn_id;
+		if (packet_count == 0) {
+			p_mhdp_hdr->info[packet_count].packet_offset = 0;
+		} else {
+			/* Each packet starts where the previous one ended */
+			p_mhdp_hdr->info[packet_count].packet_offset =
+			    p_mhdp_hdr->info[packet_count - 1].packet_offset +
+			    p_mhdp_hdr->info[packet_count - 1].packet_length;
+		}
+
+		p_mhdp_hdr->info[packet_count].packet_length = skb->len;
+		p_mhdp_hdr->packet_count++;
+
+		/* Attach the packet's linear data as a page fragment of the
+		 * pending frame (zero copy).
+		 */
+		page = virt_to_page(skb->data);
+
+		get_page(page);
+
+		offset = ((unsigned long)skb->data -
+			  (unsigned long)page_address(page));
+
+		skb_add_rx_frag(tunnel->skb, skb_shinfo(tunnel->skb)->nr_frags,
+				page, offset, skb_headlen(skb),
+				skb_headlen(skb));
+
+		if (skb_shinfo(skb)->nr_frags) {
+
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+				/* NOTE(review): the loop bound comes from
+				 * skb but the frag is read from tunnel->skb
+				 * — looks like it should be
+				 * &skb_shinfo(skb)->frags[i]; confirm.
+				 */
+				skb_frag_t *frag =
+				    &skb_shinfo(tunnel->skb)->frags[i];
+
+				get_page(skb_frag_page(frag));
+
+				skb_add_rx_frag(tunnel->skb,
+						skb_shinfo(tunnel->skb)->
+						nr_frags, skb_frag_page(frag),
+						frag->page_offset, frag->size,
+						frag->size);
+			}
+		}
+
+		/* Descriptor table full: flush immediately */
+		if (p_mhdp_hdr->packet_count >= MAX_MHDPHDR_SIZE)
+			mhdp_submit_queued_skb(tunnel, 1);
+	}
+
+	spin_unlock_bh(&tunnel->timer_lock);
+	return NETDEV_TX_OK;
+
+tx_error:
+	spin_unlock_bh(&tunnel->timer_lock);
+	stats->tx_errors++;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* mhdp_netdev_xmit - Hard xmit for MHDP tunnel net device.
+ * If master device supports MHDP chain (scatter/gather, NETIF_F_SG),
+ * it will use mhdp_netdev_xmit_chain (packet concatenation).
+ * otherwise, it will use mhdp_netdev_xmit_single */
+static int mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhdp_tunnel *tunnel = netdev_priv(dev);
+
+	BUG_ON(!tunnel->master_dev);
+
+	if (tunnel->master_dev->features & NETIF_F_SG)
+		return mhdp_netdev_xmit_chain(skb, dev);
+	else
+		return mhdp_netdev_xmit_single(skb, dev);
+}
+
+/* Exported lookup: map a PDN id to its tunnel net device (NULL if no
+ * tunnel exists for that id).  @dev may be any device in the same netns;
+ * it is only used to reach the per-net mhdp state.
+ */
+struct net_device *mhdp_get_netdev_by_pdn_id(struct net_device *dev, int pdn_id)
+{
+	struct mhdp_tunnel *tunnel;
+	tunnel = mhdp_locate_tunnel(mhdp_net_dev(dev), pdn_id);
+	if (tunnel == NULL)
+		return NULL;
+	else
+		return tunnel->dev;
+}
+EXPORT_SYMBOL(mhdp_get_netdev_by_pdn_id);
+
+/**
+ * mhdp_netdev_event -  Catch MHDP tunnel net dev states
+ * @this: notifier block (unused)
+ * @event: netdev notifier event
+ * @ptr: the net_device the event refers to
+ *
+ * On NETDEV_UNREGISTER of a master device, unlink and destroy every
+ * tunnel that was bound to it.  Always returns NOTIFY_DONE.
+ */
+static int
+mhdp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	struct net_device *event_dev = (struct net_device *)ptr;
+
+	DPRINTK("event_dev: %s, event: %lx\n",
+		event_dev ? event_dev->name : "None", event);
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		{
+			struct mhdp_net *mhdpn = mhdp_net_dev(event_dev);
+			struct mhdp_tunnel *iter, *prev;
+
+			DPRINTK("event_dev: %s, event: %lx\n",
+				event_dev ? event_dev->name : "None", event);
+
+			/* NOTE(review): after mhdp_tunnel_destroy(iter->dev)
+			 * the loop increment still reads iter->next — if
+			 * destroy frees the tunnel (netdev_priv of the freed
+			 * netdev) this is a use-after-free; confirm.
+			 */
+			for (iter = mhdpn->tunnels, prev = NULL;
+			     iter; prev = iter, iter = iter->next) {
+				if (event_dev == iter->master_dev) {
+					if (!prev)
+						mhdpn->tunnels =
+						    mhdpn->tunnels->next;
+					else
+						prev->next = iter->next;
+					mhdp_tunnel_destroy(iter->dev);
+				}
+			}
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+#ifdef MHDP_BONDING_SUPPORT
+
+/* ndo_uninit for the bonding build: drop the extra device reference.
+ * NOTE(review): defined but not referenced in the visible code — confirm
+ * it is wired up elsewhere.
+ */
+static void cdma_netdev_uninit(struct net_device *dev)
+{
+	dev_put(dev);
+}
+
+/* ethtool get_drvinfo: report the device name as the driver name.
+ * NOTE(review): strncpy with exactly 32 bytes does not guarantee NUL
+ * termination if dev->name fills the buffer — confirm sizes.
+ */
+static void mhdp_ethtool_get_drvinfo(struct net_device *dev,
+				     struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, dev->name, 32);
+}
+
+/* ethtool operations exposed when bonding support is compiled in */
+static const struct ethtool_ops mhdp_ethtool_ops = {
+	.get_drvinfo = mhdp_ethtool_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+#endif /*MHDP_BONDING_SUPPORT */
+
+/* net_device_ops shared by the control device and every tunnel device */
+static const struct net_device_ops mhdp_netdev_ops = {
+	.ndo_uninit = mhdp_netdev_uninit,
+	.ndo_start_xmit = mhdp_netdev_xmit,
+	.ndo_do_ioctl = mhdp_netdev_ioctl,
+	.ndo_change_mtu = mhdp_netdev_change_mtu,
+};
+
+/**
+ * mhdp_netdev_setup -  Setup MHDP tunnel
+ * @dev: freshly allocated net device to initialise
+ *
+ * alloc_netdev() setup callback: installs the shared ops and configures
+ * the device either as an Ethernet-like bonding slave or (default) as a
+ * plain ARP-less tunnel device.
+ */
+static void mhdp_netdev_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &mhdp_netdev_ops;
+#ifdef MHDP_BONDING_SUPPORT
+	dev->ethtool_ops = &mhdp_ethtool_ops;
+#endif /*MHDP_BONDING_SUPPORT */
+
+	dev->destructor = free_netdev;
+
+#ifdef MHDP_BONDING_SUPPORT
+	ether_setup(dev);
+	dev->flags |= IFF_NOARP;
+	dev->iflink = 0;
+	/* NOTE(review): ETIF_F_NETNS_LOCAL looks like a typo for
+	 * NETIF_F_NETNS_LOCAL (used in the #else branch below) — this
+	 * branch cannot compile as-is when MHDP_BONDING_SUPPORT is set.
+	 */
+	dev->features |= (ETIF_F_NETNS_LOCAL | NETIF_F_SG);
+#else
+	dev->type = ARPHRD_TUNNEL;
+	/* Reserve room for L2MUX + worst-case MHDP header on xmit */
+	dev->hard_header_len = L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr);
+	dev->mtu = ETH_DATA_LEN;
+	dev->flags = IFF_NOARP;
+	dev->iflink = 0;
+	dev->addr_len = 4;
+	dev->features |= (NETIF_F_NETNS_LOCAL);	/* temporary removing NETIF_F_SG
+						 * support due to problem with
+						 * skb gets freed before being
+						 * transmitted */
+#endif /* MHDP_BONDING_SUPPORT */
+
+}
+
+/**
+ * mhdp_init_net -  Initalize MHDP net structure
+ * @net: network namespace being set up
+ *
+ * Per-netns init: registers the "mhdpctl"-style control device, arms the
+ * UDP filter in its off state and (optionally) sets up NAPI and the
+ * wake-lock sysfs attribute.  Returns 0 or a negative errno.
+ */
+static int __net_init mhdp_init_net(struct net *net)
+{
+	struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);
+	int err;
+
+	mhdpn->tunnels = NULL;
+
+	/* 3-argument alloc_netdev: pre-3.17 kernel API (no name_assign) */
+	mhdpn->ctl_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
+				      MHDP_CTL_IFNAME, mhdp_netdev_setup);
+	if (!mhdpn->ctl_dev)
+		return -ENOMEM;
+
+	dev_net_set(mhdpn->ctl_dev, net);
+	/* Balanced by dev_put() in mhdp_netdev_uninit() */
+	dev_hold(mhdpn->ctl_dev);
+
+	err = register_netdev(mhdpn->ctl_dev);
+	if (err) {
+		pr_err(MHDP_CTL_IFNAME " register failed");
+		free_netdev(mhdpn->ctl_dev);
+		return err;
+	}
+	spin_lock_init(&mhdpn->udp_lock);
+
+	/* Start with the UDP port filter disabled */
+	mhdp_reset_udp_filter(mhdpn);
+#ifdef MHDP_USE_NAPI
+
+	netif_napi_add(mhdpn->ctl_dev, &mhdpn->napi, mhdp_poll, NAPI_WEIGHT);
+	napi_enable(&mhdpn->napi);
+	skb_queue_head_init(&mhdpn->skb_list);
+
+#endif /*#ifdef MHDP_USE_NAPI */
+
+	/* Lets the sysfs show/store handlers find mhdpn from the device */
+	dev_set_drvdata(&mhdpn->ctl_dev->dev, mhdpn);
+#ifdef SUPPORT_WAKE_LOCK
+	err = device_create_file(&mhdpn->ctl_dev->dev, &mhdpwl_dev_attrs[0]);
+
+	/* Non-fatal: the driver works without the sysfs knob */
+	if (err)
+		pr_err("MHDP cannot create wakelock file");
+
+	mhdpn->wake_lock_time = 0;
+
+	wake_lock_init(&mhdpn->wakelock, WAKE_LOCK_SUSPEND, "mhdp_wake_lock");
+#endif
+
+	return 0;
+}
+
+/**
+ * mhdp_exit_net -  destroy MHDP net structure
+ * @net: network namespace being torn down
+ *
+ * Destroys all tunnels and unregisters the control device under RTNL.
+ * NOTE(review): device_remove_file() touches ctl_dev->dev after
+ * unregister_netdevice() has queued the device for destruction —
+ * confirm the ordering is safe (removing the file first would be safer).
+ */
+static void __net_exit mhdp_exit_net(struct net *net)
+{
+	struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);
+
+	rtnl_lock();
+	mhdp_destroy_tunnels(mhdpn);
+	unregister_netdevice(mhdpn->ctl_dev);
+#ifdef SUPPORT_WAKE_LOCK
+	device_remove_file(&mhdpn->ctl_dev->dev, &mhdpwl_dev_attrs[0]);
+	wake_lock_destroy(&mhdpn->wakelock);
+#endif
+
+	rtnl_unlock();
+}
+
+/* Per-network-namespace lifecycle: .size makes the core allocate one
+ * struct mhdp_net per netns, reachable via net_generic(net, mhdp_net_id).
+ */
+static struct pernet_operations mhdp_net_ops = {
+	.init = mhdp_init_net,
+	.exit = mhdp_exit_net,
+	.id = &mhdp_net_id,
+	.size = sizeof(struct mhdp_net),
+};
+
+/**
+ * mhdp_init -  Initalize MHDP
+ *
+ * Module init: hook the two downlink L2MUX channels, register per-netns
+ * state and the netdev notifier.  Returns 0 or a negative errno.
+ */
+static int __init mhdp_init(void)
+{
+	int err;
+
+	/* NOTE(review): the first register's err is overwritten by the
+	 * second, so a failure on MHI_L3_MHDP_DL alone goes undetected
+	 * (and rollback1 would then unregister a channel that was never
+	 * registered) — confirm.
+	 */
+#ifdef MHDP_USE_NAPI
+	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx_napi);
+	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL_PS2, mhdp_netdev_rx_napi);
+#else
+	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx);
+	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL_PS2, mhdp_netdev_rx);
+
+#endif /*MHDP_USE_NAPI */
+	if (err)
+		goto rollback0;
+
+	err = register_pernet_device(&mhdp_net_ops);
+	if (err < 0)
+		goto rollback1;
+
+	err = register_netdevice_notifier(&mhdp_netdev_notifier);
+	if (err < 0)
+		goto rollback2;
+
+	return 0;
+
+rollback2:
+	unregister_pernet_device(&mhdp_net_ops);
+rollback1:
+	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL_PS2);
+	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
+rollback0:
+	return err;
+}
+
+/* Module exit: undo mhdp_init() registrations in reverse order */
+static void __exit mhdp_exit(void)
+{
+	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL_PS2);
+	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
+	unregister_netdevice_notifier(&mhdp_netdev_notifier);
+	unregister_pernet_device(&mhdp_net_ops);
+}
+
+module_init(mhdp_init);
+module_exit(mhdp_exit);
+
+/* NOTE(review): no MODULE_LICENSE() — the kernel will treat the module
+ * as proprietary and taint; confirm whether one should be added.
+ */
+MODULE_DESCRIPTION("Modem Host Data Protocol for MHI");
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/l3mhi.c b/net/mhi/l3mhi.c
new file mode 100644
index 0000000000000000000000000000000000000000..51d3adb5f70424e85c8662a4a0ee8beec31dc6ca
--- /dev/null
+++ b/net/mhi/l3mhi.c
@@ -0,0 +1,140 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: l3mhi.c
+ *
+ * L2 channels to AF_MHI binding.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+#include <net/af_mhi.h>
+#include <net/mhi/sock.h>
+#include <net/mhi/dgram.h>
+
+#define MAX_CHANNELS  256
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("L3MHI: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+/* Module parameters - with defaults */
+/* Default set of L2MUX channels to bind to AF_MHI; overridable at load
+ * time via the l2_channels= module parameter (see bottom of file).
+ */
+static int l2chs[] = {
+	MHI_L3_FILE,
+	MHI_L3_XFILE,
+	MHI_L3_SECURITY,
+	MHI_L3_TEST,
+	MHI_L3_TEST_PRIO,
+	MHI_L3_LOG,
+	MHI_L3_IMS,
+	MHI_L3_OEM_CP,
+	MHI_L3_THERMAL,
+	MHI_L3_MHDP_UDP_FILTER,
+	MHI_L3_HIGH_PRIO_TEST,
+	MHI_L3_MED_PRIO_TEST,
+	MHI_L3_LOW_PRIO_TEST,
+};
+
+/* Number of entries above (kernel style would use ARRAY_SIZE(l2chs));
+ * updated by module_param_array when the user overrides the list.
+ */
+static int l2cnt = sizeof(l2chs) / sizeof(int);
+
+/* Functions */
+
+/* l2mux rx callback: tag the frame for the MHI protocol family and hand
+ * it to the network stack.
+ */
+static int mhi_netif_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	skb->protocol = htons(ETH_P_MHI);
+
+	return netif_rx(skb);
+}
+
+/* Module registration */
+
+/* Module init: bind each configured L2 channel to AF_MHI (rx hook +
+ * protocol registration).  On failure, unwinds the channels that were
+ * fully registered.
+ * NOTE(review): if mhi_register_protocol() fails after its channel's
+ * l2mux_netif_rx_register() succeeded, the unwind loop skips that
+ * channel (mhi_protocol_registered() is false), leaking the rx hook —
+ * confirm.
+ */
+int __init l3mhi_init(void)
+{
+	int ch, i;
+	int err;
+
+	pr_info("MHI: %d Channels\n", l2cnt);
+	for (i = 0; i < l2cnt; i++) {
+		ch = l2chs[i];
+		/* Skip out-of-range entries silently (user-supplied list) */
+		if (ch >= 0 && ch < MHI_L3_NPROTO) {
+			err = l2mux_netif_rx_register(ch, mhi_netif_rx);
+			if (err)
+				goto error;
+
+			err = mhi_register_protocol(ch);
+			if (err)
+				goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	for (i = 0; i < l2cnt; i++) {
+		ch = l2chs[i];
+		if (ch >= 0 && ch < MHI_L3_NPROTO) {
+			if (mhi_protocol_registered(ch)) {
+				l2mux_netif_rx_unregister(ch);
+				mhi_unregister_protocol(ch);
+			}
+		}
+	}
+
+	return err;
+}
+
+/* Module exit: unregister every channel that is currently bound */
+void __exit l3mhi_exit(void)
+{
+	int ch, i;
+
+	for (i = 0; i < l2cnt; i++) {
+		ch = l2chs[i];
+		if (ch >= 0 && ch < MHI_L3_NPROTO) {
+			if (mhi_protocol_registered(ch)) {
+				l2mux_netif_rx_unregister(ch);
+				mhi_unregister_protocol(ch);
+			}
+		}
+	}
+}
+
+module_init(l3mhi_init);
+module_exit(l3mhi_exit);
+
+/* Read-only (0444) parameter: list of L2 channel ids to bind */
+module_param_array_named(l2_channels, l2chs, int, &l2cnt, 0444);
+
+/* NOTE(review): no MODULE_LICENSE() — kernel taints on load; confirm. */
+MODULE_DESCRIPTION("L3 MHI Binding");
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/l3phonet.c b/net/mhi/l3phonet.c
new file mode 100644
index 0000000000000000000000000000000000000000..a61c2380565c74a3c5d0c015f6de489b8ef96b2d
--- /dev/null
+++ b/net/mhi/l3phonet.c
@@ -0,0 +1,121 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: l3phonet.c
+ *
+ * L2 PHONET channel to AF_PHONET binding.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+/* Functions */
+
+/* Downlink: L2MUX frame -> Phonet stack.  Strips the L2MUX header plus
+ * the one-byte Phonet media field before delivery.
+ */
+static int mhi_pn_netif_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	/* Set Protocol Family */
+	skb->protocol = htons(ETH_P_PHONET);
+
+	/* Remove L2MUX header and Phonet media byte */
+	skb_pull(skb, L2MUX_HDR_SIZE + 1);
+
+	/* Pass upwards to the Procotol Family */
+	return netif_rx(skb);
+}
+
+/* Uplink: prepend the Phonet media byte (taken from the device address)
+ * and the L2MUX header to an outgoing Phonet frame.  Returns 0; the
+ * caller performs the actual transmit.
+ */
+static int mhi_pn_netif_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct l2muxhdr *l2hdr;
+	int l3len;
+	u8 *ptr;
+
+	/* Add media byte */
+	ptr = skb_push(skb, 1);
+
+	/* Set media byte */
+	ptr[0] = dev->dev_addr[0];
+
+	/* L3 length */
+	l3len = skb->len;
+
+	/* Add L2MUX header */
+	skb_push(skb, L2MUX_HDR_SIZE);
+
+	/* Mac header starts here */
+	skb_reset_mac_header(skb);
+
+	/* L2MUX header pointer */
+	l2hdr = l2mux_hdr(skb);
+
+	/* L3 Proto ID */
+	l2mux_set_proto(l2hdr, MHI_L3_PHONET);
+
+	/* L3 payload length */
+	l2mux_set_length(l2hdr, l3len);
+
+	return 0;
+}
+
+/* Module registration */
+
+/* Module init: install the Phonet rx and tx hooks on the L2MUX layer,
+ * unwinding the rx hook if tx registration fails.
+ */
+int __init mhi_pn_init(void)
+{
+	int err;
+
+	err = l2mux_netif_rx_register(MHI_L3_PHONET, mhi_pn_netif_rx);
+	if (err)
+		goto err1;
+
+	err = l2mux_netif_tx_register(ETH_P_PHONET, mhi_pn_netif_tx);
+	if (err)
+		goto err2;
+
+	return 0;
+
+err2:
+	l2mux_netif_rx_unregister(MHI_L3_PHONET);
+err1:
+	return err;
+}
+
+/* Module exit: remove both L2MUX hooks installed by mhi_pn_init() */
+void __exit mhi_pn_exit(void)
+{
+	l2mux_netif_rx_unregister(MHI_L3_PHONET);
+	l2mux_netif_tx_unregister(ETH_P_PHONET);
+}
+
+module_init(mhi_pn_init);
+module_exit(mhi_pn_exit);
+
+/* NOTE(review): no MODULE_LICENSE() — kernel taints on load; confirm. */
+MODULE_DESCRIPTION("MHI Phonet protocol family bridge");
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/mhi_dgram.c b/net/mhi/mhi_dgram.c
new file mode 100644
index 0000000000000000000000000000000000000000..71afde77ca240d7f809c090868b9d357f54ca459
--- /dev/null
+++ b/net/mhi/mhi_dgram.c
@@ -0,0 +1,317 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi_dgram.c
+ *
+ * DGRAM socket implementation for MHI protocol family.
+ *
+ * It uses the MHI socket framework in mhi_socket.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+#include <asm/ioctls.h>
+
+#include <net/af_mhi.h>
+#include <net/mhi/sock.h>
+#include <net/mhi/dgram.h>
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("MHI/DGRAM: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+/*** Prototypes ***/
+
+static struct proto mhi_dgram_proto;
+
+static void mhi_dgram_destruct(struct sock *sk);
+
+/*** Functions ***/
+
+int mhi_dgram_sock_create(struct net *net,
+			  struct socket *sock, int proto, int kern)
+{
+	struct sock *sk;
+	struct mhi_sock *msk;
+
+	DPRINTK("mhi_dgram_sock_create: proto:%d type:%d\n", proto, sock->type);
+
+	if (sock->type != SOCK_DGRAM)
+		return -EPROTONOSUPPORT;
+
+	if (proto == MHI_L3_ANY)
+		return -EPROTONOSUPPORT;
+
+	sk = sk_alloc(net, PF_MHI, GFP_KERNEL, &mhi_dgram_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+
+	sock->ops = &mhi_socket_ops;
+	sock->state = SS_UNCONNECTED;
+
+	sk->sk_protocol = proto;
+	sk->sk_destruct = mhi_dgram_destruct;
+	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+
+	sk->sk_prot->init(sk);
+
+	msk = mhi_sk(sk);
+
+	msk->sk_l3proto = proto;
+	msk->sk_ifindex = -1;
+
+	return 0;
+}
+
+static int mhi_dgram_init(struct sock *sk)
+{
+	return 0;
+}
+
+static void mhi_dgram_destruct(struct sock *sk)
+{
+	skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static void mhi_dgram_close(struct sock *sk, long timeout)
+{
+	sk_common_release(sk);
+}
+
+static int mhi_dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+	int err;
+
+	DPRINTK("mhi_dgram_ioctl: cmd:%d arg:%lu\n", cmd, arg);
+
+	switch (cmd) {
+	case SIOCOUTQ:
+		{
+			int len;
+			len = sk_wmem_alloc_get(sk);
+			err = put_user(len, (int __user *)arg);
+		}
+		break;
+
+	case SIOCINQ:
+		{
+			struct sk_buff *skb;
+			int len;
+
+			lock_sock(sk);
+			{
+				skb = skb_peek(&sk->sk_receive_queue);
+				len = skb ? skb->len : 0;
+			}
+			release_sock(sk);
+
+			err = put_user(len, (int __user *)arg);
+		}
+		break;
+
+	default:
+		err = -ENOIOCTLCMD;
+	}
+
+	return err;
+}
+
+/* Send one MHI datagram: copies the user payload into a fresh skb,
+ * prepends the L2MUX header (L3 proto id + payload length) and hands
+ * the frame to mhi_skb_send() on the interface the socket is bound to.
+ * Returns 0 or a negative errno.
+ * NOTE(review): on success this propagates mhi_skb_send()'s return
+ * value (0) rather than the byte count userspace normally expects
+ * from sendmsg() -- confirm against the callers' expectations.
+ */
+static int mhi_dgram_sendmsg(struct kiocb *iocb,
+			     struct sock *sk, struct msghdr *msg, size_t len)
+{
+	struct mhi_sock *msk = mhi_sk(sk);
+	struct net_device *dev = NULL;
+	struct l2muxhdr *l2hdr;
+	struct sk_buff *skb;
+
+	int err = -EFAULT;
+
+	/* Reject any flag we do not know how to honour. */
+	if (msg->msg_flags &
+	    ~(MSG_DONTWAIT | MSG_EOR | MSG_NOSIGNAL | MSG_CMSG_COMPAT)) {
+		pr_warn("mhi_dgram_sendmsg: incompatible socket msg_flags: 0x%08X\n",
+		       msg->msg_flags);
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	skb = sock_alloc_send_skb(sk, len + L2MUX_HDR_SIZE,
+				  (msg->msg_flags & MSG_DONTWAIT), &err);
+	if (!skb) {
+		pr_err("mhi_dgram_sendmsg: sock_alloc_send_skb failed: %d\n",
+			err);
+		goto out;
+	}
+
+	skb_reserve(skb, L2MUX_HDR_SIZE);
+	skb_reset_transport_header(skb);
+
+	err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
+	if (err < 0) {
+		pr_err("mhi_dgram_sendmsg: memcpy_fromiovec failed: %d\n",
+			err);
+		goto drop;
+	}
+
+	/* sk_ifindex is -1 while unbound; the lookup then yields NULL
+	 * and we fail below with the default -EFAULT.
+	 */
+	if (msk->sk_ifindex)
+		dev = dev_get_by_index(sock_net(sk), msk->sk_ifindex);
+
+	if (!dev) {
+		pr_err("mhi_dgram_sendmsg: no device for ifindex:%d\n",
+			msk->sk_ifindex);
+		goto drop;
+	}
+
+	if (!(dev->flags & IFF_UP)) {
+		pr_err("mhi_dgram_sendmsg: device %d not IFF_UP\n",
+			msk->sk_ifindex);
+		err = -ENETDOWN;
+		goto drop;
+	}
+
+	if (len + L2MUX_HDR_SIZE > dev->mtu) {
+		err = -EMSGSIZE;
+		goto drop;
+	}
+
+	skb_reset_network_header(skb);
+
+	skb_push(skb, L2MUX_HDR_SIZE);
+	skb_reset_mac_header(skb);
+
+	l2hdr = l2mux_hdr(skb);
+	l2mux_set_proto(l2hdr, sk->sk_protocol);
+	l2mux_set_length(l2hdr, len);
+
+	err = mhi_skb_send(skb, dev, sk->sk_protocol);
+
+	goto put;
+
+drop:
+	/* Fix: must be kfree_skb(), not kfree().  An skb allocated with
+	 * sock_alloc_send_skb() owns separate data/frag buffers and
+	 * socket wmem accounting that plain kfree() leaks/corrupts.
+	 */
+	kfree_skb(skb);
+put:
+	if (dev)
+		dev_put(dev);
+out:
+	return err;
+}
+
+/* Receive one queued MHI datagram.  Copies up to @len payload bytes
+ * (L2MUX header stripped) into the user iovec.  Returns the number of
+ * bytes delivered -- or the full payload length when MSG_TRUNC was
+ * requested -- and a negative errno on failure.
+ * NOTE(review): cnt = skb->len - L2MUX_HDR_SIZE assumes every queued
+ * skb still carries a complete L2MUX header; confirm against the
+ * receive path that queues these skbs.
+ */
+static int mhi_dgram_recvmsg(struct kiocb *iocb,
+			     struct sock *sk,
+			     struct msghdr *msg,
+			     size_t len, int noblock, int flags, int *addr_len)
+{
+	struct sk_buff *skb = NULL;
+	int cnt, err;
+
+	err = -EOPNOTSUPP;
+
+	/* Reject any flag we do not know how to honour. */
+	if (flags &
+	    ~(MSG_PEEK | MSG_TRUNC | MSG_DONTWAIT |
+	      MSG_NOSIGNAL | MSG_CMSG_COMPAT)) {
+		pr_warn("mhi_dgram_recvmsg: incompatible socket flags: 0x%08X",
+			flags);
+		goto out2;
+	}
+
+	/* AF_MHI has no addresses, so no source address is reported. */
+	if (addr_len)
+		addr_len[0] = 0;
+
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	if (!skb)
+		goto out2;
+
+	/* Payload size excludes the L2MUX framing. */
+	cnt = skb->len - L2MUX_HDR_SIZE;
+	if (len < cnt) {
+		msg->msg_flags |= MSG_TRUNC;
+		cnt = len;
+	}
+
+	err = skb_copy_datagram_iovec(skb, L2MUX_HDR_SIZE, msg->msg_iov, cnt);
+	if (err)
+		goto out;
+
+	if (flags & MSG_TRUNC)
+		err = skb->len - L2MUX_HDR_SIZE;
+	else
+		err = cnt;
+
+out:
+	skb_free_datagram(sk, skb);
+out2:
+	return err;
+}
+
+static int mhi_dgram_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	if (sock_queue_rcv_skb(sk, skb) < 0) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+static struct proto mhi_dgram_proto = {
+	.name = "MHI-DGRAM",
+	.owner = THIS_MODULE,
+	.close = mhi_dgram_close,
+	.ioctl = mhi_dgram_ioctl,
+	.init = mhi_dgram_init,
+	.sendmsg = mhi_dgram_sendmsg,
+	.recvmsg = mhi_dgram_recvmsg,
+	.backlog_rcv = mhi_dgram_backlog_rcv,
+	.hash = mhi_sock_hash,
+	.unhash = mhi_sock_unhash,
+	.obj_size = sizeof(struct mhi_sock),
+};
+
+int mhi_dgram_proto_init(void)
+{
+	DPRINTK("mhi_dgram_proto_init\n");
+
+	return proto_register(&mhi_dgram_proto, 1);
+}
+
+void mhi_dgram_proto_exit(void)
+{
+	DPRINTK("mhi_dgram_proto_exit\n");
+
+	proto_unregister(&mhi_dgram_proto);
+}
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/mhi_proto.c b/net/mhi/mhi_proto.c
new file mode 100644
index 0000000000000000000000000000000000000000..61c698b8bb737875849ac7231e39c3fe81567e19
--- /dev/null
+++ b/net/mhi/mhi_proto.c
@@ -0,0 +1,206 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi_proto.c
+ *
+ * Modem-Host Interface (MHI) Protocol Family
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/if_mhi.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+#include <net/af_mhi.h>
+#include <net/mhi/sock.h>
+#include <net/mhi/dgram.h>
+#include <net/mhi/raw.h>
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("AF_MHI: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+/* Supported L2 protocols */
+static __u8 mhi_protocols[MHI_L3_NPROTO] __read_mostly = { 0, };
+
+/*** Functions ***/
+
+int mhi_protocol_registered(int protocol)
+{
+	if (protocol >= 0 && protocol < MHI_L3_NPROTO)
+		return mhi_protocols[protocol];
+	if (protocol == MHI_L3_ANY)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_protocol_registered);
+
+int mhi_register_protocol(int protocol)
+{
+	DPRINTK("mhi_register_protocol: %d\n", protocol);
+
+	if (protocol < 0 || protocol >= MHI_L3_NPROTO)
+		return -EINVAL;
+
+	mhi_protocols[protocol] = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_register_protocol);
+
+int mhi_unregister_protocol(int protocol)
+{
+	DPRINTK("mhi_unregister_protocol: %d\n", protocol);
+
+	if (protocol < 0 || protocol >= MHI_L3_NPROTO)
+		return -EINVAL;
+
+	mhi_protocols[protocol] = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_unregister_protocol);
+
+int mhi_skb_send(struct sk_buff *skb, struct net_device *dev, u8 proto)
+{
+	int err = 0;
+
+	DPRINTK("mhi_skb_send: proto:%d skb_len:%d\n", proto, skb->len);
+
+	skb->protocol = htons(ETH_P_MHI);
+	skb->dev = dev;
+
+	if (skb->pkt_type == PACKET_LOOPBACK) {
+		skb_orphan(skb);
+		netif_rx_ni(skb);
+	} else {
+
+		if ((proto == MHI_L3_XFILE) || (proto == MHI_L3_LOW_PRIO_TEST))
+			skb->priority = 1;	/* Low prio */
+		else if ((proto == MHI_L3_AUDIO)
+			 || (proto == MHI_L3_TEST_PRIO)
+			 || (proto == MHI_L3_HIGH_PRIO_TEST))
+			skb->priority = 6;	/* high prio */
+		else
+			skb->priority = 0;	/* medium prio */
+		err = dev_queue_xmit(skb);
+	}
+
+	return err;
+}
+
+int
+mhi_skb_recv(struct sk_buff *skb,
+	     struct net_device *dev,
+	     struct packet_type *type, struct net_device *orig_dev)
+{
+	struct l2muxhdr *l2hdr;
+
+	u8 l3pid;
+	u32 l3len;
+	int err;
+
+	l2hdr = l2mux_hdr(skb);
+
+	l3pid = l2mux_get_proto(l2hdr);
+	l3len = l2mux_get_length(l2hdr);
+
+	DPRINTK("mhi_skb_recv: skb_len:%d l3pid:%d l3len:%d\n",
+		skb->len, l3pid, l3len);
+
+	err = mhi_sock_rcv_multicast(skb, l3pid, l3len);
+
+	return err;
+}
+
+static struct packet_type mhi_packet_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_MHI),
+	.func = mhi_skb_recv,
+};
+
+static int __init mhi_proto_init(void)
+{
+	int err;
+
+	DPRINTK("mhi_proto_init\n");
+
+	err = mhi_sock_init();
+	if (err) {
+		pr_alert("MHI socket layer registration failed\n");
+		goto err0;
+	}
+
+	err = mhi_dgram_proto_init();
+	if (err) {
+		pr_alert("MHI DGRAM protocol layer registration failed\n");
+		goto err1;
+	}
+
+	err = mhi_raw_proto_init();
+	if (err) {
+		pr_alert("MHI RAW protocol layer registration failed\n");
+		goto err2;
+	}
+
+	dev_add_pack(&mhi_packet_type);
+
+	return 0;
+
+err2:
+	mhi_dgram_proto_exit();
+err1:
+	mhi_sock_exit();
+err0:
+	return err;
+}
+
+static void __exit mhi_proto_exit(void)
+{
+	DPRINTK("mhi_proto_exit\n");
+
+	dev_remove_pack(&mhi_packet_type);
+
+	mhi_dgram_proto_exit();
+	mhi_raw_proto_exit();
+	mhi_sock_exit();
+}
+
+module_init(mhi_proto_init);
+module_exit(mhi_proto_exit);
+
+MODULE_ALIAS_NETPROTO(PF_MHI);
+
+MODULE_DESCRIPTION("MHI Protocol Family for Linux");
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/mhi_raw.c b/net/mhi/mhi_raw.c
new file mode 100644
index 0000000000000000000000000000000000000000..d4f57509e9bf20d835e39578b9c71d882222a9f9
--- /dev/null
+++ b/net/mhi/mhi_raw.c
@@ -0,0 +1,307 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi_raw.c
+ *
+ * RAW socket implementation for MHI protocol family.
+ *
+ * It uses the MHI socket framework in mhi_socket.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+
+#include <asm/ioctls.h>
+
+#include <net/af_mhi.h>
+#include <net/mhi/sock.h>
+#include <net/mhi/raw.h>
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("MHI/RAW: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+/*** Prototypes ***/
+
+static struct proto mhi_raw_proto;
+
+static void mhi_raw_destruct(struct sock *sk);
+
+/*** Functions ***/
+
+int mhi_raw_sock_create(struct net *net,
+			struct socket *sock, int proto, int kern)
+{
+	struct sock *sk;
+	struct mhi_sock *msk;
+
+	DPRINTK("mhi_raw_sock_create: proto:%d type:%d\n", proto, sock->type);
+
+	if (sock->type != SOCK_RAW)
+		return -EPROTONOSUPPORT;
+
+	sk = sk_alloc(net, PF_MHI, GFP_KERNEL, &mhi_raw_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+
+	sock->ops = &mhi_socket_ops;
+	sock->state = SS_UNCONNECTED;
+
+	if (proto != MHI_L3_ANY)
+		sk->sk_protocol = proto;
+	else
+		sk->sk_protocol = 0;
+
+	sk->sk_destruct = mhi_raw_destruct;
+	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+
+	sk->sk_prot->init(sk);
+
+	msk = mhi_sk(sk);
+
+	msk->sk_l3proto = proto;
+	msk->sk_ifindex = -1;
+
+	return 0;
+}
+
+static int mhi_raw_init(struct sock *sk)
+{
+	return 0;
+}
+
+static void mhi_raw_destruct(struct sock *sk)
+{
+	skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static void mhi_raw_close(struct sock *sk, long timeout)
+{
+	sk_common_release(sk);
+}
+
+static int mhi_raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+	int err;
+
+	DPRINTK("mhi_raw_ioctl: cmd:%d arg:%lu\n", cmd, arg);
+
+	switch (cmd) {
+	case SIOCOUTQ:
+		{
+			int len;
+			len = sk_wmem_alloc_get(sk);
+			err = put_user(len, (int __user *)arg);
+		}
+		break;
+
+	case SIOCINQ:
+		{
+			struct sk_buff *skb;
+			int len;
+
+			lock_sock(sk);
+			{
+				skb = skb_peek(&sk->sk_receive_queue);
+				len = skb ? skb->len : 0;
+			}
+			release_sock(sk);
+
+			err = put_user(len, (int __user *)arg);
+		}
+		break;
+
+	default:
+		err = -ENOIOCTLCMD;
+	}
+
+	return err;
+}
+
+/* Send one raw MHI frame: the user payload is transmitted as-is (the
+ * caller is expected to supply any L2MUX framing itself) on the
+ * interface the socket is bound to.  Returns 0 or a negative errno.
+ * NOTE(review): on success this propagates mhi_skb_send()'s return
+ * value (0) rather than the byte count userspace normally expects
+ * from sendmsg() -- confirm against the callers' expectations.
+ */
+static int mhi_raw_sendmsg(struct kiocb *iocb,
+			   struct sock *sk, struct msghdr *msg, size_t len)
+{
+	struct mhi_sock *msk = mhi_sk(sk);
+	struct net_device *dev = NULL;
+	struct sk_buff *skb;
+
+	int err = -EFAULT;
+
+	/* Reject any flag we do not know how to honour. */
+	if (msg->msg_flags &
+	    ~(MSG_DONTWAIT | MSG_EOR | MSG_NOSIGNAL | MSG_CMSG_COMPAT)) {
+		pr_warn("mhi_raw_sendmsg: incompatible socket msg_flags: 0x%08X\n",
+			msg->msg_flags);
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	skb = sock_alloc_send_skb(sk,
+				  len, (msg->msg_flags & MSG_DONTWAIT), &err);
+	if (!skb) {
+		pr_err("mhi_raw_sendmsg: sock_alloc_send_skb failed: %d\n",
+			err);
+		goto out;
+	}
+
+	err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
+	if (err < 0) {
+		pr_err("mhi_raw_sendmsg: memcpy_fromiovec failed: %d\n", err);
+		goto drop;
+	}
+
+	/* sk_ifindex is -1 while unbound; the lookup then yields NULL
+	 * and we fail below with the default -EFAULT.
+	 */
+	if (msk->sk_ifindex)
+		dev = dev_get_by_index(sock_net(sk), msk->sk_ifindex);
+
+	if (!dev) {
+		pr_err("mhi_raw_sendmsg: no device for ifindex:%d\n",
+			msk->sk_ifindex);
+		goto drop;
+	}
+
+	if (!(dev->flags & IFF_UP)) {
+		pr_err("mhi_raw_sendmsg: device %d not IFF_UP\n",
+			msk->sk_ifindex);
+		err = -ENETDOWN;
+		goto drop;
+	}
+
+	if (len > dev->mtu) {
+		err = -EMSGSIZE;
+		goto drop;
+	}
+
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+
+	err = mhi_skb_send(skb, dev, sk->sk_protocol);
+
+	goto put;
+
+drop:
+	/* Fix: must be kfree_skb(), not kfree().  An skb allocated with
+	 * sock_alloc_send_skb() owns separate data/frag buffers and
+	 * socket wmem accounting that plain kfree() leaks/corrupts.
+	 */
+	kfree_skb(skb);
+put:
+	if (dev)
+		dev_put(dev);
+out:
+	return err;
+}
+
+static int mhi_raw_recvmsg(struct kiocb *iocb,
+			   struct sock *sk,
+			   struct msghdr *msg,
+			   size_t len, int noblock, int flags, int *addr_len)
+{
+	struct sk_buff *skb = NULL;
+	int cnt, err;
+
+	err = -EOPNOTSUPP;
+
+	if (flags &
+	    ~(MSG_PEEK | MSG_TRUNC | MSG_DONTWAIT |
+	      MSG_NOSIGNAL | MSG_CMSG_COMPAT)) {
+		pr_warn("mhi_raw_recvmsg: incompatible socket flags: 0x%08X",
+		       flags);
+		goto out2;
+	}
+
+	if (addr_len)
+		addr_len[0] = 0;
+
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	if (!skb)
+		goto out2;
+
+	cnt = skb->len;
+	if (len < cnt) {
+		msg->msg_flags |= MSG_TRUNC;
+		cnt = len;
+	}
+
+	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, cnt);
+	if (err)
+		goto out;
+
+	if (flags & MSG_TRUNC)
+		err = skb->len;
+	else
+		err = cnt;
+
+out:
+	skb_free_datagram(sk, skb);
+out2:
+	return err;
+}
+
+static int mhi_raw_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	if (sock_queue_rcv_skb(sk, skb) < 0) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+static struct proto mhi_raw_proto = {
+	.name = "MHI-RAW",
+	.owner = THIS_MODULE,
+	.close = mhi_raw_close,
+	.ioctl = mhi_raw_ioctl,
+	.init = mhi_raw_init,
+	.sendmsg = mhi_raw_sendmsg,
+	.recvmsg = mhi_raw_recvmsg,
+	.backlog_rcv = mhi_raw_backlog_rcv,
+	.hash = mhi_sock_hash,
+	.unhash = mhi_sock_unhash,
+	.obj_size = sizeof(struct mhi_sock),
+};
+
+int mhi_raw_proto_init(void)
+{
+	DPRINTK("mhi_raw_proto_init\n");
+
+	return proto_register(&mhi_raw_proto, 1);
+}
+
+void mhi_raw_proto_exit(void)
+{
+	DPRINTK("mhi_raw_proto_exit\n");
+
+	proto_unregister(&mhi_raw_proto);
+}
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/mhi/mhi_socket.c b/net/mhi/mhi_socket.c
new file mode 100644
index 0000000000000000000000000000000000000000..cb5d374eae2b19a76c1b8cac4d5fcd553bb7aa59
--- /dev/null
+++ b/net/mhi/mhi_socket.c
@@ -0,0 +1,334 @@
+#ifdef CONFIG_BCM_KF_MHI
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: mhi_socket.c
+ *
+ * Socket layer implementation for AF_MHI.
+ *
+ * This module implements generic sockets for MHI.
+ * The protocol is implemented separately, like mhi_dgram.c.
+ *
+ * As MHI does not have addresses, the MHI interface is
+ * identified by the sa_ifindex field in sockaddr_mhi.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/mhi.h>
+#include <linux/mhi_l2mux.h>
+#include <linux/if_mhi.h>
+
+#include <net/tcp_states.h>
+#include <net/af_mhi.h>
+#include <net/mhi/sock.h>
+#include <net/mhi/dgram.h>
+#include <net/mhi/raw.h>
+
+#ifdef CONFIG_MHI_DEBUG
+# define DPRINTK(...)    pr_debug("MHI/SOCKET: " __VA_ARGS__)
+#else
+# define DPRINTK(...)
+#endif
+
+/* Master lock for MHI sockets */
+static DEFINE_SPINLOCK(mhi_sock_lock);
+
+/* List of MHI sockets */
+static struct hlist_head mhi_sock_list;
+
+static int mhi_sock_create(struct net *net,
+			   struct socket *sock, int proto, int kern)
+{
+	int err = 0;
+
+	DPRINTK("mhi_sock_create: type:%d proto:%d\n", sock->type, proto);
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_NET_ADMIN)) {
+		pr_warn("AF_MHI: socket create failed: PERMISSION DENIED\n");
+		return -EPERM;
+	}
+
+	if (!mhi_protocol_registered(proto)) {
+		pr_warn("AF_MHI: socket create failed: No support for L2 channel %d\n",
+			proto);
+		return -EPROTONOSUPPORT;
+	}
+
+	if (sock->type == SOCK_DGRAM)
+		err = mhi_dgram_sock_create(net, sock, proto, kern);
+	else if (sock->type == SOCK_RAW)
+		err = mhi_raw_sock_create(net, sock, proto, kern);
+	else {
+		pr_warn("AF_MHI: trying to create a socket with unknown type %d\n",
+			sock->type);
+		err = -EPROTONOSUPPORT;
+	}
+
+	if (err)
+		pr_warn("AF_MHI: socket create failed: %d\n", err);
+
+	return err;
+}
+
+static int mhi_sock_release(struct socket *sock)
+{
+	if (sock->sk) {
+		DPRINTK("mhi_sock_release: proto:%d type:%d\n",
+			sock->sk->sk_protocol, sock->type);
+
+		sock->sk->sk_prot->close(sock->sk, 0);
+		sock->sk = NULL;
+	}
+
+	return 0;
+}
+
+static int mhi_sock_bind(struct socket *sock, struct sockaddr *addr, int len)
+{
+	struct sock *sk = sock->sk;
+	struct mhi_sock *msk = mhi_sk(sk);
+	struct sockaddr_mhi *sam = sa_mhi(addr);
+
+	int err = 0;
+
+	DPRINTK("mhi_sock_bind: proto:%d state:%d\n",
+		sk->sk_protocol, sk->sk_state);
+
+	if (sk->sk_prot->bind)
+		return sk->sk_prot->bind(sk, addr, len);
+
+	if (len < sizeof(struct sockaddr_mhi))
+		return -EINVAL;
+
+	lock_sock(sk);
+	{
+		if (sk->sk_state == TCP_CLOSE) {
+			msk->sk_ifindex = sam->sa_ifindex;
+			WARN_ON(sk_hashed(sk));
+			sk->sk_prot->hash(sk);
+		} else {
+			err = -EINVAL;	/* attempt to rebind */
+		}
+	}
+	release_sock(sk);
+
+	return err;
+}
+
+/* Deliver @skb to the first MHI socket whose L3 protocol and bound
+ * interface match.  The skb is consumed either by that socket or by
+ * kfree_skb() when no socket matched.  Always reports NET_RX_SUCCESS.
+ * NOTE(review): sk_receive_skb() is invoked while mhi_sock_lock is
+ * held; verify that delivery path cannot sleep in this context.
+ */
+int mhi_sock_rcv_unicast(struct sk_buff *skb, u8 l3proto, u32 l3length)
+{
+	struct sock *sknode;
+	struct mhi_sock *msk;
+	struct hlist_node *node;
+
+	DPRINTK("mhi_sock_rcv_unicast: proto:%d, len:%d\n", l3proto, l3length);
+
+	spin_lock(&mhi_sock_lock);
+	{
+		sk_for_each(sknode, node, &mhi_sock_list) {
+			msk = mhi_sk(sknode);
+			if ((msk->sk_l3proto == MHI_L3_ANY ||
+			     msk->sk_l3proto == l3proto) &&
+			    (msk->sk_ifindex == skb->dev->ifindex)) {
+				/* Hand ownership of the skb to this socket. */
+				sock_hold(sknode);
+				sk_receive_skb(sknode, skb, 0);
+				skb = NULL;
+				break;
+			}
+		}
+	}
+	spin_unlock(&mhi_sock_lock);
+
+	/* Nobody claimed the packet: drop it. */
+	if (skb)
+		kfree_skb(skb);
+
+	return NET_RX_SUCCESS;
+}
+
+/* Deliver a clone of @skb to every MHI socket whose L3 protocol and
+ * bound interface match; the original skb is always freed here.
+ * Always reports NET_RX_SUCCESS.  A failed clone (GFP_ATOMIC) simply
+ * skips that socket.
+ * NOTE(review): sk_receive_skb() is invoked while mhi_sock_lock is
+ * held; verify that delivery path cannot sleep in this context.
+ */
+int mhi_sock_rcv_multicast(struct sk_buff *skb, u8 l3proto, u32 l3length)
+{
+	struct sock *sknode;
+	struct mhi_sock *msk;
+	struct sk_buff *clone;
+	struct hlist_node *node;
+
+	DPRINTK("mhi_sock_rcv_multicast: proto:%d, len:%d\n",
+		l3proto, l3length);
+
+	spin_lock(&mhi_sock_lock);
+	{
+		sk_for_each(sknode, node, &mhi_sock_list) {
+			msk = mhi_sk(sknode);
+			if ((msk->sk_l3proto == MHI_L3_ANY ||
+			     msk->sk_l3proto == l3proto) &&
+			    (msk->sk_ifindex == skb->dev->ifindex)) {
+				clone = skb_clone(skb, GFP_ATOMIC);
+				if (likely(clone)) {
+					sock_hold(sknode);
+					sk_receive_skb(sknode, clone, 0);
+				}
+			}
+		}
+	}
+	spin_unlock(&mhi_sock_lock);
+
+	/* Every recipient got its own clone; release the original. */
+	kfree_skb(skb);
+
+	return NET_RX_SUCCESS;
+}
+
+/* proto_ops sendmsg hook: delegates to the per-protocol (dgram/raw)
+ * sendmsg implementation attached to the socket's struct proto.
+ */
+int mhi_sock_sendmsg(struct kiocb *iocb,
+		     struct socket *sock, struct msghdr *msg, size_t len)
+{
+	/* Fix: len is size_t, so print it with %zu (%u is wrong on
+	 * 64-bit builds and triggers a printk format warning).
+	 */
+	DPRINTK("mhi_sock_sendmsg: len:%zu\n", len);
+
+	return sock->sk->sk_prot->sendmsg(iocb, sock->sk, msg, len);
+}
+
+int mhi_sock_recvmsg(struct kiocb *iocb,
+		     struct socket *sock,
+		     struct msghdr *msg, size_t len, int flags)
+{
+	int addrlen = 0;
+	int err;
+
+	err = sock->sk->sk_prot->recvmsg(iocb, sock->sk, msg, len,
+					 flags & MSG_DONTWAIT,
+					 flags & ~MSG_DONTWAIT, &addrlen);
+
+	if (err >= 0)
+		msg->msg_namelen = addrlen;
+
+	return err;
+}
+
+int mhi_getsockopt(struct socket *sock, int level, int optname,
+		   char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+	int len, val;
+	void *data;
+
+	if (get_user(len, optlen))
+		return -EFAULT;
+
+	if (len < 0)
+		return -EINVAL;
+
+	switch (optname) {
+	case MHI_DROP_COUNT:
+		if (len > sizeof(int))
+			len = sizeof(int);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
+		val = atomic_read(&sk->sk_drops);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
+		data = &val;
+		break;
+	default:
+		return -ENOPROTOOPT;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, data, len))
+		return -EFAULT;
+	return 0;
+}
+
+void mhi_sock_hash(struct sock *sk)
+{
+	DPRINTK("mhi_sock_hash: proto:%d\n", sk->sk_protocol);
+
+	spin_lock_bh(&mhi_sock_lock);
+	sk_add_node(sk, &mhi_sock_list);
+	spin_unlock_bh(&mhi_sock_lock);
+}
+
+void mhi_sock_unhash(struct sock *sk)
+{
+	DPRINTK("mhi_sock_unhash: proto:%d\n", sk->sk_protocol);
+
+	spin_lock_bh(&mhi_sock_lock);
+	sk_del_node_init(sk);
+	spin_unlock_bh(&mhi_sock_lock);
+}
+
+const struct proto_ops mhi_socket_ops = {
+	.family = AF_MHI,
+	.owner = THIS_MODULE,
+	.release = mhi_sock_release,
+	.bind = mhi_sock_bind,
+	.connect = sock_no_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = datagram_poll,
+	.ioctl = sock_no_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = mhi_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = sock_no_setsockopt,
+	.compat_getsockopt = sock_no_getsockopt,
+#endif
+	.sendmsg = mhi_sock_sendmsg,
+	.recvmsg = mhi_sock_recvmsg,
+	.mmap = sock_no_mmap,
+	.sendpage = sock_no_sendpage,
+};
+
+static const struct net_proto_family mhi_proto_family = {
+	.family = PF_MHI,
+	.create = mhi_sock_create,
+	.owner = THIS_MODULE,
+};
+
+int mhi_sock_init(void)
+{
+	DPRINTK("mhi_sock_init\n");
+
+	INIT_HLIST_HEAD(&mhi_sock_list);
+	spin_lock_init(&mhi_sock_lock);
+
+	return sock_register(&mhi_proto_family);
+}
+
+void mhi_sock_exit(void)
+{
+	DPRINTK("mhi_sock_exit\n");
+
+	sock_unregister(PF_MHI);
+}
+#endif /* CONFIG_BCM_KF_MHI */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0c6f67e8f2e5c22e281c34b62b27f171db0c2fb5..fbdbe2051f0691b4c65e4e74f346dc2386baf585 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -159,6 +159,15 @@ config NF_CT_PROTO_UDPLITE
 	  connections.
 
 	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CT_PROTO_ESP
+	tristate 'ESP protocol connection tracking support'
+	depends on NETFILTER_ADVANCED && BCM_KF_NETFILTER
+	help
+	  With this option enabled, the layer 3 ESP protocol
+	  tracking will be able to do tracking on ESP connections.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
 
 config NF_CONNTRACK_AMANDA
 	tristate "Amanda backup protocol support"
@@ -306,6 +315,16 @@ config NF_CONNTRACK_SIP
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_IPSEC
+	tristate "IPSEC protocol support"
+	depends on BCM_KF_NETFILTER
+	default m if NETFILTER_ADVANCED=n
+	help
+	  IPSec is used for securing IP communications by authenticating and
+	  encrypting each IP packet of a communication session.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CONNTRACK_TFTP
 	tristate "TFTP protocol support"
 	depends on NETFILTER_ADVANCED
@@ -317,6 +336,45 @@ config NF_CONNTRACK_TFTP
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+#BRCM begin
+config NF_DYNDSCP
+	tristate "Dynamic DSCP Mangling support"
+	depends on NF_CONNTRACK && BCM_KF_NETFILTER
+	default n
+	help
+	  This option enables support for dynamic DSCP, i.e. the TOS value
+	  will be derived from the TOS value of the WAN packets of each
+	  connection.
+
+config NF_CONNTRACK_RTSP
+	tristate "RTSP protocol support"
+	depends on NF_CONNTRACK && BCM_KF_NETFILTER
+	help
+	  RTSP (Real Time Streaming Protocol) support.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_LAYER7
+	tristate '"layer7" match support'
+	depends on NF_CONNTRACK && BCM_KF_NETFILTER
+	help
+	  Say Y if you want to be able to classify connections (and their
+	  packets) based on regular expression matching of their application
+	  layer data.   This is one way to classify applications such as
+	  peer-to-peer filesharing systems that do not always use the same
+	  port.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_TARGET_DC
+	tristate '"DC" target support'
+	depends on NETFILTER_XTABLES && BCM_KF_NETFILTER
+	help
+	  Say Y if you want to be able to do data connections (and their
+	  packets) 
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+#BRCM end
+
 config NF_CT_NETLINK
 	tristate 'Connection tracking netlink interface'
 	select NETFILTER_NETLINK
@@ -691,6 +749,16 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
 	  This option adds a "TCPOPTSTRIP" target, which allows you to strip
 	  TCP options from TCP packets.
 
+config NETFILTER_XT_TARGET_SKIPLOG
+	tristate '"SKIPLOG" target support'
+	depends on NETFILTER_XTABLES && (IPV6 || IPV6=n) && BCM_KF_NETFILTER
+	---help---
+	  configuration like:
+
+	  iptables -A FORWARD -p tcp -j SKIPLOG
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 # alphabetically ordered list of matches
 
 comment "Xtables matches"
@@ -1000,6 +1068,23 @@ config NETFILTER_XT_MATCH_PKTTYPE
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+	bool '"quota, tag, owner" match and stats support'
+        depends on NETFILTER_XT_MATCH_SOCKET
+	depends on NETFILTER_XT_MATCH_OWNER=n
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  This option replaces the `owner' match. In addition to matching
+	  on uid, it keeps stats based on a tag assigned to a socket.
+	  The full tag is comprised of a UID and an accounting tag.
+	  The tags are assignable to sockets from user space (e.g. a download
+	  manager can assign the socket to another UID for accounting).
+	  Stats and control are done via /proc/net/xt_qtaguid/.
+	  It replaces owner as it takes the same arguments, but should
+	  really be recognized by the iptables tool.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
 	tristate '"quota" match support'
 	depends on NETFILTER_ADVANCED
@@ -1010,6 +1095,32 @@ config NETFILTER_XT_MATCH_QUOTA
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	help
+	  This option adds a `quota2' match, which allows you to match on a
+	  byte counter correctly and not per CPU.
+	  It allows naming the quotas.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	depends on IP_NF_TARGET_ULOG=n
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  This option allows `quota2' to log ONCE when a quota limit
+	  is passed. It logs via NETLINK using the NETLINK_NFLOG family.
+	  It logs similarly to how ipt_ULOG would without data.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ca3676586f51570c49fed1312390111e13cc10db..3d4352592de97ce341ecd7e40ac883ffdd5a6185 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,3 +1,7 @@
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+EXTRA_CFLAGS	+= -I$(INC_BRCMDRIVER_PUB_PATH)/$(BRCM_BOARD)
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
 
 nf_conntrack-y	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
@@ -20,6 +24,9 @@ obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
 obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
 obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
 obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NF_CT_PROTO_ESP) += nf_conntrack_proto_esp.o
+endif #BCM_KF
 
 # netlink interface for nf_conntrack
 obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
@@ -39,6 +46,11 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
 obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
 obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NF_CONNTRACK_RTSP) += nf_conntrack_rtsp.o
+obj-$(CONFIG_NF_DYNDSCP) += nf_dyndscp.o
+obj-$(CONFIG_NF_CONNTRACK_IPSEC) += nf_conntrack_ipsec.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
 
 # transparent proxy support
 obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
@@ -72,6 +84,10 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NETFILTER_XT_TARGET_SKIPLOG) += xt_SKIPLOG.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_DC)      += xt_DC.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
@@ -101,7 +117,17 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
@@ -113,6 +139,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
+ifdef BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
+obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
+endif #BCM_KF # defined(CONFIG_BCM_KF_NETFILTER)
 
 # ipset
 obj-$(CONFIG_IP_SET) += ipset/
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e051332edb58b4752fb3ca0f6b447c50239d..a37ab9952e59d17d18e72c6aed086b3f24e1aaf6 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -25,6 +25,17 @@
 
 #include "nf_internals.h"
 
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RUNNER_RG) || defined(CONFIG_BCM_RUNNER_RG_MODULE)
+#include <net/bl_ops.h>
+struct bl_ops_t *bl_ops = NULL;
+EXPORT_SYMBOL(bl_ops);
+#endif /* CONFIG_BCM_RUNNER_RG || CONFIG_BCM_RUNNER_RG_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 static DEFINE_MUTEX(afinfo_mutex);
 
 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda05986cc09d4283c9a753e7d29a5508e0b..bcb5cb850dea64fa2d7010d738eef281f3d259fb 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -13,12 +13,19 @@
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/export.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/flwstif.h>
+#endif
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+static bool nf_ct_acct __read_mostly = 1;
+#else
 static bool nf_ct_acct __read_mostly;
+#endif
 
 module_param_named(acct, nf_ct_acct, bool, 0644);
 MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -45,12 +52,181 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
 	if (!acct)
 		return 0;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		unsigned long long pkts;
+		unsigned long long bytes;
+		FlwStIf_t fast_stats;
+
+		pkts = (unsigned long long)atomic64_read(&acct[dir].packets);
+		bytes = (unsigned long long)atomic64_read(&acct[dir].bytes);
+
+        fast_stats.rx_packets = 0;
+        fast_stats.rx_bytes = 0;
+
+		if (ct->blog_key[dir] != BLOG_KEY_NONE)
+		{
+			flwStIf_request(FLWSTIF_REQ_GET, &fast_stats,
+							ct->blog_key[dir], 0, 0);
+			acct[dir].ts = fast_stats.pollTS_ms;
+		}
+
+		return seq_printf(s, "packets=%llu bytes=%llu ",
+			  pkts+acct[dir].cum_fast_pkts+fast_stats.rx_packets,
+			  bytes+acct[dir].cum_fast_bytes+fast_stats.rx_bytes);
+	}
+#else
 	return seq_printf(s, "packets=%llu bytes=%llu ",
 			  (unsigned long long)atomic64_read(&acct[dir].packets),
 			  (unsigned long long)atomic64_read(&acct[dir].bytes));
+#endif
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+unsigned int
+seq_print_acct_dpi(struct seq_file *s, const struct nf_conn *ct, int dir)
+{
+	struct nf_conn_counter *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		unsigned long long pkts;
+		unsigned long long bytes;
+		FlwStIf_t fast_stats;
+
+		pkts = (unsigned long long)atomic64_read(&acct[dir].packets);
+		bytes = (unsigned long long)atomic64_read(&acct[dir].bytes);
+
+        fast_stats.rx_packets = 0;
+        fast_stats.rx_bytes = 0;
+
+		if (ct->blog_key[dir] != BLOG_KEY_NONE)
+		{
+			flwStIf_request(FLWSTIF_REQ_GET, &fast_stats,
+							ct->blog_key[dir], 0, 0);
+			acct[dir].ts = fast_stats.pollTS_ms;
+		}
+
+		return seq_printf(s, "%llu %llu %lu ",
+			  pkts+acct[dir].cum_fast_pkts+fast_stats.rx_packets,
+			  bytes+acct[dir].cum_fast_bytes+fast_stats.rx_bytes,
+              acct[dir].ts);
+	}
+#else
+	return seq_printf(s, "%llu %llu %lu ",
+			  (unsigned long long)atomic64_read(&acct[dir].packets),
+			  (unsigned long long)atomic64_read(&acct[dir].bytes),
+              0);
+#endif
+}
+EXPORT_SYMBOL_GPL(seq_print_acct_dpi);
+
+int conntrack_get_stats( const struct nf_conn *ct, int dir,
+                         CtkStats_t *stats_p )
+{
+	struct nf_conn_counter *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+	if (!stats_p)
+		return -1;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		unsigned long long pkts;
+		unsigned long long bytes;
+		FlwStIf_t fast_stats;
+
+		pkts = (unsigned long long)atomic64_read(&acct[dir].packets);
+		bytes = (unsigned long long)atomic64_read(&acct[dir].bytes);
+
+        fast_stats.rx_packets = 0;
+        fast_stats.rx_bytes = 0;
+
+		if (ct->blog_key[dir] != BLOG_KEY_NONE)
+		{
+			flwStIf_request(FLWSTIF_REQ_GET, &fast_stats,
+							ct->blog_key[dir], 0, 0);
+
+			acct[dir].ts = fast_stats.pollTS_ms;
+		}
+
+		stats_p->pkts = pkts + acct[dir].cum_fast_pkts + 
+                        fast_stats.rx_packets;
+		stats_p->bytes = bytes + acct[dir].cum_fast_bytes +
+                         fast_stats.rx_bytes;
+		stats_p->ts = acct[dir].ts;
+	}
+#else
+	stats_p->pkts = atomic64_read(&acct[dir].packets);
+	stats_p->bytes = atomic64_read(&acct[dir].bytes);
+	stats_p->ts = 0;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(conntrack_get_stats);
+
+int conntrack_evict_stats( const struct nf_conn *ct, int dir,
+                           CtkStats_t *stats_p )
+{
+	struct nf_conn_counter *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+	if (!stats_p)
+		return -1;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		unsigned long long pkts;
+		unsigned long long bytes;
+
+		pkts = (unsigned long long)atomic64_read(&acct[dir].packets);
+		bytes = (unsigned long long)atomic64_read(&acct[dir].bytes);
+
+		stats_p->pkts = pkts + acct[dir].cum_fast_pkts; 
+		stats_p->bytes = bytes + acct[dir].cum_fast_bytes;
+		stats_p->ts = acct[dir].ts;
+	}
+#else
+	stats_p->pkts = atomic64_read(&acct[dir].packets);
+	stats_p->bytes = atomic64_read(&acct[dir].bytes);
+	stats_p->ts = 0;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(conntrack_evict_stats);
+
+#if 0
+int conntrack_max_dpi_pkt( struct nf_conn *ct, int max_pkt )
+{
+	struct nf_conn_counter *acct;
+	unsigned long long pkt1, pkt2;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+	pkt1 = atomic64_read(&acct[0].packets);
+	pkt2 = atomic64_read(&acct[1].packets);
+	if ((pkt1 + pkt2) > max_pkt)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(conntrack_max_dpi_pkt);
+#endif
+#endif
+
 static struct nf_ct_ext_type acct_extend __read_mostly = {
 	.len	= sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]),
 	.align	= __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]),
@@ -102,6 +278,46 @@ static void nf_conntrack_acct_fini_sysctl(struct net *net)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+/*
+ *---------------------------------------------------------------------------
+ * Function Name: flwStPushFunc
+ *---------------------------------------------------------------------------
+ */
+int flwStPushFunc( void *ctk1, void *ctk2, uint32_t dir,
+                   FlwStIf_t *flwSt_p )
+{
+	struct nf_conn_counter *acct;
+
+	if (flwSt_p == NULL)
+		return -1;
+
+	if (ctk1 != NULL)
+	{
+		acct = nf_conn_acct_find((struct nf_conn *)ctk1);
+		if (acct)
+		{
+			acct[dir].cum_fast_pkts += flwSt_p->rx_packets;
+			acct[dir].cum_fast_bytes += flwSt_p->rx_bytes;
+			acct[dir].ts = flwSt_p->pollTS_ms;
+		}
+	}
+
+	if (ctk2 != NULL)
+	{
+		acct = nf_conn_acct_find((struct nf_conn *)ctk2);
+		if (acct)
+		{
+			acct[dir].cum_fast_pkts += flwSt_p->rx_packets;
+			acct[dir].cum_fast_bytes += flwSt_p->rx_bytes;
+			acct[dir].ts = flwSt_p->pollTS_ms;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 int nf_conntrack_acct_init(struct net *net)
 {
 	int ret;
@@ -120,6 +336,13 @@ int nf_conntrack_acct_init(struct net *net)
 	if (ret < 0)
 		goto out_sysctl;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	flwStIf_bind(NULL, flwStPushFunc);
+#endif
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	dpistats_init();
+#endif
+
 	return 0;
 
 out_sysctl:
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 729f157a0efa690cf877dbd9dbb94dd1b09f1be6..04966554dbdd90577a634ac4fe0a8532b4ce9839 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -32,6 +32,19 @@
 #include <linux/mm.h>
 #include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>
+#if defined(CONFIG_BCM_KF_BLOG)
+#include <linux/blog.h>
+#include <linux/iqos.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/iqos.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#include <linux/dpistats.h>
+#include <linux/dpi_ctk.h>
+#endif
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -48,6 +61,12 @@
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#include <net/bl_ops.h>
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 #define NF_CONNTRACK_VERSION	"0.5.0"
 
 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
@@ -70,6 +89,18 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/* bugfix for lost connection */
+LIST_HEAD(lo_safe_list);
+LIST_HEAD(hi_safe_list);
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+int blog_dpi_ctk_update(uint32_t appid);
+#endif
+#endif
+
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
 	unsigned int n;
@@ -187,6 +218,62 @@ clean_from_lists(struct nf_conn *ct)
 	nf_ct_remove_expectations(ct);
 }
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+static inline void evict_ctk_update( struct nf_conn *ct )
+{
+#if 0
+	if (ct->stats_idx == DPISTATS_IX_INVALID) {
+		if (ct->dpi.app_id == 0) goto stats_done;
+
+		ct->stats_idx = dpistats_lookup(&ct->dpi);
+	}
+#endif
+	if (ct->dpi.app_id == 0)
+		return;
+
+	ct->stats_idx = dpistats_lookup(&ct->dpi);
+
+	if (ct->stats_idx != DPISTATS_IX_INVALID) {
+		DpiStatsEntry_t stats;
+
+		if (!IS_CTK_INIT_FROM_WAN(ct)) {
+			if (conntrack_evict_stats(ct, IP_CT_DIR_ORIGINAL,
+						  &stats.upstream))
+				printk("1conntrack_evict_stats(upstream) fails");
+
+			if ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) {
+				if (conntrack_evict_stats(ct, IP_CT_DIR_REPLY,
+							  &stats.dnstream))
+					printk("1conntrack_evict_stats(dnstream) fails");
+			} else
+				memset(&stats.dnstream, 0 , sizeof(CtkStats_t));
+		} else {	/* origin direction is dnstream */
+			if (conntrack_evict_stats(ct, IP_CT_DIR_ORIGINAL,
+						  &stats.dnstream))
+				printk("2conntrack_evict_stats(dnstream) fails");
+
+			if ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) {
+				if (conntrack_evict_stats(ct, IP_CT_DIR_REPLY,
+							  &stats.upstream))
+					printk("2conntrack_evict_stats(upstream) fails");
+			} else
+				memset(&stats.upstream, 0 , sizeof(CtkStats_t));
+		}
+
+		dpistats_update(ct->stats_idx, &stats);
+	}
+
+	return;
+}
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static void death_by_timeout(unsigned long ul_conntrack);
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+static void blog_death_by_timeout(unsigned long ul_conntrack);
+#endif
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -194,7 +281,41 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_l4proto *l4proto;
 
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	pr_debug("%s(%p) blog keys[0x%08x,0x%08x]\n", __func__,
+		ct, ct->blog_key[IP_CT_DIR_ORIGINAL],
+		ct->blog_key[IP_CT_DIR_REPLY]);
+
+
+	/* Conntrack going away, notify blog client */
+	if ((ct->blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_NONE) ||
+			(ct->blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_NONE)) {
+		/*
+		 *  Blog client may perform the following blog requests:
+		 *	- FLOWTRACK_KEY_SET BLOG_PARAM1_DIR_ORIG 0
+		 *	- FLOWTRACK_KEY_SET BLOG_PARAM1_DIR_REPLY 0
+		 *	- FLOWTRACK_EXCLUDE
+		 */
+		blog_notify(DESTROY_FLOWTRACK, (void*)ct,
+					(uint32_t)ct->blog_key[IP_CT_DIR_ORIGINAL],
+					(uint32_t)ct->blog_key[IP_CT_DIR_REPLY]);
+
+		/* Safe: In case blog client does not set key to 0 explicitly */
+		ct->blog_key[IP_CT_DIR_ORIGINAL] = BLOG_KEY_NONE;
+		ct->blog_key[IP_CT_DIR_REPLY]    = BLOG_KEY_NONE;
+		ct->prev_idle = 0;
+	}
+	clear_bit(IPS_BLOG_BIT, &ct->status);	/* Disable further blogging */
+	blog_unlock();
+#else
 	pr_debug("destroy_conntrack(%p)\n", ct);
+#endif
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	evict_ctk_update(ct);
+#endif
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
@@ -215,6 +336,20 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	 * too. */
 	nf_ct_remove_expectations(ct);
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_netfilter_nf_conntrack_core_destroy_conntrack(ct));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_XT_MATCH_LAYER7) && \
+	(defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE))
+	if (ct->layer7.app_proto)
+		kfree(ct->layer7.app_proto);
+	if (ct->layer7.app_data)
+		kfree(ct->layer7.app_data);
+#endif
+
 	/* We overload first tuple to link into unconfirmed list. */
 	if (!nf_ct_is_confirmed(ct)) {
 		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
@@ -225,7 +360,63 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	spin_unlock_bh(&nf_conntrack_lock);
 
 	if (ct->master)
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	{
+		struct nf_conn_help * help;
+		struct nf_conntrack_helper *helper;
+
+		help = test_bit(IPS_DYING_BIT, &ct->master->status)? 
+			NULL: nfct_help(ct->master);
+
+		if (help) {
+			rcu_read_lock();
+			helper = rcu_dereference(help->helper);
+
+			if (helper && helper->name) {
+
+				pr_debug("helper->name:%s\n", helper->name);
+
+				if ( (!strncmp(helper->name, "sip", 3)
+				      && (nfct_help(ct) == NULL))
+				     || !strncmp(helper->name, "H.245", 5)
+				     || !strncmp(helper->name, "Q.931", 5)
+				     || !strncmp(helper->name, "RAS", 3)
+				     || !strncmp(helper->name, "rtsp", 4)) {
+
+					iqos_rem_L4port(IQOS_IPPROTO_UDP,
+							ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port,
+							IQOS_ENT_DYN);
+					iqos_rem_L4port(IQOS_IPPROTO_UDP,
+							ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port, 
+							IQOS_ENT_DYN);
+					pr_debug("remove iqos port :%u\n",
+						 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port);
+					pr_debug("remove iqos port :%u\n",
+						 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port);
+				}
+			}
+			rcu_read_unlock();
+		}
+		list_del(&ct->derived_list);
+#endif
 		nf_ct_put(ct->master);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	}
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* Disconnect all child connections that have infinite timeout */
+	if (!list_empty(&ct->derived_connections)) {
+		struct nf_conn *child, *tmp;
+
+		list_for_each_entry_safe(child, tmp, &ct->derived_connections,
+			derived_list) {
+			if (child->derived_timeout == 0xFFFFFFFF &&
+			    del_timer(&child->timeout))
+				death_by_timeout((unsigned long)child);
+		}
+	}
+#endif
 
 	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
 	nf_conntrack_free(ct);
@@ -287,6 +478,15 @@ static void death_by_timeout(unsigned long ul_conntrack)
 	struct nf_conn *ct = (void *)ul_conntrack;
 	struct nf_conn_tstamp *tstamp;
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS_CR(net_netfilter_nf_conntrack_core_death_by_timeout(ct));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	evict_ctk_update(ct);
+#endif
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp && tstamp->stop == 0)
 		tstamp->stop = ktime_to_ns(ktime_get_real());
@@ -303,6 +503,89 @@ static void death_by_timeout(unsigned long ul_conntrack)
 	nf_ct_put(ct);
 }
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+void __nf_ct_time(struct nf_conn *ct, BlogCtTime_t *ct_time_p)
+{
+	/* Cases:
+	* a) conn has been active, prev_idle = 0, idle_jiffies = 0
+	* b) conn becomes idle,  prev_idle = 0, idle_jiffies != 0
+	* c) conn becomes idle in prev timeout and then becomes active again.
+	*    prev_idle = 0, and idle_jiffies != 0.
+	* d) conn was idle in prev timeout and is still idle.
+	*    prev_idle != 0, and idle_jiffies != 0.
+	*
+	*    In the first three cases (a) to (c), timer should be restarted
+	*    after adjustment for idle_jiffies.
+	*
+	*    In the last case (d), on expiry it is time to destroy the conn.
+	*/
+	if ((!ct->prev_idle) || (!ct_time_p->idle_jiffies)) {
+		unsigned long newtime;
+
+		if (timer_pending(&ct->timeout))
+			del_timer(&ct->timeout);
+
+		ct->prev_timeout.expires = ct->timeout.expires;
+		newtime= jiffies + (ct_time_p->extra_jiffies - ct_time_p->idle_jiffies);
+		ct->timeout.expires = newtime;
+		add_timer(&ct->timeout);
+		ct->prev_idle = ct_time_p->idle_jiffies;
+	} else {
+		if (timer_pending(&ct->timeout))
+			del_timer(&ct->timeout);
+
+		death_by_timeout((unsigned long) ct);
+	}
+}
+
+static void blog_death_by_timeout(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+	BlogCtTime_t ct_time;
+	uint32_t ct_blog_key = 0;
+
+	blog_lock();
+	if (ct->blog_key[BLOG_PARAM1_DIR_ORIG] != BLOG_KEY_NONE ||
+	    ct->blog_key[BLOG_PARAM1_DIR_REPLY] != BLOG_KEY_NONE) {
+		blog_query(QUERY_FLOWTRACK, (void*)ct,
+			ct->blog_key[BLOG_PARAM1_DIR_ORIG],
+			ct->blog_key[BLOG_PARAM1_DIR_REPLY], (uint32_t) &ct_time);
+
+		ct_blog_key = 1;
+	}
+	blog_unlock();
+
+	if (ct_blog_key)
+		__nf_ct_time(ct, &ct_time);
+	else {
+		if (timer_pending(&ct->timeout))
+			del_timer(&ct->timeout);
+
+		death_by_timeout((unsigned long) ct);
+	}
+}
+
+void __nf_ct_time_update(struct nf_conn *ct, BlogCtTime_t *ct_time_p)
+{
+	unsigned long newtime;
+
+	if (!timer_pending(&ct->timeout))
+		return;
+
+	if (ct->blog_key[BLOG_PARAM1_DIR_ORIG] != BLOG_KEY_NONE ||
+	    ct->blog_key[BLOG_PARAM1_DIR_REPLY] != BLOG_KEY_NONE) {
+		ct->prev_idle = 0;
+
+		del_timer(&ct->timeout);
+
+		newtime = jiffies + (ct_time_p->extra_jiffies - ct_time_p->idle_jiffies);
+		ct->prev_timeout.expires = jiffies;
+		ct->timeout.expires = newtime;
+		add_timer(&ct->timeout);
+	}
+}
+#endif
+
 /*
  * Warning :
  * - Caller must take a reference on returned object
@@ -524,6 +807,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	   weird delay cases. */
 	ct->timeout.expires += jiffies;
 	add_timer(&ct->timeout);
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_netfilter_nf_conntrack_core_nf_conntrack_confirm(ct, skb));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 	atomic_inc(&ct->ct_general.use);
 	ct->status |= IPS_CONFIRMED;
 
@@ -593,6 +883,60 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static int regardless_drop(struct net *net, struct sk_buff *skb)
+{
+	struct nf_conn *ct = NULL;
+	struct list_head *tmp;
+	int dropped = 0;
+
+	/* Choose the first one (also the oldest one). LRU */
+	spin_lock_bh(&nf_conntrack_lock);
+	if (!list_empty(&lo_safe_list)) {
+		list_for_each(tmp, &lo_safe_list) {
+			ct = container_of(tmp, struct nf_conn, safe_list);
+			if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+				ct = NULL;
+
+			if (ct)
+				break;
+		}
+	}
+
+	if (!ct && (blog_iq(skb) == IQOS_PRIO_HIGH)) {
+		list_for_each(tmp, &hi_safe_list) {
+			ct = container_of(tmp, struct nf_conn, safe_list);
+			if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+				ct = NULL;
+
+			if (ct)
+				break;
+		}
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	if (!ct)
+		return dropped;
+
+	if (del_timer(&ct->timeout)) {
+		death_by_timeout((unsigned long)ct);
+		if (test_bit(IPS_DYING_BIT, &ct->status)) {
+			dropped = 1;
+			NF_CT_STAT_INC_ATOMIC(net, early_drop);
+		}
+	}
+	/* else {
+	 * this happens when the ct at safelist head is removed from the timer list 
+	 * but not yet freed due to ct->ct_general.use > 1. This ct will be freed when its
+	 * ref count is dropped to zero. At this point we dont create new connections 
+	 * until some old connection are freed.
+	 * }
+	 */
+
+	nf_ct_put(ct);
+	return dropped;
+}
+#else
 #define NF_CT_EVICTION_RANGE	8
 
 /* There's a small race here where we may free a just-assured
@@ -646,6 +990,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	nf_ct_put(ct);
 	return dropped;
 }
+#endif
 
 void init_nf_conntrack_hash_rnd(void)
 {
@@ -662,11 +1007,21 @@ void init_nf_conntrack_hash_rnd(void)
 	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
 }
 
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net, u16 zone,
+		     struct sk_buff *skb,
 		     const struct nf_conntrack_tuple *orig,
 		     const struct nf_conntrack_tuple *repl,
 		     gfp_t gfp, u32 hash)
+#else
+static struct nf_conn *
+__nf_conntrack_alloc(struct net *net, u16 zone,
+		     const struct nf_conntrack_tuple *orig,
+		     const struct nf_conntrack_tuple *repl,
+		     gfp_t gfp, u32 hash)
+#endif
 {
 	struct nf_conn *ct;
 
@@ -681,6 +1036,17 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 
 	if (nf_conntrack_max &&
 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+		/* Sorry, we have to kick LRU out regardlessly. */
+		if (!regardless_drop(net, skb)) {
+				atomic_dec(&net->ct.count);
+			if (net_ratelimit())
+			printk(KERN_WARNING
+				"nf_conntrack: table full, dropping"
+				" packet.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+#else
 		if (!early_drop(net, hash_bucket(hash, net))) {
 			atomic_dec(&net->ct.count);
 			if (net_ratelimit())
@@ -689,6 +1055,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 				       " packet.\n");
 			return ERR_PTR(-ENOMEM);
 		}
+#endif
 	}
 
 	/*
@@ -700,6 +1067,14 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	INIT_LIST_HEAD(&ct->safe_list);
+	INIT_LIST_HEAD(&ct->derived_connections);
+	INIT_LIST_HEAD(&ct->derived_list);
+	ct->derived_timeout = 0;
+#endif
+
 	/*
 	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
 	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
@@ -707,6 +1082,30 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
 	       offsetof(struct nf_conn, proto) -
 	       offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* Broadcom changed the position of these two fields.  They used to be
+	   in the area being memset to 0 */
+	ct->master = 0;
+	ct->status = 0;
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER) && (defined(CONFIG_NF_DYNDSCP) || defined(CONFIG_NF_DYNDSCP_MODULE))
+	ct->dyndscp.status = 0;
+	ct->dyndscp.dscp[0] = 0;
+	ct->dyndscp.dscp[1] = 0;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	pr_debug("nf_conntrack_alloc: ct<%p> BLOGible\n", ct );
+	set_bit(IPS_BLOG_BIT, &ct->status);  /* Enable conntrack blogging */
+
+	/* new conntrack: reset blog keys */
+	ct->blog_key[IP_CT_DIR_ORIGINAL] = BLOG_KEY_NONE;
+	ct->blog_key[IP_CT_DIR_REPLY]    = BLOG_KEY_NONE;
+	ct->prev_idle = 0;
+	ct->iq_prio = blog_iq(skb);
+	ct->prev_timeout.expires = jiffies;
+#endif
 	spin_lock_init(&ct->lock);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -714,7 +1113,38 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 	/* save hash for reusing when confirming */
 	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
 	/* Don't set timer yet: wait for confirmation */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	setup_timer(&ct->timeout, blog_death_by_timeout, (unsigned long)ct);
+#else
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
+#endif
+
+#if defined(CONFIG_BCM_KF_XT_MATCH_LAYER7) && \
+	(defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE))
+	ct->layer7.app_proto = NULL;
+	ct->layer7.app_data = NULL;
+	ct->layer7.app_data_len = 0;
+#endif
+
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	ct->dpi.app_id = 0;
+	ct->dpi.dev_key = 0;
+	ct->dpi.flags = 0;
+	ct->dpi.url_id = 0;
+	ct->stats_idx = DPISTATS_IX_INVALID;
+
+	if (skb && (skb->dev) && (skb->dev->priv_flags & IFF_WANDEV))
+		ct->dpi.flags |= CTK_INIT_FROM_WAN;
+#endif
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#if defined(CONFIG_BCM_RUNNER_RG) || defined(CONFIG_BCM_RUNNER_RG_MODULE)
+	ct->bl_ctx = NULL;
+	BL_OPS(net_netfilter_nf_conntrack_core_nf_conntrack_alloc(ct));
+#endif /* CONFIG_BCM_RUNNER_RG || CONFIG_BCM_RUNNER_RG_MODULE */
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
 	write_pnet(&ct->ct_net, net);
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	if (zone) {
@@ -741,6 +1171,16 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 #endif
 }
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+				   struct sk_buff *skb,
+				   const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl,
+				   gfp_t gfp)
+{
+	return __nf_conntrack_alloc(net, zone, skb, orig, repl, gfp, 0);
+}
+#else
 struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
@@ -748,12 +1188,26 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
 {
 	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
 }
+#endif
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_netfilter_nf_conntrack_core_nf_conntrack_free(ct));
+#endif /* CONFIG_BCM_RUNNER */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* bugfix for lost connections */
+	spin_lock_bh(&nf_conntrack_lock);
+	list_del(&ct->safe_list);
+	spin_unlock_bh(&nf_conntrack_lock);
+#endif
+
 	nf_ct_ext_destroy(ct);
 	atomic_dec(&net->ct.count);
 	nf_ct_ext_free(ct);
@@ -785,8 +1239,13 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		return NULL;
 	}
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	ct = __nf_conntrack_alloc(net, zone, skb, tuple, &repl_tuple, GFP_ATOMIC,
+				  hash);
+#else
 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
 				  hash);
+#endif
 	if (IS_ERR(ct))
 		return (struct nf_conntrack_tuple_hash *)ct;
 
@@ -814,6 +1273,13 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 			     GFP_ATOMIC);
 
 	spin_lock_bh(&nf_conntrack_lock);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* bugfix for lost connections */
+	if (ct->iq_prio == IQOS_PRIO_HIGH)
+		list_add_tail(&ct->safe_list, &hi_safe_list);
+	else
+		list_add_tail(&ct->safe_list, &lo_safe_list);
+#endif
 	exp = nf_ct_find_expectation(net, zone, tuple);
 	if (exp) {
 		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
@@ -821,6 +1287,12 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		/* Welcome, Mr. Bond.  We've been expecting you... */
 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
 		ct->master = exp->master;
+#if defined(CONFIG_BCM_KF_NETFILTER)
+		list_add(&ct->derived_list,
+			 &exp->master->derived_connections);
+		if (exp->flags & NF_CT_EXPECT_DERIVED_TIMEOUT)
+			ct->derived_timeout = exp->derived_timeout;
+#endif
 		if (exp->helper) {
 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
 			if (help)
@@ -915,6 +1387,37 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 	}
 	skb->nfct = &ct->ct_general;
 	skb->nfctinfo = *ctinfo;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	{
+		struct nf_conn_help * help = nfct_help(ct);
+
+		blog_lock();
+		if ((help != (struct nf_conn_help *)NULL) &&
+		    (help->helper != (struct nf_conntrack_helper *)NULL) &&
+		    (help->helper->name && strcmp(help->helper->name, "BCM-NAT"))) {
+			pr_debug("nf_conntrack_in: skb<%p> ct<%p> helper<%s> found\n",
+					skb, ct, help->helper->name);
+			clear_bit(IPS_BLOG_BIT, &ct->status);
+		}
+		if (test_bit(IPS_BLOG_BIT, &ct->status)) {	/* OK to blog ? */
+			uint32_t ct_type=(l3num==PF_INET)?BLOG_PARAM2_IPV4:BLOG_PARAM2_IPV6;
+			pr_debug("nf_conntrack_in: skb<%p> blog<%p> ct<%p>\n",
+						skb, blog_ptr(skb), ct);
+
+			if (protonum == IPPROTO_GRE)
+				ct_type = BLOG_PARAM2_GRE_IPV4;
+
+			blog_link(FLOWTRACK, blog_ptr(skb),
+					(void*)ct, CTINFO2DIR(skb->nfctinfo), ct_type);
+		} else {
+			pr_debug("nf_conntrack_in: skb<%p> ct<%p> NOT BLOGible<%p>\n",
+					skb, ct, blog_ptr(skb));
+			blog_skip(skb);		/* No blogging */
+		}
+		blog_unlock();
+	}
+#endif
+
 	return ct;
 }
 
@@ -1013,8 +1516,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 		goto out;
 	}
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	BL_OPS(net_netfilter_nf_conntrack_core_nf_conntrack_in(ct, skb));
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
 		nf_conntrack_event_cache(IPCT_REPLY, ct);
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* Maintain LRU list. The least recently used ct is on the head */
+	if (ctinfo == IP_CT_ESTABLISHED ||
+	    ctinfo == IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+		spin_lock_bh(&nf_conntrack_lock);
+		/* Update ct as latest used */
+		if (ct->iq_prio == IQOS_PRIO_HIGH)
+			list_move_tail(&ct->safe_list, &hi_safe_list);
+		else
+			list_move_tail(&ct->safe_list, &lo_safe_list);
+
+		spin_unlock_bh(&nf_conntrack_lock);
+	}
+#endif
+
 out:
 	if (tmpl) {
 		/* Special case: we have to repeat this hook, assign the
@@ -1095,6 +1620,24 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 			mod_timer_pending(&ct->timeout, newtime);
 	}
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+	/* Check if the flow is blogged i.e. currently being accelerated */
+	if (ct->blog_key[BLOG_PARAM1_DIR_ORIG] != BLOG_KEY_NONE ||
+		ct->blog_key[BLOG_PARAM1_DIR_REPLY] != BLOG_KEY_NONE) {
+		/* Maintain LRU list. The least recently used ct is on the head */
+		/*
+		 * The safe_list position is only updated when a blog refresh runs.
+		 * If the refresh interval is large, a connection carrying heavy
+		 * traffic may still appear least-recently-used to conntrack.
+		 */
+		spin_lock_bh(&nf_conntrack_lock);
+		if (ct->iq_prio == IQOS_PRIO_HIGH)
+			list_move_tail(&ct->safe_list, &hi_safe_list);
+		else
+			list_move_tail(&ct->safe_list, &lo_safe_list);
+		spin_unlock_bh(&nf_conntrack_lock);
+	}
+#endif
 acct:
 	if (do_acct) {
 		struct nf_conn_counter *acct;
@@ -1344,10 +1887,24 @@ static void nf_conntrack_cleanup_init_net(void)
 
 static void nf_conntrack_cleanup_net(struct net *net)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	int try_counter = 0;
+	unsigned long start = jiffies;
+	unsigned long end = start + HZ;
+#endif
  i_see_dead_people:
 	nf_ct_iterate_cleanup(net, kill_all, NULL);
 	nf_ct_release_dying_list(net);
 	if (atomic_read(&net->ct.count) != 0) {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+		if (jiffies >= end) {
+			printk("waiting for %d conntrack to be cleaned, "
+			       "tried %d times\n",
+			       atomic_read(&net->ct.count), try_counter);
+			end += HZ;
+		}
+		try_counter++;
+#endif
 		schedule();
 		goto i_see_dead_people;
 	}
@@ -1361,6 +1918,12 @@ static void nf_conntrack_cleanup_net(struct net *net)
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 	kfree(net->ct.slabname);
 	free_percpu(net->ct.stat);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	end = jiffies;
+	if (end - start > HZ)
+		printk("nf_conntrack took %lu milliseconds to clean up\n",
+		       (end - start) * 1000 / HZ);
+#endif
 }
 
 /* Mishearing the voices in his head, our hero wonders how he's
@@ -1589,6 +2152,10 @@ static int nf_conntrack_init_net(struct net *net)
 	if (ret < 0)
 		goto err_timeout;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_cttime_update_fn = (blog_cttime_upd_t)__nf_ct_time_update;
+#endif
+
 	return 0;
 
 err_timeout:
@@ -1616,6 +2183,45 @@ s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			u32 seq);
 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+int blog_dpi_ctk_update(uint32_t appid)
+{
+	struct nf_conn *ct = NULL;
+	struct list_head *tmp;
+
+	spin_lock_bh(&nf_conntrack_lock);
+	if (!list_empty(&lo_safe_list)) {
+		list_for_each(tmp, &lo_safe_list) {
+			ct = container_of(tmp, struct nf_conn, safe_list);
+			if ( (ct->dpi.app_id == appid) &&
+				 ( (ct->blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_NONE) ||
+				   (ct->blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_NONE) ) )
+				blog_notify(CONFIG_CHANGE, (void*)ct,
+							(uint32_t)ct->blog_key[IP_CT_DIR_ORIGINAL],
+							(uint32_t)ct->blog_key[IP_CT_DIR_REPLY]);
+		}
+	}
+
+	if (!list_empty(&hi_safe_list)) {
+		list_for_each(tmp, &hi_safe_list) {
+			ct = container_of(tmp, struct nf_conn, safe_list);
+			if ( (ct->dpi.app_id == appid) &&
+				 ( (ct->blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_NONE) ||
+				   (ct->blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_NONE) ) )
+				blog_notify(CONFIG_CHANGE, (void*)ct,
+							(uint32_t)ct->blog_key[IP_CT_DIR_ORIGINAL],
+							(uint32_t)ct->blog_key[IP_CT_DIR_REPLY]);
+		}
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+
+    return 0;
+}
+EXPORT_SYMBOL(blog_dpi_ctk_update);
+#endif
+#endif
+
 int nf_conntrack_init(struct net *net)
 {
 	int ret;
@@ -1637,6 +2243,11 @@ int nf_conntrack_init(struct net *net)
 		/* Howto get NAT offsets */
 		RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
 	}
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	blog_dpi_ctk_update_fn = (blog_dpi_ctk_update_t) blog_dpi_ctk_update;
+#endif
+#endif
 	return 0;
 
 out_net:
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c5c95c6d34f1be6912d6ffe5ad7066b23c24f5d..4fdc6b399fb30be2683c47bafd007c223a99d8bb 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -26,6 +26,13 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <linux/netfilter/nf_conntrack_ftp.h>
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+#include <net/bl_ops.h>
+#endif /* CONFIG_BCM_RDPA || CONFIG_BCM_RDPA_MODULE */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
 MODULE_DESCRIPTION("ftp connection tracking helper");
@@ -34,6 +41,9 @@ MODULE_ALIAS_NFCT_HELPER("ftp");
 
 /* This is slow, but it's simple. --RR */
 static char *ftp_buffer;
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static char *ftp_big_buffer = NULL;
+#endif
 
 static DEFINE_SPINLOCK(nf_ftp_lock);
 
@@ -341,7 +351,7 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
 			oldest = 1;
 
 		if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
-			info->seq_aft_nl[dir][oldest] = nl_seq;
+		info->seq_aft_nl[dir][oldest] = nl_seq;
 	}
 }
 
@@ -354,7 +364,11 @@ static int help(struct sk_buff *skb,
 	const struct tcphdr *th;
 	struct tcphdr _tcph;
 	const char *fb_ptr;
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	int ret = NF_ACCEPT;
+#else
 	int ret;
+#endif
 	u32 seq;
 	int dir = CTINFO2DIR(ctinfo);
 	unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
@@ -387,6 +401,17 @@ static int help(struct sk_buff *skb,
 	datalen = skb->len - dataoff;
 
 	spin_lock_bh(&nf_ftp_lock);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* In worst case, the packet size will increase by 20 bytes after
+	 * NAT modification */
+	if (datalen > NF_ALG_BUFFER_SIZE - 20) {
+		ftp_big_buffer = kmalloc(datalen + 20, GFP_ATOMIC);
+		if (!ftp_big_buffer)
+			goto out;
+		fb_ptr = skb_header_pointer(skb, dataoff, datalen,
+					    ftp_big_buffer);
+	} else
+#endif
 	fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
 	BUG_ON(fb_ptr == NULL);
 
@@ -495,7 +520,14 @@ static int help(struct sk_buff *skb,
 		if (nf_ct_expect_related(exp) != 0)
 			ret = NF_DROP;
 		else
+#if defined(CONFIG_BCM_KF_RUNNER) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+		{
+			BL_OPS(net_netfilter_nf_conntrack_ftp(ct, ctinfo, exp, search[dir][i].ftptype));
 			ret = NF_ACCEPT;
+		}
+#else /* CONFIG_BCM_KF_RUNNER && CONFIG_BCM_RUNNER */
+			ret = NF_ACCEPT;
+#endif /* CONFIG_BCM_KF_RUNNER && CONFIG_BCM_RUNNER */
 	}
 
 out_put_expect:
@@ -507,6 +539,12 @@ static int help(struct sk_buff *skb,
 	if (ends_in_nl)
 		update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
  out:
+#if defined(CONFIG_BCM_KF_NETFILTER)
+ 	if (ftp_big_buffer) {
+		kfree(ftp_big_buffer);
+		ftp_big_buffer = NULL;
+	}
+#endif
 	spin_unlock_bh(&nf_ftp_lock);
 	return ret;
 }
@@ -543,7 +581,11 @@ static int __init nf_conntrack_ftp_init(void)
 	int i, j = -1, ret = 0;
 	char *tmpname;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	ftp_buffer = kmalloc(NF_ALG_BUFFER_SIZE, GFP_KERNEL);
+#else
 	ftp_buffer = kmalloc(65536, GFP_KERNEL);
+#endif
 	if (!ftp_buffer)
 		return -ENOMEM;
 
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f8af72f79379a10cad03cc8658e2959397..785149e89c29b00b8768a46d18702e1a639d3b19 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -32,6 +32,9 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_conntrack_h323.h>
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <linux/iqos.h>
+#endif
 
 /* Parameters */
 static unsigned int default_rrq_ttl __read_mostly = 300;
@@ -318,6 +321,11 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 	nf_ct_expect_put(rtp_exp);
 	nf_ct_expect_put(rtcp_exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_UDP, rtp_port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+    iqos_add_L4port( IPPROTO_UDP, rtcp_port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -368,6 +376,10 @@ static int expect_t120(struct sk_buff *skb,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -703,6 +715,10 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -826,6 +842,10 @@ static int expect_callforwarding(struct sk_buff *skb,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -1294,6 +1314,10 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -1354,6 +1378,10 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_UDP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -1559,6 +1587,10 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 
 	nf_ct_expect_put(exp);
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -1614,6 +1646,10 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 
 	/* Ignore rasAddress */
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+    iqos_add_L4port( IPPROTO_TCP, port, IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+#endif
+
 	return ret;
 }
 
@@ -1766,6 +1802,16 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
 /****************************************************************************/
 static void __exit nf_conntrack_h323_fini(void)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+        /* unregister the Q.931 ports with ingress QoS classifier */
+        iqos_rem_L4port( nf_conntrack_helper_q931[0].tuple.dst.protonum, 
+              nf_conntrack_helper_q931[0].tuple.src.u.tcp.port, IQOS_ENT_STAT );
+
+        /* unregister the RAS ports with ingress QoS classifier */
+        iqos_rem_L4port( nf_conntrack_helper_ras[0].tuple.dst.protonum, 
+            nf_conntrack_helper_ras[0].tuple.src.u.udp.port, IQOS_ENT_STAT );
+#endif
+
 	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]);
 	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
 	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
@@ -1799,6 +1845,17 @@ static int __init nf_conntrack_h323_init(void)
 	if (ret < 0)
 		goto err5;
 	pr_debug("nf_ct_h323: init success\n");
+#if defined(CONFIG_BCM_KF_NETFILTER)
+        /* register the Q.931 ports with ingress QoS classifier */
+        iqos_add_L4port( nf_conntrack_helper_q931[0].tuple.dst.protonum, 
+                          nf_conntrack_helper_q931[0].tuple.src.u.tcp.port,
+                          IQOS_ENT_STAT, IQOS_PRIO_HIGH );
+
+        /* register the RAS ports with ingress QoS classifier */
+        iqos_add_L4port( nf_conntrack_helper_ras[0].tuple.dst.protonum, 
+                          nf_conntrack_helper_ras[0].tuple.src.u.udp.port,
+                          IQOS_ENT_STAT, IQOS_PRIO_HIGH );
+#endif
 	return 0;
 
 err5:
diff --git a/net/netfilter/nf_conntrack_ipsec.c b/net/netfilter/nf_conntrack_ipsec.c
new file mode 100644
index 0000000000000000000000000000000000000000..dd57fc3440ac354a0da27ff823bfc51853c7083e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_ipsec.c
@@ -0,0 +1,397 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/udp.h>
+#include <linux/netfilter/nf_conntrack_ipsec.h>
+
+#ifdef CONFIG_NF_BL_EXT
+#include <linux/netfilter.h>
+#endif /* CONFIG_NF_BL_EXT */
+
+MODULE_AUTHOR("Pavan Kumar <pavank@broadcom.com>");
+MODULE_DESCRIPTION("Netfilter connection tracking module for ipsec");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_ipsec");
+
+static DEFINE_SPINLOCK(nf_ipsec_lock);
+
+int
+(*nf_nat_ipsec_hook_outbound)(struct sk_buff *skb,
+      struct nf_conn *ct, enum ip_conntrack_info ctinfo) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_ipsec_hook_outbound);
+
+int
+(*nf_nat_ipsec_hook_inbound)(struct sk_buff *skb,
+      struct nf_conn *ct, enum ip_conntrack_info ctinfo, __be32 lan_ip) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_ipsec_hook_inbound);
+
+static void __exit nf_conntrack_ipsec_fini(void);
+
+#define CT_REFRESH_TIMEOUT (60 * HZ)	 /* KT: Changed from 13 Sec to 1 Min */
+
+static unsigned int nf_conntrack_ipsec_refresh_timeout = CT_REFRESH_TIMEOUT;
+
+/* Internal table for ISAKMP */
+struct _ipsec_table 
+{
+   u_int32_t initcookie;
+   __be32 lan_ip;
+   struct nf_conn *ct;
+   int pkt_rcvd;
+   int inuse;
+} ipsec_table[MAX_VPN_CONNECTION];
+
+static struct _ipsec_table *ipsec_alloc_entry(int *index)
+{
+   int idx = 0;
+
+   for( ; idx < MAX_VPN_CONNECTION; idx++ ) 
+   {
+      if( ipsec_table[idx].inuse )
+         continue;
+   
+      *index = idx;
+      memset(&ipsec_table[idx], 0, sizeof(struct _ipsec_table));
+
+      pr_debug("([%d] alloc_entry()\n", idx);
+
+      return (&ipsec_table[idx]);
+   }
+   
+   return NULL;
+}
+
+/*
+ * Search an IPsec table entry by ct.
+ */
+struct _ipsec_table *search_ipsec_entry_by_ct(struct nf_conn *ct)
+{
+   int idx = 0;
+
+   for( ; idx < MAX_VPN_CONNECTION; idx++)
+   {
+	  if (!ipsec_table[idx].inuse)
+		 continue;
+
+      pr_debug("Searching entry->ct(%p) <--> ct(%p)\n",
+         ipsec_table[idx].ct, ct);
+
+      /* check ct */
+      if (ipsec_table[idx].ct == ct)
+      {
+         pr_debug("Found entry with ct(%p)\n", ct);
+
+         return &ipsec_table[idx];
+      }
+   }
+   pr_debug("No Entry for ct(%p)\n", ct);
+   return NULL;
+}
+
+/*
+ * Search an IPSEC table entry by the initiator cookie.
+ */
+struct _ipsec_table *
+search_ipsec_entry_by_cookie(struct isakmp_pkt_hdr *isakmph)
+{
+   int idx = 0;
+   struct _ipsec_table *ipsec_entry = ipsec_table;
+
+   for( ; idx < MAX_VPN_CONNECTION; idx++ ) 
+   {
+	   pr_debug("Searching initcookie %x <-> %x\n",
+          ntohl(isakmph->initcookie), ntohl(ipsec_entry->initcookie));
+      
+      if( isakmph->initcookie == ipsec_entry->initcookie ) 
+         return ipsec_entry;
+      
+      ipsec_entry++;
+   }
+   
+   return NULL;
+}
+
+/*
+ * Search an IPSEC table entry by the source IP address.
+ */
+struct _ipsec_table *
+search_ipsec_entry_by_addr(const __be32 lan_ip, int *index)
+{
+   int idx = 0;
+   struct _ipsec_table *ipsec_entry = ipsec_table;
+
+   for( ; idx < MAX_VPN_CONNECTION; idx++ ) 
+   {
+	   pr_debug("Looking up lan_ip=%pI4 table entry %pI4\n",
+              &lan_ip, &ipsec_entry->lan_ip);
+
+      if( ntohl(ipsec_entry->lan_ip) == ntohl(lan_ip) ) 
+      {
+    	  pr_debug("Search by addr returning entry %p\n", ipsec_entry);
+
+         *index = idx;
+         return ipsec_entry;
+      }
+      ipsec_entry++;
+   }
+   
+   return NULL;
+}
+
+static inline int
+ipsec_inbound_pkt(struct sk_buff *skb, struct nf_conn *ct,
+		  enum ip_conntrack_info ctinfo, __be32 lan_ip)
+{
+//   struct nf_ct_ipsec_master *info = &nfct_help(ct)->help.ct_ipsec_info;
+   typeof(nf_nat_ipsec_hook_inbound) nf_nat_ipsec_inbound;
+
+   pr_debug("inbound ISAKMP packet for LAN %pI4\n", &lan_ip);
+
+   nf_nat_ipsec_inbound = rcu_dereference(nf_nat_ipsec_hook_inbound);
+   if (nf_nat_ipsec_inbound && ct->status & IPS_NAT_MASK)
+      return nf_nat_ipsec_inbound(skb, ct, ctinfo, lan_ip);
+   
+   return NF_ACCEPT;
+}
+
+/*
+ * For outgoing ISAKMP packets, we need to make sure UDP ports=500
+ */
+static inline int
+ipsec_outbound_pkt(struct sk_buff *skb,
+                   struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+   typeof(nf_nat_ipsec_hook_outbound) nf_nat_ipsec_outbound;
+
+   pr_debug("outbound ISAKMP packet skb(%p)\n", skb);
+
+   nf_nat_ipsec_outbound = rcu_dereference(nf_nat_ipsec_hook_outbound);
+   if( nf_nat_ipsec_outbound && ct->status & IPS_NAT_MASK )
+      return nf_nat_ipsec_outbound(skb, ct, ctinfo);
+   
+   return NF_ACCEPT;
+}
+
+/* track cookies inside ISAKMP, call expect_related */
+static int conntrack_ipsec_help(struct sk_buff *skb, unsigned int protoff,
+                             struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+   int dir = CTINFO2DIR(ctinfo);
+   struct nf_ct_ipsec_master *info = &nfct_help(ct)->help.ct_ipsec_info;
+   struct isakmp_pkt_hdr _isakmph, *isakmph = NULL;
+   struct _ipsec_table *ipsec_entry = ipsec_table;
+   int ret, index=0;
+
+   pr_debug("skb(%p) skb->data(%p) ct(%p) protoff(%d) offset(%d)\n", skb, skb->data, ct, protoff, (int) (protoff + sizeof(struct udphdr)));
+
+   isakmph = skb_header_pointer(skb, protoff + sizeof(struct udphdr), sizeof(_isakmph), &_isakmph);
+   if (isakmph == NULL)
+   {
+      pr_debug("ERR: no full ISAKMP header, can't track. isakmph=[%p]\n", isakmph);
+      return NF_ACCEPT;
+   }
+
+   if ( 0 == isakmph->initcookie )
+   {
+      pr_debug("ERR: all zero ISAKMP initcookie.\n");
+      return NF_ACCEPT;
+   }
+
+   spin_lock_bh(&nf_ipsec_lock);
+
+   if( dir == IP_CT_DIR_ORIGINAL )
+   {
+      int lan_ip = ct->tuplehash[dir].tuple.src.u3.ip;
+      
+      /* create one entry in the internal table if a new connection is found */
+      if( (ipsec_entry = search_ipsec_entry_by_cookie(isakmph)) == NULL ) 
+      {
+         /* NOTE: cookies may be updated in the connection */
+         if( (ipsec_entry = 
+              search_ipsec_entry_by_addr(lan_ip, &index)) == NULL ) 
+         {
+            ipsec_entry = ipsec_alloc_entry(&index);
+            if( ipsec_entry == NULL ) 
+            {
+               /* All entries are currently in use */
+               pr_debug("ERR: Too many sessions. ct(%p)\n", ct);
+               spin_unlock_bh(&nf_ipsec_lock);
+               return NF_DROP;
+            }
+            
+            ipsec_entry->ct = ct; /* KT: Guess it should be here */
+            ipsec_entry->initcookie = isakmph->initcookie; /* KT: Update our cookie information - moved to here */
+            ipsec_entry->lan_ip = ct->tuplehash[dir].tuple.src.u3.ip;
+            ipsec_entry->inuse = 1;
+
+            pr_debug("NEW ipsec_entry[%d] with ct=%p, lan_ip=%pI4, initcookie=%x\n",
+				index, ipsec_entry->ct, &ipsec_entry->lan_ip,
+				ntohl(ipsec_entry->initcookie) );
+         } else {
+             pr_debug("EXISTING ipsec_entry[%d] with ct=%p, lan_ip=%pI4, initcookie=%x\n",
+ 				index, ipsec_entry->ct, &ipsec_entry->lan_ip,
+ 				ntohl(ipsec_entry->initcookie) );
+         }
+      }
+      ipsec_entry->pkt_rcvd++;
+
+      info->initcookie = isakmph->initcookie;
+      info->lan_ip = ct->tuplehash[dir].tuple.src.u3.ip;
+
+      pr_debug("L->W: initcookie=%x, lan_ip=%pI4, dir[%d] src.u3.ip=%pI4, dst.u3.ip=%pI4\n",
+              info->initcookie, &info->lan_ip,
+              dir,
+              &ct->tuplehash[dir].tuple.src.u3.ip,
+              &ct->tuplehash[dir].tuple.dst.u3.ip);
+      
+      nf_ct_refresh_acct(ipsec_entry->ct, 0, skb, CT_REFRESH_TIMEOUT);
+
+      ret = ipsec_outbound_pkt(skb, ct, ctinfo);
+   }
+   else
+   {
+	  pr_debug("WAN->LAN ct=%p\n", ct);
+      
+      if( (ipsec_entry = search_ipsec_entry_by_cookie(isakmph)) != NULL )
+      {
+    	 nf_ct_refresh_acct(ipsec_entry->ct, 0, skb, CT_REFRESH_TIMEOUT);
+         ipsec_entry->pkt_rcvd++;
+
+         pr_debug("W->L: initcookie=%x, lan_ip=%pI4, dir[%d] src.u3.ip=%pI4, dst.u3.ip=%pI4\n",
+              info->initcookie, &info->lan_ip,
+              dir,
+              &ct->tuplehash[dir].tuple.src.u3.ip,
+              &ct->tuplehash[dir].tuple.dst.u3.ip);
+
+         ret = ipsec_inbound_pkt(skb, ct, ctinfo, ipsec_entry->lan_ip);
+      }
+      else
+      {
+    	 pr_debug("WARNNING: client from WAN tries to connect to VPN server in the LAN. ipsec_entry=[%p]\n", ipsec_entry);
+         ret = NF_ACCEPT;
+      }
+   }
+
+   spin_unlock_bh(&nf_ipsec_lock);
+
+   return ret;
+}
+
+/* Called when the connection is deleted. */
+static void ipsec_destroy(struct nf_conn *ct)
+{
+	struct _ipsec_table *ipsec_entry = NULL;
+
+	spin_lock_bh(&nf_ipsec_lock);
+	pr_debug("DEL IPsec entry ct(%p)\n", ct);
+	if ((ipsec_entry = search_ipsec_entry_by_ct(ct))) {
+		memset(ipsec_entry, 0, sizeof(struct _ipsec_table));
+	} else {
+		pr_debug("DEL IPsec entry failed: ct(%p)\n", ct);
+	}
+	spin_unlock_bh(&nf_ipsec_lock);
+}
+
+#ifdef CONFIG_SYSCTL
+
+static struct ctl_table_header *nf_ct_netfilter_header;
+
+static ctl_table nf_child_table[] = {
+	{
+		.procname	= "nf_conntrack_ipsec_refresh_timeout",
+		.data		= &nf_conntrack_ipsec_refresh_timeout,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
+static ctl_table nf_dir_table[] = {
+	{
+		.procname	= "netfilter",
+		.mode		= 0644,
+		.child		= nf_child_table
+	},
+	{ .procname = NULL }
+};
+
+static ctl_table nf_root_table[] = {
+	{
+		.procname	= "net",
+		.mode		= 0644,
+		.child		= nf_dir_table
+	},
+	{ .procname = NULL }
+};
+
+
+#endif /* CONFIG_SYSCTL */
+
+static const struct nf_conntrack_expect_policy ipsec_exp_policy = {
+	.max_expected	= 3,
+	.timeout		= 300,
+};
+
+/* ISAKMP protocol helper */
+static struct nf_conntrack_helper ipsec __read_mostly = {
+   .name = "ipsec",
+   .me = THIS_MODULE,
+   .tuple.src.l3num = AF_INET,
+   .tuple.dst.protonum = IPPROTO_UDP,
+   .tuple.src.u.udp.port = __constant_htons(IPSEC_PORT),
+
+   .help = conntrack_ipsec_help,
+   .destroy = ipsec_destroy,
+   .expect_policy		= &ipsec_exp_policy,
+};
+
+static int __init nf_conntrack_ipsec_init(void)
+{
+#ifdef CONFIG_SYSCTL
+   nf_ct_netfilter_header = register_sysctl_table(nf_root_table);
+#endif /* CONFIG_SYSCTL */
+
+   return nf_conntrack_helper_register(&ipsec);
+}
+
+static void __exit nf_conntrack_ipsec_fini(void)
+{
+#ifdef CONFIG_SYSCTL
+   unregister_sysctl_table(nf_ct_netfilter_header);
+#endif /* CONFIG_SYSCTL */
+
+   nf_conntrack_helper_unregister(&ipsec);
+}
+
+module_init(nf_conntrack_ipsec_init);
+module_exit(nf_conntrack_ipsec_fini);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 4f9390b98697e55ec00cd81c9c6d327a43e908d7..9e46ead6eff7ae17964c5093a98c7905c0df6342 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -29,6 +29,9 @@ static unsigned int max_dcc_channels = 8;
 static unsigned int dcc_timeout __read_mostly = 300;
 /* This is slow, but it's simple. --RR */
 static char *irc_buffer;
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static char *irc_big_buffer = NULL;
+#endif
 static DEFINE_SPINLOCK(irc_buffer_lock);
 
 unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
@@ -139,6 +142,19 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 		return NF_ACCEPT;
 
 	spin_lock_bh(&irc_buffer_lock);
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	/* In worst case, the packet size will increase by 16 bytes after
+	 * NAT modification */
+	if (skb->len > NF_ALG_BUFFER_SIZE - 16) {
+		irc_big_buffer = kmalloc(skb->len - dataoff + 16,
+					 GFP_ATOMIC);
+		if (!irc_big_buffer)
+			goto out;
+		ib_ptr = skb_header_pointer(skb, dataoff,
+					    skb->len - dataoff,
+					    irc_big_buffer);
+	} else
+#endif
 	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
 				    irc_buffer);
 	BUG_ON(ib_ptr == NULL);
@@ -218,6 +234,12 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 		}
 	}
  out:
+#if defined(CONFIG_BCM_KF_NETFILTER)
+ 	if (irc_big_buffer) {
+		kfree(irc_big_buffer);
+		irc_big_buffer = NULL;
+	}
+#endif
 	spin_unlock_bh(&irc_buffer_lock);
 	return ret;
 }
@@ -241,7 +263,11 @@ static int __init nf_conntrack_irc_init(void)
 	irc_exp_policy.max_expected = max_dcc_channels;
 	irc_exp_policy.timeout = dcc_timeout;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	irc_buffer = kmalloc(NF_ALG_BUFFER_SIZE, GFP_KERNEL);
+#else
 	irc_buffer = kmalloc(65536, GFP_KERNEL);
+#endif
 	if (!irc_buffer)
 		return -ENOMEM;
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ca7e8354e4f89d3fd3d6d641e4dd5a0738cadb05..fea09d2a10fb8d56dbff8bb489c253fa76b34128 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1386,7 +1386,11 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 	struct nf_conntrack_helper *helper;
 	struct nf_conn_tstamp *tstamp;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	ct = nf_conntrack_alloc(net, zone, NULL, otuple, rtuple, GFP_ATOMIC);
+#else
 	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
+#endif	
 	if (IS_ERR(ct))
 		return ERR_PTR(-ENOMEM);
 
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 24fdce256cb0a65ee9996dfa7fa2569e499eebd0..47efe6b47e6b157e9f9fdc4057363761fbaadfa6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -431,7 +431,11 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
 	const char *msg;
 	u_int8_t state;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+#else
 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+#endif
 	BUG_ON(dh == NULL);
 
 	state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -488,7 +492,11 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
 	u_int8_t type, old_state, new_state;
 	enum ct_dccp_roles role;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+#else
 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+#endif
 	BUG_ON(dh == NULL);
 	type = dh->dccph_type;
 
@@ -579,7 +587,11 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
 	unsigned int cscov;
 	const char *msg;
 
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+#else
 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+#endif
 	if (dh == NULL) {
 		msg = "nf_ct_dccp: short packet ";
 		goto out_invalid;
diff --git a/net/netfilter/nf_conntrack_proto_esp.c b/net/netfilter/nf_conntrack_proto_esp.c
new file mode 100644
index 0000000000000000000000000000000000000000..181b48053366b04e46f7e114f08d4f8a89108261
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_esp.c
@@ -0,0 +1,453 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+/******************************************************************************
+ Filename:       nf_conntrack_proto_esp.c
+ Author:         Pavan Kumar
+ Creation Date:  05/27/04
+
+ Description:
+  Implements the ESP ALG connection tracking.
+  Migrated to kernel 2.6.21.5 on April 16, 2008 by Dan-Han Tsai.
+  Migrated to kernel 3.4.11 on Jan 21, 2013 by Kirill Tsym
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter/nf_conntrack_proto_esp.h>
+
+enum grep_conntrack {
+	ESP_CT_UNREPLIED,
+	ESP_CT_REPLIED,
+	ESP_CT_MAX
+};
+
+static unsigned int esp_timeouts[ESP_CT_MAX] = {
+	[ESP_CT_UNREPLIED]	= 30*HZ,
+	[ESP_CT_REPLIED]	= 60*HZ,
+};
+
+#define IPSEC_INUSE    1
+#define MAX_PORTS      8 			/* KT: Changed to match MAX_VPN_CONNECTION */
+#define TEMP_SPI_START 1500
+
+struct _esp_table 
+{
+   u_int32_t l_spi;
+   u_int32_t r_spi;
+   u_int32_t l_ip;
+   u_int32_t r_ip;
+   u_int32_t timeout;
+   u_int16_t tspi;
+   struct nf_conn *ct;
+   int    pkt_rcvd;
+   int    inuse;
+};
+
+static struct _esp_table esp_table[MAX_PORTS];
+
+static unsigned int nf_conntrack_esp_stream_timeout = ESP_CT_REPLIED;
+
+/*
+ * Allocate a free IPSEC table entry.
+ */
+struct _esp_table *alloc_esp_entry( void )
+{
+   int idx = 0;
+
+   for( ; idx < MAX_PORTS; idx++ )
+   {
+      if( esp_table[idx].inuse == IPSEC_INUSE )
+         continue;
+
+      memset(&esp_table[idx], 0, sizeof(struct _esp_table));
+      esp_table[idx].tspi  = idx + TEMP_SPI_START;
+      esp_table[idx].inuse = IPSEC_INUSE;
+
+      pr_debug("[%d] alloc_entry() tspi(%u)\n", idx, esp_table[idx].tspi);
+
+      return (&esp_table[idx]);
+   }
+   return NULL;
+}
+
+/*
+ * Search an ESP table entry by ct.
+ */
+struct _esp_table *search_esp_entry_by_ct(struct nf_conn *ct)
+{
+   int idx = 0;
+
+   for( ; idx < MAX_PORTS; idx++)
+   {
+	  if(esp_table[idx].inuse != IPSEC_INUSE )
+		 continue;
+
+      pr_debug("Searching entry->ct(%p) <--> ct(%p)\n",
+         esp_table[idx].ct, ct);
+
+      /* checking ct */
+      if(esp_table[idx].ct == ct)
+      {
+         pr_debug("Found entry with ct(%p)\n", ct);
+
+         return &esp_table[idx];
+      }
+   }
+
+   pr_debug("No Entry for ct(%p)\n", ct);
+   return NULL;
+}
+
+/*
+ * Search an ESP table entry by source IP.
+ * If found one, update the spi value
+ */
+struct _esp_table *search_esp_entry_by_ip( const struct nf_conntrack_tuple *tuple, const __u32 spi )
+{
+   int idx = 0;
+   __u32 srcIP = tuple->src.u3.ip;
+   __u32 dstIP = tuple->dst.u3.ip;
+   struct _esp_table *esp_entry = esp_table;
+
+   for( ; idx < MAX_PORTS; idx++, esp_entry++ )
+   {
+      pr_debug("   Searching IP %pI4 <-> %pI4,  %pI4\n",
+          &srcIP, &esp_entry->l_ip,
+          &esp_entry->r_ip);
+      
+      /* make sure l_ip is LAN IP */
+      if( (srcIP == esp_entry->l_ip) && (((unsigned char *)&(srcIP))[0] == 192) )
+      {
+         pr_debug("   found entry with l_ip\n");
+         esp_entry->l_spi = spi;
+
+         /* This is a new connection of the same LAN host */
+         if( dstIP != esp_entry->r_ip )
+         {
+            esp_entry->r_ip = dstIP;
+            esp_entry->r_spi = 0;
+         }
+         return esp_entry;
+      }
+      else if( srcIP == esp_entry->r_ip )
+      {
+         pr_debug("   found entry with r_ip\n");
+         /* FIXME */
+         if( esp_entry->r_spi == 0 )
+         {
+            pr_debug("   found entry with r_ip and r_spi == 0\n");
+            esp_entry->r_spi = spi;
+            return esp_entry;
+         }
+	 /* We cannot handle spi changed at WAN side */
+         pr_debug("   found entry with r_ip but r_spi != 0\n");
+      }
+   }
+   pr_debug("No Entry for spi(0x%x)\n", spi);
+   return NULL;
+}
+
+/*
+ * Search an ESP table entry by spi
+ */
+struct _esp_table *search_esp_entry_by_spi( const __u32 spi, const __u32 srcIP )
+{
+	int idx = 0;
+	struct _esp_table *esp_entry = esp_table;
+
+	for( ; idx < MAX_PORTS; idx++, esp_entry++ )
+	{
+		pr_debug("   Searching spi 0x%x <-> 0x%x, 0x%x\n",
+		spi, esp_entry->l_spi, esp_entry->r_spi);
+
+		if( (spi == esp_entry->l_spi) || (spi == esp_entry->r_spi) )
+		{
+			pr_debug("   In %s, found entry %d with tspi %u\n",
+			  __FUNCTION__, idx, esp_entry->tspi);
+
+			/* l_spi and r_spi may be the same */
+			if( (spi == esp_entry->l_spi) && (srcIP == esp_entry->r_ip) )
+			{
+				pr_debug("l_spi(0x%x)==r_spi\n", spi);
+				esp_entry->r_spi = spi;
+			}
+
+			return esp_entry;
+		}
+	}
+	pr_debug("No Entry for spi(0x%x)\n", spi);
+
+	return NULL;
+}
+
+/* invert esp part of tuple */
+static bool esp_invert_tuple(struct nf_conntrack_tuple *tuple,
+			    const struct nf_conntrack_tuple *orig)
+{
+   pr_debug("with spi = %u\n", orig->src.u.esp.spi);
+
+   tuple->dst.u.esp.spi = orig->dst.u.esp.spi;
+   tuple->src.u.esp.spi = orig->src.u.esp.spi;
+   return true;
+}
+
+/* esp hdr info to tuple */
+static bool esp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+                            struct nf_conntrack_tuple *tuple)
+{
+   struct esphdr _esphdr, *esphdr;
+   struct _esp_table *esp_entry = NULL;
+
+   esphdr = skb_header_pointer(skb, dataoff, sizeof(_esphdr), &_esphdr);
+   if( !esphdr ) 
+   {
+      /* try to behave like "nf_conntrack_proto_generic" */
+      tuple->src.u.all = 0;
+      tuple->dst.u.all = 0;
+      return true;
+   }
+
+   pr_debug("Enter pkt_to_tuple() with spi 0x%x\n", esphdr->spi);
+   /* check if esphdr has a new SPI:
+    *   if no, update tuple with correct tspi and increment pkt count;
+    *   if yes, check if we have seen the source IP:
+    *             if yes, do the tspi and pkt count update
+    *             if no, create a new entry
+    */
+
+   if( ((esp_entry = search_esp_entry_by_spi(esphdr->spi, tuple->src.u3.ip)) == NULL) )
+   {
+      if( (esp_entry = 
+           search_esp_entry_by_ip(tuple, esphdr->spi)) == NULL )
+      {
+#if 0
+      /* Because SA is simplex, it's possible that WAN starts connection first.
+	  * We need to make sure that the connection starts from LAN.
+	  */
+         if( ((unsigned char *)&(tuple->src.u3.ip))[0] != 192 )
+	 {
+ 	      pr_debug("srcIP %pI4 is WAN IP, DROP packet\n", &tuple->src.u3.ip);
+	      return false;
+	 }
+#endif
+         esp_entry = alloc_esp_entry();
+         if( esp_entry == NULL ) 
+         {
+            pr_debug("Too many entries. New spi(0x%x)\n", esphdr->spi);
+            return false;
+         }
+
+         esp_entry->l_spi = esphdr->spi;
+         esp_entry->l_ip = tuple->src.u3.ip;
+         esp_entry->r_ip = tuple->dst.u3.ip;
+      }
+
+   }
+
+   pr_debug("esp_entry: tspi(%u) l_ip[%pI4]-->r_ip[%pI4] tuple: srcIP[%pI4]-->dstIP[%pI4]\n",
+         esp_entry->tspi,
+         &esp_entry->l_ip, &esp_entry->r_ip,
+         &tuple->src.u3.ip, &tuple->dst.u3.ip);
+
+   tuple->dst.u.esp.spi = tuple->src.u.esp.spi = esp_entry->tspi;
+   esp_entry->pkt_rcvd++;
+
+   return true;
+}
+
+/* print esp part of tuple */
+static int esp_print_tuple(struct seq_file *s,
+                           const struct nf_conntrack_tuple *tuple)
+{
+   return seq_printf(s, "srcspi=0x%x dstspi=0x%x ",
+          tuple->src.u.esp.spi, tuple->dst.u.esp.spi);
+}
+
+/* print private data for conntrack */
+static int esp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
+{
+   return seq_printf(s, "timeout=%u, stream_timeout=%u ",
+                    (ct->proto.esp.timeout / HZ),
+                    (ct->proto.esp.stream_timeout / HZ));
+}
+
+static unsigned int *esp_get_timeouts(struct net *net)
+{
+	return esp_timeouts;
+}
+
+/* Returns verdict for packet, and may modify conntrack */
+static int esp_packet(struct nf_conn *ct,
+				const struct sk_buff *skb,
+                unsigned int dataoff,
+                enum ip_conntrack_info ctinfo,
+                u_int8_t pf,
+                unsigned int hooknum,
+  		        unsigned int *timeouts)
+{
+   struct esphdr _esphdr, *esphdr;
+   struct iphdr *iph = ip_hdr(skb);
+
+   esphdr = skb_header_pointer(skb, dataoff, sizeof(_esphdr), &_esphdr);
+
+   pr_debug("(0x%x) %pI4 <-> %pI4 status %s info %d %s\n",
+	  esphdr->spi, &iph->saddr, &iph->daddr, (ct->status & IPS_SEEN_REPLY) ? "SEEN" : "NOT_SEEN",
+	  ctinfo, (ctinfo == IP_CT_NEW ) ? "CT_NEW" : "SEEN_REPLY" );
+
+   /* If we've seen traffic both ways, this is some kind of ESP
+      stream.  Extend timeout. */
+   if( test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ) 
+   {
+      nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.esp.stream_timeout);
+      /* Also, more likely to be important, and not a probe */
+      if( !test_and_set_bit(IPS_ASSURED_BIT, &ct->status) )
+         nf_conntrack_event_cache(IPCT_ASSURED, ct);
+   } 
+   else
+      nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.esp.timeout);
+
+   return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static bool esp_new(struct nf_conn *ct, const struct sk_buff *skb,
+                   unsigned int dataoff, unsigned int *timeouts)
+{
+   struct iphdr *iph = ip_hdr(skb);
+   struct _esp_table *esp_entry;
+   struct esphdr _esphdr, *esphdr;
+
+   ct->proto.esp.stream_timeout = timeouts[ESP_CT_UNREPLIED];
+   ct->proto.esp.timeout = timeouts[ESP_CT_UNREPLIED];
+
+   esphdr = skb_header_pointer(skb, dataoff, sizeof(_esphdr), &_esphdr);
+
+   pr_debug("NEW SPI(0x%x) %pI4 <-> %pI4 ct(%p)\n",
+     esphdr->spi, &iph->saddr, &iph->daddr, ct);
+
+   if( (esp_entry = search_esp_entry_by_spi(esphdr->spi, 0)) != NULL ) {
+      esp_entry->ct = ct;
+   } else {
+	  pr_debug("ERR: In esp_new(), cannot find an entry with SPI %x\n", esphdr->spi);
+      return false;
+   }
+
+   return true;
+}
+
+/* Called when the connection is deleted. */
+static void esp_destroy(struct nf_conn *ct)
+{
+	struct _esp_table *esp_entry = NULL;
+
+	pr_debug("DEL ESP entry ct(%p)\n", ct);
+	if ((esp_entry = search_esp_entry_by_ct(ct))) {
+		memset(esp_entry, 0, sizeof(struct _esp_table));
+	} else {
+		pr_debug("ERR: DEL ESP Failed for ct(%p): no such entry\n", ct);
+	}
+}
+
+#ifdef CONFIG_SYSCTL
+
+static struct ctl_table_header *nf_ct_netfilter_header;
+
+static ctl_table nf_child_table[] = {
+	{
+		.procname	= "nf_conntrack_esp_stream_timeout",
+		.data		= &nf_conntrack_esp_stream_timeout,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
+static ctl_table nf_dir_table[] = {
+	{
+		.procname	= "netfilter",
+		.mode		= 0644,
+		.child		= nf_child_table
+	},
+	{ .procname = NULL }
+};
+
+static ctl_table nf_root_table[] = {
+	{
+		.procname	= "net",
+		.mode		= 0644,
+		.child		= nf_dir_table
+	},
+	{ .procname = NULL }
+};
+
+
+#endif /* CONFIG_SYSCTL */
+
+/* protocol helper struct */
+struct nf_conntrack_l4proto nf_conntrack_l4proto_esp4 = {
+   .l3proto = PF_INET,
+   .l4proto = IPPROTO_ESP,
+   .name = "esp",
+   .pkt_to_tuple = esp_pkt_to_tuple,
+   .invert_tuple = esp_invert_tuple,
+   .print_tuple = esp_print_tuple,
+   .print_conntrack = esp_print_conntrack,
+   .get_timeouts    = esp_get_timeouts,
+   .packet = esp_packet,
+   .new = esp_new,
+   .destroy = esp_destroy,
+   .me = THIS_MODULE,
+};
+
+int __init nf_ct_proto_esp_init(void)
+{
+#ifdef CONFIG_SYSCTL
+   nf_ct_netfilter_header = register_sysctl_table(nf_root_table);
+#endif /* CONFIG_SYSCTL */
+
+   return nf_conntrack_l4proto_register(&nf_conntrack_l4proto_esp4);
+}
+
+void __exit nf_ct_proto_esp_fini(void)
+{
+#ifdef CONFIG_SYSCTL
+   unregister_sysctl_table(nf_ct_netfilter_header);
+#endif /* CONFIG_SYSCTL */
+
+   nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_esp4);
+}
+module_init(nf_ct_proto_esp_init);
+module_exit(nf_ct_proto_esp_fini);
+
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 0d07a1dcf60504758aace258dd0347f06f305797..32eef66c3824c1668e6352a6b4fd2a940445ca9b 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -29,10 +29,18 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+static int nf_ct_tcp_be_liberal __read_mostly = 1;
+#else
 /* "Be conservative in what you do,
     be liberal in what you accept from others."
     If it's non-zero, we mark only out of window RST segments as INVALID. */
 static int nf_ct_tcp_be_liberal __read_mostly = 0;
+#endif
 
 /* If it is set to zero, we disable picking up already established
    connections. */
@@ -67,7 +75,11 @@ static const char *const tcp_conntrack_names[] = {
 static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
 	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
 	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	[TCP_CONNTRACK_ESTABLISHED]	= BLOG_NAT_TCP_DEFAULT_IDLE_TIMEOUT,
+#else
 	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
+#endif
 	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
 	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
 	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
@@ -881,6 +893,12 @@ static int tcp_packet(struct nf_conn *ct,
 		}
 		/* Fall through */
 	case TCP_CONNTRACK_IGNORE:
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		blog_lock();
+		blog_skip((struct sk_buff *)skb); /* abort blogging this packet */
+		blog_unlock();
+#endif
+
 		/* Ignored packets:
 		 *
 		 * Our connection entry may be out of sync, so ignore
@@ -1015,6 +1033,29 @@ static int tcp_packet(struct nf_conn *ct,
 		 old_state, new_state);
 
 	ct->proto.tcp.state = new_state;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_lock();
+	/* Abort and make this conntrack not BLOG eligible */
+	if (th->fin) {
+		if ((ct->blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_NONE)
+		    || (ct->blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_NONE)) {
+			blog_notify(DESTROY_FLOWTRACK, (void*)ct,
+					(uint32_t)ct->blog_key[IP_CT_DIR_ORIGINAL],
+					(uint32_t)ct->blog_key[IP_CT_DIR_REPLY]);
+
+			/* Safe: In case blog client does not set key to 0 explicitly */
+			ct->blog_key[IP_CT_DIR_ORIGINAL] = BLOG_KEY_NONE;
+			ct->blog_key[IP_CT_DIR_REPLY] = BLOG_KEY_NONE;
+		}
+
+		clear_bit(IPS_BLOG_BIT, &ct->status);
+	}
+	if (ct->proto.tcp.state !=  TCP_CONNTRACK_ESTABLISHED)
+		blog_skip((struct sk_buff *)skb); /* abort blogging this packet */
+	blog_unlock();
+#endif
+
 	if (old_state != new_state
 	    && new_state == TCP_CONNTRACK_FIN_WAIT)
 		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
@@ -1052,6 +1093,14 @@ static int tcp_packet(struct nf_conn *ct,
 		set_bit(IPS_ASSURED_BIT, &ct->status);
 		nf_conntrack_event_cache(IPCT_ASSURED, ct);
 	}
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	if (new_state == TCP_CONNTRACK_ESTABLISHED) {
+		if (ct->derived_timeout == 0xFFFFFFFF)
+			timeout = 0xFFFFFFFF - jiffies;
+		else if (ct->derived_timeout > 0)
+			timeout = ct->derived_timeout;
+	}
+#endif
 	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
 
 	return NF_ACCEPT;
@@ -1351,6 +1400,19 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
 };
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+int tcp_timeout_estd_proc_hndlr(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+	ret = proc_dointvec_jiffies(table, write, buffer, lenp, ppos);
+	/* on success update the blog time out to be same as tcp_timeout_established */
+	if (!ret)
+		blog_nat_tcp_def_idle_timeout = tcp_timeouts[TCP_CONNTRACK_ESTABLISHED];
+	return ret;
+}
+#endif
+
 #ifdef CONFIG_SYSCTL
 static unsigned int tcp_sysctl_table_users;
 static struct ctl_table_header *tcp_sysctl_header;
@@ -1374,7 +1436,11 @@ static struct ctl_table tcp_sysctl_table[] = {
 		.data		= &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		.proc_handler	= tcp_timeout_estd_proc_hndlr,
+#else
 		.proc_handler	= proc_dointvec_jiffies,
+#endif
 	},
 	{
 		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
@@ -1477,7 +1543,11 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
 		.data		= &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		.proc_handler	= tcp_timeout_estd_proc_hndlr,
+#else
 		.proc_handler	= proc_dointvec_jiffies,
+#endif
 	},
 	{
 		.procname	= "ip_conntrack_tcp_timeout_fin_wait",
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a9073dc1548d087fbe694f898ff30072792bc2e6..5a3401dbb247bfb407eee133f3023fbad6908bb8 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,6 +25,10 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 enum udp_conntrack {
 	UDP_CT_UNREPLIED,
 	UDP_CT_REPLIED,
@@ -88,8 +92,18 @@ static int udp_packet(struct nf_conn *ct,
 	/* If we've seen traffic both ways, this is some kind of UDP
 	   stream.  Extend timeout. */
 	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+                unsigned timeout = udp_timeouts[UDP_CT_REPLIED];
+                if (ct->derived_timeout == 0xFFFFFFFF){
+                        timeout = 60*60*HZ;
+                } else if(ct->derived_timeout > 0) {
+                        timeout = ct->derived_timeout;
+                }
+                nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
+#else
 		nf_ct_refresh_acct(ct, ctinfo, skb,
 				   timeouts[UDP_CT_REPLIED]);
+#endif
 		/* Also, more likely to be important, and not a probe */
 		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
 			nf_conntrack_event_cache(IPCT_ASSURED, ct);
diff --git a/net/netfilter/nf_conntrack_rtsp.c b/net/netfilter/nf_conntrack_rtsp.c
new file mode 100644
index 0000000000000000000000000000000000000000..aa25197b9d0aa304155e3c3da34f666ed1b61671
--- /dev/null
+++ b/net/netfilter/nf_conntrack_rtsp.c
@@ -0,0 +1,851 @@
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/* RTSP helper for connection tracking. */
+
+/* (C) 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_rtsp.h>
+#include <linux/iqos.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("RTSP connection tracking helper");
+MODULE_ALIAS("ip_conntrack_rtsp");
+
+#define RTSP_PORT 554
+
+/* This is slow, but it's simple. --RR */
+static char *rtsp_buffer;
+
+static DEFINE_SPINLOCK(nf_rtsp_lock);
+
+#define MAX_PORTS 8
+static u_int16_t ports[MAX_PORTS];
+static unsigned int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
+MODULE_PARM_DESC(ports, "port numbers of RTSP servers");
+
+#define RTSP_CHANNEL_MAX 8
+static int max_outstanding = RTSP_CHANNEL_MAX;
+module_param(max_outstanding, int, 0600);
+MODULE_PARM_DESC(max_outstanding,
+		 "max number of outstanding SETUP requests per RTSP session");
+
+/* Single data channel */
+int (*nat_rtsp_channel_hook) (struct sk_buff *skb, struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo,
+			      unsigned int matchoff, unsigned int matchlen,
+			      struct nf_conntrack_expect *exp, int *delta);
+EXPORT_SYMBOL_GPL(nat_rtsp_channel_hook);
+
+/* A pair of data channels (RTP/RTCP) */
+int (*nat_rtsp_channel2_hook) (struct sk_buff *skb, struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo,
+			       unsigned int matchoff, unsigned int matchlen,
+			       struct nf_conntrack_expect *rtp_exp,
+			       struct nf_conntrack_expect *rtcp_exp,
+			       char dash, int *delta);
+EXPORT_SYMBOL_GPL(nat_rtsp_channel2_hook);
+
+/* Modify parameters like client_port in Transport for single data channel */
+int (*nat_rtsp_modify_port_hook) (struct sk_buff *skb, struct nf_conn *ct,
+			      	  enum ip_conntrack_info ctinfo,
+			      	  unsigned int matchoff, unsigned int matchlen,
+			      	  __be16 rtpport, int *delta);
+EXPORT_SYMBOL_GPL(nat_rtsp_modify_port_hook);
+
+/* Modify parameters like client_port in Transport for multiple data channels*/
+int (*nat_rtsp_modify_port2_hook) (struct sk_buff *skb, struct nf_conn *ct,
+			       	   enum ip_conntrack_info ctinfo,
+			       	   unsigned int matchoff, unsigned int matchlen,
+			       	   __be16 rtpport, __be16 rtcpport,
+				   char dash, int *delta);
+EXPORT_SYMBOL_GPL(nat_rtsp_modify_port2_hook);
+
+/* Modify parameters like destination in Transport */
+int (*nat_rtsp_modify_addr_hook) (struct sk_buff *skb, struct nf_conn *ct,
+			 	  enum ip_conntrack_info ctinfo,
+			 	  int matchoff, int matchlen, int *delta);
+EXPORT_SYMBOL_GPL(nat_rtsp_modify_addr_hook);
+
+static int memmem(const char *haystack, int haystacklen,
+		  const char *needle, int needlelen)
+{
+	const char *p = haystack;
+	int l = haystacklen - needlelen + 1;
+	char c = *needle;
+
+	while(l-- > 0) { /* "!=0" won't handle haystacklen less than needlelen, need ">" */
+		if (*p++ == c) {
+			if (memcmp(p, needle+1, needlelen-1) == 0)
+				return p - haystack - 1;
+		}
+	}
+	return -1;
+}
+
+static int memstr(const char *haystack, int haystacklen,
+		  const char *needle, int needlelen)
+{
+	const char *p = haystack;
+	int l = haystacklen - needlelen + 1;
+	char c = *needle;
+
+	if (isalpha(c)) {
+		char lower = __tolower(c);
+		char upper = __toupper(c);
+
+		while(l-- > 0) {  /* "!=0" won't handle haystacklen less than needlelen, need ">" */
+			if (*p == lower || *p == upper) {
+				if (strncasecmp(p, needle, needlelen) == 0)
+					return p - haystack;
+			}
+			p++;
+		}
+	} else {
+		while(l-- > 0) {
+			if (*p++ == c) {
+				if (strncasecmp(p, needle+1, needlelen-1) == 0)
+					return p - haystack - 1;
+			}
+		}
+	}
+	return -1;
+}
+
+static int get_cseq(const char *str)
+{
+	unsigned long cseq = 0, i = 0;
+	char c = *str;
+	while(i++ < 10 && c && c != 0xd && c>='0' && c <= '9'){
+		cseq = (cseq * 10) + (c - '0');
+		c = *(str + i);
+	}
+	if(!cseq)
+		cseq = -1;
+	return (int) cseq;
+}
+
+/* Get next message in a packet */
+static int get_next_message(const char *tcpdata, int tcpdatalen,
+			    int *msgoff, int *msglen, int *msghdrlen)
+{
+	if (*msglen == 0) { /* The first message */
+		*msgoff = 0;
+	} else {
+		*msgoff += *msglen;
+		if ((*msgoff + 4) >= tcpdatalen) /* No more message */
+			return 0;
+	}
+
+	/* Get message header length */
+	*msghdrlen = memmem(tcpdata+*msgoff, tcpdatalen-*msgoff, "\r\n\r\n", 4);
+	if (*msghdrlen < 0) {
+		*msghdrlen = *msglen = tcpdatalen - *msgoff;
+	} else {
+		/* Get message length including SDP */
+		int cloff = memstr(tcpdata+*msgoff, *msghdrlen, "Content-Length: ", 16);
+		if (cloff < 0) {
+			*msglen = *msghdrlen + 4;
+		} else {
+			unsigned long cl = simple_strtoul(tcpdata+*msgoff+cloff+16, NULL, 10);
+			*msglen = *msghdrlen + 4 + cl;
+		}
+	}
+
+	return 1;
+}
+
+/* Get next client_port parameter in a Transport header */
+static int get_next_client_port(const char *tcpdata, int tpoff, int tplen,
+				int *portoff, int *portlen,
+				__be16 *rtpport, __be16 *rtcpport,
+				char *dash)
+{
+	int off;
+	char *p;
+
+	if (*portlen == 0) { /* The first client_port */
+		*portoff = tpoff;
+	} else {
+		*portoff += *portlen;
+		if (*portoff >= tpoff + tplen) /* No more data */
+			return 0;
+	}
+
+	off = memmem(tcpdata+*portoff, tplen-(*portoff-tpoff),
+		     ";client_port=", 13);
+	if (off < 0)
+		return 0;
+	*portoff += off + 13;
+
+	*rtpport = htons((unsigned short)simple_strtoul(tcpdata+*portoff,
+							&p, 10));
+	if (*p != '-' && *p != '/') {
+		*dash = 0;
+	} else {
+		*dash = *p++;
+		*rtcpport = htons((unsigned short)simple_strtoul(p, &p, 10));
+	}
+	*portlen = p - tcpdata - *portoff;
+	return 1;
+}
+
+/* Get next destination=<ip>:<port> parameter in a Transport header
+ * This is not a standard parameter, so far, it's only seen in some customers'
+ * products.
+ */
+static int get_next_dest_ipport(const char *tcpdata, int tpoff, int tplen,
+				int *destoff, int *destlen, __be32 *dest,
+				int *portoff, int *portlen, __be16 *port)
+{
+	int off;
+	char *p;
+
+	if (*destlen == 0) { /* The first destination */
+		*destoff = tpoff;
+	} else {
+		*destoff += *destlen + 1 + *portlen;
+		if (*destoff >= tpoff + tplen) /* No more data */
+			return 0;
+	}
+
+	off = memmem(tcpdata+*destoff, tplen-(*destoff-tpoff),
+		     ";destination=", 13);
+	if (off < 0)
+		return 0;
+	*destoff += off + 13;
+
+        if (in4_pton(tcpdata+*destoff, tplen-(*destoff-tpoff), (u8 *)dest,
+                     -1, (const char **)&p) == 0) {
+		return 0;
+	}
+	*destlen = p - tcpdata - *destoff;
+
+	if (*p != ':') {
+		return 0;
+	}
+	*portoff = p - tcpdata + 1;
+
+	*port = htons((unsigned short)simple_strtoul(tcpdata+*portoff, &p, 10));
+	*portlen = p - tcpdata - *portoff;
+
+	return 1;
+}
+
+/* Get next destination parameter in a Transport header */
+static int get_next_destination(const char *tcpdata, int tpoff, int tplen,
+				int *destoff, int *destlen, __be32 *dest)
+{
+	int off;
+	char *p;
+
+	if (*destlen == 0) { /* The first destination */
+		*destoff = tpoff;
+	} else {
+		*destoff += *destlen;
+		if (*destoff >= tpoff + tplen) /* No more data */
+			return 0;
+	}
+
+	off = memmem(tcpdata+*destoff, tplen-(*destoff-tpoff),
+		     ";destination=", 13);
+	if (off < 0)
+		return 0;
+	*destoff += off + 13;
+
+        if (in4_pton(tcpdata+*destoff, tplen-(*destoff-tpoff), (u8 *)dest,
+                     -1, (const char **)&p)) {
+		*destlen = p - tcpdata - *destoff;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static int expect_rtsp_channel(struct sk_buff *skb, struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo,
+			       int portoff, int portlen,
+			       __be16 rtpport, int *delta)
+{
+	int ret = 0;
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conntrack_expect *rtp_exp;
+	typeof(nat_rtsp_channel_hook) nat_rtsp_channel;
+
+	if (rtpport == 0)
+		return -1;
+
+	/* Create expect for RTP */
+	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
+		return -1;
+
+	nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
+			  NULL, &ct->tuplehash[!dir].tuple.dst.u3,
+			  IPPROTO_UDP, NULL, &rtpport);
+
+	if ((nat_rtsp_channel = rcu_dereference(nat_rtsp_channel_hook)) &&
+	    ct->status & IPS_NAT_MASK) {
+		/* NAT needed */
+		ret = nat_rtsp_channel(skb, ct, ctinfo, portoff, portlen,
+				       rtp_exp, delta);
+	} else {		/* Conntrack only */
+		if (nf_ct_expect_related(rtp_exp) == 0) {
+			pr_debug("nf_ct_rtsp: expect RTP ");
+			nf_ct_dump_tuple(&rtp_exp->tuple);
+		} else
+			ret = -1;
+	}
+
+	nf_ct_expect_put(rtp_exp);
+
+	return ret;
+}
+
+static int expect_rtsp_channel2(struct sk_buff *skb, struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				int portoff, int portlen,
+				__be16 rtpport, __be16 rtcpport,
+				char dash, int *delta)
+{
+	int ret = 0;
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conntrack_expect *rtp_exp;
+	struct nf_conntrack_expect *rtcp_exp;
+	typeof(nat_rtsp_channel2_hook) nat_rtsp_channel2;
+
+	if (rtpport == 0 || rtcpport == 0)
+		return -1;
+
+	/* Create expect for RTP */
+	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT ,nf_ct_l3num(ct),
+			  NULL, &ct->tuplehash[!dir].tuple.dst.u3,
+			  IPPROTO_UDP, NULL, &rtpport);
+
+	/* Create expect for RTCP */
+	if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
+		nf_ct_expect_put(rtp_exp);
+		return -1;
+	}
+	nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
+			  NULL, &ct->tuplehash[!dir].tuple.dst.u3,
+			  IPPROTO_UDP, NULL, &rtcpport);
+
+	if ((nat_rtsp_channel2 = rcu_dereference(nat_rtsp_channel2_hook)) &&
+	    ct->status & IPS_NAT_MASK) {
+		/* NAT needed */
+		ret = nat_rtsp_channel2(skb, ct, ctinfo, portoff, portlen,
+				   	rtp_exp, rtcp_exp, dash, delta);
+	} else {		/* Conntrack only */
+		if (nf_ct_expect_related(rtp_exp) == 0) {
+			if (nf_ct_expect_related(rtcp_exp) == 0) {
+				pr_debug("nf_ct_rtsp: expect RTP ");
+				nf_ct_dump_tuple(&rtp_exp->tuple);
+				pr_debug("nf_ct_rtsp: expect RTCP ");
+				nf_ct_dump_tuple(&rtcp_exp->tuple);
+			} else {
+				nf_ct_unexpect_related(rtp_exp);
+				ret = -1;
+			}
+		} else
+			ret = -1;
+	}
+
+	nf_ct_expect_put(rtp_exp);
+	nf_ct_expect_put(rtcp_exp);
+
+	return ret;
+}
+
+static void set_normal_timeout(struct nf_conn *ct, struct sk_buff *skb)
+{
+	struct nf_conn *child;
+
+	/* nf_conntrack_lock is locked inside __nf_ct_refresh_acct, locking here results in a deadlock */
+	/* write_lock_bh(&nf_conntrack_lock); */ 
+	list_for_each_entry(child, &ct->derived_connections, derived_list) {
+		child->derived_timeout = 5*HZ;
+		nf_ct_refresh(child, skb, 5*HZ);
+	}
+	/* write_unlock_bh(&nf_conntrack_lock); */
+}
+
+static void set_long_timeout(struct nf_conn *ct, struct sk_buff *skb)
+{
+	struct nf_conn *child;
+
+	/* write_lock_bh(&nf_conntrack_lock); */
+	list_for_each_entry(child, &ct->derived_connections, derived_list) {
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+		blog_lock();
+		if ((child->blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_NONE)
+			|| (child->blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_NONE)) {
+			/* remove flow from flow cache */
+			blog_notify(DESTROY_FLOWTRACK, (void*)child,
+								(uint32_t)child->blog_key[IP_CT_DIR_ORIGINAL],
+								(uint32_t)child->blog_key[IP_CT_DIR_REPLY]);
+
+			/* Safe: In case blog client does not set key to 0 explicitly */
+			child->blog_key[IP_CT_DIR_ORIGINAL] = BLOG_KEY_NONE;
+			child->blog_key[IP_CT_DIR_REPLY]    = BLOG_KEY_NONE;
+			set_bit(IPS_BLOG_BIT, &child->status);  /* Enable conntrack blogging */
+		}
+		blog_unlock();
+#endif
+		nf_ct_refresh(child, skb, 3600*HZ);
+	}
+	/*	write_unlock_bh(&nf_conntrack_lock); */
+}
+
+
+/* Main RTSP conntrack helper.  Scans the TCP payload of an established
+ * control connection message by message: PAUSE/TEARDOWN toggle the
+ * child-connection timeouts, SETUP requests and their replies are parsed
+ * for client_port/destination parameters in the Transport header, for
+ * which RTP/RTCP expectations are created (LAN->WAN) or the registered
+ * NAT hooks are invoked to mangle ports/addresses (WAN->LAN).  Newly
+ * learned RTP/RTCP ports are also registered with the ingress QoS
+ * classifier.  Always returns NF_ACCEPT; parsing never drops packets. */
+static int help(struct sk_buff *skb, unsigned int protoff,
+		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *hlp = nfct_help(ct);
+	struct tcphdr _tcph, *th;
+	unsigned int tcpdataoff, tcpdatalen;
+	char *tcpdata;
+	int msgoff, msglen, msghdrlen;
+	int tpoff, tplen;
+	int portlen = 0;
+	int portoff = 0;
+	__be16 rtpport = 0;
+	__be16 rtcpport = 0;
+	char dash = 0;
+	int destlen = 0;
+	int destoff = 0;
+	__be32 dest = 0;
+	typeof(nat_rtsp_modify_addr_hook) nat_rtsp_modify_addr;
+	typeof(nat_rtsp_modify_port_hook) nat_rtsp_modify_port;
+	typeof(nat_rtsp_modify_port2_hook) nat_rtsp_modify_port2;
+
+	/* Until there's been traffic both ways, don't look in packets. */
+	if (ctinfo != IP_CT_ESTABLISHED
+	    && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+		return NF_ACCEPT;
+	}
+	pr_debug("nf_ct_rtsp: skblen = %u\n", skb->len);
+
+	/* Get TCP header */
+	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+	if (th == NULL) {
+		return NF_ACCEPT;
+	}
+
+	/* Get TCP payload offset */
+	tcpdataoff = protoff + th->doff * 4;
+	if (tcpdataoff >= skb->len) { /* No data? */
+		return NF_ACCEPT;
+	}
+
+	/* Get TCP payload length */
+	tcpdatalen = skb->len - tcpdataoff;
+
+	spin_lock_bh(&nf_rtsp_lock);
+
+	/* Get TCP payload pointer */
+	tcpdata = skb_header_pointer(skb, tcpdataoff, tcpdatalen, rtsp_buffer);
+	BUG_ON(tcpdata == NULL);
+
+	/* There may be more than one message in a packet, check them
+	 * one by one */
+	msgoff = msglen = msghdrlen = 0;
+	while (get_next_message(tcpdata, tcpdatalen, &msgoff, &msglen,
+				&msghdrlen)) {
+		/* Messages from LAN side through MASQUERADED connections */
+		if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+			   &ct->tuplehash[!dir].tuple.dst.u3,
+			   sizeof(ct->tuplehash[dir].tuple.src.u3)) != 0) {
+			if (memcmp(tcpdata+msgoff, "PAUSE ", 6) == 0) {
+				int cseq = memmem(tcpdata+msgoff, msglen, "CSeq: ", 6);
+				if (cseq == -1) {
+					/* Fix the IOP issue with DSS on Darwin system */
+					cseq = memmem(tcpdata+msgoff, msglen, "Cseq: ", 6);
+					if (cseq == -1)
+						pr_debug("nf_ct_rtsp: wrong PAUSE msg\n");
+					else
+						cseq = get_cseq(tcpdata+msgoff+cseq+6);
+				} else {
+					cseq = get_cseq(tcpdata+msgoff+cseq+6);
+				}
+
+				pr_debug("nf_ct_rtsp: PAUSE, CSeq=%d\n", cseq);
+				/* Remember the CSeq so the matching reply can
+				 * be recognized and the idle timeout raised */
+				hlp->help.ct_rtsp_info.paused = cseq;
+				continue;
+			} else {
+				hlp->help.ct_rtsp_info.paused = 0;
+			}
+			if (memcmp(tcpdata+msgoff, "TEARDOWN ", 9) == 0) {
+				pr_debug("nf_ct_rtsp: TEARDOWN\n");
+				set_normal_timeout(ct, skb);
+				continue;
+			} else if (memcmp(tcpdata+msgoff, "SETUP ", 6) != 0) {
+				continue;
+			}
+
+			/* Now begin to process SETUP message */
+			pr_debug("nf_ct_rtsp: SETUP\n");
+		/* Reply message that's from WAN side. */
+		} else {
+			/* We only check replies */
+			if (memcmp(tcpdata+msgoff, "RTSP/", 5) != 0)
+				continue;
+
+			pr_debug("nf_ct_rtsp: Reply message\n");
+
+			/* Response to a previous PAUSE message */
+			if (hlp->help.ct_rtsp_info.paused) {
+				int cseq = memmem(tcpdata+msgoff, msglen, "CSeq: ", 6);
+				if (cseq == -1) {
+					/* Fix the IOP issue with DSS on Darwin system */
+					cseq = memmem(tcpdata+msgoff, msglen, "Cseq: ", 6);
+					if (cseq == -1)
+						pr_debug("nf_ct_rtsp: wrong reply msg\n");
+					else
+						cseq = get_cseq(tcpdata+msgoff+cseq+6);
+				} else {
+					cseq = get_cseq(tcpdata+msgoff+cseq+6);
+				}
+				if (cseq == hlp->help.ct_rtsp_info.paused) {
+					pr_debug("nf_ct_rtsp: Reply to PAUSE\n");
+					set_long_timeout(ct, skb);
+					hlp->help.ct_rtsp_info.paused = 0;
+					goto end;
+				}
+			}
+
+			/* Now begin to process other reply message */
+		}
+
+		/* Get Transport header offset */
+		tpoff = memmem(tcpdata+msgoff+6, msghdrlen-6,
+			       "\r\nTransport: ", 13);
+		if (tpoff < 0)
+			continue;
+		tpoff += msgoff + 6 + 13;
+
+		/* Get Transport header length */
+		tplen = memmem(tcpdata+tpoff, msghdrlen-(tpoff - msgoff),
+			       "\r\n", 2);
+		if (tplen < 0)
+			tplen = msghdrlen - (tpoff - msgoff);
+
+		/* There maybe more than one client_port parameter in this
+		 * field, we'll process each of them. I know not all of them
+		 * are unicast UDP ports, but that is the only situation we
+		 * care about so far. So just KISS. */
+		portoff = portlen = 0;
+		while (get_next_client_port(tcpdata, tpoff, tplen,
+					    &portoff, &portlen,
+					    &rtpport, &rtcpport, &dash)) {
+			/* delta starts at 0 so the adjust step below is a
+			 * no-op when no NAT hook ran (it was previously
+			 * read uninitialized in that case) */
+			int ret = 0, delta = 0;
+
+			if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+				   &ct->tuplehash[!dir].tuple.dst.u3,
+				   sizeof(ct->tuplehash[dir].tuple.src.u3))
+			    != 0) {
+				/* LAN to WAN */
+				if (dash == 0) {
+					/* Single data channel */
+					ret = expect_rtsp_channel(skb, ct,
+								  ctinfo,
+								  portoff,
+								  portlen,
+								  rtpport,
+								  &delta);
+				} else {
+					/* A pair of data channels (RTP/RTCP)*/
+					ret = expect_rtsp_channel2(skb, ct,
+								   ctinfo,
+								   portoff,
+								   portlen,
+								   rtpport,
+								   rtcpport,
+								   dash,
+								   &delta);
+				}
+			} else {
+				nat_rtsp_modify_port = rcu_dereference(
+					nat_rtsp_modify_port_hook);
+				nat_rtsp_modify_port2 = rcu_dereference(
+					nat_rtsp_modify_port2_hook);
+				/* WAN to LAN */
+				if (dash == 0) {
+					/* Single data channel */
+					if (nat_rtsp_modify_port) {
+						ret = nat_rtsp_modify_port(skb,
+								   ct,
+								   ctinfo,
+								   portoff,
+								   portlen,
+								   rtpport,
+								   &delta);
+					}
+				} else {
+					/* A pair of data channels (RTP/RTCP)*/
+					if (nat_rtsp_modify_port2) {
+						ret = nat_rtsp_modify_port2(skb,
+								    ct,
+								    ctinfo,
+								    portoff,
+								    portlen,
+								    rtpport,
+								    rtcpport,
+								    dash,
+								    &delta);
+					}
+				}
+			}
+
+			/* register the RTP ports with ingress QoS classifier */
+			pr_debug("\n RTP Port = %d, RTCP Port = %d\n", rtpport, rtcpport);
+			iqos_add_L4port(IPPROTO_UDP, rtpport, IQOS_ENT_DYN, IQOS_PRIO_HIGH);
+			iqos_add_L4port(IPPROTO_UDP, rtcpport, IQOS_ENT_DYN, IQOS_PRIO_HIGH);
+
+			if (ret < 0)
+				goto end;
+
+			if (delta) {
+				/* Packet length has changed, we need to adjust
+				 * everything */
+				tcpdatalen += delta;
+				msglen += delta;
+				msghdrlen += delta;
+				tplen += delta;
+				portlen += delta;
+
+				/* Relocate TCP payload pointer */
+				tcpdata = skb_header_pointer(skb,
+							     tcpdataoff,
+							     tcpdatalen,
+							     rtsp_buffer);
+				BUG_ON(tcpdata == NULL);
+			}
+		}
+
+		/* Process special destination=<ip>:<port> parameter in 
+		 * Transport header. This is not a standard parameter,
+		 * so far, it's only seen in some customers' products.
+ 		 */
+		while (get_next_dest_ipport(tcpdata, tpoff, tplen,
+					    &destoff, &destlen, &dest,
+					    &portoff, &portlen, &rtpport)) {
+			int ret = 0, delta = 0;
+
+			/* Process the port part */
+			if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+				   &ct->tuplehash[!dir].tuple.dst.u3,
+				   sizeof(ct->tuplehash[dir].tuple.src.u3))
+			    != 0) {
+				/* LAN to WAN */
+				ret = expect_rtsp_channel(skb, ct, ctinfo,
+							  portoff, portlen,
+							  rtpport, &delta);
+			} else {
+				/* WAN to LAN */
+				if ((nat_rtsp_modify_port = rcu_dereference(
+				    nat_rtsp_modify_port_hook))) {
+					ret = nat_rtsp_modify_port(skb, ct,
+								   ctinfo,
+								   portoff,
+								   portlen,
+								   rtpport,
+								   &delta);
+				}
+			}
+
+			/* register the RTP ports with ingress QoS classifier */
+			pr_debug("\n RTP Port = %d\n", rtpport);
+			iqos_add_L4port(IPPROTO_UDP, rtpport, IQOS_ENT_DYN, IQOS_PRIO_HIGH);
+
+			if (ret < 0)
+				goto end;
+
+			if (delta) {
+				/* Packet length has changed, we need to adjust
+				 * everything */
+				tcpdatalen += delta;
+				msglen += delta;
+				msghdrlen += delta;
+				tplen += delta;
+				portlen += delta;
+
+				/* Relocate TCP payload pointer */
+				tcpdata = skb_header_pointer(skb,
+							     tcpdataoff,
+							     tcpdatalen,
+							     rtsp_buffer);
+				BUG_ON(tcpdata == NULL);
+			}
+
+			/* Then the IP part */
+			if (dest != ct->tuplehash[dir].tuple.src.u3.ip)
+				continue;
+			/* NAT needed.  Only call the hook when it is
+			 * registered and the conntrack is NATed: the
+			 * previous code left this guard's body empty and
+			 * called the hook unconditionally, dereferencing
+			 * a NULL pointer whenever the RTSP NAT module was
+			 * not loaded. */
+			if ((nat_rtsp_modify_addr =
+			     rcu_dereference(nat_rtsp_modify_addr_hook)) &&
+			    ct->status & IPS_NAT_MASK) {
+				ret = nat_rtsp_modify_addr(skb, ct, ctinfo,
+							   destoff, destlen,
+							   &delta);
+				if (ret < 0)
+					goto end;
+
+				if (delta) {
+					/* Packet length has changed, we need
+					 * to adjust everything */
+					tcpdatalen += delta;
+					msglen += delta;
+					msghdrlen += delta;
+					tplen += delta;
+					portlen += delta;
+
+					/* Relocate TCP payload pointer */
+					tcpdata = skb_header_pointer(skb,
+							     tcpdataoff,
+							     tcpdatalen,
+							     rtsp_buffer);
+					BUG_ON(tcpdata == NULL);
+				}
+			}
+		}
+
+		if ((nat_rtsp_modify_addr =
+		     rcu_dereference(nat_rtsp_modify_addr_hook)) &&
+		    ct->status & IPS_NAT_MASK) {
+			destoff = destlen = 0;
+			while (get_next_destination(tcpdata, tpoff, tplen,
+						    &destoff, &destlen, &dest)) {
+				int ret, delta;
+
+				if (dest != ct->tuplehash[dir].tuple.src.u3.ip)
+					continue;
+
+				/* NAT needed */
+				ret = nat_rtsp_modify_addr(skb, ct, ctinfo,
+							   destoff, destlen,
+							   &delta);
+				if (ret < 0)
+					goto end;
+
+				if (delta) {
+					/* Packet length has changed, we need
+					 * to adjust everything */
+					tcpdatalen += delta;
+					msglen += delta;
+					msghdrlen += delta;
+					tplen += delta;
+					portlen += delta;
+
+					/* Relocate TCP payload pointer */
+					tcpdata = skb_header_pointer(skb,
+							     tcpdataoff,
+							     tcpdatalen,
+							     rtsp_buffer);
+					BUG_ON(tcpdata == NULL);
+				}
+
+			}
+		}
+	}
+
+end:
+	spin_unlock_bh(&nf_rtsp_lock);
+	return NF_ACCEPT;
+}
+
+/* One conntrack helper instance, name buffer ("rtsp" / "rtsp-<port>")
+ * and a shared expectation policy per configured RTSP control port. */
+static struct nf_conntrack_helper rtsp[MAX_PORTS];
+static char rtsp_names[MAX_PORTS][sizeof("rtsp-65535")];
+static struct nf_conntrack_expect_policy rtsp_exp_policy;
+
+/* don't make this __exit, since it's called from __init ! */
+/* Unregister every helper registered by nf_conntrack_rtsp_init() (entries
+ * with .me == NULL were never registered and are skipped), remove the
+ * static QoS port entries, and free the shared scratch buffer. */
+static void nf_conntrack_rtsp_fini(void)
+{
+	int i;
+
+	for (i = 0; i < ports_c; i++) {
+		if (rtsp[i].me == NULL)
+			continue;
+
+        /* unregister the RTSP ports with ingress QoS classifier */
+        iqos_rem_L4port( rtsp[i].tuple.dst.protonum, 
+                         rtsp[i].tuple.src.u.tcp.port, IQOS_ENT_STAT );
+		pr_debug("nf_ct_rtsp: unregistering helper for port %d\n",
+		       	 ports[i]);
+		nf_conntrack_helper_unregister(&rtsp[i]);
+	}
+
+	kfree(rtsp_buffer);
+}
+
+/* Module init: allocate the shared payload scratch buffer, then register
+ * one TCP conntrack helper per configured control port (default 554).
+ * On any registration failure, everything registered so far is rolled
+ * back via nf_conntrack_rtsp_fini(). */
+static int __init nf_conntrack_rtsp_init(void)
+{
+	int i, ret = 0;
+	char *tmpname;
+
+	/* NOTE(review): fixed 4000-byte linearization buffer — payloads are
+	 * copied here by skb_header_pointer(); presumably 4000 exceeds any
+	 * RTSP message this helper parses — TODO confirm against callers. */
+	rtsp_buffer = kmalloc(4000, GFP_KERNEL);
+	if (!rtsp_buffer)
+		return -ENOMEM;
+
+	if (ports_c == 0)
+		ports[ports_c++] = RTSP_PORT;
+
+	rtsp_exp_policy.max_expected = max_outstanding;
+	rtsp_exp_policy.timeout	= 5 * 60;
+	for (i = 0; i < ports_c; i++) {
+		rtsp[i].tuple.src.l3num = PF_INET;
+		rtsp[i].tuple.src.u.tcp.port = htons(ports[i]);
+		rtsp[i].tuple.dst.protonum = IPPROTO_TCP;
+		rtsp[i].expect_policy = &rtsp_exp_policy;
+		rtsp[i].expect_class_max = 1;
+		rtsp[i].me = THIS_MODULE;
+		rtsp[i].help = help;
+		tmpname = &rtsp_names[i][0];
+		if (ports[i] == RTSP_PORT)
+			sprintf(tmpname, "rtsp");
+		else
+			sprintf(tmpname, "rtsp-%d", ports[i]);
+		rtsp[i].name = tmpname;
+
+		pr_debug("nf_ct_rtsp: registering helper for port %d\n",
+		       	 ports[i]);
+		ret = nf_conntrack_helper_register(&rtsp[i]);
+		if (ret) {
+			printk("nf_ct_rtsp: failed to register helper "
+			       "for port %d\n", ports[i]);
+			nf_conntrack_rtsp_fini();
+			return ret;
+		}
+
+        /* register the RTSP ports with ingress QoS classifier */
+        iqos_add_L4port( IPPROTO_TCP, ports[i], IQOS_ENT_STAT, IQOS_PRIO_HIGH );
+	}
+
+	return 0;
+}
+
+module_init(nf_conntrack_rtsp_init);
+module_exit(nf_conntrack_rtsp_fini);
+#endif
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3a6379d5f8c4c9e52fcaf91cebdfabf66..0732bd1072ac0c3522fb3889437d0275ea4fd788 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -25,6 +25,11 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_conntrack_sip.h>
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <linux/iqos.h>
+#endif
+
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
@@ -42,6 +47,965 @@ static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT;
 module_param(sip_timeout, uint, 0600);
 MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+
+/* NAT mangling hooks, filled in by the SIP NAT module when it loads and
+ * read here via rcu_dereference(); each may be NULL when NAT is absent.
+ * addr: rewrite an address/port/number token in the message.
+ * rtp:  mangle an RTP port and register the expectation.
+ * snat: register an expectation for the SNAT case.
+ * sip:  mangle a signalling address and register a SIP expectation. */
+int (*nf_nat_addr_hook)(struct sk_buff *skb, unsigned int protoff,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			char **dptr, int *dlen, char **addr_begin,
+			int *addr_len, struct nf_conntrack_man *addr);
+EXPORT_SYMBOL_GPL(nf_nat_addr_hook);
+
+int (*nf_nat_rtp_hook)(struct sk_buff *skb, unsigned int protoff,
+		       struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       char **dptr, int *dlen, struct nf_conntrack_expect *exp,
+		       char **port_begin, int *port_len);
+EXPORT_SYMBOL_GPL(nf_nat_rtp_hook);
+
+int (*nf_nat_snat_hook)(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			struct nf_conntrack_expect *exp);
+EXPORT_SYMBOL_GPL(nf_nat_snat_hook);
+
+int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
+		       struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       char **dptr, int *dlen, struct nf_conntrack_expect *exp,
+		       char **addr_begin, int *addr_len);
+EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
+
+/* Description of one SIP/SDP header line that the parser can locate. */
+struct sip_header_nfo {
+	const char	*lname;		/* long header name, e.g. "Via:" */
+	const char	*sname;		/* compact/short form, e.g. "\r\nv:" */
+	const char	*ln_str;	/* substring to find inside the line, or NULL */
+	size_t		lnlen;		/* strlen(lname) */
+	size_t		snlen;		/* strlen(sname) */
+	size_t		ln_strlen;	/* strlen(ln_str) */
+	int		case_sensitive;	/* non-zero: match ln_str case-sensitively */
+	int		(*match_len)(struct nf_conn *, const char *,
+				     const char *, int *);
+};
+
+/* Table of the SIP header and SDP field patterns recognized by
+ * find_field(), indexed by the POS_* constants. */
+static const struct sip_header_nfo ct_sip_hdrs[] = {
+	[POS_VIA] = { 		/* SIP Via header */
+		.lname		= "Via:",
+		.lnlen		= sizeof("Via:") - 1,
+		.sname		= "\r\nv:",
+		.snlen		= sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */
+		.ln_str		= "UDP ",
+		.ln_strlen	= sizeof("UDP ") - 1,
+	},
+	[POS_CONTACT] = { 	/* SIP Contact header */
+		.lname		= "Contact:",
+		.lnlen		= sizeof("Contact:") - 1,
+		.sname		= "\r\nm:",
+		.snlen		= sizeof("\r\nm:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+	},
+	[POS_CONTENT] = { 	/* SIP Content length header */
+		.lname		= "Content-Length:",
+		.lnlen		= sizeof("Content-Length:") - 1,
+		.sname		= "\r\nl:",
+		.snlen		= sizeof("\r\nl:") - 1,
+		.ln_str		= NULL,
+		.ln_strlen	= 0,
+	},
+	[POS_OWNER_IP4] = {	/* SDP owner address*/
+		.case_sensitive	= 1,
+		.lname		= "\no=",
+		.lnlen		= sizeof("\no=") - 1,
+		.sname		= "\ro=",
+		.snlen		= sizeof("\ro=") - 1,
+		.ln_str		= "IN IP4 ",
+		.ln_strlen	= sizeof("IN IP4 ") - 1,
+	},
+	[POS_CONNECTION_IP4] = {/* SDP connection info */
+		.case_sensitive	= 1,
+		.lname		= "\nc=",
+		.lnlen		= sizeof("\nc=") - 1,
+		.sname		= "\rc=",
+		.snlen		= sizeof("\rc=") - 1,
+		.ln_str		= "IN IP4 ",
+		.ln_strlen	= sizeof("IN IP4 ") - 1,
+	},
+	[POS_ANAT] = {		/* SDP Alternative Network Address Types */
+		.case_sensitive	= 1,
+		.lname		= "\na=",
+		.lnlen		= sizeof("\na=") - 1,
+		.sname		= "\ra=",
+		.snlen		= sizeof("\ra=") - 1,
+		.ln_str		= "alt:",
+		.ln_strlen	= sizeof("alt:") - 1,
+	},
+	[POS_MEDIA_AUDIO] = {		/* SDP media audio info */
+		.case_sensitive	= 1,
+		.lname		= "\nm=audio ",
+		.lnlen		= sizeof("\nm=audio ") - 1,
+		.sname		= "\rm=audio ",
+		.snlen		= sizeof("\rm=audio ") - 1,
+		.ln_str		= NULL,
+		.ln_strlen	= 0,
+	},
+	[POS_MEDIA_VIDEO] = {		/* SDP media video info */
+		.case_sensitive	= 1,
+		.lname		= "\nm=video ",
+		.lnlen		= sizeof("\nm=video ") - 1,
+		.sname		= "\rm=video ",
+		.snlen		= sizeof("\rm=video ") - 1,
+		.ln_str		= NULL,
+		.ln_strlen	= 0,
+	},
+};
+
+//BRCM: move these vars here to allow sip_help() use it
+/* Helper instances ([port][0]=IPv4, [port][1]=IPv6) and their expectation
+ * policies, one class per kind of expected child connection. */
+static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
+static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
+static const struct nf_conntrack_expect_policy
+sip_exp_policy[SIP_EXPECT_CLASS_MAX + 1] = {
+	[SIP_EXPECT_CLASS_SIGNALLING] = {
+		.max_expected	= 1,
+		.timeout	= 3 * 60,
+	},
+	[SIP_EXPECT_CLASS_AUDIO] = {
+		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.timeout	= 3 * 60,
+	},
+	[SIP_EXPECT_CLASS_VIDEO] = {
+		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.timeout	= 3 * 60,
+	},
+	[SIP_EXPECT_CLASS_OTHER] = {
+		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.timeout	= 3 * 60,
+	},
+};
+
+#if 0 // Don't register the helper such that fc can accelerate the RTP streams.
+static int rtp_help(struct sk_buff *skb, unsigned int protoff,
+		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	return NF_ACCEPT;
+}
+
+/* Null RTP helper to avoid flow cache bypassing it */
+static struct nf_conntrack_helper nf_conntrack_helper_rtp __read_mostly = {
+	.name			= "RTP",
+	.me			= THIS_MODULE,
+	.help			= rtp_help
+};
+#endif
+
+/* Search for 'str' between *begin and the end of the current line
+ * (stops at CR/LF) without crossing 'end'.  On a hit, advance *begin
+ * just past the match and return 1; return 0 on no match.  A NULL or
+ * empty 'str' matches trivially (returns 1 with *begin untouched). */
+int find_inline_str(char **begin, char *end, const char *str, int str_len,
+		    int case_sensitive)
+{
+	char *p = *begin;
+	char *q = end - str_len;
+
+	if (!str || str_len == 0)
+		return 1;
+
+	while(p <= q && *p != '\r' && *p != '\n') {
+		if (case_sensitive) {
+			if (strncmp(p, str, str_len) == 0)
+				goto found;
+		} else {
+			if (strnicmp(p, str, str_len) == 0)
+				goto found;
+		}
+		p++;
+	}
+	return 0;
+found:
+	*begin = p + str_len;
+	return 1;
+}
+
+/* Locate the next occurrence of the header described by
+ * ct_sip_hdrs[field] between *begin and 'end', trying the long name
+ * first and the compact form second, then require hnfo->ln_str inside
+ * that line.  On success advance *begin past the matched prefix and
+ * return 1; return 0 when the field is not found. */
+int find_field(char **begin, char *end, int field)
+{
+	const struct sip_header_nfo *hnfo = &ct_sip_hdrs[field];
+	char *p = *begin;
+	char *q = end - hnfo->lnlen;
+
+	while (p <= q) {
+		if (hnfo->lname == NULL ||
+		    (strncmp(p, hnfo->lname, hnfo->lnlen) == 0)) {
+		    	p += hnfo->lnlen;
+		} else {
+			if (hnfo->sname != NULL &&
+			    strncmp(p, hnfo->sname, hnfo->snlen) == 0) {
+			    	p += hnfo->snlen;
+			} else {
+				p++;
+				continue;
+			}
+		}
+		if (!find_inline_str(&p, end, hnfo->ln_str, hnfo->ln_strlen,
+				     hnfo->case_sensitive)) {
+			pr_debug("'%s' not found in '%s'.\n", hnfo->ln_str,
+			       	 hnfo->lname);
+			return 0;
+		}
+		*begin = p;
+		return 1;
+	}
+	return 0;
+}
+
+/* Parse a decimal number at *begin (skipping leading spaces).  On
+ * success store the value in *n (if non-NULL), set *begin to the first
+ * digit and return the number of digit characters; return 0 when no
+ * digits are present.  NOTE(review): 'end' is not consulted while
+ * skipping spaces/digits — callers rely on the buffer being
+ * line-terminated; TODO confirm. */
+int parse_digits(char **begin, char *end, int *n)
+{
+	char *p = *begin;
+	char *q;
+	long num;
+
+	/* Skip spaces */
+	while (*p == ' ')
+		p++;
+	
+	if (!isdigit((int)*p))
+		return 0;
+
+	num = simple_strtol(p, &q, 10);
+	if (q == p)
+		return 0;
+	if (n)
+		*n = (int)num;
+	*begin = p;
+	return q - p;
+}
+
+/* Parse an IPv4 or bracketed/plain IPv6 literal at *begin into 'addr'
+ * (l3num is set to AF_INET/AF_INET6, port zeroed).  Returns the number
+ * of characters consumed, including a trailing ']' for bracketed IPv6,
+ * or 0 when the text is not an address. */
+int parse_addr(char **begin, char *end, struct nf_conntrack_man *addr)
+{
+	char *p;
+
+	memset(addr, 0, sizeof(*addr));
+	if (in4_pton((const char *)*begin, end - *begin, (u8 *)&addr->u3.ip,
+		     -1, (const char **)&p))
+		addr->l3num = AF_INET;
+	else if (in6_pton(**begin == '[' ? (const char *)(*begin + 1) : (const char *)*begin, end - *begin,
+			  (u8 *)&addr->u3.ip6, -1, (const char **)&p))
+		addr->l3num = AF_INET6;
+	else
+		return 0;
+
+	addr->u.all = 0;
+
+	return (*p == ']') ? (p - *begin + 1) : (p - *begin);
+}
+
+/* Parse the host[:port] part of a SIP URI at *begin: skip an optional
+ * user@ prefix, parse the address with parse_addr(), then an optional
+ * :port (stored network-order in addr->u.all, 0 if absent).  On success
+ * *begin points at the host part and the host[:port] length is
+ * returned; 0 on failure.
+ * FIXME(review): if the '@' scan stops because p == end, the following
+ * '*p' test reads one byte past the buffer; likewise 'p[len]' below may
+ * touch 'end' — confirm the payload is always terminator-padded. */
+int parse_sip_uri(char **begin, char *end, struct nf_conntrack_man *addr)
+{
+	char *p = *begin;
+	char *p0;
+	int port;
+	int len;
+
+	/* Search for '@' in this line */
+	while (p < end && *p != '\r' && *p != '\n' && *p != ';') {
+		if (*p == '@')
+			break;
+		p++;
+	}
+
+	/* We found user part */
+	if (*p == '@')
+		p0 = ++p;
+	/* No user part */
+	else 
+		p = p0 = *begin;
+
+	/* Address */
+	if ((len = parse_addr(&p, end, addr)) == 0)
+		return 0;
+
+	/* Port number */
+	if (p[len] == ':') {
+		p += len + 1;
+		if ((len = parse_digits(&p, end, &port)) == 0)
+			return 0;
+		if (port < 1 || port > 65535)
+			return 0;
+		addr->u.all = htons((unsigned short)port);
+	} else {
+		addr->u.all = 0;
+	}
+
+	*begin = p0;
+	return p + len - p0;
+}
+
+/* Handle the SDP "o=" (owner) line: record its address in *addr and,
+ * for LAN->WAN traffic, rewrite a LAN source address to the WAN address
+ * via the NAT hook, or — when a 'smart' UA already wrote the firewall's
+ * external address — substitute the LAN address into *addr instead.
+ * Mirrors process_connection() for the "c=" line. */
+static int process_owner(struct sk_buff *skb, unsigned int protoff,
+			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			 char **dptr, int *dlen, struct nf_conntrack_man *addr)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int ret = NF_ACCEPT;
+	char *p = *dptr;
+	int len;
+	struct nf_conntrack_man a;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	if (!find_field(&p, *dptr+*dlen, POS_OWNER_IP4))
+		goto end;
+	if ((len = parse_addr(&p, *dptr+*dlen, &a)) == 0)
+		goto end;  // brcm: that might be an owner with SIP URL, let him go.
+	pr_debug("nf_conntrack_sip: owner=%.*s\n", len, p);
+	*addr = a;
+
+	/* We care only LAN->WAN situations */
+	if (!memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		    &ct->tuplehash[!dir].tuple.dst.u3,
+		    sizeof(ct->tuplehash[dir].tuple.src.u3)))
+		goto end;
+
+	/* LAN->WAN. Change the LAN IP to WAN. */
+	if (!memcmp(&a.u3, &ct->tuplehash[dir].tuple.src.u3, sizeof(a.u3)) &&
+	    (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+	    	a.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+	    	ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				  &len, &a);
+	    	pr_debug("nf_conntrack_sip: owner changed to %.*s\n",
+		       	 len, p);
+	}
+	/* LAN->WAN, with firewall's external IP address that has been set by
+	 * some 'smart' UAs. We need to change the parsed IP to LAN. */ 
+	else if (!memcmp(&a.u3, &ct->tuplehash[!dir].tuple.dst.u3,
+			 sizeof(a.u3))) {
+		addr->u3 = ct->tuplehash[dir].tuple.src.u3;
+		pr_debug("nf_conntrack_sip: owner is auto-detected WAN "
+		       	 "address\n");
+	}
+end:
+	return ret;
+}
+
+/* Handle the SDP "c=" (connection) line, analogous to process_owner():
+ * record the address in *addr and NAT LAN->WAN as needed.  Unlike the
+ * owner line, an unparseable address here is a hard error (NF_DROP). */
+static int process_connection(struct sk_buff *skb, unsigned int protoff,
+			      struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo, char **dptr,
+			      int *dlen, struct nf_conntrack_man *addr)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int ret = NF_ACCEPT;
+	char *p = *dptr;
+	int len;
+	struct nf_conntrack_man a;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	if (!find_field(&p, *dptr+*dlen, POS_CONNECTION_IP4))
+		goto end;
+	if ((len = parse_addr(&p, *dptr+*dlen, &a)) == 0)
+		goto err;
+	pr_debug("nf_conntrack_sip: connection=%.*s\n", len, p);
+	*addr = a;
+	
+	/* We care only LAN->WAN situations */
+	if (!memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		    &ct->tuplehash[!dir].tuple.dst.u3,
+		    sizeof(ct->tuplehash[dir].tuple.src.u3)))
+		goto end;
+
+	/* LAN->WAN. Change the LAN IP to WAN. */
+	if (!memcmp(&a.u3, &ct->tuplehash[dir].tuple.src.u3, sizeof(a.u3)) &&
+	    (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+	    	a.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+	    	ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				  &len, &a);
+	    	pr_debug("nf_conntrack_sip: connection changed to %.*s\n",
+		       	 len, p);
+	}
+	/* LAN->WAN, with firewall's external IP address that has been set by
+	 * some 'smart' UAs. We need to change the parsed IP to LAN. */ 
+	else if (!memcmp(&a.u3, &ct->tuplehash[!dir].tuple.dst.u3,
+			 sizeof(a.u3))) {
+		addr->u3 = ct->tuplehash[dir].tuple.src.u3;
+		pr_debug("nf_conntrack_sip: connection is auto-detected WAN "
+		       	 "address\n");
+	}
+end:
+	return ret;
+err:
+	return NF_DROP;
+}
+
+/* Expectation callback: when an expected RTP/RTCP child connection is
+ * instantiated, register both its UDP ports (reply-dst and original-dst)
+ * with the ingress QoS classifier as dynamic high-priority entries.
+ * Fix: the debug print paired the original-direction destination IP with
+ * the *source* port (src.u.all copy-pasted twice); it now prints the
+ * destination port. */
+static void iqos_expect(struct nf_conn *new, struct nf_conntrack_expect *exp)
+{
+	/* register the SIP Data RTP/RTCP ports with ingress QoS classifier */
+	pr_debug("adding iqos from %pI4:%hu->%pI4:%hu\n",
+		 &new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip,
+		 ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all),
+		 &new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip,
+		 ntohs(new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all));
+
+	iqos_add_L4port(IPPROTO_UDP, new->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port, 
+			IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+	iqos_add_L4port( IPPROTO_UDP, new->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port, 
+			 IQOS_ENT_DYN, IQOS_PRIO_HIGH );
+}
+
+/* Create a UDP expectation for an RTP/RTCP stream advertised in SDP.
+ * The child is marked with an effectively-infinite derived timeout and
+ * iqos_expect as its instantiation callback.  LAN->WAN with a NAT
+ * module present goes through nf_nat_rtp_hook (which also mangles the
+ * port text via port_begin/port_len); non-NAT LAN traffic with an SNAT
+ * hook goes through nf_nat_snat_hook; otherwise the expectation is
+ * registered directly. */
+static int expect_rtp(struct sk_buff *skb, unsigned int protoff,
+		      struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		      char **dptr, int *dlen, char **port_begin, int *port_len,
+		      struct nf_conntrack_man *addr, int class)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conntrack_expect *exp;
+	typeof(nf_nat_rtp_hook) nf_nat_rtp;
+	typeof(nf_nat_snat_hook) nf_nat_snat;
+	int ret = NF_ACCEPT;
+
+	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
+		return ret;
+	nf_ct_expect_init(exp, class, addr->l3num, NULL, &addr->u3,
+			  IPPROTO_UDP, NULL, &addr->u.all);
+	/* Set the child connection as slave (disconnected when master
+	 * disconnects */
+	exp->flags |= NF_CT_EXPECT_DERIVED_TIMEOUT;
+	exp->derived_timeout = 0xFFFFFFFF;
+	exp->expectfn= iqos_expect;
+	// Don't register the helper such that fc can accelerate the RTP streams.
+	// exp->helper = &nf_conntrack_helper_rtp;
+	pr_debug("nf_conntrack_sip: expect_rtp %pI4:%hu->%pI4:%hu\n",
+	       	 &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.udp.port),
+	       	 &exp->tuple.dst.u3.ip, ntohs(exp->tuple.dst.u.udp.port));
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+	    	   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    (nf_nat_rtp = rcu_dereference(nf_nat_rtp_hook))) {
+		ret = nf_nat_rtp(skb, protoff, ct, ctinfo, dptr, dlen, exp,
+				 port_begin, port_len);
+	} else if (!memcmp(&ct->tuplehash[dir].tuple.src.u3,
+	    	   	   &ct->tuplehash[!dir].tuple.dst.u3,
+		   	   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    	   (nf_nat_snat = rcu_dereference(nf_nat_snat_hook))) {
+			ret = nf_nat_snat(ct, ctinfo, exp);
+	} else {
+		if (nf_ct_expect_related(exp) != 0) {
+			pr_debug("nf_conntrack_sip: nf_ct_expect_related() "
+				 "failed\n");
+		}
+	}
+	nf_ct_expect_put(exp);
+
+	return ret;
+}
+
+/* Handle the SDP "m=audio <port>" line: parse the port into *addr and
+ * expect the corresponding RTP stream.  Returns NF_ACCEPT when the line
+ * is absent, NF_DROP on a malformed port, otherwise expect_rtp()'s
+ * verdict (note 'len' is reused to carry that verdict). */
+static int process_audio(struct sk_buff *skb, unsigned int protoff,
+			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			 char **dptr, int *dlen, struct nf_conntrack_man *addr)
+{
+	char *p = *dptr;
+	int port;
+	int len;
+
+	if (!find_field(&p, *dptr+*dlen, POS_MEDIA_AUDIO))
+		return NF_ACCEPT;
+	if ((len = parse_digits(&p, *dptr+*dlen, &port)) == 0)
+		return NF_DROP;
+	pr_debug("nf_conntrack_sip: audio=%d\n", port);
+	addr->u.all = htons((u_int16_t)port);
+	len = expect_rtp(skb, protoff, ct, ctinfo, dptr, dlen, &p, &len,
+			 addr, SIP_EXPECT_CLASS_AUDIO);
+	return len;
+}
+
+/* Handle the SDP "m=video <port>" line; identical in structure to
+ * process_audio() but uses the video expectation class. */
+static int process_video(struct sk_buff *skb, unsigned int protoff,
+			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			 char **dptr, int *dlen, struct nf_conntrack_man *addr)
+{
+	char *p = *dptr;
+	int port;
+	int len;
+
+	if (!find_field(&p, *dptr+*dlen, POS_MEDIA_VIDEO))
+		return NF_ACCEPT;
+	if ((len = parse_digits(&p, *dptr+*dlen, &port)) == 0)
+		return NF_DROP;
+	pr_debug("nf_conntrack_sip: video=%d\n", port);
+	addr->u.all = htons((u_int16_t)port);
+	return expect_rtp(skb, protoff, ct, ctinfo, dptr, dlen, &p, &len,
+			  addr, SIP_EXPECT_CLASS_VIDEO);
+}
+
+/* Handle SDP "a=alt:" lines (Alternative Network Address Types): for
+ * each entry whose address matches this connection's source, rewrite a
+ * LAN address to WAN via the NAT hook (LAN->WAN only), then parse the
+ * alternative port and expect an RTP stream for it. */
+static int process_anat(struct sk_buff *skb, unsigned int protoff,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			char **dptr, int *dlen)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int ret = NF_ACCEPT;
+	char *p = *dptr;
+	int port;
+	int len;
+	struct nf_conntrack_man addr;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	while (find_field(&p, *dptr+*dlen, POS_ANAT)) {
+		int count;
+
+		/* There are 5 spaces in the leading parameters */
+		count = 0;
+		while(p < *dptr+*dlen && *p != '\r' && *p != '\n') {
+			if(*p++ == ' ') {
+				if (++count == 5)
+					break;
+			}
+		}
+		if (count < 5)
+			continue;
+
+		if ((len = parse_addr(&p, *dptr+*dlen, &addr)) == 0)
+			continue;
+		pr_debug("nf_conntrack_sip: alt ip=%.*s\n", len, p);
+		if (memcmp(&addr.u3, &ct->tuplehash[dir].tuple.src.u3,
+		    sizeof(addr.u3)))
+	    		continue;
+		if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+			   &ct->tuplehash[!dir].tuple.dst.u3,
+			   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+		    (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+	    		addr.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+	    		ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr,
+					  dlen, &p, &len, &addr);
+			if (ret != NF_ACCEPT)
+				break;
+			pr_debug("nf_conntrack_sip: alt ip changed to %.*s\n",
+			       	 len, p);
+		}
+
+		/* Port */
+		p += len + 1;
+		if ((len = parse_digits(&p, *dptr+*dlen, &port)) == 0)
+			return NF_DROP;
+		pr_debug("nf_conntrack_sip: alt port=%.*s\n", len, p);
+		addr.u.all = htons((u_int16_t)port);
+		ret = expect_rtp(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				 &len, &addr, SIP_EXPECT_CLASS_OTHER);
+		if (ret != NF_ACCEPT)
+			break;
+		pr_debug("nf_conntrack_sip: alt port changed to %.*s\n",
+			 len, p);
+	}
+	return ret;
+}
+
+/* After earlier mangling may have changed the SDP body size, recompute
+ * the real body length (bytes after the blank line ending the headers)
+ * and, if it differs from the advertised Content-Length, rewrite that
+ * header through the generic NAT address/number hook.
+ * FIXME(review): the 4-byte memcmp(p, "\r\n\r\n", 4) and the p[1] reads
+ * can inspect up to 3 bytes past *dptr+*dlen when a lone '\r'/'\n' sits
+ * at the very end of the buffer — confirm the buffer is padded. */
+static int update_content_length(struct sk_buff *skb, unsigned int protoff,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo, char **dptr,
+				 int *dlen)
+{
+	int ret = NF_ACCEPT;
+	int len;
+	int clen;
+	int real_clen;
+	char *p = *dptr;
+	char *clen_start;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	/* Look for Content-Length field */
+	if (!find_field(&p, *dptr + *dlen, POS_CONTENT))
+		return NF_ACCEPT;
+	if ((len = parse_digits(&p, *dptr+*dlen, &clen)) == 0)
+		return NF_DROP;
+	pr_debug("nf_conntrack_sip: Content-Length=%d\n", clen);
+	clen_start = p;
+
+	/* Look for the end of header fields */
+	while(p < *dptr+*dlen) {
+		if (*p == '\r') {
+			if (memcmp(p, "\r\n\r\n", 4) == 0) {
+				p += 4;
+				break;
+			} else if (p[1] == '\r') {
+				p += 2;
+				break;
+			}
+		} else if (*p == '\n') {
+			if (p[1] == '\n') {
+				p += 2;
+				break;
+			}
+		}
+		p++;
+	}
+
+	/* Calculate real content length */
+	if (p > *dptr+*dlen)
+		return NF_DROP;
+	real_clen = *dlen - (p - *dptr);
+	pr_debug("nf_conntrack_sip: Real content length=%d\n", real_clen);
+	if (real_clen == clen)
+		return NF_ACCEPT;
+	
+	/* Modify content length */
+	if ((nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+		struct nf_conntrack_man addr;
+
+		/* The hook rewrites a numeric token; pass the new length
+		 * through addr.u.all (host struct reused as a carrier). */
+		memset(&addr, 0, sizeof(addr));
+		addr.l3num = AF_INET;
+	    	addr.u.all = htons((u_int16_t)real_clen);
+	    	ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr, dlen,
+				  &clen_start, &len, &addr);
+		pr_debug("nf_conntrack_sip: Content-Length changed to %.*s\n",
+		       	 len, clen_start);
+	}
+
+	return ret;
+}
+
+/* Expect a new SIP signalling connection on a different port (seen in
+ * a Via header).  The expectation carries the appropriate IPv4/IPv6 SIP
+ * helper.  LAN->WAN with the SIP NAT hook present goes through the hook
+ * (which also mangles the address text); otherwise the expectation is
+ * registered directly. */
+static int expect_sip(struct sk_buff *skb, unsigned int protoff,
+		      struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		      char **dptr, int *dlen, char **addr_begin, int *addr_len,
+		      struct nf_conntrack_man *addr)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	struct nf_conntrack_expect *exp;
+	typeof(nf_nat_sip_hook) nf_nat_sip;
+	int ret = NF_ACCEPT;
+
+	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
+		return ret;
+	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, addr->l3num, NULL,
+			  &addr->u3, IPPROTO_UDP, NULL, &addr->u.udp.port);
+	exp->helper = addr->l3num == AF_INET?  &sip[0][0] : &sip[0][1];
+	exp->derived_timeout = 0;
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+	    	   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    (nf_nat_sip = rcu_dereference(nf_nat_sip_hook))) {
+		ret = nf_nat_sip(skb, protoff, ct, ctinfo, dptr, dlen, exp,
+				 addr_begin, addr_len);
+	} else {
+		if (nf_ct_expect_related(exp) != 0) {
+			pr_debug("nf_conntrack_sip: nf_ct_expect_related() "
+				 "failed\n");
+		}
+	}
+	nf_ct_expect_put(exp);
+	return ret;
+
+}
+static int process_via(struct sk_buff *skb, unsigned int protoff,
+		       struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+		       char **dptr, int *dlen)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int ret = NF_ACCEPT;
+	char *p = *dptr;
+	struct nf_conntrack_man addr;
+	int len;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	while (find_field(&p, *dptr + *dlen, POS_VIA)) {
+
+		if ((len = parse_sip_uri(&p, *dptr + *dlen, &addr)) == 0)
+			continue;
+		pr_debug("nf_conntrack_sip: Via=%.*s\n", len, p);
+
+		/* Different SIP port than this one */
+		if (!memcmp(&addr.u3, &ct->tuplehash[dir].tuple.src.u3,
+			    sizeof(addr.u3)) && addr.u.udp.port != htons(0) &&
+		    addr.u.udp.port != ct->tuplehash[dir].tuple.src.u.udp.port){
+		    	pr_debug("nf_conntrack_sip: different message port\n");
+		    	ret = expect_sip(skb, protoff, ct, ctinfo, dptr, dlen,
+					 &p, &len, &addr);
+			break;
+		}
+		/* LAN->WAN. Change the LAN address to WAN address */ 
+		else if (!memcmp(&addr.u3, &ct->tuplehash[dir].tuple.src.u3,
+				 sizeof(addr.u3)) &&
+			 addr.u.all == ct->tuplehash[dir].tuple.src.u.all &&
+			 memcmp(&ct->tuplehash[dir].tuple.src.u3,
+			 	&ct->tuplehash[!dir].tuple.dst.u3,
+				sizeof(ct->tuplehash[dir].tuple.dst.u3)) &&
+			 (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+			addr.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+			addr.u.all = ct->tuplehash[!dir].tuple.dst.u.all;
+			ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr,
+					  dlen, &p, &len, &addr);
+		    	pr_debug("nf_conntrack_sip: LAN address in Via "
+			       	 "changed to WAN address %.*s\n", len, p);
+			break;
+		}
+		/* LAN->WAN, with firewall's external IP address that has been
+		 * set by some 'smart' UAs. We need to change the port. */ 
+		else if (!memcmp(&addr.u3, &ct->tuplehash[!dir].tuple.dst.u3,
+				 sizeof(addr.u3)) &&
+			 memcmp(&ct->tuplehash[dir].tuple.src.u3,
+			 	&ct->tuplehash[!dir].tuple.dst.u3,
+				sizeof(ct->tuplehash[dir].tuple.dst.u3)) &&
+			 (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+			addr.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+			addr.u.all = ct->tuplehash[!dir].tuple.dst.u.all;
+			ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr,
+					  dlen, &p, &len, &addr);
+		    	pr_debug("nf_conntrack_sip: Auto-detected WAN address "
+			       	 "in Via changed to %.*s\n", len, p);
+			break;
+		}
+		/* WAN->LAN. Change the WAN address to LAN address */ 
+		else if (!memcmp(&addr.u3, &ct->tuplehash[dir].tuple.dst.u3,
+				 sizeof(addr.u3)) &&
+			 addr.u.udp.port ==
+			 ct->tuplehash[dir].tuple.dst.u.udp.port &&
+			 memcmp(&ct->tuplehash[dir].tuple.dst.u3,
+			 	&ct->tuplehash[!dir].tuple.src.u3,
+				sizeof(ct->tuplehash[dir].tuple.dst.u3)) &&
+			 (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+			addr = ct->tuplehash[!dir].tuple.src;
+			ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr,
+					  dlen, &p, &len, &addr);
+		    	pr_debug("nf_conntrack_sip: WAN address in Via "
+			       	 "changed to LAN address %.*s\n", len, p);
+			break;
+		}
+		p += len;
+	}
+	return ret;
+}
+
+static int process_contact(struct sk_buff *skb, unsigned int protoff,
+			   struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			   char **dptr, int *dlen)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int ret = NF_ACCEPT;
+	char *p = *dptr;
+	int len;
+	struct nf_conntrack_man addr;
+	typeof(nf_nat_addr_hook) nf_nat_addr;
+
+	if (!find_field(&p, *dptr+*dlen, POS_CONTACT))
+		return ret;
+	if ((len = parse_sip_uri(&p, *dptr+*dlen, &addr)) == 0)
+		return ret;  // brcm: this might be a contact with a SIP URL; let it pass.
+	pr_debug("nf_conntrack_sip: Contact=%.*s\n", len, p);
+
+	/* Different SIP port than this one */
+	if (!memcmp(&addr.u3, &ct->tuplehash[dir].tuple.src.u3,
+		    sizeof(addr.u3)) && addr.u.udp.port != htons(0) &&
+	    addr.u.udp.port != ct->tuplehash[dir].tuple.src.u.udp.port) {
+		pr_debug("nf_conntrack_sip: different message port\n");
+	    	ret = expect_sip(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				 &len, &addr);
+	}
+	/* LAN->WAN. Change the LAN address to WAN address */ 
+	else if (!memcmp(&addr.u3, &ct->tuplehash[dir].tuple.src.u3,
+			 sizeof(addr.u3)) &&
+		 addr.u.all == ct->tuplehash[dir].tuple.src.u.all &&
+		 memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		 	&ct->tuplehash[!dir].tuple.dst.u3,
+			sizeof(ct->tuplehash[dir].tuple.dst.u3)) &&
+		 (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+		addr.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+		addr.u.all = ct->tuplehash[!dir].tuple.dst.u.all;
+		ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				  &len, &addr);
+		pr_debug("nf_conntrack_sip: LAN address in Contact "
+		       	 "changed to WAN address %.*s\n", len, p);
+	}
+	/* LAN->WAN, with firewall's external IP address that has been
+	 * set by some 'smart' UAs. We need to change the port. */ 
+	else if (!memcmp(&addr.u3, &ct->tuplehash[!dir].tuple.dst.u3,
+			 sizeof(addr.u3)) &&
+		 memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		 	&ct->tuplehash[!dir].tuple.dst.u3,
+			sizeof(ct->tuplehash[dir].tuple.dst.u3)) &&
+		 (nf_nat_addr = rcu_dereference(nf_nat_addr_hook))) {
+		addr.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+		addr.u.all = ct->tuplehash[!dir].tuple.dst.u.all;
+		ret = nf_nat_addr(skb, protoff, ct, ctinfo, dptr, dlen, &p,
+				  &len, &addr);
+		pr_debug("nf_conntrack_sip: Auto-detected WAN address in "
+		       	 "Contact changed to %.*s\n", len, p);
+	}
+	return ret;
+}
+
+static int process_bye(struct sk_buff *skb, struct nf_conn *ct)
+{
+
+	/* Disconnect all child connections that have infinite timeout */
+	pr_debug("iterate each derived connections");
+
+	if (!list_empty(&ct->derived_connections)) {
+		struct nf_conn *child, *tmp;
+		pr_debug("derived connection list is not empty"); 
+		list_for_each_entry_safe(child, tmp, &ct->derived_connections,
+                    derived_list) {
+			struct nf_conn_help * help;
+			help = nfct_help(child);
+			if (!help) {
+				child->derived_timeout = 5 * HZ;
+				nf_ct_refresh(child, skb, 5 * HZ);
+			}
+		}
+	}
+
+	return NF_ACCEPT;
+}
+
+static int sip_help(struct sk_buff *skb,
+		    unsigned int protoff,
+		    struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo)
+{
+	unsigned int dataoff, datalen;
+	char *dptr;
+	int ret = NF_ACCEPT;
+	struct nf_conntrack_man addr;
+
+	/* Owned by local application (FXS), just accept it */
+	if (skb->sk)
+		return NF_ACCEPT;
+	
+	/* No Data ? */
+	dataoff = protoff + sizeof(struct udphdr);
+	if (dataoff >= skb->len)
+		return NF_ACCEPT;
+
+	if (ct->derived_timeout == 0)
+		nf_ct_refresh(ct, skb, sip_timeout * HZ);
+
+	if (!skb_is_nonlinear(skb))
+		dptr = skb->data + dataoff;
+	else {
+		pr_debug("Copy of skbuff not supported yet.\n");
+		goto out;
+	}
+	pr_debug("nf_conntrack_sip: received message \"%.14s\"\n", dptr);
+
+	datalen = skb->len - dataoff;
+	if (datalen < sizeof("SIP/2.0 200") - 1)
+		goto out;
+
+	/* Process Via field */
+	pr_debug("nf_conntrack_sip: process_via\n");
+	ret = process_via(skb, protoff, ct, ctinfo, &dptr, &datalen);
+	if (ret != NF_ACCEPT)
+		goto out;
+
+	/* Process Contact field */
+	pr_debug("nf_conntrack_sip: process_contact\n");
+	ret = process_contact(skb, protoff, ct, ctinfo, &dptr, &datalen);
+	if (ret != NF_ACCEPT)
+		goto out;
+	
+	/* Process BYE and status code 400 (disconnect) */
+	if (memcmp(dptr, "BYE", sizeof("BYE") - 1) == 0 ||
+	    memcmp(dptr, "SIP/2.0 400", sizeof("SIP/2.0 400") - 1) == 0) {
+		pr_debug("nf_conntrack_sip: process_bye\n");
+		ret = process_bye(skb, ct);
+		goto out;
+	}
+
+	/* RTP info only in some SDP pkts */
+	if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) == 0 ||
+	    memcmp(dptr, "UPDATE", sizeof("UPDATE") - 1) == 0 ||
+	    memcmp(dptr, "SIP/2.0 180", sizeof("SIP/2.0 180") - 1) == 0 ||
+	    memcmp(dptr, "SIP/2.0 183", sizeof("SIP/2.0 183") - 1) == 0 ||
+	    memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) == 0) {
+		pr_debug("nf_conntrack_sip: process_owner\n");
+		ret = process_owner(skb, protoff, ct, ctinfo, &dptr,
+				    &datalen, &addr);
+		if (ret != NF_ACCEPT)
+			goto out;
+		ret = process_connection(skb, protoff, ct, ctinfo, &dptr,
+					 &datalen, &addr);
+		pr_debug("nf_conntrack_sip: process_connection\n");
+		if (ret != NF_ACCEPT)
+			goto out;
+		pr_debug("nf_conntrack_sip: process_audio\n");
+		ret = process_audio(skb, protoff, ct, ctinfo, &dptr,
+				    &datalen, &addr);
+		if (ret != NF_ACCEPT)
+			goto out;
+		pr_debug("nf_conntrack_sip: process_video\n");
+		ret = process_video(skb, protoff, ct, ctinfo, &dptr,
+				    &datalen, &addr);
+		if (ret != NF_ACCEPT)
+			goto out;
+		pr_debug("nf_conntrack_sip: process_anat\n");
+		ret = process_anat(skb, protoff, ct, ctinfo, &dptr, &datalen);
+		if (ret != NF_ACCEPT)
+			goto out;
+		pr_debug("nf_conntrack_sip: update_content_length\n");
+		ret = update_content_length(skb, protoff, ct, ctinfo, &dptr,
+					    &datalen);
+	}
+
+out:
+	pr_debug("nf_conntrack_sip: %s\n", ret == NF_ACCEPT?
+		 "accepted" : "dropped");
+	return ret;
+}
+
+static void nf_conntrack_sip_fini(void)
+{
+	int i, j;
+
+	for (i = 0; i < ports_c; i++) {
+		for (j = 0; j < 2; j++) {
+			if (sip[i][j].me == NULL)
+				continue;
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+        /* unregister the SIP ports with ingress QoS classifier */
+        iqos_rem_L4port( sip[i][j].tuple.dst.protonum, 
+			              sip[i][j].tuple.src.u.udp.port, IQOS_ENT_STAT );
+#endif
+			nf_conntrack_helper_unregister(&sip[i][j]);
+		}
+	}
+}
+
+static int __init nf_conntrack_sip_init(void)
+{
+	int i, j, ret;
+	char *tmpname;
+
+	if (ports_c == 0)
+		ports[ports_c++] = SIP_PORT;
+
+	for (i = 0; i < ports_c; i++) {
+		memset(&sip[i], 0, sizeof(sip[i]));
+
+		sip[i][0].tuple.src.l3num = AF_INET;
+		sip[i][1].tuple.src.l3num = AF_INET6;
+		for (j = 0; j < 2; j++) {
+			sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
+			sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
+			sip[i][j].me = THIS_MODULE;
+			sip[i][j].help = sip_help;
+			sip[i][j].expect_policy	= &sip_exp_policy[0],
+			sip[i][j].expect_class_max = SIP_EXPECT_CLASS_MAX;
+
+			tmpname = &sip_names[i][j][0];
+			if (ports[i] == SIP_PORT)
+				sprintf(tmpname, "sip");
+			else
+				sprintf(tmpname, "sip-%u", i);
+			sip[i][j].name = tmpname;
+
+			pr_debug("port #%u: %u\n", i, ports[i]);
+
+			ret = nf_conntrack_helper_register(&sip[i][j]);
+			if (ret) {
+				printk("nf_ct_sip: failed to register helper "
+				       "for pf: %u port: %u\n",
+				       sip[i][j].tuple.src.l3num, ports[i]);
+				nf_conntrack_sip_fini();
+				return ret;
+			}
+		}
+        
+        /* register the SIP ports with ingress QoS classifier */
+        iqos_add_L4port( IPPROTO_UDP, ports[i], IQOS_ENT_STAT, IQOS_PRIO_HIGH );
+	}
+	pr_debug("nf_conntrack_sip registered\n");
+	return 0;
+}
+
+module_init(nf_conntrack_sip_init);
+module_exit(nf_conntrack_sip_fini);
+#else /* CONFIG_BCM_KF_NETFILTER */
 static int sip_direct_signalling __read_mostly = 1;
 module_param(sip_direct_signalling, int, 0600);
 MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
@@ -1608,3 +2572,5 @@ static int __init nf_conntrack_sip_init(void)
 
 module_init(nf_conntrack_sip_init);
 module_exit(nf_conntrack_sip_fini);
+
+#endif
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab9bc28efed6b3d7116ed87887bb9fd33b9..a343b6a58245d56d8331519d96a14f9cf847327e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -32,6 +32,13 @@
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <linux/rculist_nulls.h>
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+#include <linux/devinfo.h>
+#include <linux/dpistats.h>
+#include <linux/urlinfo.h>
+#include <linux/dpi_ctk.h>
+#endif
+
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
@@ -168,6 +175,47 @@ ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+static void ct_blog_query(struct nf_conn *ct, BlogCtTime_t *ct_time_p)
+{
+	blog_lock();
+	if (ct->blog_key[BLOG_PARAM1_DIR_ORIG] != BLOG_KEY_NONE || 
+		ct->blog_key[BLOG_PARAM1_DIR_REPLY] != BLOG_KEY_NONE) {
+		blog_query(QUERY_FLOWTRACK, (void*)ct, 
+	    	ct->blog_key[BLOG_PARAM1_DIR_ORIG],
+			ct->blog_key[BLOG_PARAM1_DIR_REPLY], (uint32_t) ct_time_p);
+	}
+	blog_unlock();
+}
+
+static inline long ct_blog_calc_timeout(struct nf_conn *ct, 
+		BlogCtTime_t *ct_time_p)
+{
+	long ct_time;
+
+	blog_lock();
+	if (ct->blog_key[BLOG_PARAM1_DIR_ORIG] != BLOG_KEY_NONE || 
+		ct->blog_key[BLOG_PARAM1_DIR_REPLY] != BLOG_KEY_NONE) {
+		unsigned long partial_intv;             /* to provide more accuracy */
+		unsigned long intv_jiffies = ct_time_p->intv * HZ;
+
+        if (jiffies > ct->prev_timeout.expires)
+			partial_intv = (jiffies - ct->prev_timeout.expires) % intv_jiffies; 
+        else
+			partial_intv = (ULONG_MAX - ct->prev_timeout.expires 
+												+ jiffies) % intv_jiffies; 
+
+		ct_time = (long)(ct->timeout.expires - ct->prev_timeout.expires 
+								- ct_time_p->idle_jiffies - partial_intv);
+	}
+	else
+		ct_time = (long)(ct->timeout.expires - jiffies);
+
+	blog_unlock();
+	return ct_time;
+}
+#endif
+
 /* return 0 on success, 1 in case of error */
 static int ct_seq_show(struct seq_file *s, void *v)
 {
@@ -176,6 +224,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	const struct nf_conntrack_l3proto *l3proto;
 	const struct nf_conntrack_l4proto *l4proto;
 	int ret = 0;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+    BlogCtTime_t ct_time;
+#endif
 
 	NF_CT_ASSERT(ct);
 	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
@@ -191,11 +242,21 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	NF_CT_ASSERT(l4proto);
 
 	ret = -ENOSPC;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BCM_KF_NETFILTER) && defined(CONFIG_BLOG)
+    ct_blog_query(ct, &ct_time);
+	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
+		       l3proto->name, nf_ct_l3num(ct),
+		       l4proto->name, nf_ct_protonum(ct),
+		       timer_pending(&ct->timeout)
+			   ? ct_blog_calc_timeout(ct, &ct_time)/HZ : 0) != 0)
+#else
 	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
 		       l3proto->name, nf_ct_l3num(ct),
 		       l4proto->name, nf_ct_protonum(ct),
 		       timer_pending(&ct->timeout)
 		       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
+#endif
 		goto release;
 
 	if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
@@ -239,6 +300,13 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	if (ct_show_delta_time(s, ct))
 		goto release;
 
+#if defined(CONFIG_BCM_KF_XT_MATCH_LAYER7) && \
+	(defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE))
+	if(ct->layer7.app_proto &&
+		seq_printf(s, "l7proto=%s ", ct->layer7.app_proto))
+		return -ENOSPC;
+#endif
+
 	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
 		goto release;
 
@@ -269,6 +337,333 @@ static const struct file_operations ct_file_ops = {
 	.release = seq_release_net,
 };
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+static void *ct_dpi_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU)
+{
+	struct ct_iter_state *st = seq->private;
+
+	rcu_read_lock();
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	st->time_now = ktime_to_ns(ktime_get_real());
+	return ct_get_idx(seq, *pos);
+}
+
+static void *ct_dpi_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+    if (v == SEQ_START_TOKEN)
+    {
+        return ct_get_idx(s, *pos);
+    }
+
+	(*pos)++;
+	return ct_get_next(s, v);
+}
+
+static void ct_dpi_seq_stop(struct seq_file *s, void *v)
+	__releases(RCU)
+{
+	rcu_read_unlock();
+}
+
+/* return 0 on success, 1 in case of error */
+static int ct_dpi_seq_show(struct seq_file *s, void *v)
+{
+	struct nf_conntrack_tuple_hash *hash;
+	struct nf_conn *ct;
+	const struct nf_conntrack_l3proto *l3proto;
+	const struct nf_conntrack_l4proto *l4proto;
+	int ret = 0;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(s, "AppID  Mac               Vendor OS Class Type Dev"
+						" UpPkt UpByte UpTS DnPkt DnByte DnTS Status"
+						" UpTuple DnTuple URL\n");
+		return 0;
+	}
+
+	hash = v;
+	ct = nf_ct_tuplehash_to_ctrack(hash);
+
+	NF_CT_ASSERT(ct);
+	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		return 0;
+
+	/* we only want to print DIR_ORIGINAL */
+	if (NF_CT_DIRECTION(hash))
+		goto release;
+
+	ret = -ENOSPC;
+
+	if (ct->dpi.app_id == 0)
+	{
+		ret = 0;
+		goto release;
+	}
+
+	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
+	NF_CT_ASSERT(l3proto);
+	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+	NF_CT_ASSERT(l4proto);
+
+	if (seq_printf(s, "%08x ", ct->dpi.app_id))
+		goto release;
+
+	if (ct->dpi.dev_key != DEVINFO_IX_INVALID)
+	{
+		uint8_t mac[ETH_ALEN];
+		DevInfoEntry_t entry;
+
+		devinfo_getmac(ct->dpi.dev_key, mac);
+		devinfo_get(ct->dpi.dev_key, &entry);
+
+		if (seq_printf(s, "%02x:%02x:%02x:%02x:%02x:%02x %u %u %u %u %u ",
+					mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+					entry.vendor_id, entry.os_id, entry.class_id,entry.type_id,
+					entry.dev_id))
+			goto release;
+	}
+	else
+	{
+		if (seq_printf(s, "NoMac "))
+			goto release;
+	}
+
+	if (!IS_CTK_INIT_FROM_WAN(ct))
+	{
+		if (seq_print_acct_dpi(s, ct, IP_CT_DIR_ORIGINAL))
+			goto release;
+
+		if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+			if (seq_printf(s, "[UNREPLIED] "))
+				goto release;
+
+		if (seq_print_acct_dpi(s, ct, IP_CT_DIR_REPLY))
+			goto release;
+	}
+	else
+	{
+		if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+			if (seq_printf(s, "[UNREPLIED] "))
+				goto release;
+
+		if (seq_print_acct_dpi(s, ct, IP_CT_DIR_REPLY))
+			goto release;
+
+		if (seq_print_acct_dpi(s, ct, IP_CT_DIR_ORIGINAL))
+			goto release;
+	}
+
+	if (seq_printf(s, "%x ", ct->dpi.flags))
+		goto release;
+
+	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+			l3proto, l4proto))
+		goto release;
+
+	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+		if (seq_printf(s, "[UNREPLIED] "))
+			goto release;
+
+	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+			l3proto, l4proto))
+		goto release;
+
+	if (ct->dpi.url_id != URLINFO_IX_INVALID)
+	{
+		UrlInfoEntry_t entry;
+
+		urlinfo_get(ct->dpi.url_id, &entry);
+
+		if (seq_printf(s, "%s ", entry.host))
+			goto release;
+	}
+
+	if (seq_printf(s, "\n"))
+		goto release;
+
+	ret = 0;
+release:
+	nf_ct_put(ct);
+	return ret;
+}
+
+static const struct seq_operations ct_dpi_seq_ops = {
+	.start = ct_dpi_seq_start,
+	.next  = ct_dpi_seq_next,
+	.stop  = ct_dpi_seq_stop,
+	.show  = ct_dpi_seq_show
+};
+
+static int ct_dpi_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &ct_dpi_seq_ops,
+			sizeof(struct ct_iter_state));
+}
+
+static const struct file_operations ct_dpi_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_dpi_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_net,
+};
+
+static void *dpi_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU)
+{
+	struct ct_iter_state *st = seq->private;
+
+	rcu_read_lock();
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	st->time_now = ktime_to_ns(ktime_get_real());
+	return ct_get_idx(seq, *pos);
+}
+
+static void *dpi_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+    if (v == SEQ_START_TOKEN)
+    {
+        return ct_get_idx(s, *pos);
+    }
+
+	(*pos)++;
+	return ct_get_next(s, v);
+}
+
+static void dpi_seq_stop(struct seq_file *s, void *v)
+	__releases(RCU)
+{
+	dpistats_show(s);
+	rcu_read_unlock();
+}
+
+/* return 0 on success, 1 in case of error */
+static int dpi_seq_show(struct seq_file *s, void *v)
+{
+	struct nf_conntrack_tuple_hash *hash;
+	struct nf_conn *ct;
+	int ret = 0;
+	DpiStatsEntry_t stats;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(s, "AppID  Mac               Vendor OS Class Type Dev"
+						" UpPkt UpByte DnPkt DnByte\n");
+		dpistats_info(0, NULL); //inform DpiStats module to reset
+		return ret;
+	}
+
+	hash = v;
+	ct = nf_ct_tuplehash_to_ctrack(hash);
+
+	NF_CT_ASSERT(ct);
+	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		return 0;
+
+	/* we only want to print DIR_ORIGINAL */
+	if (NF_CT_DIRECTION(hash))
+		goto release;
+
+#if 0
+    if (ct->stats_idx == DPISTATS_IX_INVALID)
+    {
+        if (ct->dpi.app_id == 0) goto release;
+
+        ct->stats_idx = dpistats_lookup(&ct->dpi);
+
+        if (ct->stats_idx == DPISTATS_IX_INVALID)
+        {
+            printk("fail to alloc dpistats_id?\n");
+            goto release;
+        }
+    }
+#endif
+    if (ct->dpi.app_id == 0) goto release;
+
+    ct->stats_idx = dpistats_lookup(&ct->dpi);
+    if (ct->stats_idx == DPISTATS_IX_INVALID)
+    {
+        printk("fail to alloc dpistats_id?\n");
+        goto release;
+    }
+
+	stats.result.app_id = ct->dpi.app_id;
+	stats.result.dev_key = ct->dpi.dev_key;
+	stats.result.flags = ct->dpi.flags;
+
+	/* origin direction is upstream */
+	if (!IS_CTK_INIT_FROM_WAN(ct))
+	{
+		if (conntrack_get_stats(ct, IP_CT_DIR_ORIGINAL, &stats.upstream))
+        {
+            printk("1conntrack_get_stats(upstream) fails");
+			goto release;
+        }
+
+		if ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+        {
+			if (conntrack_get_stats(ct, IP_CT_DIR_REPLY, &stats.dnstream))
+            {
+                printk("1conntrack_get_stats(dnstream) fails");
+				goto release;
+            }
+        }
+        else
+	        memset(&stats.dnstream, 0 , sizeof(CtkStats_t));
+	}
+	else /* origin direction is dnstream */
+	{
+		if (conntrack_get_stats(ct, IP_CT_DIR_ORIGINAL, &stats.dnstream))
+        {
+            printk("2conntrack_get_stats(dnstream) fails");
+			goto release;
+        }
+
+		if ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+        {
+			if (conntrack_get_stats(ct, IP_CT_DIR_REPLY, &stats.upstream))
+            {
+                printk("2conntrack_get_stats(upstream) fails");
+				goto release;
+            }
+        }
+        else
+        	memset(&stats.upstream, 0 , sizeof(CtkStats_t));
+	}
+
+	dpistats_info(ct->stats_idx, &stats);
+
+release:
+	nf_ct_put(ct);
+	return ret;
+}
+
+static const struct seq_operations dpi_seq_ops = {
+	.start = dpi_seq_start,
+	.next  = dpi_seq_next,
+	.stop  = dpi_seq_stop,
+	.show  = dpi_seq_show
+};
+
+static int dpi_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &dpi_seq_ops,
+			sizeof(struct ct_iter_state));
+}
+
+static const struct file_operations dpi_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = dpi_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_net,
+};
+#endif
+
 static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct net *net = seq_file_net(seq);
@@ -374,8 +769,23 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
 			  &ct_cpu_seq_fops);
 	if (!pde)
 		goto out_stat_nf_conntrack;
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	pde = proc_net_fops_create(net, "conntrack_dpi", 0440, &ct_dpi_file_ops);
+	if (!pde)
+		goto out_conntrack_dpi;
+	pde = proc_net_fops_create(net, "dpi_stat", 0440, &dpi_file_ops);
+	if (!pde)
+		goto out_dpi_stat;
+#endif
+
 	return 0;
 
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+out_dpi_stat:
+	proc_net_remove(net, "conntrack_dpi");
+out_conntrack_dpi:
+	remove_proc_entry("nf_conntrack", net->proc_net_stat);
+#endif
 out_stat_nf_conntrack:
 	proc_net_remove(net, "nf_conntrack");
 out_nf_conntrack:
@@ -384,6 +794,10 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
 
 static void nf_conntrack_standalone_fini_proc(struct net *net)
 {
+#if defined(CONFIG_BCM_KF_DPI) && defined(CONFIG_BRCM_DPI)
+	proc_net_remove(net, "conntrack_dpi");
+	proc_net_remove(net, "dpi_stat");
+#endif
 	remove_proc_entry("nf_conntrack", net->proc_net_stat);
 	proc_net_remove(net, "nf_conntrack");
 }
diff --git a/net/netfilter/nf_dyndscp.c b/net/netfilter/nf_dyndscp.c
new file mode 100644
index 0000000000000000000000000000000000000000..3d190f80a1a3e2fcf2681444e5b9f36875ec41d6
--- /dev/null
+++ b/net/netfilter/nf_dyndscp.c
@@ -0,0 +1,773 @@
+/*
+<:copyright-BRCM:2012:GPL/GPL:standard
+
+   Copyright (c) 2012 Broadcom Corporation
+   All Rights Reserved
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as published by
+the Free Software Foundation (the "GPL").
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+
+A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by
+writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+:> 
+*/
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/dsfield.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#if defined(CONFIG_IPV6)
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#endif
+#include "skb_defines.h"
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+#if 0
+
+#define DEBUG_DSCP(args) printk args
+#define DEBUG_DSCP1(args) printk args
+
+#else
+
+#define DEBUG_DSCP(args)   
+#define DEBUG_DSCP1(args)  
+
+#endif
+
+#define DUMP_TUPLE_IPV4(tp)						\
+	 DEBUG_DSCP(("tuple %p: %u %pI4:%hu  %pI4:%hu \n", \
+				 (tp), (tp)->dst.protonum,				\
+				 &(tp)->src.u3.ip, ntohs((tp)->src.u.all),		\
+				 &(tp)->dst.u3.ip, ntohs((tp)->dst.u.all))) 
+
+
+#define DUMP_TUPLE_IPV6(tp)						\
+	 DEBUG_DSCP(("tuple %p: %u %pI6 %hu -> %pI6 %hu \n", \
+				 (tp), (tp)->dst.protonum,				\
+				 (tp)->src.u3.all, ntohs((tp)->src.u.all),		\
+				 (tp)->dst.u3.all, ntohs((tp)->dst.u.all)))
+
+#define DYNDSCP_DSCP_MASK 0xfc  /* 11111100 */
+#define DYNDSCP_DSCP_SHIFT   2
+#define DYNDSCP_DSCP_MAX  0x3f  /* 00111111 */
+
+#define DYNDSCP_INITIALIZING   0
+#define DYNDSCP_INHERITED 		1
+#define DYNDSCP_SKIP 				2
+
+#define DYNDSCP_LAN2WAN_DEFAULT_DSCP 0
+#define DYNDSCP_WAN2LAN_DEFAULT_DSCP 0
+
+#define DYNDSCP_PROC_TRANSTBL_FILENAME "nf_dyndscp_w2ldscp_transtbl"
+#define DSCP_MAPPINGTABLE_MAX_SIZE 64
+#define DYNDSCP_MAX_PROC_WRITE_BUFLEN 64
+
+static DEFINE_SPINLOCK(nf_dyndscp_lock);
+
+static char dyndscp_proc_buffer[DYNDSCP_MAX_PROC_WRITE_BUFLEN];
+static struct proc_dir_entry * dyndscp_proc_file = NULL;
+
+/* structure used to maintain dscp transmarking table entries*/
+
+struct dscpMapping {
+	 uint8_t orig;
+	 uint8_t new;
+};
+
+/* dscp transmarking table entries*/
+struct transMarkTable {
+	 uint16_t size;
+	 uint16_t used;
+	 struct dscpMapping *dscp;
+};
+
+static struct transMarkTable transMarkTbl;
+
+/* finds the dscp mapping and returns the new dscp value;
+ * returns DYNDSCP_WAN2LAN_DEFAULT_DSCP if there is no match */
+
+uint8_t getDscpfromTransTbl(uint8_t orig)
+{
+	int i;
+	spin_lock_bh(&nf_dyndscp_lock);
+	for(i=0; i < transMarkTbl.size; i++)
+	{
+		if(transMarkTbl.dscp[i].orig == orig)
+		{
+			spin_unlock_bh(&nf_dyndscp_lock);
+			return transMarkTbl.dscp[i].new;
+		}
+
+	}
+
+	spin_unlock_bh(&nf_dyndscp_lock);
+	return DYNDSCP_WAN2LAN_DEFAULT_DSCP;
+}
+
+/* Adds a new DSCP mapping (overwrites the existing mapping for
+ * the origDscp value if present).
+ * An entry is free if both orig and new are 0. */
+int  addDscpinTransTbl(uint8_t origDscp, uint8_t newDscp)
+{
+	 int i;
+
+	 spin_lock_bh(&nf_dyndscp_lock);
+	 /*replace entry */
+	 for(i=0; i < transMarkTbl.size; i++)
+	 {
+			if(transMarkTbl.dscp[i].orig == origDscp)
+			{
+
+				 if((transMarkTbl.dscp[i].orig == 0) && (transMarkTbl.dscp[i].new == 0 ) &&(newDscp != 0 )) 
+						transMarkTbl.used++;/* new entry special case, as initially entries are set to 0 */
+
+				 if((transMarkTbl.dscp[i].orig == 0) && (transMarkTbl.dscp[i].new != 0 ) &&(newDscp == 0 )) 
+						transMarkTbl.used--;/* remove entry special case, as initially entries are set to 0 */
+
+				 transMarkTbl.dscp[i].new = newDscp;
+
+
+				 spin_unlock_bh(&nf_dyndscp_lock);
+				 return 0; 
+			}
+	 }
+
+	 /*new entry */
+	 for(i=0; i < transMarkTbl.size; i++)
+	 {
+			if((transMarkTbl.dscp[i].orig == 0) && (transMarkTbl.dscp[i].new == 0 ))
+			{
+				 transMarkTbl.dscp[i].orig = origDscp;
+				 transMarkTbl.dscp[i].new = newDscp;
+				 transMarkTbl.used++;
+				 spin_unlock_bh(&nf_dyndscp_lock);
+				 return 0; 
+			}
+	 }
+
+	 spin_unlock_bh(&nf_dyndscp_lock);
+	 /*table full */
+	 printk(KERN_ERR "%s:Transmark Table is Full\n",__FUNCTION__);
+	 return -1; 
+}
+
+/* delete a DSCP mapping from trans table */
+int  delDscpinTransTbl(uint8_t origDscp)
+{
+	 int i;
+
+	 spin_lock_bh(&nf_dyndscp_lock);
+	 for(i=0; i < transMarkTbl.size; i++)
+	 {
+			if((transMarkTbl.dscp[i].orig == origDscp) )
+			{
+				 transMarkTbl.dscp[i].orig = 0;
+				 transMarkTbl.dscp[i].new = 0;
+				 transMarkTbl.used--;
+				 spin_unlock_bh(&nf_dyndscp_lock);
+				 return 0; 
+			}
+	 }
+
+	 printk(KERN_ERR "%s: Entry not found in Transmark Table\n",__FUNCTION__);
+	 spin_unlock_bh(&nf_dyndscp_lock);
+	 return -1; 
+}
+
+#if defined(CONFIG_BCM_KF_WANDEV)
+/* for setting an interface's IFF_WANDEV flag; for LAB testing purposes */
+int setWanIfFlag(char *name)
+{
+	 struct net_device *dev = NULL;         
+
+	 dev = dev_get_by_name(&init_net, name);  
+	 if(dev){
+			printk(KERN_INFO "setting %s IFF_WANDEV flag\n",name);
+			dev->priv_flags  |= IFF_WANDEV;     
+			return 0;
+	 } else {
+			printk(KERN_ERR "interface %s not found\n",name);
+			return -1;
+	 }
+}
+#endif
+
+/* Entry point into the dyndscp module at pre-routing;
+ * this function is the core engine of the module.
+ */
+static unsigned int nf_dyndscp_in(unsigned int hooknum,
+			struct sk_buff *skb,
+			const struct net_device *in,
+			const struct net_device *out,
+			int (*okfn)(struct sk_buff *))
+{
+	 struct nf_conn *ct;
+	 enum ip_conntrack_info ctinfo;
+	 u_int8_t pktDscp; 
+
+	 ct = nf_ct_get(skb, &ctinfo);
+
+	 DEBUG_DSCP1((" %s: seen packet \n",__FUNCTION__));
+
+	 if(!ct) {
+			DEBUG_DSCP1((KERN_INFO " %s: seen packet with out flow\n",__FUNCTION__));
+			return NF_ACCEPT;
+	 }
+
+	 if(ct->dyndscp.status == DYNDSCP_INHERITED) {
+			DEBUG_DSCP1((KERN_INFO "%s: changing tos in pkt to %x \n",__FUNCTION__,
+                ct->dyndscp.dscp[CTINFO2DIR(ctinfo)]));
+
+			if (!skb_make_writable(skb, sizeof(struct iphdr)))
+				 return NF_DROP;
+
+			ipv4_change_dsfield(ip_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+						ct->dyndscp.dscp[CTINFO2DIR(ctinfo)] << DYNDSCP_DSCP_SHIFT);
+
+	 } else if(ct->dyndscp.status == DYNDSCP_INITIALIZING) {
+
+			if (ct == &nf_conntrack_untracked) {
+
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+				 DEBUG_DSCP((KERN_INFO "skipping tos mangling for untracked flow\n"));
+				 return NF_ACCEPT;
+			}
+
+			/*for now we change DSCP only for TCP/UDP */
+			if(!((ip_hdr(skb)->protocol == IPPROTO_UDP) || (ip_hdr(skb)->protocol == IPPROTO_TCP))){
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+				 return NF_ACCEPT;
+			}
+
+         /*TODO: should we skip broadcast packets ?? */
+
+			pktDscp = ipv4_get_dsfield(ip_hdr(skb)) >> DYNDSCP_DSCP_SHIFT;
+
+			if(!SKBMARK_GET_IFFWAN_MARK(skb->mark)) {
+				 /* LAN -> WAN packet */
+
+				 DEBUG_DSCP1((" %s: initializing case lan->wan packet \n",__FUNCTION__));
+
+				 if(pktDscp != DYNDSCP_LAN2WAN_DEFAULT_DSCP) {
+
+						if (!skb_make_writable(skb, sizeof(struct iphdr)))
+							 return NF_DROP;
+
+						ipv4_change_dsfield(ip_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									DYNDSCP_LAN2WAN_DEFAULT_DSCP << DYNDSCP_DSCP_SHIFT);
+				 }
+
+			} else {
+				 /* WAN -> LAN packet */
+
+				 DEBUG_DSCP1(("%s: initializing case wan->lan packet \n",__FUNCTION__));
+				 if (!skb_make_writable(skb, sizeof(struct iphdr)))
+						return NF_DROP;
+
+				 /* inherit tos from packet */
+				 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+						/* connection initiated from WAN */
+						ct->dyndscp.dscp[IP_CT_DIR_REPLY] = pktDscp;
+						ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] = getDscpfromTransTbl(pktDscp);
+
+						ipv4_change_dsfield(ip_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] << DYNDSCP_DSCP_SHIFT);
+				 } else {
+						/* connection initiated from LAN or LOCAL */
+						ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] = pktDscp;
+						ct->dyndscp.dscp[IP_CT_DIR_REPLY] = getDscpfromTransTbl(pktDscp);
+
+						ipv4_change_dsfield(ip_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									ct->dyndscp.dscp[IP_CT_DIR_REPLY] << DYNDSCP_DSCP_SHIFT);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+						/* Notify associated flows in flow cache, so they will relearn with
+						 *  new tos values this is needed only for UDP, as TCP flows 
+						 *  are created only when packet are seen from both directions 	 
+						 */
+
+						if(ip_hdr(skb)->protocol == IPPROTO_UDP){
+							blog_lock();
+							blog_notify(DYNAMIC_DSCP_EVENT, (void*)ct, 0, 0);
+							blog_unlock();
+
+							 DEBUG_DSCP(("%s:blog_notify:DYNAMIC_DSCP_EVENT for\n",__FUNCTION__));
+							 DUMP_TUPLE_IPV4(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+						}
+#endif
+				 }
+
+				 /*update tos status in nf_conn */
+				 ct->dyndscp.status = DYNDSCP_INHERITED;
+
+				 DEBUG_DSCP((KERN_INFO "dynamic tos values(%X, %X) inherited forflow\n",
+									ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL],
+									ct->dyndscp.dscp[IP_CT_DIR_REPLY]));
+				 DUMP_TUPLE_IPV4(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+			}
+
+	 } else if(ct->dyndscp.status == DYNDSCP_SKIP){
+			/*handle untracked connections */
+
+	 } else {
+
+			printk(KERN_WARNING " %s :dyndscp unknown status(%d) for flow\n",
+						__FUNCTION__, ct->dyndscp.status);
+			DUMP_TUPLE_IPV4(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	 }
+
+	 return NF_ACCEPT;
+}
+
+/* IPv4 NF_INET_LOCAL_OUT hook: stamps locally generated packets with the
+ * DSCP value previously inherited from the WAN side (status
+ * DYNDSCP_INHERITED).  Returns NF_DROP only when the IP header cannot be
+ * made writable, otherwise NF_ACCEPT.
+ */
+static unsigned int nf_dyndscp_local(unsigned int hooknum,
+			struct sk_buff *skb,
+			const struct net_device *in,
+			const struct net_device *out,
+			int (*okfn)(struct sk_buff *))
+{
+	 struct nf_conn *ct;
+	 enum ip_conntrack_info ctinfo;
+
+	 /* root is playing with raw sockets: sanity-check the header length */
+	 if (skb->len < sizeof(struct iphdr)
+				 || ip_hdr(skb)->ihl * 4 < sizeof(struct iphdr)) {
+			if (net_ratelimit())
+				 DEBUG_DSCP((KERN_INFO "nf_dyndscp_local: happy cracking.\n"));
+			return NF_ACCEPT;
+	 }
+
+	 ct = nf_ct_get(skb, &ctinfo);
+
+	 if(!ct){
+			DEBUG_DSCP((KERN_INFO "%s: seen packet with out flow\n",__FUNCTION__));
+			return NF_ACCEPT;
+	 }
+
+	 if(ct->dyndscp.status == DYNDSCP_INHERITED) {
+
+			if (!skb_make_writable(skb, sizeof(struct iphdr)))
+				 return NF_DROP;
+
+			/* LOCAL -> WAN packet: rewrite the DSCP bits for this direction */
+			ipv4_change_dsfield(ip_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+						ct->dyndscp.dscp[CTINFO2DIR(ctinfo)] << DYNDSCP_DSCP_SHIFT);
+
+	 } else if(ct->dyndscp.status == DYNDSCP_INITIALIZING) {
+
+			if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+				 /* a reply was already seen while still initializing: this
+				  * happens only with LAN <-> LOCAL traffic, so just skip */
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+			}
+
+	 }
+
+	 return NF_ACCEPT;
+}
+
+#if defined(CONFIG_IPV6)
+/* Entry point into the dyndscp module at pre-routing for IPv6 traffic;
+ * this function is the core engine of this module (IPv6 twin of nf_dyndscp_in)
+ * */
+static unsigned int nf_dyndscp_in6(unsigned int hooknum,
+			struct sk_buff *skb,
+			const struct net_device *in,
+			const struct net_device *out,
+			int (*okfn)(struct sk_buff *))
+{
+	 struct nf_conn *ct;
+	 enum ip_conntrack_info ctinfo;
+	 u_int8_t pktDscp; 
+
+	 ct = nf_ct_get(skb, &ctinfo);
+
+	 DEBUG_DSCP1((" %s: seen packet \n",__FUNCTION__));
+
+	 if(!ct) {
+			DEBUG_DSCP1((KERN_INFO " %s: seen packet with out flow\n",__FUNCTION__));
+			return NF_ACCEPT;
+	 }
+
+	 if(ct->dyndscp.status == DYNDSCP_INHERITED) {
+			DEBUG_DSCP1((KERN_INFO "%s: changing tos in pkt to %x \n",__FUNCTION__,
+                ct->dyndscp.dscp[CTINFO2DIR(ctinfo)]));
+
+			if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+				 return NF_DROP;
+
+			/* flow already learned: rewrite traffic-class DSCP bits for this
+			 * direction */
+			ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+						ct->dyndscp.dscp[CTINFO2DIR(ctinfo)] << DYNDSCP_DSCP_SHIFT);
+
+	 } else if(ct->dyndscp.status == DYNDSCP_INITIALIZING) {
+
+			if (ct == &nf_conntrack_untracked) {
+
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+				 DEBUG_DSCP((KERN_INFO "%s:skipping tos mangling for untracked flow\n",__FUNCTION__));
+				 return NF_ACCEPT;
+			}
+
+			/*for now we change DSCP only for TCP/UDP */
+			/* NOTE(review): this tests only the fixed header's nexthdr; TCP/UDP
+			 * behind IPv6 extension headers is classified as "other" and the
+			 * flow is skipped — confirm this is intended */
+			if(!((ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) || (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP))){
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+				 return NF_ACCEPT;
+			}
+
+         	/*TODO: should we skip broadcast packets ?? */
+
+
+			pktDscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> DYNDSCP_DSCP_SHIFT;
+
+			if(!SKBMARK_GET_IFFWAN_MARK(skb->mark)) {
+				 /* LAN -> WAN packet */
+
+				 DEBUG_DSCP1((" %s: initializing case lan->wan packet \n",__FUNCTION__));
+
+				 if(pktDscp != DYNDSCP_LAN2WAN_DEFAULT_DSCP) {
+
+						if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+							 return NF_DROP;
+
+						/* force the upstream default until the WAN value is known */
+						ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									DYNDSCP_LAN2WAN_DEFAULT_DSCP << DYNDSCP_DSCP_SHIFT);
+				 }
+
+			} else {
+				 /* WAN -> LAN packet */
+
+				 DEBUG_DSCP1(("%s: initializing case wan->lan packet \n",__FUNCTION__));
+				 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+						return NF_DROP;
+
+				 /* inherit tos from packet */
+				 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+						/* connection initiated from WAN */
+						ct->dyndscp.dscp[IP_CT_DIR_REPLY] = pktDscp;
+						ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] = getDscpfromTransTbl(pktDscp);
+
+						ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] << DYNDSCP_DSCP_SHIFT);
+				 } else {
+						/* connection initiated from LAN or LOCAL */
+						ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL] = pktDscp;
+						ct->dyndscp.dscp[IP_CT_DIR_REPLY] = getDscpfromTransTbl(pktDscp);
+
+						ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+									ct->dyndscp.dscp[IP_CT_DIR_REPLY] << DYNDSCP_DSCP_SHIFT);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+						/* Notify associated flows in flow cache, so they relearn
+						 * with the new tos values.  Needed only for UDP, as TCP
+						 * flows are created only once packets have been seen in
+						 * both directions.
+						 */
+
+						if(ipv6_hdr(skb)->nexthdr == IPPROTO_UDP){
+
+							blog_lock();
+							blog_notify(DYNAMIC_DSCP_EVENT, (void*)ct, 0, 0);
+							blog_unlock();
+
+							 DEBUG_DSCP(("%s:blog_notify:DYNAMIC_DSCP_EVENT for\n",__FUNCTION__));
+							 DUMP_TUPLE_IPV6(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+						}
+#endif
+				 }
+
+				 /*update tos status in nf_conn */
+				 ct->dyndscp.status = DYNDSCP_INHERITED;
+
+				 DEBUG_DSCP((KERN_INFO "dynamic tos values(%X, %X) inherited forflow\n",
+									ct->dyndscp.dscp[IP_CT_DIR_ORIGINAL],
+									ct->dyndscp.dscp[IP_CT_DIR_REPLY]));
+				 DUMP_TUPLE_IPV6(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+			}
+
+	 } else if(ct->dyndscp.status == DYNDSCP_SKIP){
+			/* flow was marked not-interesting earlier; nothing to do */
+
+	 } else {
+
+			printk(KERN_WARNING " %s :dyndscp unknown status(%d) for flow\n",
+						__FUNCTION__, ct->dyndscp.status);
+			DUMP_TUPLE_IPV6(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	 }
+
+	 return NF_ACCEPT;
+}
+
+/* IPv6 NF_INET_LOCAL_OUT hook: same contract as nf_dyndscp_local() but for
+ * locally generated IPv6 packets.  Returns NF_DROP only when the header
+ * cannot be made writable, otherwise NF_ACCEPT. */
+static unsigned int nf_dyndscp_local6(unsigned int hooknum,
+			struct sk_buff *skb,
+			const struct net_device *in,
+			const struct net_device *out,
+			int (*okfn)(struct sk_buff *))
+{
+	 struct nf_conn *ct;
+	 enum ip_conntrack_info ctinfo;
+
+	 /* root is playing with raw sockets: refuse truncated headers */
+	 if (skb->len < sizeof(struct ipv6hdr)){
+			if (net_ratelimit())
+				 DEBUG_DSCP((KERN_INFO "nf_dyndscp_local6: happy cracking.\n"));
+			return NF_ACCEPT;
+	 }
+
+	 ct = nf_ct_get(skb, &ctinfo);
+
+	 if(!ct){
+			DEBUG_DSCP((KERN_INFO "%s: seen packet with out flow\n",__FUNCTION__));
+			return NF_ACCEPT;
+	 }
+
+	 if(ct->dyndscp.status == DYNDSCP_INHERITED) {
+
+			if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+				 return NF_DROP;
+
+			/* LOCAL -> WAN packet: rewrite the DSCP bits for this direction */
+			ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~DYNDSCP_DSCP_MASK),
+						ct->dyndscp.dscp[CTINFO2DIR(ctinfo)] << DYNDSCP_DSCP_SHIFT);
+
+	 } else if(ct->dyndscp.status == DYNDSCP_INITIALIZING) {
+
+			if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+				 /* a reply was already seen while still initializing: this
+				  * happens only with LAN <-> LOCAL traffic, so just skip */
+				 ct->dyndscp.status = DYNDSCP_SKIP;
+			}
+
+	 }
+
+	 return NF_ACCEPT;
+}
+#endif
+
+/* Hook registration table.  Priorities are placed relative to the mangle
+ * table: PRE_ROUTING runs before mangle (user rules see the rewritten DSCP)
+ * and LOCAL_OUT runs after mangle (we rewrite the final locally generated
+ * header).  The IPv6 pair mirrors the IPv4 pair. */
+static struct nf_hook_ops nf_dyndscp_ops[] = {
+	 {
+			.hook		= nf_dyndscp_in,
+			.owner		= THIS_MODULE,
+			.pf			= PF_INET,
+			.hooknum	= NF_INET_PRE_ROUTING,
+			.priority	= NF_IP_PRI_MANGLE - 10,/* pre-routing: run before the mangle table */
+	 },
+	 {
+			.hook		= nf_dyndscp_local,
+			.owner		= THIS_MODULE,
+			.pf			= PF_INET,
+			.hooknum	= NF_INET_LOCAL_OUT,
+			.priority	= NF_IP_PRI_MANGLE + 10,/* local out: run after the mangle table */
+
+	 },
+#if defined(CONFIG_IPV6)
+	 {
+			.hook		= nf_dyndscp_in6,
+			.owner		= THIS_MODULE,
+			.pf			= PF_INET6,
+			.hooknum	= NF_INET_PRE_ROUTING,
+			.priority	= NF_IP6_PRI_MANGLE - 10,/* pre-routing: run before the mangle table */
+	 },
+	 {
+			.hook		= nf_dyndscp_local6,
+			.owner		= THIS_MODULE,
+			.pf			= PF_INET6,
+			.hooknum	= NF_INET_LOCAL_OUT,
+			.priority	= NF_IP6_PRI_MANGLE + 10,/* local out: run after the mangle table */
+
+	 },
+#endif
+};
+
+/* proc interface: seq_file read side and command-string write side for
+ * configuring/reading the WAN->LAN DSCP translation table from userspace
+ * */
+
+/* seq_file ->start(): position 0 yields SEQ_START_TOKEN so ->show() prints
+ * the header line; position n (n >= 1) yields translation-table entry n-1.
+ *
+ * Fix vs. the original: for *pos > 0 the old code returned the loff_t
+ * pointer itself ("return pos"), which ->show() then cast to
+ * struct dscpMapping * — on any read resumed past the first buffer this
+ * printed garbage.  Return the table entry instead, matching ->next().
+ */
+static void *dyndscp_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	 if(*pos > transMarkTbl.size)
+			return NULL;
+
+	 return *pos ? (void *)&transMarkTbl.dscp[(*pos) - 1] : SEQ_START_TOKEN;
+}
+
+/* seq_file ->next(): advance *pos and return the next translation-table
+ * entry, or NULL once past the end.  Position n maps to entry n-1 because
+ * position 0 is the SEQ_START_TOKEN header line. */
+static void *dyndscp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	 ++(*pos);
+	 if(*pos > transMarkTbl.size)
+			return NULL;
+
+	 return &transMarkTbl.dscp[(*pos)-1];
+}
+
+/* seq_file ->stop(): ->start() takes no lock and allocates nothing, so
+ * there is nothing to release here. */
+static void dyndscp_seq_stop(struct seq_file *seq, void *v)
+{
+	 return;
+}
+
+/* seq_file ->show(): print the table header for SEQ_START_TOKEN, otherwise
+ * one "origDscp -> newDscp" mapping.  Entries where either value is 0 are
+ * treated as unused slots and suppressed. */
+static int dyndscp_seq_show(struct seq_file *seq, void *v)
+{
+	 if (v == SEQ_START_TOKEN){
+			seq_printf(seq,"WANDSCP\t-->\tLANDSCP Max num entries:%d,"
+						"Current num Entries:%d\n",
+						transMarkTbl.size, transMarkTbl.used);
+	 } else {
+			struct dscpMapping *tos = (struct dscpMapping *)v;
+			if((tos->orig !=0) && (tos->new !=0))/*show only used entries*/
+				 seq_printf(seq, "%02x\t   \t%02x\n",tos->orig,tos->new);
+	 }
+	 return 0;
+}
+
+/* seq_file iterator for the /proc read side of the translation table */
+static struct seq_operations dyndscp_seq_ops = {
+	 .start   =  dyndscp_seq_start,
+	 .next =  dyndscp_seq_next,
+	 .stop =  dyndscp_seq_stop,
+	 .show =  dyndscp_seq_show,
+};
+
+/* proc ->open(): hand the read side over to the seq_file iterator above */
+int nf_dyndscp_proc_open(struct inode *inode, struct file *file)
+{
+	 return seq_open(file, &dyndscp_seq_ops);
+}
+
+
+/* proc ->write(): parses the commands
+ *   "add <origDscp> <newDscp>"  - add a WAN->LAN DSCP translation
+ *   "delete <origDscp>"         - remove a translation
+ *   "setwanif <ifname>"         - (CONFIG_BCM_KF_WANDEV) mark the WAN i/f
+ * Returns len on success; -EFAULT on copy/parse/apply failure (error codes
+ * kept from the original for userspace compatibility).
+ *
+ * Fixes vs. the original: the user data is NUL-terminated before sscanf()
+ * parses it (it was parsed unterminated); the length check reserves room
+ * for that terminator; sscanf() results are compared against the expected
+ * conversion counts (EOF is negative and therefore "true", and a partial
+ * "add" match previously used an uninitialized newDscp); the %s conversion
+ * is width-limited to sizeof(wanIfname)-1; size_t is printed with %zu.
+ */
+static ssize_t nf_dyndscp_proc_write(struct file *file, const char *buffer,
+			size_t len, loff_t *offset)
+{
+	 uint8_t origDscp, newDscp;
+#if defined(CONFIG_BCM_KF_WANDEV)
+	 char wanIfname[32];
+#endif
+
+	 /* '>=' (not '>') so one byte remains for the terminating NUL */
+	 if(len >= DYNDSCP_MAX_PROC_WRITE_BUFLEN)
+	 {
+			printk(KERN_ALERT "%s: User datalen >= max kernel buffer len=%zu\n",
+						__FUNCTION__, len);
+			return -EFAULT;
+	 }
+
+	 if ( copy_from_user(dyndscp_proc_buffer, buffer, len) )
+	 {
+			printk(KERN_ALERT "%s copy_from_user failure.\n", __FUNCTION__ );
+			return -EFAULT;
+	 }
+	 dyndscp_proc_buffer[len] = '\0';	/* sscanf() requires a C string */
+
+	 DEBUG_DSCP((KERN_INFO "Applying %zu bytes configuration\n", len));
+
+	 if(sscanf(dyndscp_proc_buffer,"add %hhi %hhi",&origDscp, &newDscp) == 2) {
+			if(addDscpinTransTbl(origDscp,newDscp) < 0)
+				 return -EFAULT;
+	 } else if(sscanf(dyndscp_proc_buffer,"delete %hhi",&origDscp) == 1) {
+			if(delDscpinTransTbl(origDscp) < 0)
+				 return -EFAULT;
+	 }
+#if defined(CONFIG_BCM_KF_WANDEV)
+	 else if(sscanf(dyndscp_proc_buffer,"setwanif %31s", wanIfname) == 1) {
+			if(setWanIfFlag(wanIfname) < 0)
+				 return -EFAULT;
+	 }
+#endif
+	 else {
+			printk(KERN_ALERT " unknown command/syntax in %s .\n", __FUNCTION__ );
+			printk(KERN_ALERT "use 'add' or 'delete' commands Ex: \n");
+			printk(KERN_ALERT "add origDscp newDscp >/proc/.../.. \n");
+			printk(KERN_ALERT "delete origDscp >/proc/.../.. \n");
+			return -EFAULT;
+	 }
+
+	 return len;
+}
+
+/* proc entry file operations: reads via seq_file, writes via the command
+ * parser above */
+static struct file_operations dyndscp_proc_fops = {
+	 .open    = nf_dyndscp_proc_open,
+	 .read    = seq_read,
+	 .write   = nf_dyndscp_proc_write,
+	 .llseek  = seq_lseek,
+	 .release = seq_release,
+};
+
+/* Create /proc/net/netfilter/<DYNDSCP_PROC_TRANSTBL_FILENAME> and attach
+ * the file operations.  Returns 0 on success, -ENOMEM if the entry cannot
+ * be created.  Uses the legacy create_proc_entry() API; note the ->mode
+ * assignment below overrides the 0644 passed at creation. */
+int nf_dyndscp_proc_init(void)
+{
+	 dyndscp_proc_file = create_proc_entry(DYNDSCP_PROC_TRANSTBL_FILENAME, 0644, proc_net_netfilter);
+	 if ( dyndscp_proc_file == (struct proc_dir_entry *)NULL )
+	 {
+			printk(KERN_ALERT "Error: Could not initialize /proc/net/netfilter/%s\n",
+						DYNDSCP_PROC_TRANSTBL_FILENAME);
+			return -ENOMEM;
+	 }
+
+	 dyndscp_proc_file->proc_fops = &dyndscp_proc_fops;
+	 dyndscp_proc_file->mode = S_IFREG | S_IRUGO | S_IWUSR;
+	 dyndscp_proc_file->uid = dyndscp_proc_file->gid = 0;	/* owned by root */
+	 dyndscp_proc_file->size = 80;
+
+	 printk(KERN_INFO "/proc/net/netfilter/%s created\n", DYNDSCP_PROC_TRANSTBL_FILENAME);
+
+	 return 0; /* success */
+}
+
+/* Remove the /proc configuration entry created by nf_dyndscp_proc_init() */
+void nf_dyndscp_proc_fini(void)
+{
+	 remove_proc_entry(DYNDSCP_PROC_TRANSTBL_FILENAME, proc_net_netfilter);
+	 printk(KERN_INFO "/proc/net/netfilter/%s removed\n", DYNDSCP_PROC_TRANSTBL_FILENAME);
+}
+
+/* Module init: allocate and zero the DSCP translation table, register the
+ * netfilter hooks and (optionally) the /proc configuration entry.
+ *
+ * Fixes vs. the original: the kmalloc() result is checked before use (it
+ * was memset/dereferenced unconditionally), and the table is freed on every
+ * failure path (it used to leak when hook registration failed).
+ */
+static int __init nf_dyndscp_init(void)
+{
+	 int ret = 0;
+
+	 need_conntrack();
+
+	 transMarkTbl.size = DSCP_MAPPINGTABLE_MAX_SIZE;
+	 transMarkTbl.dscp = kmalloc((transMarkTbl.size * sizeof(struct dscpMapping)),
+																		 GFP_KERNEL);
+	 if (transMarkTbl.dscp == NULL)
+			return -ENOMEM;
+	 memset(transMarkTbl.dscp, 0, (transMarkTbl.size * sizeof(struct dscpMapping)));
+
+	 ret = nf_register_hooks(nf_dyndscp_ops,
+				 ARRAY_SIZE(nf_dyndscp_ops));
+	 if (ret < 0) {
+			printk("nf_dyndscp: can't register hooks.\n");
+			goto cleanup_tbl;
+	 }
+#if defined(CONFIG_PROC_FS)
+	 ret = nf_dyndscp_proc_init();
+	 if (ret < 0)
+			goto cleanup_hooks;
+#endif
+
+	 return ret;
+
+#if defined(CONFIG_PROC_FS)
+cleanup_hooks:
+	 nf_unregister_hooks(nf_dyndscp_ops, ARRAY_SIZE(nf_dyndscp_ops));
+#endif
+cleanup_tbl:
+	 kfree(transMarkTbl.dscp);	/* was leaked on error in the original */
+	 transMarkTbl.dscp = NULL;
+	 return ret;
+}
+
+/* Module exit: tear down in reverse order of init and free the translation
+ * table (the original never freed it, leaking it on every unload). */
+static void __exit nf_dyndscp_fini(void)
+{
+#if defined(CONFIG_PROC_FS)
+	 nf_dyndscp_proc_fini();
+#endif
+	 nf_unregister_hooks(nf_dyndscp_ops, ARRAY_SIZE(nf_dyndscp_ops));
+	 kfree(transMarkTbl.dscp);
+	 transMarkTbl.dscp = NULL;
+}
+
+MODULE_AUTHOR("broadcom.com");
+MODULE_DESCRIPTION("DSCP Inheritance from WAN");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf_dyndscp-" __stringify(AF_INET));
+MODULE_ALIAS("nf_dyndscp");
+
+module_init(nf_dyndscp_init);
+module_exit(nf_dyndscp_fini);
diff --git a/net/netfilter/regexp/regexp.c b/net/netfilter/regexp/regexp.c
new file mode 100644
index 0000000000000000000000000000000000000000..06c2e5ce1c02978dd10a59a64c27587b3dd58d61
--- /dev/null
+++ b/net/netfilter/regexp/regexp.c
@@ -0,0 +1,1199 @@
+/*
+ * regcomp and regexec -- regsub and regerror are elsewhere
+ * @(#)regexp.c	1.3 of 18 April 87
+ *
+ *	Copyright (c) 1986 by University of Toronto.
+ *	Written by Henry Spencer.  Not derived from licensed software.
+ *
+ *	Permission is granted to anyone to use this software for any
+ *	purpose on any computer system, and to redistribute it freely,
+ *	subject to the following restrictions:
+ *
+ *	1. The author is not responsible for the consequences of use of
+ *		this software, no matter how awful, even if they arise
+ *		from defects in it.
+ *
+ *	2. The origin of this software must not be misrepresented, either
+ *		by explicit claim or by omission.
+ *
+ *	3. Altered versions must be plainly marked as such, and must not
+ *		be misrepresented as being the original software.
+ *
+ * Beware that some of this code is subtly aware of the way operator
+ * precedence is structured in regular expressions.  Serious changes in
+ * regular-expression syntax might require a total rethink.
+ *
+ * This code was modified by Ethan Sommer to work within the kernel
+ * (it now uses kmalloc etc..)
+ *
+ * Modified slightly by Matthew Strait to use more modern C.
+ */
+
+#include "regexp.h"
+#include "regmagic.h"
+
+/* added by ethan and matt.  Lets it work in both kernel and user space.
+(So iptables can use it, for instance.)  Yea, it goes both ways...
+   Use #ifdef rather than "#if __KERNEL__": the latter depends on the
+   macro's expansion value and triggers -Wundef in userspace builds where
+   __KERNEL__ is not defined at all. */
+#ifdef __KERNEL__
+  #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
+#else
+  #define printk(format,args...) printf(format,##args)
+#endif
+
+/* Minimal regerror(): just log the message with printk (or printf in
+ * userspace).  Unlike the classic implementation, this version returns to
+ * its caller, so callers must still handle the NULL result themselves.
+ * (The original "/ * NOTREACHED * /" comment here was false.) */
+void regerror(char * s)
+{
+        printk("<3>Regexp: %s\n", s);
+}
+
+/*
+ * The "internal use only" fields in regexp.h are present to pass info from
+ * compile to execute that permits the execute phase to run lots faster on
+ * simple cases.  They are:
+ *
+ * regstart	char that must begin a match; '\0' if none obvious
+ * reganch	is the match anchored (at beginning-of-line only)?
+ * regmust	string (pointer into program) that match must include, or NULL
+ * regmlen	length of regmust string
+ *
+ * Regstart and reganch permit very fast decisions on suitable starting points
+ * for a match, cutting down the work a lot.  Regmust permits fast rejection
+ * of lines that cannot possibly match.  The regmust tests are costly enough
+ * that regcomp() supplies a regmust only if the r.e. contains something
+ * potentially expensive (at present, the only such thing detected is * or +
+ * at the start of the r.e., which can involve a lot of backup).  Regmlen is
+ * supplied because the test in regexec() needs it and regcomp() is computing
+ * it anyway.
+ */
+
+/*
+ * Structure for regexp "program".  This is essentially a linear encoding
+ * of a nondeterministic finite-state machine (aka syntax charts or
+ * "railroad normal form" in parsing technology).  Each node is an opcode
+ * plus a "next" pointer, possibly plus an operand.  "Next" pointers of
+ * all nodes except BRANCH implement concatenation; a "next" pointer with
+ * a BRANCH on both ends of it is connecting two alternatives.  (Here we
+ * have one of the subtle syntax dependencies:  an individual BRANCH (as
+ * opposed to a collection of them) is never concatenated with anything
+ * because of operator precedence.)  The operand of some types of node is
+ * a literal string; for others, it is a node leading into a sub-FSM.  In
+ * particular, the operand of a BRANCH node is the first node of the branch.
+ * (NB this is *not* a tree structure:  the tail of the branch connects
+ * to the thing following the set of BRANCHes.)  The opcodes are:
+ */
+
+/* definition	number	opnd?	meaning */
+#define	END	0	/* no	End of program. */
+#define	BOL	1	/* no	Match "" at beginning of line. */
+#define	EOL	2	/* no	Match "" at end of line. */
+#define	ANY	3	/* no	Match any one character. */
+#define	ANYOF	4	/* str	Match any character in this string. */
+#define	ANYBUT	5	/* str	Match any character not in this string. */
+#define	BRANCH	6	/* node	Match this alternative, or the next... */
+#define	BACK	7	/* no	Match "", "next" ptr points backward. */
+#define	EXACTLY	8	/* str	Match this string. */
+#define	NOTHING	9	/* no	Match empty string. */
+#define	STAR	10	/* node	Match this (simple) thing 0 or more times. */
+#define	PLUS	11	/* node	Match this (simple) thing 1 or more times. */
+#define	OPEN	20	/* no	Mark this point in input as start of #n. */
+			/*	OPEN+1 is number 1, etc. */
+#define	CLOSE	30	/* no	Analogous to OPEN. */
+
+/*
+ * Opcode notes:
+ *
+ * BRANCH	The set of branches constituting a single choice are hooked
+ *		together with their "next" pointers, since precedence prevents
+ *		anything being concatenated to any individual branch.  The
+ *		"next" pointer of the last BRANCH in a choice points to the
+ *		thing following the whole choice.  This is also where the
+ *		final "next" pointer of each individual branch points; each
+ *		branch starts with the operand node of a BRANCH node.
+ *
+ * BACK		Normal "next" pointers all implicitly point forward; BACK
+ *		exists to make loop structures possible.
+ *
+ * STAR,PLUS	'?', and complex '*' and '+', are implemented as circular
+ *		BRANCH structures using BACK.  Simple cases (one character
+ *		per match) are implemented with STAR and PLUS for speed
+ *		and to minimize recursive plunges.
+ *
+ * OPEN,CLOSE	...are numbered at compile time.
+ */
+
+/*
+ * A node is one char of opcode followed by two chars of "next" pointer.
+ * "Next" pointers are stored as two 8-bit pieces, high order first.  The
+ * value is a positive offset from the opcode of the node containing it.
+ * An operand, if any, simply follows the node.  (Note that much of the
+ * code generation knows about this implicit relationship.)
+ *
+ * Using two bytes for the "next" pointer is vast overkill for most things,
+ * but allows patterns to get big without disasters.
+ */
+#define	OP(p)	(*(p))
+#define	NEXT(p)	(((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
+#define	OPERAND(p)	((p) + 3)
+
+/*
+ * See regmagic.h for one further detail of program structure.
+ */
+
+
+/*
+ * Utility definitions.
+ */
+#ifndef CHARBITS
+#define	UCHARAT(p)	((int)*(unsigned char *)(p))
+#else
+#define	UCHARAT(p)	((int)*(p)&CHARBITS)
+#endif
+
+#define	FAIL(m)	{ regerror(m); return(NULL); }
+#define	ISMULT(c)	((c) == '*' || (c) == '+' || (c) == '?')
+#define	META	"^$.[()|?+*\\"
+
+/*
+ * Flags to be passed up and down.
+ */
+#define	HASWIDTH	01	/* Known never to match null string. */
+#define	SIMPLE		02	/* Simple enough to be STAR/PLUS operand. */
+#define	SPSTART		04	/* Starts with * or +. */
+#define	WORST		0	/* Worst case. */
+
+/*
+ * Work variables for regcomp() and its helpers, bundled into a struct that
+ * is passed explicitly to every helper instead of living in file-scope
+ * globals.
+ */
+struct match_globals {
+char *reginput;		/* String-input pointer. */
+char *regbol;		/* Beginning of input, for ^ check. */
+char **regstartp;	/* Pointer to startp array. */
+char **regendp;		/* Ditto for endp. */
+char *regparse;		/* Input-scan pointer. */
+int regnpar;		/* () count. */
+char regdummy;		/* Sentinel target for the sizing pass. */
+char *regcode;		/* Code-emit pointer; &regdummy = don't. */
+long regsize;		/* Code size. */
+};
+
+/*
+ * Forward declarations for regcomp()'s friends.
+ */
+#ifndef STATIC
+#define	STATIC	static
+#endif
+STATIC char *reg(struct match_globals *g, int paren,int *flagp);
+STATIC char *regbranch(struct match_globals *g, int *flagp);
+STATIC char *regpiece(struct match_globals *g, int *flagp);
+STATIC char *regatom(struct match_globals *g, int *flagp);
+STATIC char *regnode(struct match_globals *g, char op);
+STATIC char *regnext(struct match_globals *g, char *p);
+STATIC void regc(struct match_globals *g, char b);
+STATIC void reginsert(struct match_globals *g, char op, char *opnd);
+STATIC void regtail(struct match_globals *g, char *p, char *val);
+STATIC void regoptail(struct match_globals *g, char *p, char *val);
+
+
+/* Local re-implementation of strcspn(3): returns the length of the initial
+ * segment of s1 that contains no character from s2.  Present so this file
+ * does not depend on an external strcspn being available. */
+__kernel_size_t my_strcspn(const char *s1,const char *s2)
+{
+        char *scan1;
+        char *scan2;
+        int count;
+
+        count = 0;
+        for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
+                for (scan2 = (char *)s2; *scan2 != '\0';)       /* ++ moved down. */
+                        if (*scan1 == *scan2++)
+                                return(count);
+                count++;
+        }
+        return(count);
+}
+
+/*
+ - regcomp - compile a regular expression into internal code
+ *
+ * We can't allocate space until we know how big the compiled form will be,
+ * but we can't compile it (and thus know how big it is) until we've got a
+ * place to put the code.  So we cheat:  we compile it twice, once with code
+ * generation turned off and size counting turned on, and once "for real".
+ * This also means that we don't allocate space until we are sure that the
+ * thing really will compile successfully, and we never have to move the
+ * code and thus invalidate pointers into it.  (Note that it has to be in
+ * one piece because free() must be able to free it all.)
+ *
+ * Beware that the optimization-preparation code in here knows about some
+ * of the structure of the compiled regexp.
+ */
+regexp *
+regcomp(char *exp,int *patternsize)
+{
+	register regexp *r;
+	register char *scan;
+	register char *longest;
+	register int len;
+	int flags;
+	struct match_globals g;
+	
+	/* commented out by ethan
+	   extern char *malloc();
+	*/
+
+	if (exp == NULL)
+		FAIL("NULL argument");
+
+	/* First pass: determine size, legality. */
+	g.regparse = exp;
+	g.regnpar = 1;
+	g.regsize = 0L;
+	g.regcode = &g.regdummy;	/* sizing mode: count bytes, emit nothing */
+	regc(&g, MAGIC);
+	if (reg(&g, 0, &flags) == NULL)
+		return(NULL);
+
+	/* Small enough for pointer-storage convention? */
+	if (g.regsize >= 32767L)		/* Probably could be 65535L. */
+		FAIL("regexp too big");
+
+	/* Allocate space. */
+	*patternsize=sizeof(regexp) + (unsigned)g.regsize;	/* report size to caller */
+	r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
+	if (r == NULL)
+		FAIL("out of space");
+
+	/* Second pass: emit code. */
+	g.regparse = exp;
+	g.regnpar = 1;
+	g.regcode = r->program;
+	regc(&g, MAGIC);
+	/* NOTE(review): if this second pass fails, 'r' is leaked — the caller
+	 * only sees NULL.  Kept as-is to match the historic code. */
+	if (reg(&g, 0, &flags) == NULL)
+		return(NULL);
+
+	/* Dig out information for optimizations. */
+	r->regstart = '\0';	/* Worst-case defaults. */
+	r->reganch = 0;
+	r->regmust = NULL;
+	r->regmlen = 0;
+	scan = r->program+1;			/* First BRANCH. */
+	if (OP(regnext(&g, scan)) == END) {		/* Only one top-level choice. */
+		scan = OPERAND(scan);
+
+		/* Starting-point info. */
+		if (OP(scan) == EXACTLY)
+			r->regstart = *OPERAND(scan);
+		else if (OP(scan) == BOL)
+			r->reganch++;
+
+		/*
+		 * If there's something expensive in the r.e., find the
+		 * longest literal string that must appear and make it the
+		 * regmust.  Resolve ties in favor of later strings, since
+		 * the regstart check works with the beginning of the r.e.
+		 * and avoiding duplication strengthens checking.  Not a
+		 * strong reason, but sufficient in the absence of others.
+		 */
+		if (flags&SPSTART) {
+			longest = NULL;
+			len = 0;
+			/* NOTE(review): strlen() (size_t) compared against int len;
+			 * harmless for sane pattern lengths */
+			for (; scan != NULL; scan = regnext(&g, scan))
+				if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
+					longest = OPERAND(scan);
+					len = strlen(OPERAND(scan));
+				}
+			r->regmust = longest;
+			r->regmlen = len;
+		}
+	}
+
+	return(r);
+}
+
+/*
+ - reg - regular expression, i.e. main body or parenthesized thing
+ *
+ * Caller must absorb opening parenthesis.
+ *
+ * Combining parenthesis handling with the base level of regular expression
+ * is a trifle forced, but the need to tie the tails of the branches to what
+ * follows makes it hard to avoid.
+ */
+/* paren: nonzero if parsing a parenthesized subexpression whose '(' has
+ * already been consumed by the caller; flagp receives HASWIDTH/SPSTART
+ * information about the parsed expression. */
+static char *
+reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
+{
+	register char *ret;
+	register char *br;
+	register char *ender;
+	register int parno = 0; /* 0 makes gcc happy */
+	int flags;
+
+	*flagp = HASWIDTH;	/* Tentatively. */
+
+	/* Make an OPEN node, if parenthesized. */
+	if (paren) {
+		if (g->regnpar >= NSUBEXP)
+			FAIL("too many ()");
+		parno = g->regnpar;
+		g->regnpar++;
+		ret = regnode(g, OPEN+parno);
+	} else
+		ret = NULL;
+
+	/* Pick up the branches, linking them together. */
+	br = regbranch(g, &flags);
+	if (br == NULL)
+		return(NULL);
+	if (ret != NULL)
+		regtail(g, ret, br);	/* OPEN -> first. */
+	else
+		ret = br;
+	if (!(flags&HASWIDTH))
+		*flagp &= ~HASWIDTH;
+	*flagp |= flags&SPSTART;
+	while (*g->regparse == '|') {
+		g->regparse++;
+		br = regbranch(g, &flags);
+		if (br == NULL)
+			return(NULL);
+		regtail(g, ret, br);	/* BRANCH -> BRANCH. */
+		if (!(flags&HASWIDTH))
+			*flagp &= ~HASWIDTH;
+		*flagp |= flags&SPSTART;
+	}
+
+	/* Make a closing node, and hook it on the end. */
+	ender = regnode(g, (paren) ? CLOSE+parno : END);	
+	regtail(g, ret, ender);
+
+	/* Hook the tails of the branches to the closing node. */
+	for (br = ret; br != NULL; br = regnext(g, br))
+		regoptail(g, br, ender);
+
+	/* Check for proper termination. */
+	if (paren && *g->regparse++ != ')') {
+		FAIL("unmatched ()");
+	} else if (!paren && *g->regparse != '\0') {
+		if (*g->regparse == ')') {
+			FAIL("unmatched ()");
+		} else
+			FAIL("junk on end");	/* "Can't happen". */
+		/* NOTREACHED */
+	}
+
+	return(ret);
+}
+
+/*
+ - regbranch - one alternative of an | operator
+ *
+ * Implements the concatenation operator.
+ */
+/* Parses pieces until '\0', '|' or ')' and chains them after a BRANCH node;
+ * g->regparse is advanced as input is consumed. */
+static char *
+regbranch(struct match_globals *g, int *flagp)
+{
+	register char *ret;
+	register char *chain;
+	register char *latest;
+	int flags;
+
+	*flagp = WORST;		/* Tentatively. */
+
+	ret = regnode(g, BRANCH);
+	chain = NULL;
+	while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
+		latest = regpiece(g, &flags);
+		if (latest == NULL)
+			return(NULL);
+		*flagp |= flags&HASWIDTH;
+		if (chain == NULL)	/* First piece. */
+			*flagp |= flags&SPSTART;
+		else
+			regtail(g, chain, latest);
+		chain = latest;
+	}
+	if (chain == NULL)	/* Loop ran zero times. */
+		(void) regnode(g, NOTHING);
+
+	return(ret);
+}
+
+/*
+ - regpiece - something followed by possible [*+?]
+ *
+ * Note that the branching code sequences used for ? and the general cases
+ * of * and + are somewhat optimized:  they use the same NOTHING node as
+ * both the endmarker for their branch list and the body of the last branch.
+ * It might seem that this node could be dispensed with entirely, but the
+ * endmarker role is not redundant.
+ */
+/* Parses one atom followed by an optional '*', '+' or '?' multiplier and
+ * emits the corresponding (possibly looping) node structure. */
+static char *
+regpiece(struct match_globals *g, int *flagp)
+{
+	register char *ret;
+	register char op;
+	register char *next;
+	int flags;
+
+	ret = regatom(g, &flags);
+	if (ret == NULL)
+		return(NULL);
+
+	op = *g->regparse;
+	if (!ISMULT(op)) {
+		*flagp = flags;
+		return(ret);
+	}
+
+	if (!(flags&HASWIDTH) && op != '?')
+		FAIL("*+ operand could be empty");
+	*flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
+
+	if (op == '*' && (flags&SIMPLE))
+		reginsert(g, STAR, ret);
+	else if (op == '*') {
+		/* Emit x* as (x&|), where & means "self". */
+		reginsert(g, BRANCH, ret);			/* Either x */
+		regoptail(g, ret, regnode(g, BACK));		/* and loop */
+		regoptail(g, ret, ret);			/* back */
+		regtail(g, ret, regnode(g, BRANCH));		/* or */
+		regtail(g, ret, regnode(g, NOTHING));		/* null. */
+	} else if (op == '+' && (flags&SIMPLE))
+		reginsert(g, PLUS, ret);
+	else if (op == '+') {
+		/* Emit x+ as x(&|), where & means "self". */
+		next = regnode(g, BRANCH);			/* Either */
+		regtail(g, ret, next);
+		regtail(g, regnode(g, BACK), ret);		/* loop back */
+		regtail(g, next, regnode(g, BRANCH));		/* or */
+		regtail(g, ret, regnode(g, NOTHING));		/* null. */
+	} else if (op == '?') {
+		/* Emit x? as (x|) */
+		reginsert(g, BRANCH, ret);			/* Either x */
+		regtail(g, ret, regnode(g, BRANCH));		/* or */
+		next = regnode(g, NOTHING);		/* null. */
+		regtail(g, ret, next);
+		regoptail(g, ret, next);
+	}
+	g->regparse++;
+	if (ISMULT(*g->regparse))
+		FAIL("nested *?+");
+
+	return(ret);
+}
+
+/*
+ - regatom - the lowest level
+ *
+ * Optimization:  gobbles an entire sequence of ordinary characters so that
+ * it can turn them into a single node, which is smaller to store and
+ * faster to run.  Backslashed characters are exceptions, each becoming a
+ * separate node; the code is simpler that way and it's not worth fixing.
+ */
+static char *
+regatom(struct match_globals *g, int *flagp)
+{
+	register char *ret;
+	int flags;
+
+	*flagp = WORST;		/* Tentatively. */
+
+	switch (*g->regparse++) {
+	case '^':
+		ret = regnode(g, BOL);
+		break;
+	case '$':
+		ret = regnode(g, EOL);
+		break;
+	case '.':
+		ret = regnode(g, ANY);
+		*flagp |= HASWIDTH|SIMPLE;
+		break;
+	case '[': {
+			register int class;
+			register int classend;
+
+			if (*g->regparse == '^') {	/* Complement of range. */
+				ret = regnode(g, ANYBUT);
+				g->regparse++;
+			} else
+				ret = regnode(g, ANYOF);
+			/* a leading ']' or '-' is taken literally */
+			if (*g->regparse == ']' || *g->regparse == '-')
+				regc(g, *g->regparse++);
+			while (*g->regparse != '\0' && *g->regparse != ']') {
+				if (*g->regparse == '-') {
+					g->regparse++;
+					if (*g->regparse == ']' || *g->regparse == '\0')
+						regc(g, '-');
+					else {
+						/* range start is the char before the '-',
+						 * range end is the char after it */
+						class = UCHARAT(g->regparse-2)+1;
+						classend = UCHARAT(g->regparse);
+						if (class > classend+1)
+							FAIL("invalid [] range");
+						for (; class <= classend; class++)
+							regc(g, class);
+						g->regparse++;
+					}
+				} else
+					regc(g, *g->regparse++);
+			}
+			regc(g, '\0');
+			if (*g->regparse != ']')
+				FAIL("unmatched []");
+			g->regparse++;
+			*flagp |= HASWIDTH|SIMPLE;
+		}
+		break;
+	case '(':
+		ret = reg(g, 1, &flags);
+		if (ret == NULL)
+			return(NULL);
+		*flagp |= flags&(HASWIDTH|SPSTART);
+		break;
+	case '\0':
+	case '|':
+	case ')':
+		FAIL("internal urp");	/* Supposed to be caught earlier. */
+		break;
+	case '?':
+	case '+':
+	case '*':
+		FAIL("?+* follows nothing");
+		break;
+	case '\\':
+		if (*g->regparse == '\0')
+			FAIL("trailing \\");
+		ret = regnode(g, EXACTLY);
+		regc(g, *g->regparse++);
+		regc(g, '\0');
+		*flagp |= HASWIDTH|SIMPLE;
+		break;
+	default: {
+			register int len;
+			register char ender;
+
+			g->regparse--;
+			len = my_strcspn((const char *)g->regparse, (const char *)META);
+			if (len <= 0)
+				FAIL("internal disaster");
+			ender = *(g->regparse+len);
+			if (len > 1 && ISMULT(ender))
+				len--;		/* Back off clear of ?+* operand. */
+			*flagp |= HASWIDTH;
+			if (len == 1)
+				*flagp |= SIMPLE;
+			ret = regnode(g, EXACTLY);
+			while (len > 0) {
+				regc(g, *g->regparse++);
+				len--;
+			}
+			regc(g, '\0');
+		}
+		break;
+	}
+
+	return(ret);
+}
+
+/*
+ - regnode - emit a node
+ */
+static char *			/* Location. */
+regnode(struct match_globals *g, char op)
+{
+	register char *ret;
+	register char *ptr;
+
+	ret = g->regcode;
+	if (ret == &g->regdummy) {
+		g->regsize += 3;
+		return(ret);
+	}
+
+	ptr = ret;
+	*ptr++ = op;
+	*ptr++ = '\0';		/* Null "next" pointer. */
+	*ptr++ = '\0';
+	g->regcode = ptr;
+
+	return(ret);
+}
+
+/*
+ - regc - emit (if appropriate) a byte of code
+ */
+static void
+regc(struct match_globals *g, char b)
+{
+	if (g->regcode != &g->regdummy)
+		*g->regcode++ = b;
+	else
+		g->regsize++;
+}
+
+/*
+ - reginsert - insert an operator in front of already-emitted operand
+ *
+ * Means relocating the operand.
+ */
+static void
+reginsert(struct match_globals *g, char op, char* opnd)
+{
+	register char *src;
+	register char *dst;
+	register char *place;
+
+	if (g->regcode == &g->regdummy) {
+		g->regsize += 3;
+		return;
+	}
+
+	src = g->regcode;
+	g->regcode += 3;
+	dst = g->regcode;
+	while (src > opnd)
+		*--dst = *--src;
+
+	place = opnd;		/* Op node, where operand used to be. */
+	*place++ = op;
+	*place++ = '\0';
+	*place++ = '\0';
+}
+
+/*
+ - regtail - set the next-pointer at the end of a node chain
+ */
+static void
+regtail(struct match_globals *g, char *p, char *val)
+{
+	register char *scan;
+	register char *temp;
+	register int offset;
+
+	if (p == &g->regdummy)
+		return;
+
+	/* Find last node. */
+	scan = p;
+	for (;;) {
+		temp = regnext(g, scan);
+		if (temp == NULL)
+			break;
+		scan = temp;
+	}
+
+	if (OP(scan) == BACK)
+		offset = scan - val;
+	else
+		offset = val - scan;
+	*(scan+1) = (offset>>8)&0377;
+	*(scan+2) = offset&0377;
+}
+
+/*
+ - regoptail - regtail on operand of first argument; nop if operandless
+ */
+static void
+regoptail(struct match_globals *g, char *p, char *val)
+{
+	/* "Operandless" and "op != BRANCH" are synonymous in practice. */
+	if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
+		return;
+	regtail(g, OPERAND(p), val);
+}
+
+/*
+ * regexec and friends
+ */
+
+
+/*
+ * Forwards.
+ */
+STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
+STATIC int regmatch(struct match_globals *g, char *prog);
+STATIC int regrepeat(struct match_globals *g, char *p);
+
+#ifdef DEBUG
+int regnarrate = 0;
+void regdump();
+STATIC char *regprop(char *op);
+#endif
+
+/*
+ - regexec - match a regexp against a string
+ *  return 1: success
+ *           0: failed
+ */
+int
+regexec(regexp *prog, char *string)
+{
+	register char *s;
+	struct match_globals g;
+
+	/* Be paranoid... */
+	if (prog == NULL || string == NULL) {
+		printk("<3>Regexp: NULL parameter\n");
+		return(0);
+	}
+
+	/* Check validity of program. */
+	if (UCHARAT(prog->program) != MAGIC) {
+		printk("<3>Regexp: corrupted program\n");
+		return(0);
+	}
+
+	/* If there is a "must appear" string, look for it. */
+	if (prog->regmust != NULL) {
+		s = string;
+		while ((s = strchr(s, prog->regmust[0])) != NULL) {
+			if (strncmp(s, prog->regmust, prog->regmlen) == 0)
+				break;	/* Found it. */
+			s++;
+		}
+		if (s == NULL)	/* Not present. */
+			return(0);
+	}
+
+	/* Mark beginning of line for ^ . */
+	g.regbol = string;
+
+	/* Simplest case:  anchored match need be tried only once. */
+	if (prog->reganch)
+		return(regtry(&g, prog, string));
+
+	/* Messy cases:  unanchored match. */
+	s = string;
+	if (prog->regstart != '\0')
+		/* We know what char it must start with. */
+		while ((s = strchr(s, prog->regstart)) != NULL) {
+			if (regtry(&g, prog, s))
+				return(1);
+			s++;
+		}
+	else
+		/* We don't -- general case. */
+		do {
+			if (regtry(&g, prog, s))
+				return(1);
+		} while (*s++ != '\0');
+
+	/* Failure. */
+	return(0);
+}
+
+/*
+ - regtry - try match at specific point
+ */
+static int			/* 0 failure, 1 success */
+regtry(struct match_globals *g, regexp *prog, char *string)
+{
+	register int i;
+	register char **sp;
+	register char **ep;
+
+	g->reginput = string;
+	g->regstartp = prog->startp;
+	g->regendp = prog->endp;
+
+	sp = prog->startp;
+	ep = prog->endp;
+	for (i = NSUBEXP; i > 0; i--) {
+		*sp++ = NULL;
+		*ep++ = NULL;
+	}
+	if (regmatch(g, prog->program + 1)) {
+		prog->startp[0] = string;
+		prog->endp[0] = g->reginput;
+		return(1);
+	} else
+		return(0);
+}
+
+/*
+ - regmatch - main matching routine
+ *
+ * Conceptually the strategy is simple:  check to see whether the current
+ * node matches, call self recursively to see whether the rest matches,
+ * and then act accordingly.  In practice we make some effort to avoid
+ * recursion, in particular by going through "ordinary" nodes (that don't
+ * need to know whether the rest of the match failed) by a loop instead of
+ * by recursion.
+ */
+static int			/* 0 failure, 1 success */
+regmatch(struct match_globals *g, char *prog)
+{
+	register char *scan = prog; /* Current node. */
+	char *next;		    /* Next node. */
+
+#ifdef DEBUG
+	if (scan != NULL && regnarrate)
+		fprintf(stderr, "%s(\n", regprop(scan));
+#endif
+	while (scan != NULL) {
+#ifdef DEBUG
+		if (regnarrate)
+			fprintf(stderr, "%s...\n", regprop(scan));
+#endif
+		next = regnext(g, scan);
+
+		switch (OP(scan)) {
+		case BOL:
+			if (g->reginput != g->regbol)
+				return(0);
+			break;
+		case EOL:
+			if (*g->reginput != '\0')
+				return(0);
+			break;
+		case ANY:
+			if (*g->reginput == '\0')
+				return(0);
+			g->reginput++;
+			break;
+		case EXACTLY: {
+				register int len;
+				register char *opnd;
+
+				opnd = OPERAND(scan);
+				/* Inline the first character, for speed. */
+				if (*opnd != *g->reginput)
+					return(0);
+				len = strlen(opnd);
+				if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
+					return(0);
+				g->reginput += len;
+			}
+			break;
+		case ANYOF:
+			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
+				return(0);
+			g->reginput++;
+			break;
+		case ANYBUT:
+			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
+				return(0);
+			g->reginput++;
+			break;
+		case NOTHING:
+		case BACK:
+			break;
+		case OPEN+1:
+		case OPEN+2:
+		case OPEN+3:
+		case OPEN+4:
+		case OPEN+5:
+		case OPEN+6:
+		case OPEN+7:
+		case OPEN+8:
+		case OPEN+9: {
+				register int no;
+				register char *save;
+
+				no = OP(scan) - OPEN;
+				save = g->reginput;
+
+				if (regmatch(g, next)) {
+					/*
+					 * Don't set startp if some later
+					 * invocation of the same parentheses
+					 * already has.
+					 */
+					if (g->regstartp[no] == NULL)
+						g->regstartp[no] = save;
+					return(1);
+				} else
+					return(0);
+			}
+			break;
+		case CLOSE+1:
+		case CLOSE+2:
+		case CLOSE+3:
+		case CLOSE+4:
+		case CLOSE+5:
+		case CLOSE+6:
+		case CLOSE+7:
+		case CLOSE+8:
+		case CLOSE+9:
+			{
+				register int no;
+				register char *save;
+
+				no = OP(scan) - CLOSE;
+				save = g->reginput;
+
+				if (regmatch(g, next)) {
+					/*
+					 * Don't set endp if some later
+					 * invocation of the same parentheses
+					 * already has.
+					 */
+					if (g->regendp[no] == NULL)
+						g->regendp[no] = save;
+					return(1);
+				} else
+					return(0);
+			}
+			break;
+		case BRANCH: {
+				register char *save;
+
+				if (OP(next) != BRANCH)		/* No choice. */
+					next = OPERAND(scan);	/* Avoid recursion. */
+				else {
+					do {
+						save = g->reginput;
+						if (regmatch(g, OPERAND(scan)))
+							return(1);
+						g->reginput = save;
+						scan = regnext(g, scan);
+					} while (scan != NULL && OP(scan) == BRANCH);
+					return(0);
+					/* NOTREACHED */
+				}
+			}
+			break;
+		case STAR:
+		case PLUS: {
+				register char nextch;
+				register int no;
+				register char *save;
+				register int min;
+
+				/*
+				 * Lookahead to avoid useless match attempts
+				 * when we know what character comes next.
+				 */
+				nextch = '\0';
+				if (OP(next) == EXACTLY)
+					nextch = *OPERAND(next);
+				min = (OP(scan) == STAR) ? 0 : 1;
+				save = g->reginput;
+				no = regrepeat(g, OPERAND(scan));
+				while (no >= min) {
+					/* If it could work, try it. */
+					if (nextch == '\0' || *g->reginput == nextch)
+						if (regmatch(g, next))
+							return(1);
+					/* Couldn't or didn't -- back up. */
+					no--;
+					g->reginput = save + no;
+				}
+				return(0);
+			}
+			break;
+		case END:
+			return(1);	/* Success! */
+			break;
+		default:
+			printk("<3>Regexp: memory corruption\n");
+			return(0);
+			break;
+		}
+
+		scan = next;
+	}
+
+	/*
+	 * We get here only if there's trouble -- normally "case END" is
+	 * the terminating point.
+	 */
+	printk("<3>Regexp: corrupted pointers\n");
+	return(0);
+}
+
+/*
+ - regrepeat - repeatedly match something simple, report how many
+ */
+static int
+regrepeat(struct match_globals *g, char *p)
+{
+	register int count = 0;
+	register char *scan;
+	register char *opnd;
+
+	scan = g->reginput;
+	opnd = OPERAND(p);
+	switch (OP(p)) {
+	case ANY:
+		count = strlen(scan);
+		scan += count;
+		break;
+	case EXACTLY:
+		while (*opnd == *scan) {
+			count++;
+			scan++;
+		}
+		break;
+	case ANYOF:
+		while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
+			count++;
+			scan++;
+		}
+		break;
+	case ANYBUT:
+		while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
+			count++;
+			scan++;
+		}
+		break;
+	default:		/* Oh dear.  Called inappropriately. */
+		printk("<3>Regexp: internal foulup\n");
+		count = 0;	/* Best compromise. */
+		break;
+	}
+	g->reginput = scan;
+
+	return(count);
+}
+
+/*
+ - regnext - dig the "next" pointer out of a node
+ */
+static char*
+regnext(struct match_globals *g, char *p)
+{
+	register int offset;
+
+	if (p == &g->regdummy)
+		return(NULL);
+
+	offset = NEXT(p);
+	if (offset == 0)
+		return(NULL);
+
+	if (OP(p) == BACK)
+		return(p-offset);
+	else
+		return(p+offset);
+}
+
+#ifdef DEBUG
+
+STATIC char *regprop();
+
+/*
+ - regdump - dump a regexp onto stdout in vaguely comprehensible form
+ */
+void
+regdump(regexp *r)
+{
+	register char *s;
+	register char op = EXACTLY;	/* Arbitrary non-END op. */
+	register char *next;
+	/* extern char *strchr(); */
+
+
+	s = r->program + 1;
+	while (op != END) {	/* While that wasn't END last time... */
+		op = OP(s);
+		printf("%2d%s", s-r->program, regprop(s));	/* Where, what. */
+		next = regnext(s);
+		if (next == NULL)		/* Next ptr. */
+			printf("(0)");
+		else
+			printf("(%d)", (s-r->program)+(next-s));
+		s += 3;
+		if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
+			/* Literal string, where present. */
+			while (*s != '\0') {
+				putchar(*s);
+				s++;
+			}
+			s++;
+		}
+		putchar('\n');
+	}
+
+	/* Header fields of interest. */
+	if (r->regstart != '\0')
+		printf("start `%c' ", r->regstart);
+	if (r->reganch)
+		printf("anchored ");
+	if (r->regmust != NULL)
+		printf("must have \"%s\"", r->regmust);
+	printf("\n");
+}
+
+/*
+ - regprop - printable representation of opcode
+ */
+static char *
+regprop(char *op)
+{
+#define BUFLEN 50
+	register char *p;
+	static char buf[BUFLEN];
+
+	strcpy(buf, ":");
+
+	switch (OP(op)) {
+	case BOL:
+		p = "BOL";
+		break;
+	case EOL:
+		p = "EOL";
+		break;
+	case ANY:
+		p = "ANY";
+		break;
+	case ANYOF:
+		p = "ANYOF";
+		break;
+	case ANYBUT:
+		p = "ANYBUT";
+		break;
+	case BRANCH:
+		p = "BRANCH";
+		break;
+	case EXACTLY:
+		p = "EXACTLY";
+		break;
+	case NOTHING:
+		p = "NOTHING";
+		break;
+	case BACK:
+		p = "BACK";
+		break;
+	case END:
+		p = "END";
+		break;
+	case OPEN+1:
+	case OPEN+2:
+	case OPEN+3:
+	case OPEN+4:
+	case OPEN+5:
+	case OPEN+6:
+	case OPEN+7:
+	case OPEN+8:
+	case OPEN+9:
+		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
+		p = NULL;
+		break;
+	case CLOSE+1:
+	case CLOSE+2:
+	case CLOSE+3:
+	case CLOSE+4:
+	case CLOSE+5:
+	case CLOSE+6:
+	case CLOSE+7:
+	case CLOSE+8:
+	case CLOSE+9:
+		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
+		p = NULL;
+		break;
+	case STAR:
+		p = "STAR";
+		break;
+	case PLUS:
+		p = "PLUS";
+		break;
+	default:
+		printk("<3>Regexp: corrupted opcode\n");
+		break;
+	}
+	if (p != NULL)
+		strncat(buf, p, BUFLEN-strlen(buf));
+	return(buf);
+}
+#endif
+
+
diff --git a/net/netfilter/regexp/regexp.h b/net/netfilter/regexp/regexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..a72eba71fb61ab7dbe560881ff4d089ea1a67e08
--- /dev/null
+++ b/net/netfilter/regexp/regexp.h
@@ -0,0 +1,41 @@
+/*
+ * Definitions etc. for regexp(3) routines.
+ *
+ * Caveat:  this is V8 regexp(3) [actually, a reimplementation thereof],
+ * not the System V one.
+ */
+
+#ifndef REGEXP_H
+#define REGEXP_H
+
+
+/*
+http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
+which contains a version of this library, says:
+
+ *
+ * NSUBEXP must be at least 10, and no greater than 117 or the parser
+ * will not work properly.
+ *
+
+However, it looks rather like this library is limited to 10.  If you think
+otherwise, let us know.
+*/
+
+#define NSUBEXP  10
+typedef struct regexp {
+	char *startp[NSUBEXP];
+	char *endp[NSUBEXP];
+	char regstart;		/* Internal use only. */
+	char reganch;		/* Internal use only. */
+	char *regmust;		/* Internal use only. */
+	int regmlen;		/* Internal use only. */
+	char program[1];	/* Unwarranted chumminess with compiler. */
+} regexp;
+
+regexp * regcomp(char *exp, int *patternsize);
+int regexec(regexp *prog, char *string);
+void regsub(regexp *prog, char *source, char *dest);
+void regerror(char *s);
+
+#endif
diff --git a/net/netfilter/regexp/regmagic.h b/net/netfilter/regexp/regmagic.h
new file mode 100644
index 0000000000000000000000000000000000000000..5acf4478ff71e198b649bc0fcb45245a9c1d52be
--- /dev/null
+++ b/net/netfilter/regexp/regmagic.h
@@ -0,0 +1,5 @@
+/*
+ * The first byte of the regexp internal "program" is actually this magic
+ * number; the start node begins in the second byte.
+ */
+#define	MAGIC	0234
diff --git a/net/netfilter/regexp/regsub.c b/net/netfilter/regexp/regsub.c
new file mode 100644
index 0000000000000000000000000000000000000000..339631f06f004e3263c7755593007306ed553b80
--- /dev/null
+++ b/net/netfilter/regexp/regsub.c
@@ -0,0 +1,95 @@
+/*
+ * regsub
+ * @(#)regsub.c	1.3 of 2 April 86
+ *
+ *	Copyright (c) 1986 by University of Toronto.
+ *	Written by Henry Spencer.  Not derived from licensed software.
+ *
+ *	Permission is granted to anyone to use this software for any
+ *	purpose on any computer system, and to redistribute it freely,
+ *	subject to the following restrictions:
+ *
+ *	1. The author is not responsible for the consequences of use of
+ *		this software, no matter how awful, even if they arise
+ *		from defects in it.
+ *
+ *	2. The origin of this software must not be misrepresented, either
+ *		by explicit claim or by omission.
+ *
+ *	3. Altered versions must be plainly marked as such, and must not
+ *		be misrepresented as being the original software.
+ *
+ *
+ * This code was modified by Ethan Sommer to work within the kernel
+ * (it now uses kmalloc etc..)
+ *
+ */
+#include "regexp.h"
+#include "regmagic.h"
+#include <linux/string.h>
+
+
+#ifndef CHARBITS
+#define	UCHARAT(p)	((int)*(unsigned char *)(p))
+#else
+#define	UCHARAT(p)	((int)*(p)&CHARBITS)
+#endif
+
+#if 0
+//void regerror(char * s)
+//{
+//        printk("regexp(3): %s", s);
+//        /* NOTREACHED */
+//}
+#endif
+
+/*
+ - regsub - perform substitutions after a regexp match
+ */
+void
+regsub(regexp * prog, char * source, char * dest)
+{
+	register char *src;
+	register char *dst;
+	register char c;
+	register int no;
+	register int len;
+	
+	/* Not necessary and gcc doesn't like it -MLS */
+	/*extern char *strncpy();*/
+
+	if (prog == NULL || source == NULL || dest == NULL) {
+		regerror("NULL parm to regsub");
+		return;
+	}
+	if (UCHARAT(prog->program) != MAGIC) {
+		regerror("damaged regexp fed to regsub");
+		return;
+	}
+
+	src = source;
+	dst = dest;
+	while ((c = *src++) != '\0') {
+		if (c == '&')
+			no = 0;
+		else if (c == '\\' && '0' <= *src && *src <= '9')
+			no = *src++ - '0';
+		else
+			no = -1;
+
+		if (no < 0) {	/* Ordinary character. */
+			if (c == '\\' && (*src == '\\' || *src == '&'))
+				c = *src++;
+			*dst++ = c;
+		} else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
+			len = prog->endp[no] - prog->startp[no];
+			(void) strncpy(dst, prog->startp[no], len);
+			dst += len;
+			if (len != 0 && *(dst-1) == '\0') {	/* strncpy hit NUL. */
+				regerror("damaged match string");
+				return;
+			}
+		}
+	}
+	*dst++ = '\0';
+}
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index af9c4dadf8165922af9ee23b02abc48047496a87..9a78ed947d438935af055fd26a56cac461477127 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -33,6 +33,12 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_classify_target_info *clinfo = par->targinfo;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_CLASSIFY;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	skb->priority = clinfo->priority;
 	return XT_CONTINUE;
 }
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 3746d8b9a47868694be0848fc0d09013a0a5b282..86861ae904efcc158ddc2cd0d446a5a0aa0562ad 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -101,7 +101,11 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
 		goto err1;
 
 	memset(&t, 0, sizeof(t));
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	ct = nf_conntrack_alloc(par->net, info->zone, NULL, &t, &t, GFP_KERNEL);
+#else
 	ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+#endif
 	ret = PTR_ERR(ct);
 	if (IS_ERR(ct))
 		goto err2;
@@ -191,7 +195,11 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 		goto err1;
 
 	memset(&t, 0, sizeof(t));
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	ct = nf_conntrack_alloc(par->net, info->zone, NULL, &t, &t, GFP_KERNEL);
+#else
 	ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+#endif
 	ret = PTR_ERR(ct);
 	if (IS_ERR(ct))
 		goto err2;
diff --git a/net/netfilter/xt_DC.c b/net/netfilter/xt_DC.c
new file mode 100644
index 0000000000000000000000000000000000000000..30aa0bd1b0defc197b02cad22ad6a76b043d0562
--- /dev/null
+++ b/net/netfilter/xt_DC.c
@@ -0,0 +1,963 @@
+/*
+* published by the free software foundation
+*/
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("ddc: dpi data collection module");
+
+//#define DDC_DEBUG 
+struct proc_dir_entry *proc_dpi = NULL;
+#define PROC_DPI_FILE_SZ        (1024)
+#define DPI_DIRECTORY   "dpi"
+#define DPI_PROC_FILE_NAME      "http"
+#define DPI_INFO_BUF_LEN        (1024)
+static char* pdpi_buf = NULL;
+static unsigned int total_bytes = 0;
+
+//#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
+
+typedef enum
+{
+    DPI_PROTO_INVALID = 0,
+    DPI_PROTO_HTTP,
+    DPI_PROTO_FTP,
+    DPI_PROTO_POP3,
+    DPI_PROTO_TELNET,
+    DPI_PROTO_MAX,
+}DPI_PROTOCOL;
+
+#define MAX_HOST_LENGTH 64
+#define MAX_SEARCH_KEY_LEN  128
+static char host[MAX_HOST_LENGTH]        = {0};
+//static char referer[MAX_HOST_LENGTH]   = {0}; 
+static char search_key[MAX_SEARCH_KEY_LEN] = {0};
+
+//static char http_ver[16] ={0};
+static char http_date[48] = {0};
+static char http_charset[16] = {0};
+
+#define PARSE_DATE      (1<<0)
+#define PARSE_CHARSET      (1<<1)
+#define DATE_VALID      (1 <<16)
+#define CHARSET_VALID      (1 <<17)
+
+static unsigned int  bparse_flag = 0;
+
+typedef enum
+{
+    METHOD_GET,
+    METHOD_POST,
+    METHOD_HEAD,
+    METHOD_PUT,
+    METHOD_DELETE,
+    METHOD_TRACE,
+    METHOD_OPTIONS,
+    METHOD_CONNECT,
+    METHOD_PATCH,
+    METHOD_MAX,    
+}http_method;
+
+struct dpi_packet_info
+{
+    char * ip_hdr;
+    char * tcp_hdr;
+    char * http_hdr;
+    char * host;
+    char * referer;
+    char * key_end;     //pointer to the last character of searching key
+    http_method method;
+    int src_ptn_idx;
+};
+
+typedef enum
+{
+    SESSION_STATE_EMPTY,
+    SESSION_STATE_NEW,
+    SESSION_STATE_CONFIRMED,
+    SESSION_STATE_SEARCHING,
+    SESSION_STATE_MAX,
+}session_state;
+
+struct http_session
+{
+   session_state state;
+   int              index;          //tell the updator the index of the session
+   int              update_cnt;         //record how many http packet comes
+   char *        keyword;
+   //bool         record_key;
+   int              keycnt;
+   char         host_name[MAX_HOST_LENGTH];
+   // add member later: mac, first access time, when to leave, and so on
+};
+
+struct srch_ptn
+{
+    int ptn_len;
+    char * ptn;
+};
+
+static struct srch_ptn wld_ptn[]=   //for ns?, i? and v?
+{
+    {5, "word="},
+    {0, NULL},
+};
+
+static struct srch_ptn search_ptn[]=     //search?
+{
+    {2, "q="},
+    {5, "word="},
+    {4, "key="},
+    {0, NULL},
+};
+
+static struct srch_ptn sq[]=   //s?
+{
+    {3, "wd="},
+    {0, NULL},
+};
+
+static struct srch_ptn kw[]=   //s?
+{
+    {3, "kw="},
+    {0, NULL},
+};
+
+
+static struct srch_ptn q[]=   //?
+{
+    {2, "q="},
+    {0, NULL},
+};
+
+struct search_mkptn
+{
+    int marklen;
+    char* mark;
+    struct srch_ptn *wdp;
+};
+/*
+To keep things simple there are only 2 sessions: one is the active page, the other is inactive.
+*/
+#define SESSION_POOL_MAX        2  
+#define UPDATE_CNT_THRESHOLD 4
+
+static struct dpi_packet_info pkt_info;
+//static struct http_session  session[SESSION_POOL_MAX];
+//static int active = 0;
+//static int last_update = 0;
+
+const char* search_engine[] = 
+{
+    "www.baidu.com",
+    "www.google.com",
+    "www.google.com.hk",
+    NULL,
+};
+
+//order matters: patterns are tried linearly in this sequence
+const struct search_mkptn search_pat[] =   //profiled search pattern: mark and pattern pairs
+{   
+    {7, "search?", search_ptn},
+    {2, "s?", sq},
+    {2, "f?", kw},
+    {2, "v?", wld_ptn},
+    {2, "i?", wld_ptn},
+    {2, "ns?", wld_ptn},
+    {3, "?q=", q},
+    {5, "word?", wld_ptn},
+};
+
+#define SEARCH_PATTERN_NBR (sizeof(search_pat)/sizeof(struct search_mkptn))
+
+typedef enum
+{
+    RECORD_PCONLINE = 1,
+    RECORD_GATEWAY_STATE,
+    RECORD_FLOW,
+    RECORD_NETWORK_PERF,
+    RECORD_URL,
+    RECORD_MAIL,
+    RECORD_APP,
+    RECORD_MAX,
+}record_type;
+
+typedef enum
+{   
+    ACTION_WEB,
+    ACTION_SEARCH,
+    ACTION_MAX,
+}action_type;
+
+typedef enum
+{
+    CHARSET_INVALID,
+    CHARSET_UTF_8,
+    CHARSET_GBK,
+    CHARSET_GB2312,
+    //add other members
+    CHARSET_MAX,
+}charset;
+
+struct host_key_record
+{
+    /*  PC online =1;
+    *   gateway state
+    *   flow
+    *   network performance
+    *   url
+    *   mail
+    *   app
+    */
+    unsigned short rcdtype;     
+    unsigned short item_len;    //dont change the order of first two member       
+    unsigned char actiontype;       /*web =1, search =2*/
+    unsigned char url_len;
+    unsigned char mac[6];
+    char date[32];    
+    unsigned char charset;
+    unsigned char unused;
+    unsigned short key_len;
+    char data[];
+};
+
+struct host_key_record *phn_rcd = NULL;  /*pointer of host-name record*/
+
+DEFINE_SPINLOCK(ddc_lock);
+
+
+#if 1
+static inline int isPrintable(char c)
+{
+    if((c>= 0x20) && (c<=0x7E))
+        return 1;
+     else
+        return 0;
+}
+
+/*len = 16. for last line, len<16*/
+static void printLine(const char * buf, unsigned int len)
+{
+    unsigned int i;
+    unsigned char c;
+    for(i=0; i<len; i++)
+    {
+        c = *(buf+i);
+        printk("%02x ", c);
+    }
+    //alignment: pad the gap between the hex dump and the ASCII column
+    printk("    ");
+    if(len <16)
+    {
+        for(i=0; i<16-len; i++)
+            printk("   ");
+    }
+    for(i=0; i<len; i++)
+    {
+        c = *(buf+i);
+        if(isPrintable(c))
+            printk("%c", c);
+        else
+            printk(".");
+    }
+    printk("\n");
+}
+
+/*
+*   Brief description: dump packet content to console
+*   start: start address of the packet
+*   end: end address of the packet
+*/
+void ddc_printPacket( char* start,  char* end)
+{
+    unsigned int  len;
+    char *buf = start;
+    printk("\n Total Len = %d\n", (unsigned int)(end-start));
+    while(buf < end)
+    {
+        if(buf + 16 < end)
+            len = 16;
+        else
+            len = (unsigned int)(end -buf);
+        printLine(buf, len);
+        buf+=len;
+    }
+}
+#endif
+
+static inline bool utl_in_range(const char* start, const char * end, const char * ptr)
+{
+    if(ptr >= start && ptr < end)
+    {
+        return true;
+    }
+    return false;    
+}
+static inline char* skipIpheader(char *ip_hdr)
+{
+    unsigned int iphdr_len;
+    char * iphdr = ip_hdr;
+    pkt_info.ip_hdr = ip_hdr;
+    iphdr_len = ((*iphdr) & 0x0F)<<2;
+
+    return iphdr+ iphdr_len;
+}
+
+static inline char* skipTcpheader(const char * tcp_hdr)
+{
+    unsigned int tcphdr_len;
+    char *tcphdr = (char *)tcp_hdr;
+    pkt_info.tcp_hdr = (char *)tcp_hdr;
+    tcphdr_len =  ((*(tcphdr + 12))  & 0xF0) >> 2;
+
+    return (tcphdr + tcphdr_len);
+}
+
+static inline bool pktdata_match(const char * src, const char * dst, int len)
+{
+    int i;
+    for(i=0; i<len; i++)
+    {
+        if(src[i] != dst[i])
+            return false;
+    }
+    return true;
+}
+
+static void extract_http_method(const char * http_hdr)
+{
+    if(http_hdr == NULL || *http_hdr == 0)
+    {
+        pkt_info.method = METHOD_MAX;
+        return;
+    }
+
+    if(pktdata_match(http_hdr, "GET /", 5))
+        pkt_info.method = METHOD_GET;
+    else if(pktdata_match(http_hdr, "POST /", 6))
+        pkt_info.method = METHOD_POST;
+    #if 0    
+    else if(pktdata_match(http_hdr, "HEAD /", 6))
+        pkt_info.method = METHOD_HEAD;  
+    else if(pktdata_match(http_hdr, "PUT /", 5))
+        pkt_info.method = METHOD_PUT;    
+    else if(pktdata_match(http_hdr, "DELETE /", 8))
+        pkt_info.method = METHOD_POST;        
+    else if(pktdata_match(http_hdr, "TRACE /", 7))
+        pkt_info.method = METHOD_TRACE; 
+    else if(pktdata_match(http_hdr, "OPTIONS /", 9))
+        pkt_info.method = METHOD_OPTIONS;     
+    else if(pktdata_match(http_hdr, "CONNECT /", 9))
+        pkt_info.method = METHOD_CONNECT;  
+    else if(pktdata_match(http_hdr, "PATCH /", 7))
+        pkt_info.method = METHOD_PATCH;     
+   #endif     
+    else
+        pkt_info.method = METHOD_MAX;
+}
+static void locateHTTPHeader(struct sk_buff * skb)
+{
+    char *proto_hdr = (char *)skb->data;
+    #if 0
+    if((*proto_hdr >>4) == 4)
+    {
+        printk("IPv4 packet\n");
+    }
+    #endif
+    
+    proto_hdr = skipIpheader(proto_hdr);
+    proto_hdr = skipTcpheader(proto_hdr);  //later add code to check UDP header
+
+    if(proto_hdr <=(char *)skb->tail)
+    {
+        pkt_info.http_hdr = proto_hdr;
+    }
+    else
+    {
+        pkt_info.http_hdr = NULL;   //just tcp ack packet
+    } 
+}
+
+/*
+ * find_keywords_r - scan forward from `start` for `keyword`, never
+ * reading past `right` (the inclusive last byte of the buffer).
+ * Returns a pointer to the first match, or NULL when not found.
+ * (A former left/right + mode interface was simplified to this form.)
+*/
+static char * find_keywords_r(const char* right, const char *keyword, char *start)
+{
+    char *  ptr = start;
+    char * arch = NULL;
+    int len = strlen(keyword);     
+
+    while((ptr +len) <= (right +1))
+    {
+        if(*ptr == *keyword)
+        {
+            if(pktdata_match(ptr, keyword, len))
+            {
+                arch = ptr;
+                return arch;
+            }
+        }
+        ptr++;
+    }
+
+    return arch;
+}
+
+#if 0
+/*search from right to left:
+    the caller supplies the left edge, the keyword, and start = right - len + 1
+*/
+static char * find_keywords_l(const char* left, const char *keyword, char *start)
+{
+    char *  ptr = start;
+    char * arch = NULL;
+    int len = strlen(keyword);     
+
+    while(ptr >=left)
+    {
+        if(*ptr == *keyword)
+        {
+            if(pktdata_match(ptr, keyword, len))
+            {
+                arch = ptr;
+                return arch;
+            }
+        }
+        ptr--;
+    }
+
+    return arch;
+}
+
+/*
+*   only search/record user's searching keyword for some profiled search engine
+*/
+static bool match_search_engine(char* host_name)
+{   
+    int i = 0;
+
+    //search profiled search engine
+    const char * phost = search_engine[i];  
+    while(phost !=NULL)
+    {
+        if(strcmp(phost, host_name) == 0)
+            return true;
+        phost = search_engine[++i];
+    }
+
+    //later add code to search configured search engine
+    return false;
+}
+#endif
+
+/*
+*   Extract the "Host:" header value from an HTTP request and copy it,
+*   NUL-terminated, into the global host[] array; true on success.
+*/
+static bool extract_http_host(const char * left, const char * right, char * start)
+{
+    int i =0;
+    char * ptr = start;
+    char * http_host = NULL;
+
+    if(!utl_in_range(left, right, ptr))
+        return false;
+
+    http_host = find_keywords_r(right, "Host: ", ptr);
+    if(http_host == NULL)   return false;
+
+    /*
+    The HTTP response status is not considered yet: on a 301 (moved
+    permanently) reply the real host should be taken from the
+    "Location: xxxx\r\n" header instead.  TODO: handle that case.
+    */
+    http_host +=6;          //skip "Host: "
+    pkt_info.host = http_host;
+    while((http_host <=right)
+                && (*http_host != 0x0d)
+                && (*http_host != 0x0a))
+    {
+        host[i++] = *http_host;
+        http_host++;
+        if(i>=MAX_HOST_LENGTH-1) break;  /* leave room for the NUL below */
+    }
+    host[i] = '\0';    /* i <= MAX_HOST_LENGTH-1, so this stays in bounds */
+    return true;
+}
+
+static void update_search_key(struct sk_buff *skb, char * host_name, char * key)
+{
+    unsigned short key_len = round_up(strlen(key), 4);
+    unsigned short url_len =  round_up(strlen(host_name), 4);
+    unsigned short item_len = key_len + url_len +  round_up(sizeof(struct host_key_record), 4);
+    
+    phn_rcd = (struct host_key_record *)(pdpi_buf + total_bytes);
+    
+    if(!utl_in_range(pdpi_buf, pdpi_buf+DPI_INFO_BUF_LEN, (char*)phn_rcd + item_len))
+    {
+        //debug
+        pr_debug("out of dpi buffer\n");
+        return;
+    }
+
+    phn_rcd->rcdtype = RECORD_URL;
+    phn_rcd->actiontype = 2; //search
+    phn_rcd->url_len = url_len;
+    phn_rcd->charset = CHARSET_INVALID;
+    phn_rcd->key_len = key_len;
+    phn_rcd->item_len =item_len;
+    phn_rcd->date[0] = '\0';    //now date is invalid
+    memcpy(phn_rcd->mac, (char*)skb->mac_header+6, 6);
+
+    strcpy(phn_rcd->data, host_name);
+    strcpy(phn_rcd->data + url_len, key);
+
+    total_bytes += item_len;
+
+    //debug
+    //printk("total bytes: %d\n", total_bytes);
+    //ddc_printPacket( (char*)pdpi_buf, (char*)pdpi_buf +  total_bytes);    
+}
+
+static void update_charset(char * charset)
+{
+    char * ptr = charset + 8;       //skip "charset="
+    
+    if(!utl_in_range(pdpi_buf, pdpi_buf+DPI_INFO_BUF_LEN, (char*)phn_rcd))
+        return;
+        
+    if(strcmp(ptr, "utf-8") == 0)
+        phn_rcd->charset = CHARSET_UTF_8;
+    else if(strcmp(ptr, "gbk") == 0)
+        phn_rcd->charset = CHARSET_GBK;
+    else if(strcmp(ptr, "gb2312") == 0)
+        phn_rcd->charset = CHARSET_GB2312;
+    else
+        phn_rcd->charset = CHARSET_INVALID;
+
+    //debug
+   //ddc_printPacket( (char*)pdpi_buf, (char*)pdpi_buf +  total_bytes);
+}
+
+/* Copy the HTTP "Date:" header value into the current record.
+ * The original author's note says the date is "total 29 bytes"; the
+ * strcpy below relies on phn_rcd->date being at least that large.
+ * NOTE(review): the source string comes from extract_http_date(),
+ * which does not bound its length -- confirm sizeof(phn_rcd->date)
+ * versus the longest header line a server may emit. */
+static void update_date(char * date)
+{
+    if(!utl_in_range(pdpi_buf, pdpi_buf+DPI_INFO_BUF_LEN, (char*)phn_rcd))
+        return;
+    strcpy(phn_rcd->date, date);   //have enough space
+
+    //debug
+    //ddc_printPacket( (char*)pdpi_buf, (char*)pdpi_buf +  total_bytes);
+}
+/* Append a RECORD_URL record (actiontype 1 == plain web access) for
+ * 'host_name' at the current end of the DPI buffer and advance
+ * total_bytes.  Leaves the global phn_rcd pointing at the new record
+ * so later response packets can patch in date/charset. */
+static void update_host_name(struct sk_buff *skb, char * host_name)
+{
+    unsigned short url_len, item_len;
+    phn_rcd = (struct host_key_record *)(pdpi_buf + total_bytes);
+
+    /* round to 4-byte multiples to keep records aligned */
+    url_len = round_up(strlen(host_name), 4);
+    item_len = url_len + round_up(sizeof(struct host_key_record), 4);    
+
+    if(!utl_in_range(pdpi_buf, pdpi_buf+DPI_INFO_BUF_LEN, (char*)phn_rcd+item_len))
+    {   
+        pr_debug("out of dpi buffer\n");
+        return;
+    }
+    
+    phn_rcd->rcdtype = RECORD_URL;
+    phn_rcd->actiontype = 1;    //web
+    phn_rcd->url_len = url_len;
+    phn_rcd->charset = CHARSET_INVALID;
+    
+    /* NOTE(review): source MAC read from skb->mac_header+6; assumes
+     * mac_header is a pointer on this kernel version -- confirm. */
+    memcpy(phn_rcd->mac, (char*)skb->mac_header+6, 6);
+    phn_rcd->date[0] = '\0';    //date is filled in later by update_date()
+    phn_rcd->item_len = item_len;
+    
+    strcpy(phn_rcd->data, host_name);
+    total_bytes += phn_rcd->item_len ;
+    phn_rcd->key_len = 0;
+    
+    //debug
+    //ddc_printPacket( (char*)pdpi_buf, (char*)pdpi_buf +  total_bytes);
+    return;
+}
+
+/* Return true when the request path at 'start' matches one of the
+ * configured search-engine URL markers in search_pat[].  On success
+ * the matching table index is saved in pkt_info.src_ptn_idx for use
+ * by extract_search_word(). */
+static bool check_search_pattern(const char * start)
+{
+    int i =0;
+    char * ptr = (char *)start;
+    struct search_mkptn * ptn = NULL;
+
+    //skip the additional '/'
+    /* NOTE(review): up to 10 bytes are read from 'start' without an
+     * explicit end bound -- relies on the caller passing a pointer
+     * with at least that much packet data behind it; confirm. */
+    while(i<10) //max we want to skip. tunable
+    {
+        if(*ptr ==0x2F)  //'/'
+        {
+            ptr++;
+            goto PATTERN_SEARCH;
+        }
+        i++;
+        ptr++;            
+    }
+    ptr = (char *)start;        //reset
+
+    /*later add code to read additional pattern configured through ctms
+    */
+PATTERN_SEARCH:  
+    i=0;
+    while(i<SEARCH_PATTERN_NBR)
+    {
+        ptn = (struct search_mkptn *)(&search_pat[i]);
+        if(pktdata_match(ptr, ptn->mark, ptn->marklen))
+        {
+            pkt_info.src_ptn_idx = i;
+            return true;
+        }
+        i++;
+    };
+    return false;
+}
+
+/* Scan the request bytes [start,end) for one of the query-word markers
+ * of the search engine selected earlier by check_search_pattern(), and
+ * copy the query string that follows into the global search_key
+ * buffer.  Returns true when a non-empty key was extracted;
+ * pkt_info.key_end then points one past the last copied byte. */
+static bool extract_search_word(const char* start, const char* end)
+{
+    char * ptr = (char *)start;
+    struct srch_ptn * ptrn = NULL;
+
+    int i = 0;   
+    
+    /* pattern table was chosen by check_search_pattern() */
+    if(pkt_info.src_ptn_idx < SEARCH_PATTERN_NBR)    
+    {
+        ptrn = search_pat[pkt_info.src_ptn_idx].wdp;
+    }
+    else
+    {
+        return false;
+    }
+
+    while(ptr <=(end-ptrn[i].ptn_len+1) && (*ptr!='\r') && (*ptr!='\n')&& (*ptr!=' '))
+    {   
+        while(ptrn[i].ptn != NULL)      /* ptn list is NULL-terminated */
+        {
+            if(pktdata_match(ptr, ptrn[i].ptn, ptrn[i].ptn_len))
+            {
+                ptr +=ptrn[i].ptn_len;
+                goto CPY_SEARCH_KEY;
+            }
+            i++;
+        }
+        ptr++;
+        i=0;
+    }
+    return false;
+    
+CPY_SEARCH_KEY:
+    /* Copy up to the next parameter separator.  Also bound by 'end':
+     * the original loop only stopped on '&'/' ' and could read past
+     * the packet when the key ran to the end of the buffer. */
+    i =0;
+    while((ptr < end) && (*ptr !='&') && (*ptr !=' ') && (i <(MAX_SEARCH_KEY_LEN-1)))
+    {
+        search_key[i++]  = *ptr;
+        ptr++;
+    }
+    search_key[i] = '\0';
+    pkt_info.key_end = ptr;
+    return (i>0)? true : false;
+}
+
+/* Search [start,right) for the "Date" header and copy its whole line
+ * ("Date: ...") into the global http_date buffer.  'left' is unused
+ * but kept for signature symmetry with the other extractors.
+ * Returns true when a Date header was found.
+ * NOTE(review): http_date is assumed large enough for one header line;
+ * confirm its size against real server output. */
+static bool extract_http_date(const char * left, const char * right, const char * start)
+{
+    int i =0;
+    char * ptr = (char *)start;
+
+    /* right-4 (not right-3): the 4-byte match reads ptr[0..3], so with
+     * the original bound the last probe read the byte at 'right'. */
+    while(ptr <=(right-4))
+    {
+        if(pktdata_match(ptr, "Date", 4))
+        {
+            goto CPY_DATE;
+        }
+        ptr++;
+    }
+    http_date[0] ='\0';
+    return false;
+
+CPY_DATE:
+    /* Bound the copy by 'right': the original loop stopped only on
+     * CR/LF and could read past the end of the packet data. */
+    while((ptr < right) && (*ptr!=0x0d) &&(*ptr!=0x0a))
+    {
+        http_date[i++] = *ptr++;
+    }
+    http_date[i] ='\0';
+    return true;
+}
+
+/* Search [start,right) for a "charset" token and copy the remainder of
+ * that line, lowercased, into the global http_charset buffer so that
+ * update_charset() can strcmp it directly.  Returns true on success.
+ * NOTE(review): http_charset is assumed large enough for one header
+ * line; confirm its size. */
+static bool extract_http_charset(const char * left, const char * right, const char * start)
+{
+    int i =0;
+    char * ptr = (char *)start;
+
+    /* right-7 (not right-6): the 7-byte match reads ptr[0..6], so with
+     * the original bound the last probe read the byte at 'right'. */
+    while(ptr <=(right-7))
+    {
+        if(pktdata_match(ptr, "charset", 7))
+        {
+            goto CPY_CHARSET;
+        }
+        ptr++;
+    }
+    http_charset[0] ='\0';
+    return false;
+
+CPY_CHARSET:
+    /* Bound the copy by 'right'; the original could run past the packet. */
+    while((ptr < right) && (*ptr!=0x0d) &&(*ptr!=0x0a))
+    {
+        /* lowercase ASCII only; kernel tolower mungs 'upper ascii' */
+        http_charset[i++] = isascii(*ptr)? tolower(*ptr) : *ptr;
+        ptr++;
+    }
+    http_charset[i] ='\0';
+    return true;
+}
+
+/* Downstream (server-to-client) HTTP handler: extracts the response
+ * Date header and, as a demo, the charset, patching them into the
+ * record created when the matching upstream request was seen.
+ * bparse_flag tells it which fields are still outstanding. */
+static void http_handler_ds(struct sk_buff *skb)
+{
+    if(bparse_flag & (PARSE_DATE |PARSE_CHARSET))
+    {
+        locateHTTPHeader(skb);
+    }
+    
+    if(bparse_flag & PARSE_DATE)
+    {
+        /* only a response status line starts with "HTTP/" */
+        if(pkt_info.http_hdr && pktdata_match(pkt_info.http_hdr,"HTTP/",5))
+        {
+            if(extract_http_date(pkt_info.http_hdr, (const char *)skb->tail, pkt_info.http_hdr ))
+            {
+                bparse_flag &= ~PARSE_DATE;
+                //printk("access time: %s\n", http_date+6); //just skip "Date: 
+                update_date(http_date+6);   /* +6 skips "Date: " */
+            }
+        }
+    }        
+
+    /*
+    * parsing the character set is just a demo
+    */
+    if(bparse_flag & PARSE_CHARSET)
+    {
+        if(pkt_info.http_hdr && pktdata_match(pkt_info.http_hdr,"HTTP/",5))     
+        {
+            if(extract_http_charset(pkt_info.http_hdr, (const char *)skb->tail, pkt_info.http_hdr + 7))
+            {
+                bparse_flag &= ~PARSE_CHARSET;
+                bparse_flag |= CHARSET_VALID;
+                //printk("%s\n", http_charset);
+                update_charset(http_charset);
+            }
+        }
+    }
+}
+/* Upstream (client-to-server) HTTP handler: records the requested host
+ * name and, when the URL matches a known search engine, the search key
+ * word.  Sets bparse_flag so the downstream handler knows what still
+ * needs to be parsed out of the response. */
+static void http_handler_us(struct sk_buff *skb)
+{
+    char* ptr;
+    bool spkt = false;
+    locateHTTPHeader(skb);
+    /* NOTE(review): pkt_info.http_hdr is passed (and later offset
+     * from) without a NULL check -- confirm extract_http_method()
+     * tolerates NULL and never reports METHOD_GET in that case. */
+    extract_http_method(pkt_info.http_hdr);
+        
+    if(pkt_info.method == METHOD_GET)
+    {        
+        /* NOTE(review): the +6 ("HTTP/" probe) and +15 (Host scan
+         * start) are fixed offsets into the request line -- verify
+         * them against the request formats actually handled. */
+        if(pkt_info.http_hdr && pktdata_match(pkt_info.http_hdr + 6, "HTTP/", 5))
+        {
+            bparse_flag |= PARSE_DATE | PARSE_CHARSET;
+            if(extract_http_host((char *)skb->data, (char *) skb->tail, pkt_info.http_hdr + 15))
+            {      
+                //printk("\n\nHost: %s\n", host);  //debug
+                update_host_name(skb, host);
+            }
+        }    
+
+        /* now all web sites are examined to extract the search key word;
+         * later add code to extract the key word only from specified web
+         * sites based on ctms config */
+        ptr = pkt_info.http_hdr + 5;      //skip "GET /"
+        spkt = check_search_pattern(ptr);
+        if(spkt)
+        {
+            if(extract_search_word(pkt_info.http_hdr + 5, (char *) skb->tail))  //skip space
+            {
+                extract_http_host((char *)skb->data, (char *) skb->tail, pkt_info.key_end ? pkt_info.key_end : (char *)skb->data);
+                //printk("search key: %s\n", search_key);   //debug
+                update_search_key(skb, host, search_key);
+                bparse_flag |= PARSE_DATE;
+            }
+        }  
+    }
+    //add function to process other method, such as POST to parse webmail
+
+}
+
+/* iptables target entry point for "DC" (data collection).  Classifies
+ * the packet as upstream (towards a WAN device) or downstream and
+ * hands it to the matching HTTP parser.  Always returns XT_CONTINUE
+ * so rule traversal is unaffected. */
+static unsigned int
+dc_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{       
+    struct nf_conn * conntrack = NULL;
+    enum ip_conntrack_info ctinfo;
+
+   
+    conntrack = nf_ct_get(skb, &ctinfo);
+    if(unlikely(conntrack == NULL))
+    {                
+        return XT_CONTINUE;
+    }
+    
+    /* per-packet parse state shared with the helper functions above */
+    memset(&pkt_info, 0, sizeof(pkt_info));
+    
+    spin_lock_bh(&ddc_lock); 
+
+    /* Upstream when the relevant device is a WAN device.
+     * NOTE(review): skb_dst(skb) is dereferenced unconditionally --
+     * confirm a dst entry is always attached at this hook point. */
+    if(((skb_dst(skb)->flags & DST_NOXFRM) && (skb->dev->priv_flags & IFF_WANDEV)) ||
+       (!(skb_dst(skb)->flags & DST_NOXFRM) && (skb_dst(skb)->dev->priv_flags & IFF_WANDEV)))
+    {
+        /*NOTE: all http protocol pattern file should start with "http"
+       now only handle http packet (only http rule is inserted)
+    */
+        if(pktdata_match(conntrack->layer7.app_proto, "http", 4))   
+            http_handler_us(skb);
+        //add other protocol handler
+    }
+    else
+    {  
+        if(pktdata_match(conntrack->layer7.app_proto, "http", 4))   
+            http_handler_ds(skb);         
+    }    
+    spin_unlock_bh(&ddc_lock);    
+    
+    return XT_CONTINUE;
+}
+
+/*
+ * /proc read handler: hands the accumulated DPI records to userland
+ * and drains the buffer.  The internal DPI buffer is 1024 bytes now
+ * (tunable), so callers should read with count >= 1024; partial reads
+ * are not handled.
+ *
+ * Record layout for userland parsers: each item is a host_key_record
+ * header followed by the host name (url_len bytes) and, for
+ * actiontype 2, the search key at data + url_len; items advance by
+ * item_len bytes.
+ */
+static int dc_read_proc(char* page, char ** start, off_t off, int count,
+                            int* eof, void * data)
+{
+    int r = 0;
+    
+    if(pdpi_buf == NULL)
+        return 0;    
+
+    /* dc_tg() appends records under ddc_lock; take it here too so a
+     * reader cannot observe (or reset) total_bytes mid-update.  This
+     * replaces the old "need to define a lock" TODO. */
+    spin_lock_bh(&ddc_lock);
+
+    memcpy(page, pdpi_buf, total_bytes);  
+    r = total_bytes;
+    total_bytes = 0;        /* buffer is drained by each read */
+
+    spin_unlock_bh(&ddc_lock);
+
+    *eof = 1;   
+    
+    return r;
+}
+
+/* Create /proc/<DPI_DIRECTORY>/<DPI_PROC_FILE_NAME> with a read-only
+ * handler.  On partial failure everything created so far is undone. */
+static void dc_init_proc(void)
+{
+    struct proc_dir_entry* entry;
+
+    /* parent NULL == /proc root */
+    proc_dpi = proc_mkdir(DPI_DIRECTORY, NULL);
+    if(!proc_dpi)
+    {
+        return;
+    }
+    
+    entry = create_proc_entry(DPI_PROC_FILE_NAME, 0644,proc_dpi);
+    if(!entry)
+    {
+        /* The directory was created with a NULL parent above, so it
+         * must be removed from the same (root) parent -- the original
+         * code wrongly removed it from /proc/net.  Also clear proc_dpi
+         * so cleanup does not use a dangling directory pointer. */
+        remove_proc_entry(DPI_DIRECTORY, NULL);
+        proc_dpi = NULL;
+        return;      
+    }
+
+    entry->read_proc = dc_read_proc;  //only provide read operation
+}
+
+/* Tear down the proc entries created by dc_init_proc(), in reverse
+ * creation order.  The directory was made via proc_mkdir(..., NULL),
+ * so it must be removed with a NULL parent as well -- the original
+ * code passed init_net.proc_net (/proc/net), which never contained it. */
+static void dc_cleanup_proc(void)
+{
+    remove_proc_entry(DPI_PROC_FILE_NAME, proc_dpi);
+    remove_proc_entry(DPI_DIRECTORY, NULL);
+}
+
+
+/* x_tables registration for the "DC" target; NFPROTO_UNSPEC so the
+ * same target serves IPv4 and IPv6 tables, no per-rule target data. */
+static struct xt_target dc_tg_reg  __read_mostly = 
+{
+    .name 		= "DC",
+    .revision		= 0,
+    .family		= NFPROTO_UNSPEC,
+    .target		= dc_tg,
+    .me		= THIS_MODULE,
+};
+
+/* Module init: allocate the DPI record buffer, publish the /proc
+ * interface and register the target.  On registration failure the
+ * buffer and proc entries are released again -- the original code
+ * leaked both. */
+static int __init dc_tg_init(void)
+{    
+    int ret;
+
+    pdpi_buf = kmalloc(DPI_INFO_BUF_LEN, GFP_KERNEL);
+    if(pdpi_buf == NULL)
+    {
+        pr_debug("%s: kmalloc %d bytes failed\n", __FUNCTION__, DPI_INFO_BUF_LEN);
+        return -ENOMEM;
+    }    
+    
+    dc_init_proc();
+
+    ret = xt_register_target(&dc_tg_reg);
+    if (ret < 0)
+    {
+        /* unwind: nothing must survive a failed init */
+        dc_cleanup_proc();
+        kfree(pdpi_buf);
+        pdpi_buf = NULL;
+    }
+    return ret;
+}
+
+/* Module exit.  Unregister the target FIRST: dc_tg() reads and writes
+ * pdpi_buf, so freeing the buffer while the target can still run left
+ * a use-after-free window in the original ordering. */
+static void __exit dc_tg_exit(void)
+{    
+    xt_unregister_target(&dc_tg_reg);
+
+    dc_cleanup_proc();
+    
+    kfree(pdpi_buf);        /* kfree(NULL) is a no-op */
+    pdpi_buf = NULL;
+}
+
+module_init(dc_tg_init);
+module_exit(dc_tg_exit);
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 1535e87ed9bd4fa47a7f431afab82320cf5e8530..abe17b306de4f5b3299bb5b7ea226d137d52bc71 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -32,6 +32,12 @@ ttl_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct ipt_TTL_info *info = par->targinfo;
 	int new_ttl;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TTL;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	if (!skb_make_writable(skb, skb->len))
 		return NF_DROP;
 
@@ -72,6 +78,12 @@ hl_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct ip6t_HL_info *info = par->targinfo;
 	int new_hl;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_HL;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	if (!skb_make_writable(skb, skb->len))
 		return NF_DROP;
 
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 993de2ba89d33bb3fa532ded0952bfb83de63836..bbeba748647164dfe2b968703b16fa2455088397 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -56,6 +56,12 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_led_info *ledinfo = par->targinfo;
 	struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_LED;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	/*
 	 * If "always blink" is enabled, and there's still some time until the
 	 * LED will switch off, briefly switch it off now.
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a17dd0f589b22d3ffce573177414c4afa0790c1d..6a584655f1599c1e8cc3b691b5773d64cf47e27d 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -27,6 +27,12 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_nflog_info *info = par->targinfo;
 	struct nf_loginfo li;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_NFLOG;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	li.type		     = NF_LOG_TYPE_ULOG;
 	li.u.ulog.copy_len   = info->len;
 	li.u.ulog.group	     = info->group;
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 95237c89607a7ee330273abf693c77af607d108a..670ce08995903e30fe3c71f3d08862c945d80805 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -35,6 +35,12 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_NFQ_info *tinfo = par->targinfo;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_NFQUEUE;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	return NF_QUEUE_NR(tinfo->queuenum);
 }
 
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 9d782181b6c8993236cec126c131949cbd3ee0a2..1d492def07914926002876ec1dec928d8a541dfc 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -15,6 +15,13 @@ MODULE_ALIAS("ip6t_NOTRACK");
 static unsigned int
 notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_NOTRACK;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	/* Previously seen (loopback)? Ignore. */
 	if (skb->nfct != NULL)
 		return XT_CONTINUE;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index f264032b8c56e1748e84de12fa1ef801dfc35152..75a17d0107797fe3e728ae6c3589edfd3f7b6273 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -82,6 +82,12 @@ xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_rateest_target_info *info = par->targinfo;
 	struct gnet_stats_basic_packed *stats = &info->est->bstats;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_RATEEST;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	spin_lock_bh(&info->est->lock);
 	stats->bytes += skb->len;
 	stats->packets++;
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 9faf5e050b796186b3204a02ece181726a26cb1a..00490ee225bb256b727a6a369d2e9ec72dd4a868 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -35,6 +35,12 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	u32 secmark = 0;
 	const struct xt_secmark_target_info *info = par->targinfo;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_SECMARK;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	BUG_ON(info->mode != mode);
 
 	switch (mode) {
diff --git a/net/netfilter/xt_SKIPLOG.c b/net/netfilter/xt_SKIPLOG.c
new file mode 100644
index 0000000000000000000000000000000000000000..aa9522933e2a96a943b1d54ea5972a041bf44989
--- /dev/null
+++ b/net/netfilter/xt_SKIPLOG.c
@@ -0,0 +1,77 @@
+/*
+*    Copyright (c) 2003-2012 Broadcom Corporation
+*    All Rights Reserved
+*
+<:label-BRCM:2012:DUAL/GPL:standard
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("iptables stop logging module");
+MODULE_ALIAS("ipt_SKIPLOG");
+
+/* SKIPLOG target: marks the flow so blog-based acceleration stops
+ * logging it.  Always returns XT_CONTINUE. */
+static unsigned int
+skiplog_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+/* Guard must match every other target in this patch set: skb->ipt_check
+ * only exists when CONFIG_BCM_KF_BLOG is set, so testing
+ * CONFIG_BLOG_FEATURE alone (as the original did) breaks the build
+ * when the BCM blog kernel feature is disabled. */
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_SKIPLOG;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+	blog_skip(skb);
+#endif
+
+	return XT_CONTINUE;
+}
+
+/* x_tables registration: NFPROTO_UNSPEC so the same target serves
+ * both IPv4 and IPv6 tables; no per-rule target data is used. */
+static struct xt_target skiplog_tg_reg __read_mostly = {
+	.name		= "SKIPLOG",
+	.revision   = 0,
+	.family		= NFPROTO_UNSPEC,
+	.target		= skiplog_tg,
+	.me		= THIS_MODULE,
+};
+
+/* Module init: register the SKIPLOG target with x_tables. */
+static int __init skiplog_tg_init(void)
+{
+	return xt_register_target(&skiplog_tg_reg);
+}
+
+/* Module exit: unregister the SKIPLOG target. */
+static void __exit skiplog_tg_exit(void)
+{
+	xt_unregister_target(&skiplog_tg_reg);
+}
+
+module_init(skiplog_tg_init);
+module_exit(skiplog_tg_exit);
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 190ad37c5cf8ef7cc6acaca03e3494fad5a8a724..48c867a907d30a243fffc598cdce4e3307a88299 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -183,6 +183,12 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	__be16 newlen;
 	int ret;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TCPMSS;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	ret = tcpmss_mangle_packet(skb, par->targinfo,
 				   tcpmss_reverse_mtu(skb, PF_INET),
 				   iph->ihl * 4,
@@ -208,6 +214,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 	int tcphoff;
 	int ret;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TCPMSS;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	nexthdr = ipv6h->nexthdr;
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 25fd1c4e1eec3229e8629420e6750ebe74bb6bea..e4ca6c374b6ee0036101b9f745f2d726cf10828d 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -76,6 +76,12 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 static unsigned int
 tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TCPOPTSTRIP;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
 	       sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
@@ -89,6 +95,12 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 	u_int8_t nexthdr;
 	__be16 frag_off;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TCPOPTSTRIP;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	nexthdr = ipv6h->nexthdr;
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 35a959a096e0a49e8c9d20931f95047b12895576..2f14ebcaa16641958e1a7f014ac7cb880c15467b 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -125,6 +125,12 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
 	struct udphdr _hdr, *hp;
 	struct sock *sk;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TPROXY;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
 	if (hp == NULL)
 		return NF_DROP;
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
index df48967af38210e766f2bb85568f4d5ddb67bbfa..f8b035f1c14b66c02b14a1328e0b91e6b916d99f 100644
--- a/net/netfilter/xt_TRACE.c
+++ b/net/netfilter/xt_TRACE.c
@@ -13,6 +13,13 @@ MODULE_ALIAS("ip6t_TRACE");
 static unsigned int
 trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_TRACE;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
+
 	skb->nf_trace = 1;
 	return XT_CONTINUE;
 }
diff --git a/net/netfilter/xt_layer7.c b/net/netfilter/xt_layer7.c
new file mode 100644
index 0000000000000000000000000000000000000000..b5e671193d461bad4869de0f8e827e15ad69a7dc
--- /dev/null
+++ b/net/netfilter/xt_layer7.c
@@ -0,0 +1,634 @@
+/*
+  Kernel module to match application layer (OSI layer 7) data in connections.
+
+  http://l7-filter.sf.net
+
+  (C) 2003-2009 Matthew Strait and Ethan Sommer.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version
+  2 of the License, or (at your option) any later version.
+  http://www.gnu.org/licenses/gpl.txt
+
+  Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
+  xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
+  Ethan Sommer, Justin Levandoski.
+*/
+
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#endif
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_layer7.h>
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+
+#include "regexp/regexp.c"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
+MODULE_DESCRIPTION("iptables application layer match module");
+MODULE_ALIAS("ipt_layer7");
+MODULE_VERSION("2.21");
+
+static int maxdatalen = 2048; // this is the default
+module_param(maxdatalen, int, 0444);
+MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
+#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
+   #define DPRINTK(format,args...) printk(format,##args)
+#else
+   #define DPRINTK(format,args...)
+#endif
+
+/* Number of packets whose data we look at.
+This can be modified through /proc/net/layer7_numpackets */
+static int num_packets = 10;
+
+static struct pattern_cache {
+	char * regex_string;
+	regexp * pattern;
+	struct pattern_cache * next;
+} * first_pattern_cache = NULL;
+
+DEFINE_SPINLOCK(l7_lock);
+
+/* Sum the per-direction packet counters of a connection.  Returns 0
+ * when conntrack accounting is not enabled for this connection.
+ * (The dead "#if 1/#else" debug variant that used to live here was
+ * removed -- its #else branch was not even compilable.) */
+static int total_acct_packets(struct nf_conn *ct)
+{
+	struct nf_conn_counter *acct;
+
+	BUG_ON(ct == NULL);
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+	return (atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) +
+		atomic64_read(&acct[IP_CT_DIR_REPLY].packets));
+}
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
+/* Converts an unfriendly string into a friendly one by replacing
+unprintables with periods and all whitespace with " ".  Returns a
+kmalloc'd copy (caller frees) or NULL on allocation failure. */
+static char * friendly_print(unsigned char * s)
+{
+	/* hoisted: the original re-ran strlen() on every loop iteration,
+	 * making this accidentally O(n^2) */
+	size_t len = strlen(s);
+	char * f = kmalloc(len + 1, GFP_ATOMIC);
+	size_t i;
+
+	if(!f) {
+		if (net_ratelimit())
+			pr_debug("layer7: out of memory in "
+					"friendly_print, bailing.\n");
+		return NULL;
+	}
+
+	for(i = 0; i < len; i++){
+		if(isprint(s[i]) && s[i] < 128)	f[i] = s[i];
+		else if(isspace(s[i]))		f[i] = ' ';
+		else 				f[i] = '.';
+	}
+	f[i] = '\0';
+	return f;
+}
+
+/* Map a value in [0,15] to its lower-case hex digit.  Out-of-range
+ * input is logged (rate limited) and yields '\0', exactly as before. */
+static char dec2hex(int i)
+{
+	static const char hexdigits[] = "0123456789abcdef";
+
+	if (i >= 0 && i <= 15)
+		return hexdigits[i];
+
+	if (net_ratelimit())
+		pr_debug("layer7: Problem in dec2hex\n");
+	return '\0';
+}
+
+/* Render s as space-separated hex byte pairs ("68 69 ").  Returns a
+ * kmalloc'd string (caller frees) or NULL on allocation failure. */
+static char * hex_print(unsigned char * s)
+{
+	/* hoisted: the original called strlen() every iteration (O(n^2)) */
+	size_t len = strlen(s);
+	char * g = kmalloc(len*3 + 1, GFP_ATOMIC);
+	size_t i;
+
+	if(!g) {
+	       if (net_ratelimit())
+			pr_debug("layer7: out of memory in hex_print, "
+					"bailing.\n");
+	       return NULL;
+	}
+
+	for(i = 0; i < len; i++) {
+		g[i*3    ] = dec2hex(s[i]/16);
+		g[i*3 + 1] = dec2hex(s[i]%16);
+		g[i*3 + 2] = ' ';
+	}
+	g[i*3] = '\0';
+
+	return g;
+}
+#endif // DEBUG
+
+/* Use instead of regcomp.  As we expect to be seeing the same regexps
+ * over and over again, it makes sense to cache the results.  Returns
+ * the cached (possibly NULL, if compilation failed) pattern. */
+static regexp * compile_and_cache(const char * regex_string, 
+                                  const char * protocol)
+{
+	struct pattern_cache * node               = first_pattern_cache;
+	struct pattern_cache * last_pattern_cache = first_pattern_cache;
+	struct pattern_cache * tmp;
+	unsigned int len;
+
+	/* cache hit? */
+	while (node != NULL) {
+		if (!strcmp(node->regex_string, regex_string))
+		return node->pattern;
+
+		last_pattern_cache = node;/* points at the last non-NULL node */
+		node = node->next;
+	}
+
+	/* If we reach the end of the list, then we have not yet cached
+	   the pattern for this regex. Let's do that now.
+	   Be paranoid about running out of memory to avoid list corruption. */
+	tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
+
+	if(!tmp) {
+		if (net_ratelimit())
+			pr_debug("layer7: out of memory in "
+					"compile_and_cache, bailing.\n");
+		return NULL;
+	}
+
+	tmp->regex_string  = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
+	tmp->pattern       = kmalloc(sizeof(struct regexp),    GFP_ATOMIC);
+	tmp->next = NULL;
+
+	if(!tmp->regex_string || !tmp->pattern) {
+		if (net_ratelimit())
+			pr_debug("layer7: out of memory in "
+					"compile_and_cache, bailing.\n");
+		kfree(tmp->regex_string);
+		kfree(tmp->pattern);
+		kfree(tmp);
+		return NULL;
+	}
+
+	/* Ok.  The new node is all ready now. */
+	node = tmp;
+
+	if(first_pattern_cache == NULL) /* list is empty */
+		first_pattern_cache = node; /* make node the beginning */
+	else
+		last_pattern_cache->next = node; /* attach node to the end */
+
+	/* copy the string and compile the regex */
+	len = strlen(regex_string);
+	/* The kmalloc'd placeholder above served only as an OOM probe;
+	 * the assignment below replaces it with regcomp()'s own buffer,
+	 * so free the placeholder first -- the original code leaked it
+	 * for every newly cached pattern. */
+	kfree(node->pattern);
+	node->pattern = regcomp((char *)regex_string, &len);
+	if ( !node->pattern ) {
+		//if (net_ratelimit())
+			pr_debug("layer7: Error compiling regexp "
+					"\"%s\" (%s)\n", 
+					regex_string, protocol);
+		/* pattern is now cached as NULL, so we won't try again. */
+	}
+
+	strcpy(node->regex_string, regex_string);
+
+	return node->pattern;
+}
+
+/* True when skb carries an IP packet of a protocol l7-filter can
+ * inspect (TCP, UDP or ICMP); 0 otherwise. */
+static int can_handle(const struct sk_buff *skb)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (!iph)		/* not IP */
+		return 0;
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_ICMP:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Returns the offset into skb->data at which the application payload
+ * starts, based on the L4 protocol's header length. */
+static int app_data_offset(const struct sk_buff *skb)
+{
+	/* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
+	isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
+	int ip_hl = 4*ip_hdr(skb)->ihl;
+	int tcp_hl;
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_TCP:
+		/* Byte 12 of the TCP header holds the data offset.  We
+		cannot use skb->h.th->doff because the tcphdr struct is not
+		set up when routing (this is confirmed to be true in
+		Netfilter as well as QoS.) */
+		tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
+		return ip_hl + tcp_hl;
+	case IPPROTO_UDP:
+		return ip_hl + 8; /* UDP header is always 8 bytes */
+	case IPPROTO_ICMP:
+		return ip_hl + 8; /* ICMP header is 8 bytes */
+	default:
+		if (net_ratelimit())
+			pr_debug("layer7: tried to handle unknown "
+					"protocol!\n");
+		return ip_hl + 8; /* something reasonable */
+	}
+}
+
+/* handles whether there's a match when we aren't appending data anymore */
+/* Once classification has finished (or been given up), the buffered
+ * app data is released.  Child (expected) connections inherit the
+ * master's app_proto; unclassified masters are tagged "unknown" so
+ * they can be told apart from connections still being examined.
+ * Returns 1 when the connection's protocol matches info->protocol. */
+static int match_no_append(struct nf_conn * conntrack, 
+                           struct nf_conn * master_conntrack, 
+                           enum ip_conntrack_info ctinfo,
+                           enum ip_conntrack_info master_ctinfo,
+                           const struct xt_layer7_info * info)
+{
+	/* If we're in here, throw the app data away */
+	if(master_conntrack->layer7.app_data != NULL) {
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
+		if(!master_conntrack->layer7.app_proto) {
+			char * f = 
+			  friendly_print(master_conntrack->layer7.app_data);
+			char * g = 
+			  hex_print(master_conntrack->layer7.app_data);
+			pr_debug("\nl7-filter gave up after %d bytes "
+				"(%d packets):\n%s\n",
+				strlen(f), total_acct_packets(master_conntrack), f);
+			kfree(f);
+			pr_debug("In hex: %s\n", g);
+			kfree(g);
+		}
+#endif
+
+		kfree(master_conntrack->layer7.app_data);
+		master_conntrack->layer7.app_data = NULL; /* don't free again */
+	}
+
+	if(master_conntrack->layer7.app_proto){
+		/* Here child connections set their .app_proto (for /proc) */
+		if(!conntrack->layer7.app_proto) {
+			conntrack->layer7.app_proto = 
+			  kmalloc(strlen(master_conntrack->layer7.app_proto)+1, 
+			    GFP_ATOMIC);
+			if(!conntrack->layer7.app_proto){
+				if (net_ratelimit())
+					pr_debug("layer7: out of memory "
+							"in match_no_append, "
+							"bailing.\n");
+				/* NOTE(review): reports a match on OOM --
+				 * confirm this fail-open choice is intended */
+				return 1;
+			}
+			strcpy(conntrack->layer7.app_proto, 
+				master_conntrack->layer7.app_proto);
+		}
+
+		return (!strcmp(master_conntrack->layer7.app_proto, 
+				info->protocol));
+	}
+	else {
+		/* If not classified even num_packets have been checked, 
+		*set to "unknown" to distinguish from
+		*connections that are still being tested. */
+		master_conntrack->layer7.app_proto = 
+			kmalloc(strlen("unknown")+1, GFP_ATOMIC);
+		if(!master_conntrack->layer7.app_proto){
+			if (net_ratelimit())
+				pr_debug("layer7: out of memory in "
+						"match_no_append, bailing.\n");
+			return 1;
+		}
+		strcpy(master_conntrack->layer7.app_proto, "unknown");
+		return 0;
+	}
+}
+
+/* add the new app data to the conntrack.  Return number of bytes added. */
+/* NUL bytes are stripped and ASCII is lowercased (the bundled regex
+ * lib has no case-insensitive mode) while appending; the stored data
+ * is capped at maxdatalen-1 bytes plus terminator. */
+static int add_data(struct nf_conn * master_conntrack,
+                    char * app_data, int appdatalen)
+{
+	int length = 0, i;
+	int oldlength = master_conntrack->layer7.app_data_len;
+
+	/* This is a fix for a race condition by Deti Fliegl. However, I'm not 
+	   clear on whether the race condition exists or whether this really 
+	   fixes it.  I might just be being dense... Anyway, if it's not really 
+	   a fix, all it does is waste a very small amount of time. */
+	if(!master_conntrack->layer7.app_data) return 0;
+
+	/* Strip nulls. Make everything lower case (our regex lib doesn't
+	do case insensitivity).  Add it to the end of the current data. */
+	for(i = 0; i < maxdatalen-oldlength-1 &&
+		   i < appdatalen; i++) {
+		if(app_data[i] != '\0') {
+			/* the kernel version of tolower mungs 'upper ascii' */
+			master_conntrack->layer7.app_data[length+oldlength] =
+				isascii(app_data[i])? 
+					tolower(app_data[i]) : app_data[i];
+			length++;
+		}
+	}
+
+	master_conntrack->layer7.app_data[length+oldlength] = '\0';
+	master_conntrack->layer7.app_data_len = length + oldlength;
+
+	return length;
+}
+
+/* taken from drivers/video/modedb.c */
+static int my_atoi(const char *s)
+{
+	int val = 0;
+
+	for (;; s++) {
+		switch (*s) {
+			case '0'...'9':
+			val = 10*val+(*s-'0');
+			break;
+		default:
+			return val;
+		}
+	}
+}
+
+/* write out num_packets to userland. */
+static int layer7_read_proc(char* page, char ** start, off_t off, int count,
+                            int* eof, void * data)
+{
+	if(num_packets > 99 && net_ratelimit())
+		pr_debug("layer7: NOT REACHED. num_packets too big\n");
+
+	page[0] = num_packets/10 + '0';
+	page[1] = num_packets%10 + '0';
+	page[2] = '\n';
+	page[3] = '\0';
+
+	*eof=1;
+
+	return 3;
+}
+
+/* Read in num_packets from userland */
+static int layer7_write_proc(struct file* file, const char* buffer,
+                             unsigned long count, void *data)
+{
+	char * foo = kmalloc(count, GFP_ATOMIC);
+
+	if(!foo){
+		if (net_ratelimit())
+			pr_debug("layer7: out of memory, bailing. "
+					"num_packets unchanged.\n");
+		return count;
+	}
+
+	if(copy_from_user(foo, buffer, count)) {
+		return -EFAULT;
+	}
+
+
+	num_packets = my_atoi(foo);
+	kfree (foo);
+
+	/* This has an arbitrary limit to make the math easier. I'm lazy.
+	But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
+	if(num_packets > 99) {
+		pr_debug("layer7: num_packets can't be > 99.\n");
+		num_packets = 99;
+	} else if(num_packets < 1) {
+		pr_debug("layer7: num_packets can't be < 1.\n");
+		num_packets = 1;
+	}
+
+	return count;
+}
+
+static bool l7_mt4(const struct sk_buff *skbin, 
+                   struct xt_action_param *par)
+{
+	/* sidestep const without getting a compiler warning... */
+	struct sk_buff * skb = (struct sk_buff *)skbin; 
+	const struct xt_layer7_info * info = par->matchinfo;
+	enum ip_conntrack_info master_ctinfo, ctinfo;
+	struct nf_conn *master_conntrack, *conntrack;
+	unsigned char * app_data;
+	unsigned int pattern_result, appdatalen;
+	regexp * comppattern;
+
+	/* Be paranoid/incompetent - lock the entire match function. */
+	spin_lock_bh(&l7_lock);
+
+	if(!can_handle(skb)){
+		pr_debug("layer7: This is some protocol I can't handle. skb(%p)\n", skb);
+		spin_unlock_bh(&l7_lock);
+		return (bool)(info->invert);
+	}
+
+	/* Treat parent & all its children together as one connection, except
+	*   for the purpose of setting conntrack->layer7.app_proto in the actual
+	*   connection. This makes /proc/net/ip_conntrack more satisfying. 
+	*/
+	if(!(conntrack = nf_ct_get(skb, &ctinfo)) ||
+	   !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){
+		pr_debug("layer7: couldn't get conntrack. skb(%p)\n", skb);
+		spin_unlock_bh(&l7_lock);
+		return (bool)(info->invert);
+	}
+
+	/* Try to get a master conntrack (and its master etc) for FTP, etc. */
+	while (master_ct(master_conntrack) != NULL)
+		master_conntrack = master_ct(master_conntrack);
+
+	/* if we've classified it or seen too many packets or master conntrack
+	*   has its protocol name
+	*/
+	if(total_acct_packets(master_conntrack) > num_packets ||
+	   master_conntrack->layer7.app_proto) {
+
+		pattern_result = match_no_append(conntrack, master_conntrack, 
+						 ctinfo, master_ctinfo, info);
+
+		/* skb->cb[0] == seen. Don't do things twice if there are 
+		multiple l7 rules. I'm not sure that using cb for this purpose 
+		is correct, even though it says "put your private variables 
+		there". But it doesn't look like it is being used for anything
+		else in the skbs that make it here. */
+		skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
+
+		spin_unlock_bh(&l7_lock);
+		return (bool)(pattern_result ^ info->invert);
+	}
+
+	if(skb_is_nonlinear(skb)){
+		if(skb_linearize(skb) != 0){
+			if (net_ratelimit())
+				pr_debug("layer7: failed to linearize "
+						"packet, bailing.\n");
+			spin_unlock_bh(&l7_lock);
+			return (bool)(info->invert);
+		}
+	}
+
+	/* now that the skb is linearized, it's safe to set these. */
+	app_data = skb->data + app_data_offset(skb);
+	appdatalen = skb_tail_pointer(skb) - app_data;
+
+	/* the return value gets checked later, when we're ready to use it */
+	comppattern = compile_and_cache(info->pattern, info->protocol);
+
+	/* On the first packet of a connection, allocate space for app data */
+	if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] && 
+	   !master_conntrack->layer7.app_data){
+		master_conntrack->layer7.app_data = 
+			kmalloc(maxdatalen, GFP_ATOMIC);
+		if(!master_conntrack->layer7.app_data){
+			if (net_ratelimit())
+				pr_debug("layer7: out of memory in "
+						"l7_mt4, bailing.\n");
+			spin_unlock_bh(&l7_lock);
+			return (bool)(info->invert);
+		}
+
+		master_conntrack->layer7.app_data[0] = '\0';
+	}
+
+	/* Can be here, but unallocated, if num_packets is increased near
+	the beginning of a connection */
+	if(master_conntrack->layer7.app_data == NULL){
+		spin_unlock_bh(&l7_lock);
+		return (bool)(info->invert); /* unmatched */
+	}
+
+	if(!skb->cb[0]){
+		int newbytes;
+		newbytes = add_data(master_conntrack, app_data, appdatalen);
+
+		if(newbytes == 0) { /* didn't add any data */
+			skb->cb[0] = 1;
+			/* Didn't match before, not going to match now */
+			spin_unlock_bh(&l7_lock);
+			return (bool)(info->invert);
+		}
+	}
+
+	/* If looking for "unknown", then never match.  "Unknown" means that
+	we've given up; we're still trying with these packets. */
+	if(!strcmp(info->protocol, "unknown")) {
+		pattern_result = 0;
+	/* If looking for "unset", then always match. "Unset" means that we
+	haven't yet classified the connection. */
+	} else if(!strcmp(info->protocol, "unset")) {
+		pattern_result = 2;
+		pr_debug("layer7: matched unset: not yet classified "
+			"(%d/%d packets)\n",
+                        total_acct_packets(master_conntrack), num_packets);
+	/* If the regexp failed to compile, don't bother running it */
+	} else if(comppattern && 
+		  regexec(comppattern, master_conntrack->layer7.app_data)){
+		char tmp_commpattern[21];
+		char tmp_app_data[21];
+
+		memcpy(tmp_commpattern, comppattern, 20);
+		tmp_commpattern[20] = '\0';
+		memcpy(tmp_app_data, master_conntrack->layer7.app_data, 20);
+		tmp_app_data[20] 	= '\0';
+		pattern_result = 1;
+	} else pattern_result = 0;
+
+	if(pattern_result == 1) {
+		master_conntrack->layer7.app_proto = 
+			kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
+		if(!master_conntrack->layer7.app_proto){
+			if (net_ratelimit())
+				pr_debug("layer7: out of memory in "
+						"l7_mt4, bailing.\n");
+			spin_unlock_bh(&l7_lock);
+			return (bool)(pattern_result ^ info->invert);
+		}
+		strcpy(master_conntrack->layer7.app_proto, info->protocol);
+	} else if(pattern_result > 1) { /* cleanup from "unset" */
+		pattern_result = 1;
+	}
+
+	/* mark the packet seen */
+	skb->cb[0] = 1;
+
+	spin_unlock_bh(&l7_lock);
+	return (bool)(pattern_result ^ info->invert);
+}
+
+static void l7_destroy(const struct xt_mtdtor_param *par)
+{
+	nf_ct_l3proto_module_put(par->match->family);
+}
+
+static struct xt_match l7_mt_reg __read_mostly = {
+		.name		= "layer7",
+		.revision   = 0,
+		.family     = NFPROTO_IPV4,
+		.match		= l7_mt4,
+		.destroy	= l7_destroy,
+		.matchsize	= sizeof(struct xt_layer7_info),
+		.me		    = THIS_MODULE,
+};
+
+static void layer7_cleanup_proc(void)
+{
+	remove_proc_entry("layer7_numpackets", init_net.proc_net);
+}
+
+/* register the proc file */
+static void layer7_init_proc(void)
+{
+	struct proc_dir_entry* entry;
+	entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net);
+	entry->read_proc = layer7_read_proc;
+	entry->write_proc = layer7_write_proc;
+}
+
+static int __init xt_layer7_init(void)
+{
+	need_conntrack();
+
+	layer7_init_proc();
+	if(maxdatalen < 1) {
+		pr_debug("layer7: maxdatalen can't be < 1, "
+			"using 1\n");
+		maxdatalen = 1;
+	}
+	/* This is not a hard limit.  It's just here to prevent people from
+	bringing their slow machines to a grinding halt. */
+	else if(maxdatalen > 65536) {
+		pr_debug("layer7: maxdatalen can't be > 65536, "
+			"using 65536\n");
+		maxdatalen = 65536;
+	}
+	return xt_register_match(&l7_mt_reg);
+}
+
+static void __exit xt_layer7_fini(void)
+{
+	layer7_cleanup_proc();
+	xt_unregister_match(&l7_mt_reg);
+}
+
+module_init(xt_layer7_init);
+module_exit(xt_layer7_fini);
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
index 176e5570a9991727c8613de3bd8fe3341a852501..8d710fb16b106502aa9cdcc760d1bb6abca7dc57 100644
--- a/net/netfilter/xt_length.c
+++ b/net/netfilter/xt_length.c
@@ -26,6 +26,13 @@ length_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_length_info *info = par->matchinfo;
 	u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	struct sk_buff *skb_p;
+	skb_p = (struct sk_buff *)skb;
+	skb_p->ipt_check |= IPT_MATCH_LENGTH;
+	skb_p->ipt_log.u32[BLOG_MIN_LEN_INDEX] = info->min;
+	skb_p->ipt_log.u32[BLOG_MAX_LEN_INDEX] = info->max;
+#endif
 	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
 }
 
@@ -36,6 +43,13 @@ length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 	const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
 				 sizeof(struct ipv6hdr);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	struct sk_buff *skb_p;
+	skb_p = (struct sk_buff *)skb;
+	skb_p->ipt_check |= IPT_MATCH_LENGTH;
+	skb_p->ipt_log.u32[BLOG_MIN_LEN_INDEX] = info->min;
+	skb_p->ipt_log.u32[BLOG_MAX_LEN_INDEX] = info->max;
+#endif
 	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
 }
 
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 23345238711b515805a63687b774cdb4b6d788f4..ffd968a66eec5456cb2f731b03cdd9afae41cb09 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -28,6 +28,15 @@ static unsigned int
 mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_mark_tginfo2 *info = par->targinfo;
+    
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	skb->ipt_check |= IPT_TARGET_MARK;
+	skb->ipt_log.u32[BLOG_ORIGINAL_MARK_INDEX] = skb->mark;
+	skb->ipt_log.u32[BLOG_TARGET_MARK_INDEX] = (skb->mark & ~info->mask) ^
+	   info->mark;
+	if ( skb->ipt_check & IPT_TARGET_CHECK )
+		return XT_CONTINUE;
+#endif
 
 	skb->mark = (skb->mark & ~info->mask) ^ info->mark;
 	return XT_CONTINUE;
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index c14d4645daa3525d3ce45548344e4973763cd599..90b7f0dfeb7c79a0d0c23c9d3df3a845a6643cb5 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -102,6 +102,14 @@ static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 			ntohs(th->dest),
 			!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
 		return false;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+	if ( tcpinfo->flg_mask & 0x10 ) {
+		struct sk_buff *skb_p;
+		skb_p = (struct sk_buff *)skb;
+	}
+#endif
+
 	if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
 		      == tcpinfo->flg_cmp,
 		      XT_TCP_INV_FLAGS))
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
index 6ec7d55b1769933f399af5f4673b95c2de83150c..3d3a05b6a20953b85b34b7c68389b71ada01d9a4 100644
--- a/net/phonet/Kconfig
+++ b/net/phonet/Kconfig
@@ -14,3 +14,28 @@ config PHONET
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called phonet. If unsure, say N.
+if BCM_KF_PHONET
+if PHONET
+config LD_PHONET
+	tristate "Line Discipline for Phonet"
+	help
+	  Line discipline for transmitting and receiving phonet packet to modem
+	  via TTY device
+
+config LD_TMODEM
+	tristate "Line Discipline for Thin Modem"
+	help
+	  Line discipline for transmitting and receiving thin modem packet to
+	  modem via TTY device
+
+config PHONET_DEBUG
+	boolean "Debug support for PHONET drivers"
+	depends on DEBUG_KERNEL
+	help
+          Say "yes" to enable phonet debug messaging
+
+	  Activate Phonet header logging
+
+	  Activate Phonet data logging
+endif
+endif
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
index e10b1b182ce3f03c5e6881ab7ce86cdfa633b91f..c58ac4acae5f2aa5b4383ffddd0c9e9338f55ef8 100644
--- a/net/phonet/Makefile
+++ b/net/phonet/Makefile
@@ -9,3 +9,10 @@ phonet-y := \
 	af_phonet.o
 
 pn_pep-y := pep.o pep-gprs.o
+
+ifdef BCM_KF # defined(CONFIG_BCM_KF_PHONET)
+obj-$(CONFIG_LD_PHONET) += ld_phonet.o
+obj-$(CONFIG_LD_TMODEM) += ld_tmodem.o
+
+subdir-ccflags-y += -Werror
+endif # BCM_KF
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699fbf343a98d788a8f49286c71dc4f7887f..a1d40ea6059ebb47aaabdf75c7e18a1f8b6fd4fd 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -34,6 +34,52 @@
 #include <net/phonet/phonet.h>
 #include <net/phonet/pn_dev.h>
 
+#ifdef CONFIG_BCM_KF_PHONET
+#ifdef ACTIVATE_PHONET_DEBUG
+
+phonet_debug_state phonet_dbg_state = OFF;
+
+static ssize_t phonet_show(struct kobject *kobj, struct kobj_attribute *attr,
+			   char *buf)
+{
+
+	switch (phonet_dbg_state) {
+	case ON:
+		return sprintf(buf, "on\n");
+	case OFF:
+		return sprintf(buf, "off\n");
+	case DATA:
+		return sprintf(buf, "data\n");
+	default:
+		return -ENODEV;
+	}
+
+	return -ENODEV;
+}
+
+static ssize_t phonet_store(struct kobject *kobj, struct kobj_attribute *attr,
+			    const char *buf, size_t n)
+{
+
+	if (sysfs_streq(buf, "on")) {
+		phonet_dbg_state = ON;
+		pr_alert(
+		       "Phonet traces activated\nBe Careful do not trace Dmesg in MTD\n");
+	} else if (sysfs_streq(buf, "off")) {
+		phonet_dbg_state = OFF;
+	} else if (sysfs_streq(buf, "data")) {
+		phonet_dbg_state = DATA;
+	} else {
+		pr_alert("please use  on/off/data\n");
+	}
+	return -EINVAL;
+}
+
+static struct kobj_attribute phonet_attr =
+__ATTR(phonet_dbg, 0644, phonet_show, phonet_store);
+#endif
+#endif /* CONFIG_BCM_KF_PHONET */
+
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
@@ -68,8 +114,13 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
 	struct phonet_protocol *pnp;
 	int err;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	/* skip the capable check in order to allow user
+	 * applications creating phonet socket */
+#else
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+#endif
 
 	if (protocol == 0) {
 		/* Default protocol selection */
@@ -112,6 +163,10 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
 	pn->sobject = 0;
 	pn->dobject = 0;
 	pn->resource = 0;
+#ifdef CONFIG_BCM_KF_PHONET
+	pn->resource_type = 0;
+	pn->resource_subtype = 0;
+#endif
 	sk->sk_prot->init(sk);
 	err = 0;
 
@@ -163,6 +218,9 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
 {
 	struct phonethdr *ph;
 	int err;
+#ifdef CONFIG_BCM_KF_PHONET
+	int i;
+#endif
 
 	if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
 	    skb->len + sizeof(struct phonethdr) > dev->mtu) {
@@ -192,6 +250,18 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
 	skb->priority = 0;
 	skb->dev = dev;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PN_PRINTK("pn_send  rdev %x sdev %x res %x robj %x sobj %x netdev=%s\n",
+		  ph->pn_rdev, ph->pn_sdev, ph->pn_res, ph->pn_robj,
+		  ph->pn_sobj, dev->name);
+	PN_DATA_PRINTK("PHONET : skb  data = %d\nPHONET :", skb->len);
+	for (i = 1; i <= skb->len; i++) {
+		PN_DATA_PRINTK(" %02x", skb->data[i - 1]);
+		if ((i % 8) == 0)
+			PN_DATA_PRINTK("\n");
+	}
+#endif
+
 	if (skb->pkt_type == PACKET_LOOPBACK) {
 		skb_reset_mac_header(skb);
 		skb_orphan(skb);
@@ -280,7 +350,11 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
 		goto drop;
 
 	if (!pn_addr(src))
+#ifdef CONFIG_BCM_KF_PHONET
+		src = pn_object(saddr, pn_port(src));
+#else
 		src = pn_object(saddr, pn_obj(src));
+#endif
 
 	err = pn_send(skb, dev, dst, src, res, 0);
 	dev_put(dev);
@@ -376,6 +450,9 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct phonethdr *ph;
 	struct sockaddr_pn sa;
 	u16 len;
+#ifdef CONFIG_BCM_KF_PHONET
+	int i;
+#endif
 
 	/* check we have at least a full Phonet header */
 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
@@ -393,6 +470,26 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	pn_skb_get_dst_sockaddr(skb, &sa);
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PN_PRINTK(
+	    "phonet rcv : phonet hdr rdev %x sdev %x res %x robj %x sobj %x netdev=%s orig_netdev=%s\n",
+	     ph->pn_rdev, ph->pn_sdev, ph->pn_res, ph->pn_robj, ph->pn_sobj,
+	     dev->name, orig_dev->name);
+
+	PN_DATA_PRINTK("PHONET : skb  data = %d\nPHONET :", skb->len);
+	for (i = 1; i <= skb->len; i++) {
+		PN_DATA_PRINTK(" %02x", skb->data[i - 1]);
+		if ((i % 8) == 0)
+			PN_DATA_PRINTK("\n");
+	}
+
+	/* check if this is multicasted */
+	if (pn_sockaddr_get_object(&sa) == PNOBJECT_MULTICAST) {
+		pn_deliver_sock_broadcast(net, skb);
+		goto out;
+	}
+#endif
+
 	/* check if this is broadcasted */
 	if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
 		pn_deliver_sock_broadcast(net, skb);
@@ -409,7 +506,11 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 	/* check if we are the destination */
 	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
 		/* Phonet packet input */
+#ifdef CONFIG_BCM_KF_PHONET
+		struct sock *sk = pn_find_sock_by_sa_and_skb(net, &sa, skb);
+#else
 		struct sock *sk = pn_find_sock_by_sa(net, &sa);
+#endif
 
 		if (sk)
 			return sk_receive_skb(sk, skb, 0);
@@ -434,8 +535,15 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 		__skb_push(skb, sizeof(struct phonethdr));
 		skb->dev = out_dev;
 		if (out_dev == dev) {
+#ifdef CONFIG_BCM_KF_PHONET
+			LIMIT_NETDEBUG(KERN_ERR
+				       "Phonet loop to %02X on %s ori=%s\n",
+				       pn_sockaddr_get_addr(&sa), dev->name,
+				       orig_dev->name);
+#else
 			LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
 					pn_sockaddr_get_addr(&sa), dev->name);
+#endif
 			goto out_dev;
 		}
 		/* Some drivers (e.g. TUN) do not allocate HW header space */
@@ -502,6 +610,13 @@ EXPORT_SYMBOL(phonet_proto_unregister);
 static int __init phonet_init(void)
 {
 	int err;
+#ifdef CONFIG_BCM_KF_PHONET
+#ifdef ACTIVATE_PHONET_DEBUG
+	err = sysfs_create_file(kernel_kobj, &phonet_attr.attr);
+	if (err)
+		pr_alert("phonet sysfs_create_file failed: %d\n", err);
+#endif
+#endif
 
 	err = phonet_device_init();
 	if (err)
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index bf35b4e1a14c02dfe8f3c8f03dbcb19084bcaaf7..af000e71f7195ad17f0ea774fa59a2a2d8736d6c 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -139,8 +139,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
 			MSG_CMSG_COMPAT))
 		goto out_nofree;
 
+#ifndef CONFIG_BCM_KF_PHONET
 	if (addr_len)
 		*addr_len = sizeof(sa);
+#endif
 
 	skb = skb_recv_datagram(sk, flags, noblock, &rval);
 	if (skb == NULL)
@@ -162,8 +164,15 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
 
 	rval = (flags & MSG_TRUNC) ? skb->len : copylen;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	if (msg->msg_name != NULL) {
+		memcpy(msg->msg_name, &sa, sizeof(sa));
+		*addr_len = sizeof(sa);
+	}
+#else
 	if (msg->msg_name != NULL)
 		memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
+#endif
 
 out:
 	skb_free_datagram(sk, skb);
diff --git a/net/phonet/ld_phonet.c b/net/phonet/ld_phonet.c
new file mode 100644
index 0000000000000000000000000000000000000000..6e894901f8f5ad9a07211a5b29f930a8985494e8
--- /dev/null
+++ b/net/phonet/ld_phonet.c
@@ -0,0 +1,1004 @@
+#ifdef CONFIG_BCM_KF_PHONET
+/*
+<:copyright-BRCM:2011:DUAL/GPL:standard
+
+   Copyright (c) 2011 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: ld_phonet.c
+ *
+ * Phonet device TTY line discipline
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serio.h>
+#include <linux/tty.h>
+
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <linux/errno.h>
+
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pn_dev.h>
+//#include <linux/switch.h>	/* AT-ISI Separation */	// TODO: do we need this header? it is not in 3.4.11
+#include <linux/interrupt.h>
+MODULE_AUTHOR("david RMC");
+MODULE_DESCRIPTION("Phonet TTY line discipline");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_LDISC(N_PHONET);
+
+/* Comment - 01 */
+/*AT+ATSTART will be entered by closing ISI application. By the methods under
+ implementation for disconnecting an application in NTrace)the same is
+ expected to be available for TnT), it is expected that, congestion condition
+ will be present when executing AT+ATSTART allowing a few bytes of room from
+ underlying layer. Hence, keeping simplicity later write_back functionality
+ is not used here as it is done for normal transfer.*/
+
+/* Comment - 02 */
+/*If control is transferred to AT Parser, activateld can close the tty
+ interfering tty->write. Hence, tty->write is done first. Only
+ programming error can fail AT switch . practically, no other reasons apply.
+ Tty->write will synchronously write to the lower driver which can later
+ transfer the data in tty independent way. In testing no synchronization
+ issue is seen.*/
+
+#define SEND_QUEUE_LOW		10
+#define SEND_QUEUE_HIGH		100
+#define PHONET_SENDING		1	/* Bit 1 = 0x02 */
+#define PHONET_FLOW_OFF_SENT	4	/* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK		8192
+#define ISI_MSG_HEADER_SIZE	6
+/*#define MAX_BUFF_SIZE		20000*/
+#define MAX_BUFF_SIZE		65535
+
+#define LD_PHONET_SWITCH		4
+#define LD_PHONET_NEW_ISI_MSG		0
+#define LD_PHONET_ISI_MSG_LEN		1
+#define LD_PHONET_ISI_MSG_NO_LEN	2
+
+#define LD_PHONET_BUFFER_LEN	1048576
+#define LD_PHONET_INIT_LEN	0
+
+#define LD_ATCMD_BUFFER_LEN	1024
+
+#define LD_WAKEUP_DATA_INIT	0
+#define ATPLIB_AT_CMD_MAX	1024
+
+#define LD_SWITCH_ATSTART_RESP		1
+#define LD_SWITCH_MODECHAN02_RESP	2
+
+#define GUID_HEADER_BYTE1	0xdd
+#define GUID_HEADER_BYTE2	0x7f
+#define GUID_HEADER_BYTE3	0x21
+#define GUID_HEADER_BYTE4	0x9a
+
+
+struct ld_phonet {
+	struct tty_struct *tty;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+	unsigned long flags;
+	struct sk_buff *skb;
+	unsigned long len;
+	unsigned long lentorcv;
+	unsigned long datarcv;
+	unsigned long state;
+	struct net_device *dev;
+	struct list_head node;
+	struct sk_buff_head head;
+	char *tty_name;
+	int ld_phonet_state;
+	int n_data_processed;
+	int n_data_sent;
+	int n_remaining_data;
+	bool link_up;
+	int nb_try_to_tx;
+	unsigned char *ld_atcmd_buffer;
+};
+
+static int ld_buff_len;		/* LD Phonet Tx Backlog buffer Len */
+static struct workqueue_struct *ld_phonet_wq;
+
+/* Work to handle TTY wake up */
+struct ld_tty_wakeup_work_t {
+	struct work_struct ld_work;
+	/*This holds TTY info for TTY wakeup */
+	struct tty_struct *ld_work_write_wakeup_tty;
+};
+static struct ld_tty_wakeup_work_t *ld_tty_wakeup_work;
+
+/* Work to handle AT+ATSTART Switch */
+struct ld_uart_switch_work_t {
+	struct work_struct ld_work;
+	unsigned long at_modechan02_mode;
+};
+static struct ld_uart_switch_work_t *ld_uart_switch_work;
+
+/* Ld phonet statistics */
+static unsigned long ld_phonet_tx_request_count;
+static unsigned long ld_phonet_rx_request_count;
+static unsigned long ld_phonet_tx_bytes;
+static unsigned long ld_phonet_rx_bytes;
+static unsigned long ld_phonet_hangup_events;
+static unsigned long ld_phonet_drop_events;
+
+/* AT-ISI Separation ends */
+#define LD_PHONET_DEBUG 0
+#if LD_PHONET_DEBUG
+#define dbg(fmt, ...) printk(fmt,  ## __VA_ARGS__)
+#else
+#define dbg(fmt, ...)
+#endif
+
+static int ld_pn_net_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int ld_pn_net_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static void ld_tx_overflow(void)
+{
+	ld_phonet_drop_events++;
+	pr_crit(
+	       "##### ATTENTION : LD Phonet Transmit overflow events %lu : 1 MB #####",
+	       ld_phonet_drop_events);
+}
+
+static int ld_pn_handle_tx(struct ld_phonet *ld_pn)
+{
+	struct tty_struct *tty = ld_pn->tty;
+	struct sk_buff *skb;
+	int tty_wr, len, room, i;
+
+	if (tty == NULL)
+		return 0;
+	/* Enter critical section */
+	if (test_and_set_bit(PHONET_SENDING, &ld_pn->state))
+		return 0;
+
+	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
+	while ((skb = skb_peek(&ld_pn->head)) != NULL) {
+
+		/* Make sure you don't write too much */
+		len = skb->len;
+		room = tty_write_room(tty);
+
+		if (!room) {
+			if (ld_buff_len > LD_PHONET_BUFFER_LEN) {
+				if (ld_pn->link_up == true)
+					ld_tx_overflow();
+				ld_pn->link_up = false;
+				/* Flush TX queue */
+				while ((skb =
+					skb_dequeue(&ld_pn->head)) != NULL) {
+					skb->dev->stats.tx_dropped++;
+					dbg("Flush TX queue tx_dropped = %d",
+					    skb->dev->stats.tx_dropped);
+					if (in_interrupt())
+						dev_kfree_skb_irq(skb);
+					else
+						kfree_skb(skb);
+				}
+				ld_buff_len = LD_PHONET_INIT_LEN;
+				goto error;
+			} else {	/* FALLBACK TRIAL */
+				dbg(
+					"ld_pn_handle_tx no room, waiting for previous to be sent..:\n");
+
+				if (!test_bit(TTY_DO_WRITE_WAKEUP,
+					&tty->flags)) {
+					/* wakeup bit is not set, set it */
+					dbg(
+						"ld_pn_handle_tx Setting TTY_DO_WRITE_WAKEUP bit...\n");
+					set_bit(TTY_DO_WRITE_WAKEUP,
+						&tty->flags);
+				} else {
+					dbg(
+						"ld_pn_handle_tx TTY_DO_WRITE_WAKEUP bit already set!...\n");
+				}
+			}
+			break;
+		}
+
+		/* Get room => reset nb_try_to_tx counter */
+		ld_pn->nb_try_to_tx = 0;
+
+		if (len > room)
+			len = room;
+
+		tty_wr = tty->ops->write(tty, skb->data, len);
+		ld_buff_len -= tty_wr;
+		if (tty_wr > 0)
+			ld_phonet_tx_bytes += tty_wr;
+		if (ld_buff_len < LD_PHONET_INIT_LEN)
+			ld_buff_len = LD_PHONET_INIT_LEN;
+		ld_pn->dev->stats.tx_packets++;
+		ld_pn->dev->stats.tx_bytes += tty_wr;
+		dbg(" Response start\n");
+		for (i = 1; i <= len; i++) {
+			dbg(" %02x", skb->data[i - 1]);
+			if ((i % 8) == 0)
+				dbg("\n");
+		}
+		dbg("\n");
+		dbg(" Response stop\n");
+		/* Error on TTY ?! */
+		if (tty_wr < 0)
+			goto error;
+		/* Reduce buffer written, and discard if empty */
+		skb_pull(skb, tty_wr);
+		if (skb->len == 0) {
+			struct sk_buff *tmp = skb_dequeue(&ld_pn->head);
+			BUG_ON(tmp != skb);
+			if (in_interrupt())
+				dev_kfree_skb_irq(skb);
+			else
+				kfree_skb(skb);
+		}
+	}
+	/* Send flow off if queue is empty */
+	clear_bit(PHONET_SENDING, &ld_pn->state);
+	return NETDEV_TX_OK;
+error:
+	clear_bit(PHONET_SENDING, &ld_pn->state);
+	return NETDEV_TX_OK;
+}
+
+static int ld_pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ld_phonet *ld_pn;
+	u8 *ptr;
+
+	BUG_ON(dev == NULL);
+	ld_pn = netdev_priv(dev);
+	if ((ld_pn == NULL) || (ld_pn->tty == NULL)) {
+		if (in_interrupt())
+			dev_kfree_skb_irq(skb);
+		else
+			kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	ld_phonet_tx_request_count++;
+	ptr = skb_push(skb, 6);
+	ptr[0] = GUID_HEADER_BYTE1;
+	ptr[1] = GUID_HEADER_BYTE2;
+	ptr[2] = GUID_HEADER_BYTE3;
+	ptr[3] = GUID_HEADER_BYTE4;
+	ptr[4] = skb->data[10];
+	ptr[5] = skb->data[11];
+	PN_PRINTK("ld_pn_net_xmit: send skb to %s", dev->name);
+	if (ld_pn->link_up == true) {
+		skb_queue_tail(&ld_pn->head, skb);
+		ld_buff_len += skb->len;
+		return ld_pn_handle_tx(ld_pn);
+	} else {
+		if (tty_write_room(ld_pn->tty)) {
+			/* link is up again */
+			ld_pn->link_up = true;
+			ld_pn->nb_try_to_tx = 0;
+			skb_queue_tail(&ld_pn->head, skb);
+			ld_buff_len += skb->len;
+			return ld_pn_handle_tx(ld_pn);
+		} else {
+			if (in_interrupt())
+				dev_kfree_skb_irq(skb);
+			else
+				kfree_skb(skb);
+			dev->stats.tx_dropped++;
+			dbg("tx_dropped = %d", dev->stats.tx_dropped);
+			return NETDEV_TX_OK;
+		}
+	}
+}
+
+static int ld_pn_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCPNGAUTOCONF:
+		phonet_route_add(dev, PN_DEV_PC);
+		dev_open(dev);
+		netif_carrier_on(dev);
+		/* Return NOIOCTLCMD so Phonet won't do it again */
+		return -ENOIOCTLCMD;
+	}
+	return -ENOIOCTLCMD;
+}
+
+static int ld_pn_net_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops ld_pn_netdev_ops = {
+	.ndo_open = ld_pn_net_open,
+	.ndo_stop = ld_pn_net_close,
+	.ndo_start_xmit = ld_pn_net_xmit,
+	.ndo_do_ioctl = ld_pn_net_ioctl,
+	.ndo_change_mtu = ld_pn_net_mtu,
+};
+
+#define PN_ADDR_LEN 1
+#define PN_QUEUE_LEN 5
+#define PN_HARD_HEADER_LEN 1
+static void ld_pn_net_setup(struct net_device *dev)
+{
+	dev->features = 0;
+	dev->type = ARPHRD_PHONET;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = PHONET_DEV_MTU;
+	dev->hard_header_len = PN_HARD_HEADER_LEN;
+	dev->dev_addr[0] = PN_MEDIA_USB;
+	dev->addr_len = PN_ADDR_LEN;
+	dev->tx_queue_len = PN_QUEUE_LEN;
+
+	dev->netdev_ops = &ld_pn_netdev_ops;
+	dev->destructor = free_netdev;
+	dev->header_ops = &phonet_header_ops;
+};
+
+/*****************************************
+*** TTY
+******************************************/
+#define LD_RECEIVE_ROOM 65536
+/*
+ * Line-discipline open: allocate and register an "upnlink%d" net
+ * device whose private area is the struct ld_phonet bound to @tty.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original:
+ *  - the kmalloc() failure path jumped (via a goto into the body of
+ *    "if (err)") to free_netdev() and then returned err == 0,
+ *    reporting success while the device had been freed; it now
+ *    returns -ENOMEM;
+ *  - a register_netdev() failure leaked ld_atcmd_buffer;
+ *  - tty->disc_data is cleared on failure so the tty layer is not
+ *    left pointing at freed memory.
+ */
+static int ld_phonet_ldisc_open(struct tty_struct *tty)
+{
+	struct ld_phonet *ld_pn;
+	struct net_device *dev;
+	int err = 0;
+
+	dbg("ld_phonet_ldisc_open starts\n");
+
+	/* Create net device */
+	dev = alloc_netdev(sizeof(*ld_pn), "upnlink%d", ld_pn_net_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ld_pn = netdev_priv(dev);
+	spin_lock_init(&ld_pn->lock);
+	netif_carrier_off(dev);
+	skb_queue_head_init(&ld_pn->head);
+	ld_pn->tty = tty;
+	tty->disc_data = ld_pn;
+	tty->receive_room = LD_RECEIVE_ROOM;
+	ld_pn->dev = dev;
+	ld_pn->skb = NULL;
+	/* Reset the ISI reassembly state machine. */
+	ld_pn->len = 0;
+	ld_pn->lentorcv = 0;
+	ld_pn->datarcv = 0;
+	ld_pn->ld_phonet_state = LD_PHONET_NEW_ISI_MSG;
+	ld_pn->n_data_processed = 0;
+	ld_pn->n_data_sent = 0;
+	ld_pn->n_remaining_data = 0;
+	ld_pn->link_up = true;
+	ld_pn->nb_try_to_tx = 0;
+	ld_pn->ld_atcmd_buffer = kmalloc(LD_ATCMD_BUFFER_LEN, GFP_KERNEL);
+	if (ld_pn->ld_atcmd_buffer == NULL) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+	err = register_netdev(dev);
+	if (err)
+		goto err_free_buffer;
+
+	/* Remember the tty for the deferred write-wakeup work item. */
+	ld_tty_wakeup_work->ld_work_write_wakeup_tty = tty;
+
+	dbg("ld_phonet_ldisc_open exits err = %d\n", err);
+	return 0;
+
+err_free_buffer:
+	kfree(ld_pn->ld_atcmd_buffer);
+err_free_netdev:
+	tty->disc_data = NULL;
+	free_netdev(dev);
+	dbg("ld_phonet_ldisc_open exits err = %d\n", err);
+	return err;
+}
+
+/*
+ * Line-discipline close: detach from the tty and tear down the net
+ * device created in ld_phonet_ldisc_open().
+ *
+ * NOTE(review): the AT command buffer is freed and the tty pointer
+ * cleared before unregister_netdev(); confirm no transmit/receive
+ * path can still reach them while the device is being unregistered.
+ */
+static void ld_phonet_ldisc_close(struct tty_struct *tty)
+{
+	struct ld_phonet *ld_pn = tty->disc_data;
+
+	tty->disc_data = NULL;
+	kfree(ld_pn->ld_atcmd_buffer);
+	ld_pn->tty = NULL;
+	/* Forget the tty used by the deferred write-wakeup work. */
+	ld_tty_wakeup_work->ld_work_write_wakeup_tty = NULL;
+	unregister_netdev(ld_pn->dev);
+}
+
+/*
+ * Start parsing one or more ISI messages from the raw tty buffer @cp
+ * (@count bytes total), resuming at offset ld_pn->n_data_processed.
+ *
+ * For each message: if a full ISI header is available, the payload
+ * length is read from the phonethdr and either a complete message is
+ * forwarded to the Phonet stack via netif_rx(), or the partial body
+ * is stashed in ld_pn->skb and the state machine moves to
+ * LD_PHONET_ISI_MSG_LEN.  If fewer than ISI_MSG_HEADER_SIZE bytes
+ * remain, the fragment is stashed and the state becomes
+ * LD_PHONET_ISI_MSG_NO_LEN.  When @cp is exhausted the state is
+ * reset to LD_PHONET_NEW_ISI_MSG.
+ *
+ * Called from ld_phonet_ldisc_receive() with ld_pn->lock held.
+ */
+static void ld_phonet_ldisc_init_transfer
+	(struct ld_phonet *ld_pn, const unsigned char *cp, int count)
+{
+	struct sk_buff *skb = NULL;
+	unsigned int msglen = 0;
+
+	struct phonethdr *ph = NULL;
+	int i;
+
+	dbg("ld_phonet: initiate transfer Data Sent = %d ", ld_pn->n_data_sent);
+	dbg("Data Processed = %d ", ld_pn->n_data_processed);
+	dbg("Data Remaining = %d\n", ld_pn->n_remaining_data);
+
+	/* Check if there is still data in cp */
+	while (ld_pn->n_data_processed < count) {
+		/* Check if extract length is possible */
+		/* NOTE(review): '>' means a remainder of exactly
+		 * ISI_MSG_HEADER_SIZE bytes takes the no-length branch
+		 * below - confirm this is intended. */
+		if ((count - ld_pn->n_data_processed) > ISI_MSG_HEADER_SIZE) {
+			/* Extract length */
+			/* Move 1 byte since media parameter is not there in */
+			/* phonethdr structure */
+			ph = (struct phonethdr *)
+			    (cp + ld_pn->n_data_processed + sizeof(char));
+			msglen = get_unaligned_be16(&ph->pn_length);
+			ld_pn->len = msglen + ISI_MSG_HEADER_SIZE;
+
+			/* Zero payload length: cannot make progress,
+			 * drop the rest of this buffer. */
+			if (ld_pn->len == ISI_MSG_HEADER_SIZE) {
+				printk(
+					"ld_phonet: Extracted ISI msg len = ISI_MSG_HEADER_SIZE, dumping rest of buffer");
+				goto out;
+			}
+
+			/* Alloc SKBuff */
+			skb = netdev_alloc_skb(ld_pn->dev, ld_pn->len);
+			if (NULL == skb) {
+				/* TBD handle error */
+				return;
+			}
+
+			skb->dev = ld_pn->dev;
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			ld_pn->skb = skb;
+
+			/* check if we receive complete data in this */
+			/* usb frame */
+			if (ld_pn->len <= (count - ld_pn->n_data_processed)) {
+				/* We received complete data in this usb */
+				/* frame */
+				/* copy the ISI buffer */
+				memcpy(skb_put(skb, ld_pn->len),
+				       cp + ld_pn->n_data_processed,
+				       ld_pn->len);
+				ld_pn->n_data_processed += ld_pn->len;
+
+				/* Send to Phonet */
+				ld_pn->dev->stats.rx_packets++;
+				ld_pn->dev->stats.rx_bytes += skb->len;
+				/* Strip the one-byte media header. */
+				__skb_pull(skb, 1);
+				dbg("Request buffer start\n");
+				for (i = 1; i <= skb->len; i++) {
+					dbg("%02x", skb->data[i - 1]);
+					if (i % 8 == 0)
+						dbg("\n");
+				}
+
+				dbg("Request buffer end\n");
+				dbg(
+					"calling netif_rx inside initiate_transfer ld_pn->len=%d\n",
+					ld_pn->len);
+				ld_pn->n_data_sent += ld_pn->len;
+				ld_phonet_rx_bytes += skb->len;
+				netif_rx(skb);
+
+				/* TBD : Reset pointers */
+				ld_pn->len = LD_PHONET_INIT_LEN;
+			} else {
+				/* We receive only partial ISI message */
+				/* Copy the partial ISI message */
+				memcpy(skb_put(skb, count -
+					       ld_pn->n_data_processed), cp +
+				       ld_pn->n_data_processed, count -
+				       ld_pn->n_data_processed);
+				/* Wait for the rest of the body in the
+				 * next tty buffer. */
+				ld_pn->ld_phonet_state = LD_PHONET_ISI_MSG_LEN;
+				ld_pn->n_remaining_data = ld_pn->len -
+				    (count - ld_pn->n_data_processed);
+				ld_pn->n_data_processed += count -
+				    ld_pn->n_data_processed;
+
+				return;
+			}
+		} else {
+			/* Not able to extract length since received */
+			/* usb frame length is */
+			/* less than ISI message header size */
+
+			/* Alloc SKBuff with max size */
+			skb = netdev_alloc_skb(ld_pn->dev, MAX_BUFF_SIZE);
+			if (NULL == skb) {
+				/* TBD handle error */
+				return;
+			}
+
+			skb->dev = ld_pn->dev;
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			ld_pn->skb = skb;
+
+			/* Copy available data */
+			memcpy(skb_put(skb, count - ld_pn->n_data_processed),
+			       cp + ld_pn->n_data_processed, count -
+			       ld_pn->n_data_processed);
+			/* Header incomplete: length still unknown. */
+			ld_pn->ld_phonet_state = LD_PHONET_ISI_MSG_NO_LEN;
+
+			ld_pn->len += count - ld_pn->n_data_processed;
+			ld_pn->n_data_processed +=
+			    count - ld_pn->n_data_processed;
+
+			return;
+		}
+	}
+
+out:
+	/* No more data in cp */
+	ld_pn->ld_phonet_state = LD_PHONET_NEW_ISI_MSG;
+	ld_pn->len = 0;
+	ld_pn->n_data_processed = 0;
+	ld_pn->n_data_sent = 0;
+	ld_pn->n_remaining_data = 0;
+
+	return;
+}
+
+/* AT-ISI Message Separation Starts */
+
+/* Forward declaration of the manual-switch sysfs store hook;
+ * presumably defined elsewhere in this file - confirm. */
+ssize_t ld_set_manualsw(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count);
+
+/* NOTE(review): flag apparently meant to suspend ISI forwarding while
+ * in AT mode; not referenced in this chunk - verify its users. */
+int stop_isi;
+
+/* AT-ISI Message Separation Ends */
+/*
+ * tty receive_buf hook: feed @count bytes from @cp into the ISI
+ * reassembly state machine under ld_pn->lock.
+ *
+ * States:
+ *  LD_PHONET_NEW_ISI_MSG    - start of a new message; if the first
+ *                             byte is not ESC (27) the data is treated
+ *                             as an AT-mode switch request;
+ *  LD_PHONET_ISI_MSG_LEN    - header already parsed, collecting the
+ *                             remaining body bytes;
+ *  LD_PHONET_ISI_MSG_NO_LEN - header itself is still incomplete.
+ * Complete messages are forwarded to the Phonet stack via netif_rx().
+ *
+ * NOTE(review): the outer while(1) executes a single switch iteration
+ * (every path ends in the trailing break) except for the 'continue'
+ * in the LD_PHONET_SWITCH transition.
+ */
+static void ld_phonet_ldisc_receive
+	(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+{
+	struct ld_phonet *ld_pn = tty->disc_data;
+	struct sk_buff *skb = ld_pn->skb;
+	unsigned long flags = 0;
+	unsigned int msglen = 0, i;
+	int check_at = 27;	/* ASCII ESC marks an ISI frame */
+	struct phonethdr *ph = NULL;
+
+	ld_phonet_rx_request_count++;
+	if (ld_pn->link_up == false) {
+		/* data received from PC => can TX */
+		ld_pn->link_up = true;
+
+		ld_pn->nb_try_to_tx = 0;
+	}
+	PN_PRINTK("ld_phonet_ldisc_receive: receive  %d data", count);
+	for (i = 1; i <= count; i++) {
+		PN_DATA_PRINTK(" %02x", cp[i - 1]);
+		if ((i % 8) == 0)
+			PN_DATA_PRINTK("\n");
+	}
+
+	spin_lock_irqsave(&ld_pn->lock, flags);
+
+	/* Whenever you receive a new USB frame Data Processed should be reset */
+	ld_pn->n_data_processed = 0;
+
+	while (1) {
+		switch (ld_pn->ld_phonet_state) {
+		case LD_PHONET_NEW_ISI_MSG:
+		{
+			int first_byte = 0;
+			if (count >= 1) {
+				if (*cp) {
+					first_byte = *cp;
+					dbg(
+						"case LD_PHONET_NEW_ISI_MSG: %d\n",
+						*cp);
+				}
+			} else
+				dbg("case LD_PHONET_NEW_ISI_MSG\n");
+
+			/* Anything not starting with ESC is an AT-mode
+			 * switch request, not an ISI frame. */
+			if ((count >= 1) && (first_byte != check_at)) {
+				dbg("MATCH FOR change mode %c\n", *cp);
+				ld_pn->ld_phonet_state =
+				    LD_PHONET_SWITCH;
+				continue;
+			}
+
+			/* AT-ISI Message Separation Ends */
+			ld_phonet_ldisc_init_transfer(ld_pn, cp,
+							  count);
+			break;
+		}
+		case LD_PHONET_ISI_MSG_LEN:
+		/* check if Remaining Data is complete */
+		if (ld_pn->n_remaining_data > count) {
+			/* We dont receive complete data */
+			/* Copy the available data */
+			memcpy(skb_put(skb, count), cp +
+			       ld_pn->n_data_processed, count);
+			ld_pn->n_data_processed += count;
+			ld_pn->ld_phonet_state = LD_PHONET_ISI_MSG_LEN;
+			ld_pn->n_remaining_data -= count;
+		} else {
+			/* We have complete data available */
+			/* Copy remaining data */
+			memcpy(skb_put(skb, ld_pn->n_remaining_data),
+			       cp + ld_pn->n_data_processed,
+			       ld_pn->n_remaining_data);
+			/* Send to Phonet */
+			ld_pn->dev->stats.rx_packets++;
+			ld_pn->dev->stats.rx_bytes += skb->len;
+			/* Strip the one-byte media header. */
+			__skb_pull(skb, sizeof(char));
+			dbg("Request buffer start\n");
+			for (i = 1; i <= skb->len; i++) {
+				dbg("%02x", skb->data[i - 1]);
+				if (i % 8 == 0)
+					dbg("\n");
+			}
+			dbg("Request buffer end\n");
+			dbg(
+				"calling netif_rx inside ldisc_receive first ld_pn->len=%d\n",
+				ld_pn->len);
+			ld_pn->n_data_sent += ld_pn->len;
+			ld_phonet_rx_bytes += skb->len;
+			netif_rx(skb);
+
+			/* TBD : Update pointers */
+			ld_pn->n_data_sent += ld_pn->n_remaining_data;
+			ld_pn->n_data_processed +=
+			    ld_pn->n_remaining_data;
+			ld_pn->len = LD_PHONET_INIT_LEN;
+
+			/* Initiate a new ISI transfer */
+			ld_phonet_ldisc_init_transfer
+			    (ld_pn, cp, count);
+		}
+		break;
+
+		case LD_PHONET_ISI_MSG_NO_LEN:
+		/*Check if we can extact length */
+		if ((ld_pn->len + count) >= ISI_MSG_HEADER_SIZE) {
+
+			/* Copy remaining header to SKBuff to extract */
+			/* length */
+			memcpy(skb_put(skb, ISI_MSG_HEADER_SIZE -
+				       ld_pn->len),
+			       cp + ld_pn->n_data_processed,
+			       ISI_MSG_HEADER_SIZE - ld_pn->len);
+			/* Skip the media byte in front of phonethdr. */
+			ph = (struct phonethdr *)
+			    (skb->data + sizeof(char));
+			msglen = get_unaligned_be16(&ph->pn_length);
+
+			ld_pn->n_data_processed +=
+			    ISI_MSG_HEADER_SIZE - ld_pn->len;
+
+			/* Check if we receive complete data */
+			if ((count + ld_pn->len) <
+			    (msglen + ISI_MSG_HEADER_SIZE)) {
+				/* We have not received complete data */
+				/* Copy available data */
+				memcpy(skb_put(skb, count -
+					       (ISI_MSG_HEADER_SIZE -
+						ld_pn->len)),
+				       cp + ld_pn->n_data_processed,
+				       count - (ISI_MSG_HEADER_SIZE -
+						ld_pn->len));
+				ld_pn->ld_phonet_state =
+				    LD_PHONET_ISI_MSG_LEN;
+				ld_pn->n_remaining_data =
+				    (msglen + ISI_MSG_HEADER_SIZE) -
+				    (count + ld_pn->len);
+				ld_pn->n_data_processed +=
+				    count - (ISI_MSG_HEADER_SIZE -
+					     ld_pn->len);
+
+				/* Reset pointers */
+				ld_pn->len = msglen +
+				    ISI_MSG_HEADER_SIZE;
+
+				/* return; */
+				break;
+			} else {
+				/* We receive complete data */
+				/* Copy remaining data */
+				memcpy(skb_put(skb,
+					(msglen + ISI_MSG_HEADER_SIZE)
+					- (ld_pn->len +
+					ld_pn->n_data_processed)),
+				       cp + ld_pn->n_data_processed,
+				       (msglen + ISI_MSG_HEADER_SIZE) -
+				       (ld_pn->len +
+					ld_pn->n_data_processed));
+
+				/* Send to Phonet */
+				ld_pn->dev->stats.rx_packets++;
+				ld_pn->dev->stats.rx_bytes += skb->len;
+				__skb_pull(skb, sizeof(char));
+				dbg("Request buffer start\n");
+				for (i = 1; i <= skb->len; i++) {
+					dbg("%02x", skb->data[i - 1]);
+					if (i % 8 == 0)
+						dbg("\n");
+				}
+
+				dbg("Request buffer end\n");
+				dbg(
+					"calling netif_rx inside ldisc_receive second ld_pn->len= %d\n",
+					ld_pn->len);
+				ld_phonet_rx_bytes += skb->len;
+				netif_rx(skb);
+
+				ld_pn->n_data_sent += (msglen +
+					ISI_MSG_HEADER_SIZE)
+					- (ld_pn->len +
+					ld_pn->n_data_processed);
+
+				ld_pn->n_data_processed += (msglen +
+					ISI_MSG_HEADER_SIZE)
+					- (ld_pn->len +
+					ld_pn->n_data_processed);
+
+				/* Reset len as skb buffer */
+				/* is sent to phonet */
+				ld_pn->len = LD_PHONET_INIT_LEN;
+
+				/* Check if we still have data in cp */
+				if (count > ld_pn->n_data_processed) {
+					/* We still have data in cp */
+					/* Initiate new ISI transfer */
+					ld_phonet_ldisc_init_transfer(
+					    ld_pn, cp, count);
+				} else {
+					/* No more data in cp */
+					ld_pn->ld_phonet_state =
+					    LD_PHONET_NEW_ISI_MSG;
+
+					/* Reset pointers */
+					ld_pn->len = 0;
+					ld_pn->n_data_processed = 0;
+					ld_pn->n_data_sent = 0;
+					ld_pn->n_remaining_data = 0;
+				}
+			}
+		} else {
+			/* Cannot extract length */
+			/* Copy available data */
+			memcpy(skb_put(skb, count), cp +
+			       ld_pn->n_data_processed, count);
+			ld_pn->len += count;
+			ld_pn->ld_phonet_state =
+			    LD_PHONET_ISI_MSG_NO_LEN;
+			ld_pn->n_data_processed += count;
+		}
+		break;
+
+		default:
+			break;
+		}
+		break;
+	}
+
+	spin_unlock_irqrestore(&ld_pn->lock, flags);
+}
+
+#define AT_START_LEN 10
+#define AT_MODECHAN_LEN 15
+/*
+ * Work handler for the AT/UART mode switch: clear the modechan-02
+ * mode flag on the global work item.
+ *
+ * Fixes vs. original:
+ *  - it called set_current_state(TASK_INTERRUPTIBLE) and returned
+ *    without ever scheduling, which leaves the workqueue worker
+ *    thread in a non-running task state; a work function must not
+ *    change the task state, so the call is removed;
+ *  - the local cast of @work was never used and is dropped.
+ */
+static void ld_uart_switch_function(struct work_struct *work)
+{
+	ld_uart_switch_work->at_modechan02_mode = 0;
+}
+
+/*
+ * Deferred tty write-wakeup handler: clear TTY_DO_WRITE_WAKEUP and
+ * resume draining the transmit backlog via ld_pn_handle_tx().
+ *
+ * The cast from work_struct relies on ld_work being the first member
+ * of struct ld_tty_wakeup_work_t.
+ */
+static void ld_tty_wakeup_workfunction(struct work_struct *work)
+{
+	struct tty_struct *tty;
+	struct ld_phonet *ld_pn;
+	struct ld_tty_wakeup_work_t *ld_work_tty_wk =
+	    (struct ld_tty_wakeup_work_t *)work;
+
+	if (ld_work_tty_wk == NULL) {
+		dbg("TTY work NULL\n");
+		return;
+	}
+
+	/* The tty was recorded by ld_phonet_ldisc_write_wakeup();
+	 * it may have been cleared by ldisc close in the meantime. */
+	tty = ld_work_tty_wk->ld_work_write_wakeup_tty;
+	if (tty == NULL) {
+		dbg("LD Work Queue tty Data NULL\n");
+		return;
+	}
+
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+	ld_pn = tty->disc_data;
+	if (ld_pn == NULL) {
+		dbg("LD PN Work Queue DATA NULL\n");
+		return;
+	}
+
+	BUG_ON(ld_pn->tty != tty);
+	ld_pn_handle_tx(ld_pn);
+	return;
+}
+
+/*
+ * tty write_wakeup hook (may run in atomic context): record the tty
+ * and defer the actual TX drain to the workqueue.
+ */
+static void ld_phonet_ldisc_write_wakeup(struct tty_struct *tty)
+{
+	ld_tty_wakeup_work->ld_work_write_wakeup_tty = tty;
+	queue_work(ld_phonet_wq, (struct work_struct *)ld_tty_wakeup_work);
+}
+
+/* wait_on_bit_lock() action: don't sleep, just report "retry".
+ * NETDEV_TX_OK happens to be 0, which is what the bit-wait API
+ * expects for "keep going". */
+int ld_phonet_hangup_wait(void *data)
+{
+	return NETDEV_TX_OK;
+}
+
+/*
+ * tty hangup hook: take the PHONET_SENDING bit to fence off the TX
+ * path, then drop every queued skb and reset the backlog length.
+ */
+static int ld_phonet_ldisc_hangup(struct tty_struct *tty)
+{
+	struct ld_phonet *ld_pn;
+	struct sk_buff *skb;
+
+	/* Flush TX queue */
+	ld_pn = tty->disc_data;
+	ld_phonet_hangup_events++;
+	wait_on_bit_lock(&ld_pn->state, PHONET_SENDING,
+			 ld_phonet_hangup_wait, TASK_KILLABLE);
+
+	while ((skb = skb_dequeue(&ld_pn->head)) != NULL) {
+		skb->dev->stats.tx_dropped++;
+		/* Use the irq-safe free if called in atomic context. */
+		if (in_interrupt())
+			dev_kfree_skb_irq(skb);
+		else
+			kfree_skb(skb);
+	}
+	ld_buff_len = LD_PHONET_INIT_LEN;
+	clear_bit(PHONET_SENDING, &ld_pn->state);
+	return NETDEV_TX_OK;
+}
+
+/* N_PHONET line-discipline operations. */
+static struct tty_ldisc_ops ld_phonet_ldisc = {
+	.owner = THIS_MODULE,
+	.name = "phonet",
+	.open = ld_phonet_ldisc_open,
+	.close = ld_phonet_ldisc_close,
+	.receive_buf = ld_phonet_ldisc_receive,
+	.write_wakeup = ld_phonet_ldisc_write_wakeup,
+	.hangup = ld_phonet_ldisc_hangup
+};
+
+/*
+ * sysfs "show" for the ld_phonet_stats attribute: report the traffic
+ * and event counters.
+ *
+ * Fix vs. original: a sysfs show() must write its output into @buf
+ * and return the number of bytes produced; the original printed the
+ * counters to the kernel log at CRIT severity and returned 0, so
+ * reading the attribute yielded an empty file.
+ */
+static ssize_t ld_phonet_show_stats(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE,
+			 "LD Phonet Tx request %lu\n"
+			 "LD Phonet Rx request %lu\n"
+			 "LD Phonet Tx Bytes %lu\n"
+			 "LD Phonet Rx Bytes %lu\n"
+			 "LD Phonet TTY hangup events %lu\n"
+			 "LD Phonet Tx overflow events %lu\n"
+			 "LD Phonet TX buffer len %d\n",
+			 ld_phonet_tx_request_count,
+			 ld_phonet_rx_request_count,
+			 ld_phonet_tx_bytes,
+			 ld_phonet_rx_bytes,
+			 ld_phonet_hangup_events,
+			 ld_phonet_drop_events,
+			 ld_buff_len);
+}
+
+/*
+ * sysfs "store" for ld_phonet_stats: any write resets the traffic
+ * counters.  As in the original, the overflow counter and backlog
+ * length are deliberately left untouched.
+ *
+ * Fix vs. original: a store() callback must return the number of
+ * bytes consumed; returning 0 makes user space retry the write in an
+ * endless loop.
+ */
+static ssize_t ld_phonet_reset_stats(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	ld_phonet_tx_request_count = 0;
+	ld_phonet_rx_request_count = 0;
+	ld_phonet_tx_bytes = 0;
+	ld_phonet_rx_bytes = 0;
+	ld_phonet_hangup_events = 0;
+	return count;
+}
+
+/* Read: dump counters; write: reset them (owner may also write). */
+static DEVICE_ATTR(ld_phonet_stats, S_IRUGO | S_IWUSR,
+		   ld_phonet_show_stats, ld_phonet_reset_stats);
+
+static struct attribute *ld_phonet_attributes[] = {
+	&dev_attr_ld_phonet_stats.attr,
+	NULL
+};
+
+/* Exposed under /sys/kernel/ld_phonet_isi/. */
+static struct kobject *ld_phonet_kobj;
+#define LD_PHONET_FS "ld_phonet_isi"
+
+static const struct attribute_group ld_phonet_group = {
+	.attrs = ld_phonet_attributes,
+};
+
+/*
+ * Create /sys/kernel/ld_phonet_isi and attach the statistics
+ * attribute group.  Returns 0 on success or a negative errno.
+ *
+ * Fix vs. original: on sysfs_create_group() failure it called
+ * kobject_put(kernel_kobj), dropping a reference on the global
+ * kernel kobject while leaking our own; put ld_phonet_kobj instead
+ * and clear the pointer so later teardown cannot double-put it.
+ */
+static int ld_phonet_sysfs_init(void)
+{
+	int ret;
+
+	ld_phonet_kobj = kobject_create_and_add(LD_PHONET_FS, kernel_kobj);
+	if (!ld_phonet_kobj) {
+		pr_err("LD Sysfs Kojb failed");
+		return -ENOMEM;
+	}
+	ret = sysfs_create_group(ld_phonet_kobj, &ld_phonet_group);
+	if (ret) {
+		kobject_put(ld_phonet_kobj);
+		ld_phonet_kobj = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Module init: register the N_PHONET line discipline, create the
+ * workqueue and work items, then expose the statistics sysfs group.
+ *
+ * Fixes vs. original:
+ *  - the tty_register_ldisc() return value was ignored;
+ *  - the TTY-wakeup allocation failure path returned "false" (== 0),
+ *    reporting success from a failed init, and leaked the workqueue;
+ *  - a sysfs setup failure now unwinds everything created so far.
+ */
+static int __init ld_phonet_init(void)
+{
+	int retval;
+
+	retval = tty_register_ldisc(N_PHONET, &ld_phonet_ldisc);
+	if (retval)
+		return retval;
+	ld_buff_len = LD_PHONET_INIT_LEN;
+
+	ld_phonet_wq = create_workqueue("ld_queue");
+	if (NULL == ld_phonet_wq) {
+		pr_err("Create Workqueue failed\n");
+		tty_unregister_ldisc(N_PHONET);
+		return -ENOMEM;
+	}
+
+	/* Work for handling TTY wake up */
+	ld_tty_wakeup_work = kmalloc(sizeof(struct ld_tty_wakeup_work_t),
+				     GFP_KERNEL);
+	if (ld_tty_wakeup_work) {
+		INIT_WORK((struct work_struct *)ld_tty_wakeup_work,
+			  ld_tty_wakeup_workfunction);
+		ld_tty_wakeup_work->ld_work_write_wakeup_tty = NULL;
+	} else {
+		pr_err("TTY Wake up work Error\n");
+		destroy_workqueue(ld_phonet_wq);
+		tty_unregister_ldisc(N_PHONET);
+		return -ENOMEM;
+	}
+	/* Work for handling AT+ATSTART switch; as in the original, a
+	 * failure here is tolerated (the work item stays NULL). */
+	ld_uart_switch_work = kmalloc(sizeof(struct ld_uart_switch_work_t),
+				      GFP_KERNEL);
+	if (ld_uart_switch_work) {
+		INIT_WORK((struct work_struct *)ld_uart_switch_work,
+			  ld_uart_switch_function);
+	} else {
+		pr_crit("UART Switch Work Failed");
+	}
+
+	retval = ld_phonet_sysfs_init();
+	if (retval) {
+		kfree(ld_uart_switch_work);
+		kfree(ld_tty_wakeup_work);
+		destroy_workqueue(ld_phonet_wq);
+		tty_unregister_ldisc(N_PHONET);
+	}
+	return retval;
+}
+
+/*
+ * Module exit: drain and destroy the workqueue, free the work items,
+ * unregister the line discipline and remove the sysfs group.
+ */
+static void __exit ld_phonet_exit(void)
+{
+	/* Let any queued wakeup/switch work finish first. */
+	flush_workqueue(ld_phonet_wq);
+	destroy_workqueue(ld_phonet_wq);
+	kfree(ld_tty_wakeup_work);
+	kfree(ld_uart_switch_work);
+	tty_unregister_ldisc(N_PHONET);
+	sysfs_remove_group(ld_phonet_kobj, &ld_phonet_group);
+	kobject_put(ld_phonet_kobj);
+}
+
+module_init(ld_phonet_init);
+module_exit(ld_phonet_exit);
+#endif /* CONFIG_BCM_KF_PHONET */
diff --git a/net/phonet/ld_tmodem.c b/net/phonet/ld_tmodem.c
new file mode 100644
index 0000000000000000000000000000000000000000..06166b90c5b0f0de400f63deab278732de861240
--- /dev/null
+++ b/net/phonet/ld_tmodem.c
@@ -0,0 +1,1059 @@
+#ifdef CONFIG_BCM_KF_PHONET
+/*
+<:copyright-BRCM:2014:DUAL/GPL:standard
+
+   Copyright (c) 2014 Broadcom Corporation
+   All Rights Reserved
+
+Unless you and Broadcom execute a separate written software license
+agreement governing use of this software, this software is licensed
+to you under the terms of the GNU General Public License version 2
+(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
+with the following added to such license:
+
+   As a special exception, the copyright holders of this software give
+   you permission to link this software with independent modules, and
+   to copy and distribute the resulting executable under terms of your
+   choice, provided that you also meet, for each linked independent
+   module, the terms and conditions of the license of that module.
+   An independent module is a module which is not derived from this
+   software.  The special exception does not apply to any modifications
+   of the software.
+
+Not withstanding the above, under no circumstances may you combine
+this software in any way with any other Broadcom software provided
+under a license other than the GPL, without Broadcom's express prior
+written consent.
+
+:>
+*/
+/*
+ * File: ld_tmodem.c
+ * Thin Modem device TTY line discipline
+ */
+
+#include <linux/debugfs.h>
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+#include <linux/mhi_l2mux.h>
+#include <linux/module.h>
+#include <linux/phonet.h>
+#include <linux/tty.h>
+#include <net/af_mhi.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pn_dev.h>
+#include <net/sock.h>
+
+#define LD_TMODEM_PLATFORM_DRIVER_NAME	"ld_tmodem"
+#define PN_HARD_HEADER_LEN	1
+#define NETDEV_HARD_HEADER_LEN	(PN_HARD_HEADER_LEN + L2MUX_HDR_SIZE)
+#define TMODEM_SENDING_BIT	1 /* Bit 1 = 0x02 */
+#define LD_TMODEM_BUFFER_LEN	SZ_1M
+#define LD_TMODEM_INIT_LEN	0
+#define LD_WAKEUP_DATA_INIT	0
+#define MHI_MAX_MTU		65540
+#define LD_RECEIVE_ROOM		SZ_64K
+
+#define PN_DEV_AUX_HOST		0x44	/* Additional host */
+#define PN_DEV_MODEM		0x60	/* Modem */
+#define PN_DEV_MODEM_1		0x64	/* Modem 1 */
+#define PN_DEV_HOST		0
+
+#define LOW_PRIORITY	0
+#define MEDIUM_PRIORITY	1
+#define HIGH_PRIORITY	6
+
+/* Debug/statistics counters for the thin-modem link. */
+struct ldtmdm_dbg_stats {
+	u64 ld_tmodem_tx_request_count;
+	u64 ld_tmodem_rx_request_count;
+	u64 ld_tmodem_tx_bytes;
+	u64 ld_tmodem_rx_bytes;
+	u64 ld_tmodem_hangup_events;
+	u64 ld_tmodem_drop_events;
+	u64 ld_tmodem_skb_tx_err;
+};
+
+/* Deferred tty write-wakeup work item; ld_work MUST stay the first
+ * member because handlers cast work_struct* back to this type. */
+struct ld_tty_wakeup_work_t {
+	struct work_struct ld_work;
+	/*This holds TTY info for TTY wakeup */
+	struct tty_struct *ld_work_write_wakeup_tty;
+};
+
+/* Per-link state for the thin-modem line discipline / net device. */
+struct ld_tmodem {
+	struct tty_struct *tty;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+	unsigned long flags;
+	struct sk_buff *skb;
+	unsigned long len;
+	unsigned long state;
+	struct net_device *dev;
+	struct list_head node;
+	struct sk_buff_head head;
+	char *tty_name;
+	bool link_up;
+	char *pending_buffer;
+	unsigned int pending_length;
+	struct dentry *root;
+	struct workqueue_struct *ld_tmodem_wq;
+	struct ld_tty_wakeup_work_t *ld_tty_wakeup_work;
+	struct ldtmdm_dbg_stats ldtmdm_dbg;
+	int ld_backlog_len;	/* LD Phonet Tx Backlog buffer Len */
+};
+
+/* Single global instance; presumably set up at device probe time -
+ * confirm against the rest of the file. */
+struct ld_tmodem *gb_ld_tmodem;
+
+/* Wire format of a flow-control message received from the modem. */
+struct prop_ctrl_msg {
+	struct l2muxhdr l2hdr;
+	u8 ch_id;
+	u8 event;
+	u8 rsvd[2];
+};
+
+/* L2MUX Queue mapping for protocols */
+enum tmodem_l2mux_queue {
+	LD_TMODEM_L2MUX_QUEUE_1_PHONET,
+	LD_TMODEM_L2MUX_QUEUE_2_MHI,
+};
+
+/* Flow-control events carried in prop_ctrl_msg.event. */
+enum tmodem_flow_ctrl_events {
+	HOST_STOP_SENDING_MSG = 1,
+	HOST_RESUME_SENDING_MSG,
+};
+
+/* Name of the thin-modem network interface. */
+static const char ld_tmodem_ifname[] = "smc0";
+
+/* Free an skb with the variant appropriate to the calling context
+ * (irq-safe free when in interrupt context). */
+static void free_ld_tmodem_skb(struct sk_buff *skb)
+{
+	if (in_interrupt())
+		dev_kfree_skb_irq(skb);
+	else
+		kfree_skb(skb);
+}
+
+/*
+ * Handle a flow-control message from the modem: stop or wake the
+ * subqueue named by the message's channel id.  Consumes @skb in all
+ * non-NULL cases.  Returns 0, or -ENOMEM when @skb is NULL (error
+ * code kept from the original for compatibility).
+ *
+ * Fixes vs. original:
+ *  - skb->data was dereferenced (and logged) before the NULL check;
+ *  - the payload length is now validated before reading the header.
+ */
+static int mhi_flow_ctrl_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct prop_ctrl_msg *ctrl_ptr;
+	int subqueue;
+
+	BUG_ON(dev == NULL);
+
+	if (skb == NULL) {
+		netdev_err(dev, "skb received is NULL");
+		return -ENOMEM;
+	}
+
+	if (skb->len < sizeof(*ctrl_ptr)) {
+		netdev_err(dev, "flow ctrl message too short (%u)", skb->len);
+		free_ld_tmodem_skb(skb);
+		return 0;
+	}
+
+	ctrl_ptr = (struct prop_ctrl_msg *)skb->data;
+	netdev_dbg(dev, "channel id %d, event id %d\n",
+				ctrl_ptr->ch_id, ctrl_ptr->event);
+
+	/* Map the channel id onto the corresponding TX subqueue. */
+	switch (ctrl_ptr->ch_id) {
+	case LD_TMODEM_L2MUX_QUEUE_1_PHONET:
+		subqueue = LD_TMODEM_L2MUX_QUEUE_1_PHONET;
+		break;
+	case LD_TMODEM_L2MUX_QUEUE_2_MHI:
+		subqueue = LD_TMODEM_L2MUX_QUEUE_2_MHI;
+		break;
+	default:
+		netdev_err(dev, "ch id not supported %d",
+			ctrl_ptr->ch_id);
+		free_ld_tmodem_skb(skb);
+		return 0;
+	}
+
+	switch (ctrl_ptr->event) {
+	case HOST_STOP_SENDING_MSG:
+		netif_stop_subqueue(dev, subqueue);
+		break;
+	case HOST_RESUME_SENDING_MSG:
+		netif_wake_subqueue(dev, subqueue);
+		break;
+	default:
+		netdev_err(dev, "event not supported %d",
+					ctrl_ptr->event);
+		free_ld_tmodem_skb(skb);
+		return 0;
+	}
+	free_ld_tmodem_skb(skb);
+	return 0;
+}
+
+/*
+ * ndo_open: enable all TX subqueues, install the Phonet routes that
+ * point at the modem, and signal carrier up.
+ */
+static int ld_tmdm_net_open(struct net_device *dev)
+{
+	netdev_dbg(dev, "ld_tmdm_net_open: wakeup queues");
+	netif_tx_wake_all_queues(dev);
+	phonet_route_add(dev, PN_DEV_MODEM);
+	phonet_route_add(dev, PN_DEV_MODEM_1);
+	phonet_route_add(dev, PN_DEV_AUX_HOST);
+	netif_carrier_on(dev);
+	return 0;
+}
+
+/*
+ * ndo_stop: mirror of ld_tmdm_net_open() - stop the queues, remove
+ * the routes and signal carrier down.
+ */
+static int ld_tmdm_net_close(struct net_device *dev)
+{
+	netdev_dbg(dev, "ld_tmdm_net_close: stop all queues");
+	netif_tx_stop_all_queues(dev);
+	phonet_route_del(dev, PN_DEV_MODEM);
+	phonet_route_del(dev, PN_DEV_MODEM_1);
+	phonet_route_del(dev, PN_DEV_AUX_HOST);
+	netif_carrier_off(dev);
+	return 0;
+}
+
+/*
+ * Account one TX backlog overflow and log it.
+ * NOTE(review): the message says "LD Phonet" although this is the
+ * thin-modem driver - likely copy-paste from ld_phonet.c.
+ */
+static void ld_tx_overflow(struct ld_tmodem *ld_tmdm)
+{
+	struct ldtmdm_dbg_stats *dbg;
+	dbg = &ld_tmdm->ldtmdm_dbg;
+	dbg->ld_tmodem_drop_events++;
+	pr_debug(
+		"##### ATTENTION : LD Phonet Transmit overflow events %llu : %u #####",
+		dbg->ld_tmodem_drop_events, LD_TMODEM_BUFFER_LEN);
+}
+
+/*
+ * Drain the TX backlog queue onto the tty.  Guarded by the
+ * TMODEM_SENDING_BIT so only one drainer runs at a time; a second
+ * caller simply returns and relies on the running one.
+ *
+ * Behaviour per queued skb:
+ *  - no tty room and backlog over LD_TMODEM_BUFFER_LEN: declare the
+ *    link down and drop the whole queue;
+ *  - no tty room otherwise: arm TTY_DO_WRITE_WAKEUP and retry when
+ *    the tty signals writable;
+ *  - skb larger than the available room: leave it queued and retry
+ *    later;
+ *  - otherwise notify l2mux, write the frame to the tty and update
+ *    the statistics.
+ */
+static int ld_tmdm_handle_tx(struct ld_tmodem *ld_tmdm)
+{
+	struct tty_struct *tty = ld_tmdm->tty;
+	struct sk_buff *skb;
+	unsigned int tty_wr, room;
+	struct sk_buff *tmp;
+	int ret;
+
+	if (tty == NULL)
+		return 0;
+	/* Enter critical section */
+	if (test_and_set_bit(TMODEM_SENDING_BIT, &ld_tmdm->state))
+		return 0;
+
+	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
+	while ((skb = skb_peek(&ld_tmdm->head)) != NULL) {
+		room = tty_write_room(tty);
+
+		if (!room && ld_tmdm->ld_backlog_len > LD_TMODEM_BUFFER_LEN) {
+			/* Report the overflow only on the up->down edge. */
+			if (ld_tmdm->link_up == true)
+				ld_tx_overflow(ld_tmdm);
+			ld_tmdm->link_up = false;
+			/* Flush TX queue */
+			while ((skb = skb_dequeue(&ld_tmdm->head)) != NULL) {
+				skb->dev->stats.tx_dropped++;
+				netdev_dbg(ld_tmdm->dev, "Flush TX queue tx_dropped = %ld",
+				skb->dev->stats.tx_dropped);
+				free_ld_tmodem_skb(skb);
+			}
+			ld_tmdm->ld_backlog_len = LD_TMODEM_INIT_LEN;
+			goto out;
+		} else if (!room) {
+			netdev_dbg(ld_tmdm->dev,
+				"ld_tmdm_handle_tx no room, waiting for previous to be sent..:\n");
+
+			if (!test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+				/* wakeup bit is not set, set it */
+				netdev_dbg(ld_tmdm->dev,
+					"ld_tmdm_handle_tx Setting TTY_DO_WRITE_WAKEUP bit\n");
+				set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+			} else {
+				netdev_dbg(ld_tmdm->dev,
+					"ld_tmdm_handle_tx TTY_DO_WRITE_WAKEUP bit already set!\n");
+			}
+			goto out;
+		}
+
+		if (skb->len > (room + NETDEV_HARD_HEADER_LEN)) {
+			netdev_warn(ld_tmdm->dev, "%s :room not available buf len:%d, room:%d\n"
+						, __func__, skb->len, room);
+			goto out;
+		}
+
+		ret = l2mux_skb_tx(skb, ld_tmdm->dev);
+		if (ret) {
+			/*drop this packet...*/
+			/* NOTE(review): this inner 'tmp' shadows the
+			 * outer declaration above. */
+			struct sk_buff *tmp = skb_dequeue(&ld_tmdm->head);
+			BUG_ON(tmp != skb);
+			free_ld_tmodem_skb(skb);
+			ld_tmdm->ldtmdm_dbg.ld_tmodem_skb_tx_err++;
+			goto out;
+		}
+
+		tty_wr = tty->ops->write(tty, skb->data, skb->len);
+
+		print_hex_dump(KERN_DEBUG, "ld_tm_write: ", DUMP_PREFIX_NONE, 16, 1,
+						skb->data, skb->len, 1);
+
+		/* NOTE(review): a short tty write is only logged; the
+		 * unwritten tail is not re-queued. */
+		if (unlikely(tty_wr != skb->len)) {
+			netdev_err(ld_tmdm->dev,
+						"buffer split...req len :%d, act written:%d\n",
+							skb->len, tty_wr);
+		}
+		ld_tmdm->ld_backlog_len -= skb->len;
+
+			ld_tmdm->ldtmdm_dbg.ld_tmodem_tx_bytes += tty_wr;
+		if (ld_tmdm->ld_backlog_len < LD_TMODEM_INIT_LEN)
+			ld_tmdm->ld_backlog_len = LD_TMODEM_INIT_LEN;
+
+		ld_tmdm->dev->stats.tx_packets++;
+		ld_tmdm->dev->stats.tx_bytes += tty_wr;
+
+		tmp = skb_dequeue(&ld_tmdm->head);
+
+		BUG_ON(tmp != skb);
+		free_ld_tmodem_skb(skb);
+
+	}
+out:
+	clear_bit(TMODEM_SENDING_BIT, &ld_tmdm->state);
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Send an L3 payload over the thin-modem link: wrap @data (@len
+ * bytes) in an L2MUX header for protocol @l3_id, choose the skb
+ * priority from the protocol, and hand the frame to the "smc0"
+ * device.  @res is currently unused.  Returns the dev_queue_xmit()
+ * result on success or a negative errno.
+ *
+ * Fixes vs. original:
+ *  - the oversized-message branch logged a copy-pasted "not IFF_UP"
+ *    message; it now reports the real cause;
+ *  - the dead "if (skb)" guard on the drop path is removed (skb is
+ *    always valid there).
+ */
+int l2mux_send(char *data, int len, int l3_id, int res)
+{
+	struct net_device *dev = NULL;
+	struct l2muxhdr *l2hdr;
+	struct sk_buff *skb;
+	struct net *net;
+	int ret;
+
+	skb = alloc_skb(len + L2MUX_HDR_SIZE, GFP_ATOMIC);
+	if (!skb) {
+		pr_err("l2mux_send: skb_alloc failed\n");
+		return -ENOMEM;
+	}
+
+	skb_reserve(skb, L2MUX_HDR_SIZE);
+	skb_reset_transport_header(skb);
+
+	memcpy(skb_put(skb, len), data, len);
+
+	/* Look for the thin-modem interface in any net namespace. */
+	for_each_net(net) {
+		dev = dev_get_by_name(net, ld_tmodem_ifname);
+		if (dev)
+			break;
+	}
+
+	if (!dev) {
+		ret = -ENODEV;
+		pr_err("l2mux_send: interface:%s not found\n",
+							ld_tmodem_ifname);
+		goto drop;
+	}
+
+	if (!(dev->flags & IFF_UP)) {
+		netdev_err(dev, "l2mux_send: device %s not IFF_UP\n",
+							ld_tmodem_ifname);
+		ret = -ENETDOWN;
+		goto drop;
+	}
+
+	if (len + L2MUX_HDR_SIZE > dev->mtu) {
+		netdev_err(dev, "l2mux_send: frame len %d exceeds %s mtu %u\n",
+						len + L2MUX_HDR_SIZE,
+						ld_tmodem_ifname, dev->mtu);
+		ret = -EMSGSIZE;
+		goto drop;
+	}
+
+	skb_reset_network_header(skb);
+
+	skb_push(skb, L2MUX_HDR_SIZE);
+	skb_reset_mac_header(skb);
+
+	l2hdr = l2mux_hdr(skb);
+	l2mux_set_proto(l2hdr, l3_id);
+	l2mux_set_length(l2hdr, len);
+	netdev_dbg(dev, "l2mux_send: proto:%d skb_len:%d\n",
+					l3_id, skb->len);
+	skb->protocol = htons(ETH_P_MHI);
+	skb->dev = dev;
+
+	/* Map the L3 protocol onto a queueing priority. */
+	switch (l3_id) {
+	case MHI_L3_XFILE:
+	case MHI_L3_LOW_PRIO_TEST:
+		skb->priority = MEDIUM_PRIORITY;
+		break;
+	case MHI_L3_AUDIO:
+	case MHI_L3_TEST_PRIO:
+	case MHI_L3_HIGH_PRIO_TEST:
+		skb->priority = HIGH_PRIORITY;
+		break;
+	default:
+		skb->priority = LOW_PRIORITY;
+		break;
+	}
+	ret = dev_queue_xmit(skb); /*this will consume irrespective of status*/
+
+	dev_put(dev);
+	return ret;
+
+drop:
+	kfree_skb(skb);
+	if (dev)
+		dev_put(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(l2mux_send);
+
+/*
+ * ndo_start_xmit: queue @skb on the backlog and kick the tty drainer.
+ * If the link was marked down, it is revived as soon as the tty
+ * reports write room; otherwise the frame is dropped and counted.
+ * Always consumes @skb (returns NETDEV_TX_OK or the handle_tx
+ * result).
+ */
+static int ld_tmdm_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+
+	struct ld_tmodem *ld_tmdm;
+
+	BUG_ON(dev == NULL);
+
+	print_hex_dump(KERN_DEBUG, "ld_tm_xmit: ", DUMP_PREFIX_NONE,
+					16, 1, skb->data, skb->len, 1);
+
+	ld_tmdm = netdev_priv(dev);
+	/* NOTE(review): netdev_priv() cannot return NULL; this check
+	 * (with its doubled parentheses) is dead code. */
+	if ((ld_tmdm == NULL)) {
+		free_ld_tmodem_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	ld_tmdm->ldtmdm_dbg.ld_tmodem_tx_request_count++;
+	netdev_dbg(dev, "ld_tmdm_net_xmit: send skb to %s", dev->name);
+	if (ld_tmdm->link_up == true) {
+		skb_queue_tail(&ld_tmdm->head, skb);
+		ld_tmdm->ld_backlog_len += skb->len;
+		return ld_tmdm_handle_tx(ld_tmdm);
+	} else if (tty_write_room(ld_tmdm->tty)) {
+		/* link is up again */
+		ld_tmdm->link_up = true;
+		skb_queue_tail(&ld_tmdm->head, skb);
+		ld_tmdm->ld_backlog_len += skb->len;
+		return ld_tmdm_handle_tx(ld_tmdm);
+	} else {
+		free_ld_tmodem_skb(skb);
+		dev->stats.tx_dropped++;
+		netdev_dbg(dev, "tx_dropped = %ld",
+						dev->stats.tx_dropped);
+		return NETDEV_TX_OK;
+	}
+
+}
+
+/*
+ * ndo_do_ioctl: answer the Phonet autoconfiguration request with the
+ * host device address; everything else is left to the caller
+ * (-ENOIOCTLCMD).
+ */
+static int ld_tmdm_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct if_phonet_req *req = (struct if_phonet_req *)ifr;
+
+	netdev_dbg(dev, "IOCTL called\n");
+	switch (cmd) {
+	case SIOCPNGAUTOCONF:
+		netdev_dbg(dev, "IOCTL SIOCPNGAUTOCON called, adding routes\n");
+		req->ifr_phonet_autoconf.device = PN_DEV_HOST;
+		//dev_open(dev);
+		return 0;
+	}
+	/* Return NOIOCTLCMD so Phonet won't do it again */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * ndo_select_queue: route Phonet frames to subqueue 0 and MHI frames
+ * to subqueue 1; anything else is logged and falls back to the
+ * Phonet subqueue.
+ */
+static u16
+ld_tmdm_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+
+	struct ld_tmodem *ld_tmdm;
+	u16 subqueue = 0;/*default return value is phonet sub queue*/
+
+	BUG_ON(dev == NULL);
+	ld_tmdm = netdev_priv(dev);
+
+	if (skb->protocol == htons(ETH_P_PHONET)) {
+		netdev_dbg(dev, "ld_tmdm_net_select_queue: protocol ETH_P_PHONET");
+		subqueue = LD_TMODEM_L2MUX_QUEUE_1_PHONET;
+	} else if (skb->protocol == htons(ETH_P_MHI)) {
+		netdev_dbg(dev, "ld_tmdm_net_select_queue: protocol ETH_P_MHI");
+		subqueue = LD_TMODEM_L2MUX_QUEUE_2_MHI;
+	} else
+		netdev_err(dev, "unsupported protocol device %p, 0x%04X",
+							dev, skb->protocol);
+
+	return subqueue;
+}
+
+/*
+ * ndo_change_mtu: accept the new MTU after bounds checking.
+ *
+ * Fix vs. original: no validation was performed at all; mirror the
+ * sibling Phonet driver and reject non-positive values or anything
+ * beyond the device maximum (MHI_MAX_MTU).
+ */
+static int ld_tmdm_set_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu <= 0 || new_mtu > MHI_MAX_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* net_device callbacks for the multi-queue thin-modem device. */
+static const struct net_device_ops ld_tmdm_netdev_ops = {
+	.ndo_open = ld_tmdm_net_open,
+	.ndo_stop = ld_tmdm_net_close,
+	.ndo_select_queue = ld_tmdm_net_select_queue,
+	.ndo_start_xmit = ld_tmdm_net_xmit,
+	.ndo_do_ioctl = ld_tmdm_net_ioctl,
+	.ndo_change_mtu = ld_tmdm_set_mtu,
+};
+
+/*
+ * alloc_netdev() setup callback: configure the device as a
+ * point-to-point, ARP-less MHI link with room for the one-byte media
+ * header plus the L2MUX header.
+ */
+static void ld_tmdm_net_setup(struct net_device *dev)
+{
+	dev->features		= NETIF_F_SG;
+	dev->type		= ARPHRD_MHI;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu		= MHI_MAX_MTU;
+	dev->hard_header_len	= NETDEV_HARD_HEADER_LEN;
+	dev->dev_addr[0]	= PN_MEDIA_AUX_HOST_HOST_IF;
+	dev->addr_len		= 1;
+	dev->tx_queue_len	= 500;
+	dev->netdev_ops		= &ld_tmdm_netdev_ops;
+	/* free_netdev() runs automatically when the device is released */
+	dev->destructor		= free_netdev;
+/* NOTE(review): stray ';' after the function body below - harmless */
+};
+
+/*
+ * Line-discipline open: bind the tty to the (single) global thin-modem
+ * instance and configure the tty receive path.
+ *
+ * Fixes vs. original:
+ *  - gb_ld_tmodem is initialised elsewhere; if the line discipline is
+ *    attached before that happens the original dereferenced NULL -
+ *    now it fails with -ENODEV;
+ *  - the redundant second assignment of the same tty pointer through
+ *    gb_ld_tmodem (the identical object) is dropped.
+ */
+static int ld_tmodem_ldisc_open(struct tty_struct *tty)
+{
+	struct ld_tmodem *ld_tmdm = gb_ld_tmodem;
+
+	if (!ld_tmdm)
+		return -ENODEV;
+
+	ld_tmdm->tty = tty;
+	tty->disc_data = ld_tmdm;
+	tty->receive_room = LD_RECEIVE_ROOM;
+	tty->low_latency = 1;
+	return 0;
+}
+
+/*
+ * Line-discipline close: detach the tty from the thin-modem instance
+ * and clear the pointer used by the deferred write-wakeup work.
+ */
+static void ld_tmodem_ldisc_close(struct tty_struct *tty)
+{
+	struct ld_tmodem *ld_tmdm = tty->disc_data;
+	netdev_dbg(ld_tmdm->dev, "ld_tmodem_ldisc_close\n");
+	tty->disc_data = NULL;
+	ld_tmdm->tty = NULL;
+	ld_tmdm->ld_tty_wakeup_work->ld_work_write_wakeup_tty = NULL;
+}
+
+/*
+ * Wrap @count bytes of a reassembled L2MUX frame in an skb and pass
+ * it up through l2mux_skb_rx().  On allocation failure the frame is
+ * silently dropped (only the rx-byte counter is skipped).
+ */
+static void send_buffer_to_l2mux(struct ld_tmodem *ld_tmdm,
+				const unsigned char *buff_ptr,
+							unsigned int count)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(ld_tmdm->dev, count + NET_IP_ALIGN);
+	if (NULL == skb)
+		return;
+
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	memcpy(skb->data, buff_ptr, count);
+	skb_put(skb, count);
+
+	skb_reset_mac_header(skb);
+	skb->pkt_type = PACKET_HOST;
+
+	/*no need to free skb: skb will be consumed by callee in all the cases*/
+	if (l2mux_skb_rx(skb, ld_tmdm->dev) != 0)
+		netdev_err(ld_tmdm->dev, "not able to send skb");
+
+	ld_tmdm->ldtmdm_dbg.ld_tmodem_rx_bytes += count;
+
+}
+
+/*
+ * Reassemble L2MUX frames from a raw tty buffer.  Bytes that do not
+ * yet form a complete frame are accumulated in ld_tmdm->pending_buffer
+ * (ld_tmdm->pending_length bytes so far); each completed frame
+ * (L2MUX header + payload length taken from the header) is forwarded
+ * via send_buffer_to_l2mux().
+ *
+ * NOTE(review): the copies into pending_buffer are not bounded
+ * against its allocated size here - a corrupt length field could
+ * overrun it; verify the allocation size and add a cap if needed.
+ */
+static void process_rx_buffer(struct ld_tmodem *ld_tmdm, int curr_len,
+						const unsigned char *buff_ptr)
+{
+	unsigned int pending_len = ld_tmdm->pending_length;
+	const unsigned char *pbuff_start;
+	struct l2muxhdr *pl2msg;
+	int l3len;
+
+	/* Not even a full header yet: stash everything and wait. */
+	if (pending_len + curr_len < L2MUX_HDR_SIZE) {
+		memcpy(ld_tmdm->pending_buffer + pending_len,
+		buff_ptr,
+		curr_len);
+		ld_tmdm->pending_length += curr_len;
+		return;
+	}
+
+	netdev_dbg(ld_tmdm->dev, "pending_length %d", pending_len);
+	while (1) {
+		/* Complete a partially buffered header first. */
+		if (pending_len && pending_len < L2MUX_HDR_SIZE) {
+			memcpy(ld_tmdm->pending_buffer + pending_len,
+				buff_ptr,
+				L2MUX_HDR_SIZE - pending_len);
+			curr_len -= (L2MUX_HDR_SIZE - pending_len);
+			buff_ptr += (L2MUX_HDR_SIZE - pending_len);
+
+			ld_tmdm->pending_length = L2MUX_HDR_SIZE;
+			pending_len = ld_tmdm->pending_length;
+		}
+
+		/* Parse the header either from the pending buffer or
+		 * directly from the incoming data. */
+		if (pending_len)
+			pbuff_start = ld_tmdm->pending_buffer;
+		else
+			pbuff_start = buff_ptr;
+
+		pl2msg = (struct l2muxhdr *)pbuff_start;
+		l3len = l2mux_get_length(pl2msg);
+
+		/* Frame still incomplete: stash the rest and return. */
+		if (pending_len + curr_len < l3len + L2MUX_HDR_SIZE) {
+			memcpy(ld_tmdm->pending_buffer + pending_len,
+			buff_ptr,
+			curr_len);
+
+			ld_tmdm->pending_length += curr_len;
+			break;
+		}
+
+		if (pending_len) {
+			memcpy(ld_tmdm->pending_buffer + pending_len,
+				buff_ptr,
+				l3len + L2MUX_HDR_SIZE - pending_len);
+		}
+
+		print_hex_dump(KERN_DEBUG, "ld_tm_process_buff: ",
+				DUMP_PREFIX_NONE,
+				16, 1,
+				pbuff_start, l3len + L2MUX_HDR_SIZE,
+				1);
+
+		netdev_dbg(ld_tmdm->dev,
+			"total pending length %d",
+			pending_len);
+
+		send_buffer_to_l2mux(ld_tmdm, pbuff_start,
+					l3len + L2MUX_HDR_SIZE);
+
+		/* Advance past the frame we just delivered. */
+		curr_len -= (l3len + L2MUX_HDR_SIZE - pending_len);
+		buff_ptr += (l3len + L2MUX_HDR_SIZE - pending_len);
+
+		ld_tmdm->pending_length = 0;
+		pending_len = ld_tmdm->pending_length;
+		if (curr_len > 0) {
+			netdev_dbg(ld_tmdm->dev,
+			"current length is more than zero %d",
+			curr_len);
+			continue;
+		} else
+			break;
+	}
+
+}
+
+static void ld_tmodem_ldisc_receive
+	(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+{
+	struct ld_tmodem *ld_tmdm = tty->disc_data;
+	ld_tmdm->ldtmdm_dbg.ld_tmodem_rx_request_count++;
+	if (ld_tmdm->link_up == false) {
+		/* data received from PC => can TX */
+		ld_tmdm->link_up = true;
+	}
+	process_rx_buffer(ld_tmdm, count, cp);
+}
+
+static void ld_tty_wakeup_workfunction(struct work_struct *work)
+{
+	struct tty_struct *tty;
+	struct ld_tmodem *ld_tmdm;
+	struct ld_tty_wakeup_work_t *ld_work_tty_wk =
+				(struct ld_tty_wakeup_work_t *)work;
+
+	if (ld_work_tty_wk == NULL) {
+		pr_err("TTY work NULL\n");
+		return;
+	}
+
+	tty = ld_work_tty_wk->ld_work_write_wakeup_tty;
+	if (tty == NULL) {
+		pr_err("LD Work Queue tty Data NULL\n");
+		return;
+	}
+
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+	ld_tmdm = tty->disc_data;
+	if (ld_tmdm == NULL) {
+		pr_err("LD PN Work Queue DATA NULL\n");
+		return;
+	}
+
+	BUG_ON(ld_tmdm->tty != tty);
+	ld_tmdm_handle_tx(ld_tmdm);
+}
+
+static void ld_tmodem_ldisc_write_wakeup(struct tty_struct *tty)
+{
+	struct ld_tmodem *ld_tmdm;
+	ld_tmdm = tty->disc_data;
+	if (!ld_tmdm) {
+		pr_err("ld modem ldisc write wakeup: ld_tmdm is NULL\n");
+		return;
+	}
+	ld_tmdm->ld_tty_wakeup_work->ld_work_write_wakeup_tty = tty;
+	queue_work(ld_tmdm->ld_tmodem_wq,
+			(struct work_struct *)ld_tmdm->ld_tty_wakeup_work);
+}
+
+static int ld_tmodem_hangup_wait(void *data)
+{
+	return NETDEV_TX_OK;
+}
+
+static int ld_tmodem_ldisc_hangup(struct tty_struct *tty)
+{
+	struct ld_tmodem *ld_tmdm;
+	struct sk_buff *skb;
+
+	ld_tmdm = tty->disc_data;
+	ld_tmdm->ldtmdm_dbg.ld_tmodem_hangup_events++;
+	wait_on_bit_lock(&ld_tmdm->state, TMODEM_SENDING_BIT,
+			 ld_tmodem_hangup_wait, TASK_KILLABLE);
+
+	while ((skb = skb_dequeue(&ld_tmdm->head)) != NULL) {
+		skb->dev->stats.tx_dropped++;
+		free_ld_tmodem_skb(skb);
+	}
+	ld_tmdm->ld_backlog_len = LD_TMODEM_INIT_LEN;
+	clear_bit(TMODEM_SENDING_BIT, &ld_tmdm->state);
+	return NETDEV_TX_OK;
+}
+
+static struct tty_ldisc_ops ld_tmodem_ldisc = {
+	.owner = THIS_MODULE,
+	.name = "tmodem",
+	.open = ld_tmodem_ldisc_open,
+	.close = ld_tmodem_ldisc_close,
+	.receive_buf = ld_tmodem_ldisc_receive,
+	.write_wakeup = ld_tmodem_ldisc_write_wakeup,
+	.hangup = ld_tmodem_ldisc_hangup
+};
+
+static ssize_t tmdm_dbg_write(struct file *file, const char __user *buf,
+		size_t size, loff_t *ppos)
+{
+	if (!gb_ld_tmodem) {
+		pr_err("gb_ld_tmodem is NULL");
+		return -EBUSY;
+	}
+	/* reset received and transmitted stats while keeping the
+	 * error stats intact
+	 */
+	gb_ld_tmodem->ldtmdm_dbg.ld_tmodem_tx_request_count =
+	gb_ld_tmodem->ldtmdm_dbg.ld_tmodem_rx_request_count =
+	gb_ld_tmodem->ldtmdm_dbg.ld_tmodem_tx_bytes =
+	gb_ld_tmodem->ldtmdm_dbg.ld_tmodem_rx_bytes = 0;
+
+	return size;
+}
+
+static ssize_t tmdm_dbg_read(struct file *file, char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	char *buf;
+	int used = 0;
+	int ret;
+	struct ldtmdm_dbg_stats *dbg;
+	if (!gb_ld_tmodem) {
+		pr_err("gb_ld_tmodem is NULL");
+		return -EBUSY;
+	}
+
+	dbg = &gb_ld_tmodem->ldtmdm_dbg;
+
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = snprintf(buf + used, PAGE_SIZE - used,
+				"LD Thin Modem Tx request: %llu\n",
+				dbg->ld_tmodem_tx_request_count);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+					PAGE_SIZE - used,
+					"LD Thin Modem Rx request: %llu\n",
+					dbg->ld_tmodem_rx_request_count);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+					PAGE_SIZE - used,
+					"LD Thin Modem Tx Bytes: %llu\n",
+					dbg->ld_tmodem_tx_bytes);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+					PAGE_SIZE - used,
+					"LD Thin Modem Rx Bytes: %llu\n",
+					dbg->ld_tmodem_rx_bytes);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+				PAGE_SIZE - used,
+				"LD Thin Modem hangup events: %llu\n",
+				dbg->ld_tmodem_hangup_events);
+
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+					PAGE_SIZE - used,
+					"LD Thin Modem drop events: %llu\n",
+					dbg->ld_tmodem_drop_events);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+					PAGE_SIZE - used,
+					"LD Thin Modem Tx error: %llu\n",
+					dbg->ld_tmodem_skb_tx_err);
+	if (ret < 0)
+		goto out;
+
+	used = used + ret;
+
+	if (used >= PAGE_SIZE - 1)
+		goto out;
+
+	ret = snprintf(buf + used,
+				PAGE_SIZE - used,
+				"LD Thin Modem buff len: %d\n",
+				gb_ld_tmodem->ld_backlog_len);
+
+out:
+	ret = simple_read_from_buffer(user_buf, count, ppos,
+			buf,
+			strlen(buf));
+
+	kfree(buf);
+
+	return ret;
+
+}
+
+static const struct file_operations tmdm_dbg_fops = {
+	.read	= tmdm_dbg_read,
+	.write	= tmdm_dbg_write,
+	.open	= simple_open,
+	.llseek	= default_llseek,
+};
+
+static int ld_tmodem_dbgfs_init(struct net_device *ndev)
+{
+	int ret = -ENOMEM;
+	struct ld_tmodem *ld_tmdm;
+	struct dentry *root;
+	struct dentry *node;
+
+	ld_tmdm = netdev_priv(ndev);
+	root = debugfs_create_dir("ld_tmodem", NULL);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+
+	if (!root)
+		goto err_root;
+
+	node = debugfs_create_file("ld_dbg", S_IRUGO | S_IWUSR, root,
+					NULL, &tmdm_dbg_fops);
+	if (!node)
+		goto err_node;
+
+	ld_tmdm->root = root;
+	return 0;
+
+err_node:
+	debugfs_remove_recursive(root);
+err_root:
+	pr_err("ld_tmodem_dbgfs_init: Failed to initialize debugfs\n");
+	return ret;
+}
+
+static void
+remove_nw_interface(struct net_device *ndev)
+{
+	struct ld_tmodem *ld_tmdm;
+	ld_tmdm = netdev_priv(ndev);
+	kfree(ld_tmdm->pending_buffer);
+	unregister_netdev(ld_tmdm->dev);
+}
+
+static void
+ld_tmodem_l2_mux_deinit(void)
+{
+	int err;
+	err = l2mux_netif_rx_unregister(MHI_L3_CTRL_TMODEM);
+	if (err) {
+		pr_err("l2mux_netif_rx_unregister fails l3 id %d, error %d\n",
+						MHI_L3_CTRL_TMODEM, err);
+	}
+	err = mhi_unregister_protocol(MHI_L3_CTRL_TMODEM);
+	if (err) {
+		pr_err("mhi_unregister_protocol fails l3 id %d, error %d\n",
+						MHI_L3_CTRL_TMODEM, err);
+	}
+}
+
+static int ld_tmodem_l2_mux_init(void)
+{
+	int err;
+	err = l2mux_netif_rx_register(MHI_L3_CTRL_TMODEM, mhi_flow_ctrl_rx);
+	if (err) {
+		pr_err("l2mux_netif_rx_register fails l3 id %d, error %d\n",
+						MHI_L3_CTRL_TMODEM, err);
+		return err;
+	}
+	err = mhi_register_protocol(MHI_L3_CTRL_TMODEM);
+	if (err) {
+		l2mux_netif_rx_unregister(MHI_L3_CTRL_TMODEM);
+		pr_err("mhi_register_protocol fails l3 id %d, error %d\n",
+						MHI_L3_CTRL_TMODEM, err);
+		return err;
+	}
+	return 0;
+}
+
+static int create_nw_interface(void)
+{
+	struct net_device *ndev;
+	struct ld_tmodem *ld_tmdm;
+	int err;
+	ndev = alloc_netdev_mq(sizeof(*ld_tmdm), ld_tmodem_ifname,
+						ld_tmdm_net_setup, 2);
+	if (!ndev)
+		return -ENOMEM;
+
+	netdev_dbg(ndev, "ld_tmodem_ldisc_open starts\n");
+
+	ld_tmdm = netdev_priv(ndev);
+	spin_lock_init(&ld_tmdm->lock);
+	netif_carrier_off(ndev);
+	skb_queue_head_init(&ld_tmdm->head);
+	ld_tmdm->dev = ndev;
+	ld_tmdm->skb = NULL;
+	ld_tmdm->len = 0;
+
+	ld_tmdm->pending_length = 0;
+
+	netif_tx_stop_all_queues(ndev);
+
+	ld_tmdm->link_up = true;
+
+	ld_tmdm->pending_buffer = kmalloc(MHI_MAX_MTU, GFP_KERNEL);
+	if (!ld_tmdm->pending_buffer) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = register_netdev(ndev);
+out:
+	if (err) {
+		kfree(ld_tmdm->pending_buffer);
+		free_netdev(ndev);
+	} else {
+		gb_ld_tmodem = ld_tmdm;
+	}
+	return err;
+}
+
+static int __init tmodem_net_init(void)
+{
+	int retval;
+	struct workqueue_struct *ld_tmodem_wq;
+	struct ld_tty_wakeup_work_t *ld_tty_wakeup_work;
+	retval = tty_register_ldisc(N_LDTMODEM, &ld_tmodem_ldisc);
+
+	printk(KERN_INFO "%s called\n", __func__);
+
+	ld_tmodem_wq = create_workqueue("ld_queue");
+	if (NULL == ld_tmodem_wq) {
+		pr_err("Create Workqueue failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	ld_tty_wakeup_work = kmalloc(sizeof(struct ld_tty_wakeup_work_t),
+					GFP_KERNEL);
+	if (ld_tty_wakeup_work) {
+		INIT_WORK((struct work_struct *)ld_tty_wakeup_work,
+			  ld_tty_wakeup_workfunction);
+		ld_tty_wakeup_work->ld_work_write_wakeup_tty = NULL;
+	} else {
+		pr_err("TTY Wake up work Error\n");
+		retval = -ENOMEM; goto error1;
+	}
+
+	retval = ld_tmodem_l2_mux_init();
+	if (retval) {
+		pr_err("ld_tmodem_l2_mux_init fails\n");
+		goto error1;
+	}
+
+	retval = create_nw_interface();
+	if (retval) {
+		pr_err("create_nw_interface fails\n");
+		goto error2;
+	}
+
+	gb_ld_tmodem->ld_tmodem_wq = ld_tmodem_wq;
+	gb_ld_tmodem->ld_tty_wakeup_work = ld_tty_wakeup_work;
+
+	retval = ld_tmodem_dbgfs_init(gb_ld_tmodem->dev);
+	if (retval) {
+		pr_err("ld_tmodem_dbgfs_init fails\n");
+		goto error3;
+	}
+
+	return retval;
+
+error3:
+	remove_nw_interface(gb_ld_tmodem->dev);
+
+error2:
+	ld_tmodem_l2_mux_deinit();
+error1:
+	flush_workqueue(ld_tmodem_wq);
+	destroy_workqueue(ld_tmodem_wq);
+	kfree(ld_tty_wakeup_work);
+error:
+	tty_unregister_ldisc(N_LDTMODEM);
+	return retval;
+
+}
+
+static void __exit tmodem_net_remove(void)
+{
+
+	struct ld_tmodem *ld_tmdm = gb_ld_tmodem;
+	flush_workqueue(ld_tmdm->ld_tmodem_wq);
+	destroy_workqueue(ld_tmdm->ld_tmodem_wq);
+	kfree(ld_tmdm->ld_tty_wakeup_work);
+	debugfs_remove_recursive(ld_tmdm->root);
+	remove_nw_interface(ld_tmdm->dev);
+	ld_tmodem_l2_mux_deinit();
+	tty_unregister_ldisc(N_LDTMODEM);
+}
+
+module_init(tmodem_net_init);
+module_exit(tmodem_net_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("pranav@broadcom.com");
+MODULE_DESCRIPTION("TMODEM TTY line discipline");
+MODULE_ALIAS_LDISC(N_LDTMODEM);
+#endif /* CONFIG_BCM_KF_PHONET */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9726fe684ab8a35ded1f1acb9ae4ea0dbdbe6b12..a0b92f3ba629322f9bbed8050f56d5ca930f748f 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -163,6 +163,10 @@ static int pipe_handler_send_created_ind(struct sock *sk)
 		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
 		pn->tx_fc, pn->rx_fc,
 	};
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK(
+	    "pipe_handler_send_created_ind : send PNS_PIPE_CREATED_IND\n");
+#endif
 
 	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
 				data, 4, GFP_ATOMIC);
@@ -183,6 +187,9 @@ static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 			PN_LEGACY_FLOW_CONTROL,
 			PAD,
 	};
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pep_accept_conn : send PN_PIPE_NO_ERROR\n");
+#endif
 
 	might_sleep();
 	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
@@ -229,6 +236,10 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 {
 	u8 data[4] = { type, PAD, PAD, status };
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pipe_snd_status : PNS_PEP_STATUS_IND type=%d status=%d\n",
+		   type, status);
+#endif
 
 	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
 				data, 4, priority);
@@ -279,18 +290,33 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 
 	switch (hdr->data[1]) {
 	case PN_PEP_IND_FLOW_CONTROL:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_rcv_status : PN_PEP_IND_FLOW_CONTROL\n");
+#endif
 		switch (pn->tx_fc) {
 		case PN_LEGACY_FLOW_CONTROL:
 			switch (hdr->data[4]) {
 			case PEP_IND_BUSY:
+#ifdef CONFIG_BCM_KF_PHONET
+				PEP_PRINTK(
+				    "pipe_rcv_status : PN_LEGACY_FLOW_CONTROL / PEP_IND_BUSY\n");
+#endif
 				atomic_set(&pn->tx_credits, 0);
 				break;
 			case PEP_IND_READY:
+#ifdef CONFIG_BCM_KF_PHONET
+				PEP_PRINTK(
+				    "pipe_rcv_status : PN_LEGACY_FLOW_CONTROL / PEP_IND_READY\n");
+#endif
 				atomic_set(&pn->tx_credits, wake = 1);
 				break;
 			}
 			break;
 		case PN_ONE_CREDIT_FLOW_CONTROL:
+#ifdef CONFIG_BCM_KF_PHONET
+			PEP_PRINTK(
+			    "pipe_rcv_status : ONE_CREDIT_FLOW_CONTROL\n");
+#endif
 			if (hdr->data[4] == PEP_IND_READY)
 				atomic_set(&pn->tx_credits, wake = 1);
 			break;
@@ -298,6 +324,10 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 		break;
 
 	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK(
+		    "pipe_rcv_status : PN_PEP_IND_ID_MCFC_GRANT_CREDITS\n");
+#endif
 		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 			break;
 		atomic_add(wake = hdr->data[4], &pn->tx_credits);
@@ -319,6 +349,9 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 	struct pnpipehdr *hdr = pnp_hdr(skb);
 	u8 n_sb = hdr->data[0];
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pipe_rcv_created\n");
+#endif
 	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 	__skb_pull(skb, sizeof(*hdr));
 	while (n_sb > 0) {
@@ -353,10 +386,16 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	switch (hdr->message_id) {
 	case PNS_PEP_CONNECT_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_CONNECT_REQ\n");
+#endif
 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
 		break;
 
 	case PNS_PEP_DISCONNECT_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_DISCONNECT_REQ\n");
+#endif
 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 		sk->sk_state = TCP_CLOSE_WAIT;
 		if (!sock_flag(sk, SOCK_DEAD))
@@ -364,6 +403,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		break;
 
 	case PNS_PEP_ENABLE_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_ENABLE_REQ\n");
+#endif
 		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 		break;
@@ -371,9 +413,17 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 	case PNS_PEP_RESET_REQ:
 		switch (hdr->state_after_reset) {
 		case PN_PIPE_DISABLE:
+#ifdef CONFIG_BCM_KF_PHONET
+			PEP_PRINTK(
+			    "pipe_do_rcv : PNS_PEP_RESET_REQ / PN_PIPE_DISABLE\n");
+#endif
 			pn->init_enable = 0;
 			break;
 		case PN_PIPE_ENABLE:
+#ifdef CONFIG_BCM_KF_PHONET
+			PEP_PRINTK(
+			    "pipe_do_rcv : PNS_PEP_RESET_REQ / PN_PIPE_ENABLE\n");
+#endif
 			pn->init_enable = 1;
 			break;
 		default: /* not allowed to send an error here!? */
@@ -382,11 +432,17 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		}
 		/* fall through */
 	case PNS_PEP_DISABLE_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_DISABLE_REQ\n");
+#endif
 		atomic_set(&pn->tx_credits, 0);
 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 		break;
 
 	case PNS_PEP_CTRL_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_CTRL_REQ\n");
+#endif
 		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
 			atomic_inc(&sk->sk_drops);
 			break;
@@ -396,9 +452,15 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto queue;
 
 	case PNS_PIPE_ALIGNED_DATA:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_ALIGNED_DATA\n");
+#endif
 		__skb_pull(skb, 1);
 		/* fall through */
 	case PNS_PIPE_DATA:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_DATA\n");
+#endif
 		__skb_pull(skb, 3); /* Pipe data header */
 		if (!pn_flow_safe(pn->rx_fc)) {
 			err = sock_queue_rcv_skb(sk, skb);
@@ -418,23 +480,38 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto queue;
 
 	case PNS_PEP_STATUS_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PEP_STATUS_IND\n");
+#endif
 		pipe_rcv_status(sk, skb);
 		break;
 
 	case PNS_PIPE_REDIRECTED_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_REDIRECTED_IND\n");
+#endif
 		err = pipe_rcv_created(sk, skb);
 		break;
 
 	case PNS_PIPE_CREATED_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_CREATED_IND\n");
+#endif
 		err = pipe_rcv_created(sk, skb);
 		if (err)
 			break;
 		/* fall through */
 	case PNS_PIPE_RESET_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_RESET_IND\n");
+#endif
 		if (!pn->init_enable)
 			break;
 		/* fall through */
 	case PNS_PIPE_ENABLED_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_ENABLED_IND\n");
+#endif
 		if (!pn_flow_safe(pn->tx_fc)) {
 			atomic_set(&pn->tx_credits, 1);
 			sk->sk_write_space(sk);
@@ -446,6 +523,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		break;
 
 	case PNS_PIPE_DISABLED_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_do_rcv : PNS_PIPE_DISABLED_IND\n");
+#endif
 		sk->sk_state = TCP_SYN_RECV;
 		pn->rx_credits = 0;
 		break;
@@ -497,7 +577,9 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 	struct pep_sock *pn = pep_sk(sk);
 	struct pnpipehdr *hdr;
 	u8 n_sb;
-
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pep_connresp_rcv\n");
+#endif
 	if (!pskb_pull(skb, sizeof(*hdr) + 4))
 		return -EINVAL;
 
@@ -567,9 +649,15 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	switch (hdr->message_id) {
 	case PNS_PIPE_ALIGNED_DATA:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_handler_do_rcv: PNS_PIPE_ALIGNED_DATA\n");
+#endif
 		__skb_pull(skb, 1);
 		/* fall through */
 	case PNS_PIPE_DATA:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_handler_do_rcv: PNS_PIPE_DATA\n");
+#endif
 		__skb_pull(skb, 3); /* Pipe data header */
 		if (!pn_flow_safe(pn->rx_fc)) {
 			err = sock_queue_rcv_skb(sk, skb);
@@ -594,11 +682,29 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_CONNECT_RESP:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_handler_do_rcv: PNS_PEP_CONNECT_RESP\n");
+		if (sk->sk_state != TCP_SYN_SENT) {
+			PEP_PRINTK(
+			    "PNS_PEP_CONNECT_RESP sk->sk_state != TCP_SYN_SENT\n");
+			break;
+		}
+		if (!sock_flag(sk, SOCK_DEAD)) {
+			sk->sk_state_change(sk);
+			PEP_PRINTK(
+			    "PNS_PEP_CONNECT_RESP sock flag != SOCK_DEAD\n");
+		}
+#else
 		if (sk->sk_state != TCP_SYN_SENT)
 			break;
 		if (!sock_flag(sk, SOCK_DEAD))
 			sk->sk_state_change(sk);
+#endif
 		if (pep_connresp_rcv(sk, skb)) {
+#ifdef CONFIG_BCM_KF_PHONET
+			PEP_PRINTK(
+			    "PNS_PEP_CONNECT_RESP pep_connresp_rcv failed\n");
+#endif
 			sk->sk_state = TCP_CLOSE_WAIT;
 			break;
 		}
@@ -624,10 +730,16 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 		break;
 
 	case PNS_PEP_DISCONNECT_RESP:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_handler_do_rcv: PNS_PEP_DISCONNECT_RESP\n");
+#endif
 		/* sock should already be dead, nothing to do */
 		break;
 
 	case PNS_PEP_STATUS_IND:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pipe_handler_do_rcv: PNS_PEP_STATUS_IND\n");
+#endif
 		pipe_rcv_status(sk, skb);
 		break;
 	}
@@ -674,6 +786,9 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 	struct sockaddr_pn dst;
 	u8 pipe_handle;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pep_do_rcv\n");
+#endif
 	if (!pskb_may_pull(skb, sizeof(*hdr)))
 		goto drop;
 
@@ -691,7 +806,14 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	switch (hdr->message_id) {
 	case PNS_PEP_CONNECT_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pep_do_rcv: PNS_PEP_CONNECT_REQ\n");
+#endif
 		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
+#ifdef CONFIG_BCM_KF_PHONET
+			PEP_PRINTK(
+			    "pep_do_rcv: PNS_PEP_CONNECT_REQ reject PN_PIPE_ERR_PEP_IN_USE\n");
+#endif
 			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
 					GFP_ATOMIC);
 			break;
@@ -703,10 +825,16 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_DISCONNECT_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pep_do_rcv: PNS_PEP_DISCONNECT_REQ\n");
+#endif
 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
 		break;
 
 	case PNS_PEP_CTRL_REQ:
+#ifdef CONFIG_BCM_KF_PHONET
+		PEP_PRINTK("pep_do_rcv: PNS_PEP_CTRL_REQ\n");
+#endif
 		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
 		break;
 
@@ -899,6 +1027,9 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 	int err;
 	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pep_sock_connect\n");
+#endif
 	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
 		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
 
@@ -972,6 +1103,9 @@ static int pep_init(struct sock *sk)
 {
 	struct pep_sock *pn = pep_sk(sk);
 
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("pep_init\n");
+#endif
 	sk->sk_destruct = pipe_destruct;
 	INIT_HLIST_HEAD(&pn->hlist);
 	pn->listener = NULL;
@@ -1362,6 +1496,10 @@ static struct phonet_protocol pep_pn_proto = {
 
 static int __init pep_register(void)
 {
+#ifdef CONFIG_BCM_KF_PHONET
+	PEP_PRINTK("== initialization\n");
+	pr_debug("PEP initialization\n");
+#endif
 	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
 }
 
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bf5cf69c820a285be318ecb3dab044530f2c8bc2..35de93733be5909f01d28f57da10fd2966cc0466 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -33,6 +33,10 @@
 #include <net/sock.h>
 #include <net/netns/generic.h>
 #include <net/phonet/pn_dev.h>
+#ifdef CONFIG_BCM_KF_PHONET
+#include <net/phonet/phonet.h>
+#include <linux/export.h>
+#endif
 
 struct phonet_routes {
 	struct mutex		lock;
@@ -44,7 +48,11 @@ struct phonet_net {
 	struct phonet_routes routes;
 };
 
+#ifdef CONFIG_BCM_KF_PHONET
+static int phonet_net_id __read_mostly;
+#else
 int phonet_net_id __read_mostly;
+#endif
 
 static struct phonet_net *phonet_pernet(struct net *net)
 {
@@ -270,6 +278,9 @@ static void phonet_route_autodel(struct net_device *dev)
 	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
 	unsigned i;
 	DECLARE_BITMAP(deleted, 64);
+#ifdef CONFIG_BCM_KF_PHONET
+	LIMIT_NETDEBUG(KERN_WARNING "phonet_route_autodel : %s\n", dev->name);
+#endif
 
 	/* Remove left-over Phonet routes */
 	bitmap_zero(deleted, 64);
@@ -298,7 +309,11 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what,
 
 	switch (what) {
 	case NETDEV_REGISTER:
+#if defined(CONFIG_BCM_KF_PHONET) && defined(CONFIG_BCM_KF_MHI)
+		if ((dev->type == ARPHRD_PHONET) || (dev->type == ARPHRD_MHI))
+#else
 		if (dev->type == ARPHRD_PHONET)
+#endif
 			phonet_device_autoconf(dev);
 		break;
 	case NETDEV_UNREGISTER:
@@ -380,12 +395,19 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
 	mutex_unlock(&routes->lock);
 	return err;
 }
+#ifdef CONFIG_BCM_KF_PHONET
+EXPORT_SYMBOL(phonet_route_add);
+#endif
 
 int phonet_route_del(struct net_device *dev, u8 daddr)
 {
 	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
 	struct phonet_routes *routes = &pnn->routes;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	LIMIT_NETDEBUG(KERN_WARNING "phonet_route_del : %s  addr %x\n",
+		       dev->name, daddr);
+#endif
 	daddr = daddr >> 2;
 	mutex_lock(&routes->lock);
 	if (dev == routes->table[daddr])
@@ -400,6 +422,9 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
 	dev_put(dev);
 	return 0;
 }
+#ifdef CONFIG_BCM_KF_PHONET
+EXPORT_SYMBOL(phonet_route_del);
+#endif
 
 struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
 {
@@ -425,7 +450,17 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr)
 		dev_hold(dev);
 	rcu_read_unlock();
 
+#ifdef CONFIG_BCM_KF_PHONET
+	if (!dev) {
+		/* avoid sending the message on the default route;
+		 * if no route is found the skb is dropped */
+		//dev = phonet_device_get(net); /* Default route */
+		LIMIT_NETDEBUG(KERN_ERR
+			       "phonet_route_output : no route found !!!\n");
+	}
+#else
 	if (!dev)
 		dev = phonet_device_get(net); /* Default route */
+#endif
 	return dev;
 }
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f6761777de54b547c5634853a0a3d7630765e..3ebcfe66e6c68625eddf538e7797026a852e8ac8 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -70,8 +70,13 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
 	int err;
 	u8 pnaddr;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+#else
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+#endif
 
 	ASSERT_RTNL();
 
@@ -228,8 +233,13 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
 	int err;
 	u8 dst;
 
+#ifdef CONFIG_BCM_KF_PHONET
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+#else
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+#endif
 
 	ASSERT_RTNL();
 
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff30dfa9e4cc3bb54418e4dda44841b4d9be..74bff9d79d21edb6918608db8ac75438679b0fdf 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -70,6 +70,91 @@ static struct hlist_head *pn_hash_list(u16 obj)
 	return pnsocks.hlist + (obj & PN_HASHMASK);
 }
 
+#ifdef CONFIG_BCM_KF_PHONET
+/*
+ * Find address based on socket address, match only certain fields.
+ * Also grab sock if it was found. Remember to sock_put it later.
+ */
+struct sock *pn_find_sock_by_sa_and_skb(struct net *net,
+					const struct sockaddr_pn *spn,
+					struct sk_buff *skb)
+{
+	struct sock *sknode;
+	struct sock *rval = NULL;
+	struct hlist_node *node;
+	u16 obj = pn_sockaddr_get_object(spn);
+	u8 res = spn->spn_resource;
+	struct hlist_head *hlist = pnsocks.hlist;
+	unsigned h;
+	u8 type;
+	u8 subtype;
+
+	rcu_read_lock();
+
+	for (h = 0; h < PN_HASHSIZE; h++) {
+		sk_for_each_rcu(sknode, node, hlist) {
+			struct pn_sock *pn = pn_sk(sknode);
+			BUG_ON(!pn->sobject);	/* unbound socket */
+			if (!net_eq(sock_net(sknode), net))
+				continue;
+
+			if ((PN_PREFIX == pn->resource) && (PN_PREFIX == res)) {
+
+				if (skb_shinfo(skb)->nr_frags) {
+					struct page *msg_page;
+					u8 *msg;
+					skb_frag_t *msg_frag =
+					    &skb_shinfo(skb)->frags[0];
+
+					msg_page = skb_frag_page(msg_frag);
+					msg = page_address(msg_page);
+
+					type = msg[msg_frag->page_offset + 2];
+					subtype =
+					    msg[msg_frag->page_offset + 3];
+
+				} else {
+					type = *(skb->data + 2);
+					subtype = *(skb->data + 3);
+				}
+
+				if (type != pn->resource_type)
+					continue;
+
+				if (subtype != pn->resource_subtype)
+					continue;
+			}
+
+			/* If port is zero, look up by resource */
+			if (pn_port(obj)) {
+				/* Look up socket by port */
+				if (pn_port(pn->sobject) != pn_port(obj))
+					continue;
+			} else {
+
+				/* If port is zero, look up by resource */
+				if (pn->resource != res)
+					continue;
+			}
+
+			if (pn_addr(pn->sobject) &&
+			    pn_addr(pn->sobject) != pn_addr(obj))
+				continue;
+
+			rval = sknode;
+			sock_hold(sknode);
+			goto out;
+		}
+		hlist++;
+	}
+
+out:
+	rcu_read_unlock();
+
+	return rval;
+}
+#endif
+
 /*
  * Find address based on socket address, match only certain fields.
  * Also grab sock if it was found. Remember to sock_put it later.
@@ -371,6 +456,27 @@ static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
 	struct sock *sk = sock->sk;
 	struct pn_sock *pn = pn_sk(sk);
 
+#ifdef CONFIG_BCM_KF_PHONET
+	if (cmd == SIOCCONFIGTYPE) {
+		u16 type;
+		if (get_user(type, (__u16 __user *) arg))
+			return -EFAULT;
+
+		pn->resource_type = type;
+		return 0;
+	}
+
+	if (cmd == SIOCCONFIGSUBTYPE) {
+		u16 subtype;
+
+		if (get_user(subtype, (__u16 __user *) arg))
+			return -EFAULT;
+
+		pn->resource_subtype = subtype;
+		return 0;
+	}
+#endif
+
 	if (cmd == SIOCPNGETOBJECT) {
 		struct net_device *dev;
 		u16 handle;
@@ -672,8 +778,10 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
 
 	if (!net_eq(sock_net(sk), &init_net))
 		return -ENOIOCTLCMD;
+#ifndef CONFIG_BCM_KF_PHONET
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+#endif
 	if (pn_socket_autobind(sk->sk_socket))
 		return -EAGAIN;
 
@@ -691,8 +799,10 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
 {
 	int ret = -ENOENT;
 
+#ifndef CONFIG_BCM_KF_PHONET
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+#endif
 
 	mutex_lock(&resource_mutex);
 	if (pnres.sk[res] == sk) {
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7dbdae2cbd9e26ebb2d3185f056facf11f1..d7da447ba6d476c56ac1f102bd44a38f8ec39d6e 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,12 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
+#ifdef CONFIG_BCM_KF_PHONET
+#include <net/sock.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+#endif
+
 #define DYNAMIC_PORT_MIN	0x40
 #define DYNAMIC_PORT_MAX	0x7f
 
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe895b6636c4a57af6d72f156501e2c778992..d6e4d71d65fd3a09b5a46adf3c89cf4c4c6434ca 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,12 @@ menuconfig RFKILL
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c790204d042e3ac5ec4f2b322b8a0429628dbb5..1104a3be381a2f6b0f9512b1db75574f1d72e438 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -16,6 +16,9 @@
 #include <net/dsfield.h>
 #include <net/inet_ecn.h>
 #include <asm/byteorder.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
 
 /*
  * classid	class		marking
@@ -309,6 +312,9 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 		break;
 	}
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+        blog_skip(skb);
+#endif
 	return skb;
 }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942ce9e824866c4d74de75a38c652f1999cb3..48c1992a28a74adac12d472f9040186fe265cd90 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -39,6 +39,9 @@
 #include <linux/slab.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
 
 /* HTB algorithm.
     Author: devik@cdi.cz
@@ -901,7 +904,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			m |= 1 << prio;
 			skb = htb_dequeue_tree(q, prio, level);
 			if (likely(skb != NULL))
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                        {
+				blog_skip(skb);
+#endif
 				goto ok;
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+                        }
+#endif
 		}
 	}
 	sch->qstats.overlimits++;
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 2e4444fedbe0143430fdaffdd58c8793ee6da9de..85b4c3b743a65511144542ed61daf9cd49c321e9 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -160,3 +160,15 @@ config LIB80211_DEBUG
 	  from lib80211.
 
 	  If unsure, say N.
+
+config CFG80211_ALLOW_RECONNECT
+	bool "Allow reconnect while already connected"
+	depends on CFG80211
+	depends on BCM_KF_ANDROID && BCM_ANDROID
+	default n
+	help
+	  cfg80211 stack doesn't allow to connect if you are already
+	  connected. This option allows to make a connection in this case.
+
+	  Select this option ONLY for wlan drivers that are specifically
+	  built for such purposes.
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index c4ad7958af52e84754b073915d009531299b9864..5ec753e52d0666257aafc7dbb6a377adbaaf3cfb 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -95,6 +95,12 @@ int ieee80211_radiotap_iterator_init(
 	struct ieee80211_radiotap_header *radiotap_header,
 	int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
 {
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+/*CVE-2013-7027*/
+	/* check the radiotap header can actually be present */
+	if (max_length < sizeof(struct ieee80211_radiotap_header))
+		return -EINVAL;
+#endif
 	/* Linux only supports version 0 radiotap format */
 	if (radiotap_header->it_version)
 		return -EINVAL;
@@ -129,7 +135,13 @@ int ieee80211_radiotap_iterator_init(
 			 */
 
 			if ((unsigned long)iterator->_arg -
+#if defined(CONFIG_BCM_KF_MISC_3_4_CVE_PORTS)
+		/*CVE-2013-7027*/
+			    (unsigned long)iterator->_rtheader +
+			    sizeof(uint32_t) >
+#else
 			    (unsigned long)iterator->_rtheader >
+#endif
 			    (unsigned long)iterator->_max_length)
 				return -EINVAL;
 		}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a15d2a03172a4ab0015136911ffde32ba5e76e83..4a86d34829e524408b735e53d8163b5699d4d128 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2291,7 +2291,11 @@ static void __xfrm_garbage_collect(struct net *net)
 	}
 }
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+void xfrm_garbage_collect(struct net *net)
+#else
 static void xfrm_garbage_collect(struct net *net)
+#endif
 {
 	flow_cache_flush();
 	__xfrm_garbage_collect(net);
diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..11f2c6cce22ae77c4602a513ad21ea0bc833e269
--- /dev/null
+++ b/samples/uhid/Makefile
@@ -0,0 +1,13 @@
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := uhid-example
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_uhid-example.o += -I$(objtree)/usr/include
+
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 6a3ee981931d3c2b4438cff1d220ec2e8039bd7e..978416dd31ca0efeacfd938e961b241aca7633af 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -98,24 +98,24 @@ try-run = $(shell set -e;		\
 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
 
 as-option = $(call try-run,\
-	$(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
+	$(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
 
 # as-instr
 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
 
 as-instr = $(call try-run,\
-	printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
+	printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
 cc-option = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
+	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
+	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-option-align
 # Prefix align with either -falign or -malign
@@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-version
 # Usage gcc-ver := $(call cc-version)
@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\
-	$(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
+	$(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # ld-option
 # Usage: LDFLAGS += $(call ld-option, -X)
@@ -209,7 +209,7 @@ endif
 # >$< substitution to preserve $ when reloading .cmd file
 # note: when using inline perl scripts [perl -e '...$$t=1;...']
 # in $(cmd_xxx) double $$ your perl vars
-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
+make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
 
 # Find any prerequisites that is newer than target or that does not exist.
 # PHONY targets skipped in both cases.
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index ff1720d28d0c80be0df2e1d0ae8f5697794e36e9..0135b116890d73792832fcf91edc9542cbbd8c1e 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -307,6 +307,26 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ATM_BACKEND)
+# C++ support
+cmd_cc_o_cpp = $(CXX) $(c_flags) -c -o $@ $<
+quiet_cmd_cc_o_cpp = C++ $(quiet_modtag) $@
+
+define rule_cc_o_cpp
+	$(call echo-cmd,checksrc) $(cmd_checksrc)                         \
+	$(call echo-cmd,cc_o_cpp) $(cmd_cc_o_cpp);                        \
+	$(cmd_modversions)                                                \
+	$(cmd_record_mcount)						  \
+	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_cpp)' >  \
+	                                              $(dot-target).tmp;  \
+	rm -f $(depfile);                                                 \
+	mv -f $(dot-target).tmp $(dot-target).cmd
+endef
+
+%.o: %.cpp FORCE
+	$(call if_changed_rule,cc_o_cpp)
+endif # BCM_KF # (CONFIG_BCM_KF_ATM_BACKEND)
+
 # Single-part modules are special since we need to mark them in $(MODVERDIR)
 
 $(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0be6f110cce79be4c4acd2f4dbf4ac2ce7d24057..01151e6183b1bccfc313b1da702ca5e2b7b83d53 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -101,8 +101,13 @@ basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MISC_MAKEFILE)
+orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
+                 $(BCM_KBUILD_CMDLINE_FLAGS) $(ccflags-y) $(CFLAGS_$(basetarget).o)
+else # BCM_KF
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
+endif # BCM_KF
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
 _a_flags       = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
                  $(asflags-y) $(AFLAGS_$(basetarget).o)
@@ -266,6 +271,16 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) -d $(depfile) $<
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files together
+quiet_cmd_cat = CAT     $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
+
+endif
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 # Bzip2
 # ---------------------------------------------------------------------------
 
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index efa5d940e6324caa0aa120343043b89475502e74..844cf88a71ad11012794aa25460fb845f756acea 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -23,7 +23,12 @@ quiet_cmd_modules_install = INSTALL $@
 INSTALL_MOD_DIR ?= extra
 ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
 
+ifdef BCM_KF # defined(CONFIG_BCM_KF_MODINST_DIR)
+modinst_dir = $(if $(filter ../% /%,$@),extra/,kernel/$(@D))
+else # BCM_KF # defined(CONFIG_BCM_KF_MODINST_DIR)
 modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+endif # BCM_KF # defined(CONFIG_BCM_KF_MODINST_DIR)
+
 
 $(modules):
 	$(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
index debecb5561c481d8076cb97b36857878e6becb37..7f2126df91f2a876a5c8374339f22a6f6f4387ed 100644
--- a/scripts/gcc-version.sh
+++ b/scripts/gcc-version.sh
@@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
 	exit 1
 fi
 
-MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
-MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
+MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
+MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
 if [ "x$with_patchlevel" != "x" ] ; then
-	PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
+	PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
 	printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
 else
 	printf "%02d%02d\\n" $MAJOR $MINOR
diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
index 29493dc4528d0b88032f0b8d2747242e66e8f231..12dbd0b11ea4fe8d037107a590bd1f8baff21eab 100644
--- a/scripts/gcc-x86_32-has-stack-protector.sh
+++ b/scripts/gcc-x86_32-has-stack-protector.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
 if [ "$?" -eq "0" ] ; then
 	echo y
 else
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index afaec618b3955c9147608da4c915bc6159627cf0..973e8c1415677eeba513543655e939de234521e3 100644
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
 if [ "$?" -eq "0" ] ; then
 	echo y
 else
diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
index fa59cbf9d62cf464515662c8ae1e98e82eafc99d..854d9c7c675ca4782702e83e1fa04b17aa47d6f8 100755
--- a/scripts/kconfig/check.sh
+++ b/scripts/kconfig/check.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
 # Needed for systems without gettext
-$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
+$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
 #include <libintl.h>
 int main()
 {
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index f208f900ed3a8747f9bfd949b2221000d5021607..fb16c0a6f3d7cfa3c0e4e89ad9060c8f71835885 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -108,6 +108,12 @@ static int conf_askvalue(struct symbol *sym, const char *def)
 	case oldaskconfig:
 		fflush(stdout);
 		xfgets(line, 128, stdin);
+#if defined(CONFIG_BCM_KF_FAIL_CONFIG_ON_EOF) || !defined(CONFIG_BCM_IN_KERNEL)
+		if (feof(stdin)) {
+			fprintf(stderr, "Unexpected EOF\n");
+			exit(1);
+		}
+#endif
 		return 1;
 	default:
 		break;
@@ -308,6 +314,12 @@ static int conf_choice(struct menu *menu)
 		case oldaskconfig:
 			fflush(stdout);
 			xfgets(line, 128, stdin);
+#if defined(CONFIG_BCM_KF_FAIL_CONFIG_ON_EOF) || !defined(CONFIG_BCM_IN_KERNEL)
+			if (feof(stdin)) {
+				fprintf(stderr, "Unexpected EOF\n");
+				exit(1);
+			}
+#endif
 			strip(line);
 			if (line[0] == '?') {
 				print_help(menu);
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
index 82cc3a85e7f885e2c7ab52aa99a6f76242899aa5..50df490fe1de67203d132febb9b8a7688de04735 100644
--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
 
 # Check if we can link to ncurses
 check() {
-        $cc -xc - -o $tmp 2>/dev/null <<'EOF'
+        $cc -x c - -o $tmp 2>/dev/null <<'EOF'
 #include CURSES_LOC
 main() {}
 EOF
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index bccf07ddd0b67a0e2f761fd5721f274a49bf9bbb..f9770c1e22e5e52de528e25993bb5f3ecf38701c 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -463,6 +463,8 @@ while(<CIN>) {
 	if (defined($configs{$1})) {
 	    if ($localyesconfig) {
 	        $setconfigs{$1} = 'y';
+	        print "$1=y\n";
+	        next;
 	    } else {
 	        $setconfigs{$1} = $2;
 	    }
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index bd6dca8a0ab23b78e0499a8868ec08bdb92e48f3..a7b9664a1300ff583c32a513e6f6bf8b81902791 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -159,6 +159,7 @@ fi
 res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
 
 # scm version string if not at a tagged commit
+if false ; then # CONFIG_BCM_KF
 if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
 	# full scm version string
 	res="$res$(scm_version)"
@@ -172,5 +173,6 @@ else
 		res="$res${scm:++}"
 	fi
 fi
+fi
 
 echo "$res"
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 92271d32bc30b2056c472f273b1af6bed9847b31..2493cd705fa529f5e4af3cdf988d6c041cd1ae77 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -70,7 +70,15 @@ ifeq ($(ARCH),x86_64)
 	ARCH := x86
 	IS_X86_64 := 0
 	ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+		IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
+else
+		IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 		IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 	endif
 	ifeq (${IS_X86_64}, 1)
 		RAW_ARCH := x86_64
@@ -241,13 +249,29 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
 
 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
+else
 	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 $(OUTPUT)util/pmu-flex.c: util/pmu.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
 
 $(OUTPUT)util/pmu-bison.c: util/pmu.y
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
+else
+	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d9637da7333c2eb5de44b759842df68f76ccafe5..3f35ea3f5047d71e01933666b16606e3554aa66a 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -1,5 +1,4 @@
 
-%name-prefix "parse_events_"
 %parse-param {struct list_head *list_all}
 %parse-param {struct list_head *list_event}
 %parse-param {int *idx}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index 20ea77e93169076d378eac2a884d2ed3f038caf0..522943f7bfdf1b79a9d1a7f7e750a81352fc8360 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -1,5 +1,4 @@
 
-%name-prefix "perf_pmu_"
 %parse-param {struct list_head *format}
 %parse-param {char *name}
 
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index a93e06cfcc2a2f6218a7e7a4397510efa20fc2a3..26a5ddb2db02d0546c703b8993167ca55aa1654c 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -111,7 +111,15 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo;
 export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
 
 # check if compiler option is supported
+ifdef BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
+ifneq ($(strip $(CONFIG_BCM_ANDROID)),)
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+else
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+endif
+else # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+endif # BCM_KF # defined(CONFIG_BCM_KF_ANDROID)
 
 # use '-Os' optimization if available, else use -O2
 OPTIMIZATION := $(call cc-supports,-Os,-O2)