From cdfbc7377a85cfc1e1e79957e2d0cfbd3f22a358 Mon Sep 17 00:00:00 2001
From: "Waksman, Michael" <michael.waksman@intel.com>
Date: Tue, 27 Feb 2018 14:18:06 +0100
Subject: [PATCH] Merge pull request #247 in SW_PON/linux from
 feature/integrate-bm-qos-datapath to xrx500

PPV4 QoS integration

* commit '6c194145ceb32b6616c599792dbaffa0bf3c83f5': (75 commits)
  checkpatch warning fixing in datapath_swdev.c
  change space to tab in kconfig and remove trailing space
  Set BSL threshold when the first interface comes up using switch api
  Fix remove node command
  Added BM debugfs. Fixed code review comments
  Add entry log to qos api
  Fix pull request #247 comments
  fix qos proc bug
  tune debug info
  Omit pp_qos_platform.c
  get resources by name, not by index
  Change the sequence in deleting the interface to prevent a deadlock situation and hang
  maintain the sanity of the flags for alloc, otherwise it causes mismatch/error in the flag while deleting the interface from the bridge
  fix the param ep
  update qos proc
  cat /proc/dp/qos works without scheduler. But queue/scheduler/port stat still wrong
  Add qos debugfs documentation
  Add debug and stat API
  cqm small adjustments to compile with the lates BM changes
  BM checkpatch fixes
  ...
---
 .../bindings/net/intel,falconmx-bm.txt        |   13 +
 Documentation/pp_qos_debugfs.txt              |  133 +
 drivers/net/ethernet/lantiq/cqm/cbm_wrapper.c |    9 -
 drivers/net/ethernet/lantiq/cqm/cqm_common.h  |    1 -
 .../net/ethernet/lantiq/cqm/falconmx/Makefile |    1 +
 .../net/ethernet/lantiq/cqm/falconmx/cqm.c    |  133 +-
 .../net/ethernet/lantiq/cqm/falconmx/cqm.h    |    1 +
 .../ethernet/lantiq/cqm/falconmx/cqm_config.c |   12 +-
 .../ethernet/lantiq/cqm/falconmx/cqm_config.h |    8 +
 drivers/net/ethernet/lantiq/datapath/Kconfig  |  123 +-
 drivers/net/ethernet/lantiq/datapath/Makefile |    2 +-
 .../net/ethernet/lantiq/datapath/datapath.h   |  145 +-
 .../ethernet/lantiq/datapath/datapath_api.c   |   52 +-
 .../lantiq/datapath/datapath_instance.c       |    1 +
 .../lantiq/datapath/datapath_logical_dev.c    |    2 +-
 .../lantiq/datapath/datapath_loopeth_dev.c    |   69 +-
 .../lantiq/datapath/datapath_notifier.c       |    4 +-
 .../ethernet/lantiq/datapath/datapath_proc.c  |  294 +-
 .../lantiq/datapath/datapath_proc_api.c       |   53 +-
 .../lantiq/datapath/datapath_proc_qos.c       |  842 ++++++
 .../ethernet/lantiq/datapath/datapath_qos.c   |   22 +
 .../ethernet/lantiq/datapath/datapath_soc.c   |    1 +
 .../ethernet/lantiq/datapath/datapath_swdev.c |  201 +-
 .../ethernet/lantiq/datapath/gswip30/Kconfig  |   39 +-
 .../datapath/gswip30/datapath_lookup_proc.c   |   42 +-
 .../lantiq/datapath/gswip30/datapath_mib.c    |   32 +-
 .../lantiq/datapath/gswip30/datapath_proc.c   |    3 +-
 .../ethernet/lantiq/datapath/gswip31/Kconfig  |   68 +-
 .../ethernet/lantiq/datapath/gswip31/Makefile |    6 +-
 .../datapath/gswip31/datapath_debugfs.c       |  166 ++
 .../datapath/gswip31/datapath_ext_vlan.c      |  666 +++++
 .../lantiq/datapath/gswip31/datapath_gswip.c  |   81 +-
 .../datapath/gswip31/datapath_lookup_proc.c   |   80 +-
 .../lantiq/datapath/gswip31/datapath_misc.c   |  273 +-
 .../lantiq/datapath/gswip31/datapath_misc.h   |   44 +-
 .../lantiq/datapath/gswip31/datapath_ppv4.c   |  564 +++-
 .../lantiq/datapath/gswip31/datapath_ppv4.h   |  117 +-
 .../datapath/gswip31/datapath_ppv4_api.c      | 2403 +++++++++++++--
 .../lantiq/datapath/gswip31/datapath_proc.c   |  160 +-
 .../datapath/gswip31/datapath_switchdev.c     |   82 +-
 .../datapath/gswip31/datapath_switchdev.h     |    1 +
 drivers/net/ethernet/lantiq/ppv4/Kconfig      |   33 +
 drivers/net/ethernet/lantiq/ppv4/Makefile     |   11 +-
 drivers/net/ethernet/lantiq/ppv4/bm/Makefile  |   10 +
 .../ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c   |  316 ++
 .../net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c   | 2086 +++++++++++++
 .../net/ethernet/lantiq/ppv4/bm/pp_bm_drv.h   |  265 ++
 .../lantiq/ppv4/bm/pp_bm_drv_internal.h       |  212 ++
 drivers/net/ethernet/lantiq/ppv4/qos/Makefile |    9 +
 .../ethernet/lantiq/ppv4/qos/pp_qos_common.h  |  180 ++
 .../ethernet/lantiq/ppv4/qos/pp_qos_debugfs.c |  660 +++++
 .../net/ethernet/lantiq/ppv4/qos/pp_qos_fw.c  | 2373 +++++++++++++++
 .../net/ethernet/lantiq/ppv4/qos/pp_qos_fw.h  |  156 +
 .../ethernet/lantiq/ppv4/qos/pp_qos_kernel.h  |   75 +
 .../ethernet/lantiq/ppv4/qos/pp_qos_linux.c   |  536 ++++
 .../ethernet/lantiq/ppv4/qos/pp_qos_main.c    | 2191 ++++++++++++++
 .../ethernet/lantiq/ppv4/qos/pp_qos_tests.c   | 2574 +++++++++++++++++
 .../ethernet/lantiq/ppv4/qos/pp_qos_uc_defs.h |  516 ++++
 .../lantiq/ppv4/qos/pp_qos_uc_wrapper.h       |   34 +
 .../ethernet/lantiq/ppv4/qos/pp_qos_utils.c   | 2518 ++++++++++++++++
 .../ethernet/lantiq/ppv4/qos/pp_qos_utils.h   |  675 +++++
 include/net/datapath_api.h                    |   10 +-
 include/net/datapath_api_qos.h                |   78 +-
 include/net/datapath_inst.h                   |    3 +
 include/net/datapath_proc_api.h               |    9 +-
 include/net/pp_bm_regs.h                      |   38 +-
 include/net/pp_qos_drv.h                      | 1784 ++++++------
 include/net/pp_qos_drv_slim.h                 |   10 -
 68 files changed, 22571 insertions(+), 1740 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/net/intel,falconmx-bm.txt
 create mode 100644 Documentation/pp_qos_debugfs.txt
 create mode 100644 drivers/net/ethernet/lantiq/datapath/datapath_proc_qos.c
 create mode 100644 drivers/net/ethernet/lantiq/datapath/gswip31/datapath_debugfs.c
 create mode 100644 drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ext_vlan.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/bm/Makefile
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/Makefile
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_common.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_debugfs.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_kernel.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_linux.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_main.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_tests.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_defs.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_wrapper.h
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.c
 create mode 100644 drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.h

diff --git a/Documentation/devicetree/bindings/net/intel,falconmx-bm.txt b/Documentation/devicetree/bindings/net/intel,falconmx-bm.txt
new file mode 100644
index 000000000..8defde0dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/intel,falconmx-bm.txt
@@ -0,0 +1,13 @@
+Intel FALCON-MX BM driver Device Tree Bindings
+-------------------------------------------------------------
+
+ - compatible:          "intel,falconmx-bm"
+ - reg:                 TODO:
+
+Examples:
+
+bm@0 {
+        compatible = "intel,falconmx-bm";
+        reg = <0x00>;
+};
+
diff --git a/Documentation/pp_qos_debugfs.txt b/Documentation/pp_qos_debugfs.txt
new file mode 100644
index 000000000..e763e7236
--- /dev/null
+++ b/Documentation/pp_qos_debugfs.txt
@@ -0,0 +1,133 @@
+ppv4 qos debugfs API
+====================
+
+Each qos device has its own directory under DEBUGFS_ROOT/ppv4_qos: the directory for the first device
+is DEBUGFS_ROOT/ppv4_qos/qos0, the second device has DEBUGFS_ROOT/ppv4_qos/qos1, and so on.
+
+The files for each device are:
+
+--w------- 1 root root 0   1 16:50 cmd
+--w------- 1 root root 0   1 16:50 ctrl
+-r-------- 1 root root 0   1 16:50 geninfo
+-rw------- 1 root root 0   1 16:50 node
+-r-------- 1 root root 0   1 16:50 nodeinfo
+-r-------- 1 root root 0   1 16:50 phy2id
+-r-------- 1 root root 0   1 16:50 stat
+
+cmd and ctrl are for internal use and normally should not be used by clients.
+
+For the next explanation I will assume the following configuration (sketched as a tree below):
+3 ports - 0, 7, 26 with logical ids 0 1 2 respectively
+10 queues:
+	- the first 4 queues have logical ids 3 4 5 6 and are linked to port 0,
+	- 3 queues with logical ids 7 8 9 linked to port 7
+	- 3 queues with logical ids 10 11 12 linked to port 26
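+
+	0 (port 0)      1 (port 7)      2 (port 26)
+	|               |               |
+	3 4 5 6         7 8 9           10 11 12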
+
+I will define the term node_phy as the node number under which the qos scheduler stores information about a node.
+For ports, and only for them, the node_phy equals the port number, so port 26 has node_phy 26.
+
+
+General system info (geninfo)
+=============================
+Gives general information about the system:
+
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat geninfo
+Driver version: 1.0.0-fl
+FW version:     major(1) minor(0) build(0)
+Used nodes:     13
+Ports:          3
+Scheds:         0
+Queues:         10
+Internals:      0
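+
+(Here the 13 used nodes are simply the 3 ports plus the 10 queues of the
+example configuration.)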
+
+Per node information
+====================
+
+The node file defines the current node; its content is interpreted differently by each of the other files.
+
+Node info (nodeinfo)
+====================
+
+Most of the data is read from hardware (through firmware) and not from the driver's database.
+Interprets the content of the node file as the logical id of the node for which we want to retrieve info.
+
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# echo 1 > node
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat nodeinfo
+1(7) - Port: suspended(No) internal node(No)
+7(132) 8(133) 9(134)
+Bandwidth: 0 Kbps
+
+We write 1 to node, meaning we want info about the node whose logical id is 1.
+First line: logical id 1 is mapped to node_phy 7; the node is a port, it is not suspended and it is not an internal node
+Second line: the node's direct children are logical nodes 7 8 and 9, which respectively have node_phys 132 133 and 134
+Third line: the node's bandwidth limit
+
+Let's get info about the first queue linked to this port:
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# echo 7 > node
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat nodeinfo
+7(132) - Queue: suspended(No) internal node(No)
+7(132) ==> 1(7)
+Physical queue: 235
+Port: 7
+Bandwidth: 0 Kbps
+
+First line: logical id 7 is mapped to node_phy 132; the node is a queue, it is not suspended and it is not an internal node
+Second line: ancestors of the node, starting with the node itself, which is logical id 7 (node_phy 132); its first ancestor is logical id 1 (node_phy 7)
+Third line: the physical queue of this node is queue 235
+Fourth line: the queue is linked (possibly through schedulers) to port 7 - we could also see this from the second line, since the node_phy of the last ancestor is the port number
+
+Node stat (stat)
+================
+
+Interprets the content of the node file as the logical id of the node for which we want to retrieve stats.
+Statistics are aggregated.
+
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# ping -c 5 192.168.1.2
+PING 192.168.1.2 (192.168.1.2): 56 data bytes
+64 bytes from 192.168.1.2: seq=0 ttl=64 time=34.501 ms
+64 bytes from 192.168.1.2: seq=1 ttl=64 time=12.055 ms
+64 bytes from 192.168.1.2: seq=2 ttl=64 time=26.934 ms
+64 bytes from 192.168.1.2: seq=3 ttl=64 time=12.084 ms
+64 bytes from 192.168.1.2: seq=4 ttl=64 time=29.290 ms
+
+--- 192.168.1.2 ping statistics ---
+5 packets transmitted, 5 packets received, 0% packet loss
+round-trip min/avg/max = 12.055/22.972/34.501 ms
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat stat
+7(132) - Queue
+queue_packets_occupancy:0
+queue_bytes_occupancy:0
+total_packets_accepted:6		/**** No idea why it is 6 and not 5 (maybe ARP?) ****/
+total_packets_dropped:0
+total_packets_red_dropped:0
+total_bytes_accepted:598
+total_bytes_dropped:0
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# ping -c 3 192.168.1.2
+PING 192.168.1.2 (192.168.1.2): 56 data bytes
+64 bytes from 192.168.1.2: seq=0 ttl=64 time=30.657 ms
+64 bytes from 192.168.1.2: seq=1 ttl=64 time=56.920 ms
+64 bytes from 192.168.1.2: seq=2 ttl=64 time=12.592 ms
+
+--- 192.168.1.2 ping statistics ---
+3 packets transmitted, 3 packets received, 0% packet loss
+round-trip min/avg/max = 12.592/33.389/56.920 ms
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat stat
+7(132) - Queue
+queue_packets_occupancy:0
+queue_bytes_occupancy:0
+total_packets_accepted:9
+total_packets_dropped:0
+total_packets_red_dropped:0
+total_bytes_accepted:916
+total_bytes_dropped:0
+
+node_phy to id mapping
+======================
+
+Interprets the content of the node file as the node_phy of the node for which we want to get the logical id.
+
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# echo 7 > node
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# cat phy2id
+1
+
+The logical id of node_phy 7 (aka port 7) is 1.
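+
+Putting it together, a small shell loop (just a sketch; it only assumes the
+node and nodeinfo files described above) dumps the info of several nodes:
+
+root@LEDE:/sys/kernel/debug/ppv4_qos/qos0# for id in 0 1 2; do
+> echo $id > node; cat nodeinfo; done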
diff --git a/drivers/net/ethernet/lantiq/cqm/cbm_wrapper.c b/drivers/net/ethernet/lantiq/cqm/cbm_wrapper.c
index 4653a4890..4dca44230 100644
--- a/drivers/net/ethernet/lantiq/cqm/cbm_wrapper.c
+++ b/drivers/net/ethernet/lantiq/cqm/cbm_wrapper.c
@@ -1,14 +1,5 @@
 #include "cqm_common.h"
 static const struct cbm_ops *g_cbm_ops;
-int cbm_buff_resv_bytes_get(int cbm_inst, int size)
-{
-	if (g_cbm_ops->cbm_buff_resv_bytes_get)
-		return g_cbm_ops->cbm_buff_resv_bytes_get(cbm_inst, size);
-	else
-		return CBM_FAILURE;
-}
-EXPORT_SYMBOL(cbm_buff_resv_bytes_get);
-
 void cbm_setup_DMA_p2p(void)
 {
 	if (g_cbm_ops->cbm_setup_DMA_p2p)
diff --git a/drivers/net/ethernet/lantiq/cqm/cqm_common.h b/drivers/net/ethernet/lantiq/cqm/cqm_common.h
index d0d2ece5e..8aeabf6e8 100644
--- a/drivers/net/ethernet/lantiq/cqm/cqm_common.h
+++ b/drivers/net/ethernet/lantiq/cqm/cqm_common.h
@@ -195,7 +195,6 @@ struct cbm_ops {
 	s32 (*pon_deq_cntr_get)(int port, u32 *count);
 	void (*cbm_setup_DMA_p2p)(void);
 	int (*cbm_turn_on_DMA_p2p)(void);
-	int (*cbm_buff_resv_bytes_get)(int, int);
 };
 
 static inline void set_val(void __iomem *reg, u32 val, u32 mask, u32 offset)
diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/Makefile b/drivers/net/ethernet/lantiq/cqm/falconmx/Makefile
index bdf790ce6..ccc6be681 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/Makefile
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/Makefile
@@ -4,4 +4,5 @@
 
 obj-$(CONFIG_LTQ_CBM) += cqm.o cqm_config.o cqm_proc.o
 #CFLAGS_cqm.o := -DDEBUG
+ccflags-y       += -Iinclude/net
 
diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
index 0c9d2ce36..192b159e3 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
@@ -2,6 +2,18 @@
 #include "cqm_proc.h"
 #include "../cqm_dev.h"
 #include <net/datapath_proc_api.h>
+#include <net/switch_api/gsw_flow_ops.h>
+
+#ifdef CONFIG_LTQ_PPV4_BM_SLIM
+#define CONFIG_CQM_SIZE0_BUF_SIZE 2048
+#define CONFIG_CQM_SIZE1_BUF_SIZE 2048
+#define CONFIG_CQM_SIZE2_BUF_SIZE 2048
+#define CONFIG_CQM_SIZE3_BUF_SIZE 2048
+#define CONFIG_CQM_NUM_SIZE0_BUF 0x80
+#define CONFIG_CQM_NUM_SIZE1_BUF 0x80
+#define CONFIG_CQM_NUM_SIZE2_BUF 0x80
+#define CONFIG_CQM_NUM_SIZE3_BUF 0x80
+#endif
 
 static const char cqm_name[] = "cqm";
 static void __iomem *bufreq[CQM_FMX_NUM_POOLS];
@@ -45,6 +57,33 @@ struct cqm_egp_map epg_lookup_table[] = {
 
 struct cqm_buf_dbg_cnt cqm_dbg_cntrs[CQM_MAX_POLICY_NUM][CQM_MAX_POOL_NUM];
 
+static int get_buff_resv_bytes(int cbm_inst, int size)
+{
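+	/* Return the BSL threshold for a buffer-size index: the pool's
+	 * buffer size minus the BSL_THRES headroom reserved per buffer.
+	 */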
+	int bsl_thr_val = 0;
+
+	dev_info(cqm_ctrl->dev, "BSL thres %d\n", BSL_THRES);
+
+	switch (size) {
+	case 0:
+		bsl_thr_val = CONFIG_CQM_SIZE0_BUF_SIZE - BSL_THRES;
+		break;
+	case 1:
+		bsl_thr_val = CONFIG_CQM_SIZE1_BUF_SIZE - BSL_THRES;
+		break;
+	case 2:
+		bsl_thr_val = CONFIG_CQM_SIZE2_BUF_SIZE - BSL_THRES;
+		break;
+	case 3:
+		bsl_thr_val = CONFIG_CQM_SIZE3_BUF_SIZE - BSL_THRES;
+		break;
+	default:
+		dev_err(cqm_ctrl->dev, "%s: unsupported size %d\n", __func__,
+			size);
+		break;
+	}
+	return bsl_thr_val;
+}
+
 int find_dqm_port_type(int port)
 {
 	if ((port >= DQM_CPU_START_ID) && (port <= DQM_CPU_END_ID))
@@ -1747,7 +1786,6 @@ s32 cqm_cpu_port_get(struct cbm_cpu_port_data *data, u32 flags)
 	int i;
 	int valid_type = 0;
 	u32 type;
-	void *txpush = cqm_ctrl->txpush;
 	struct cbm_tx_push *ptr;
 
 	for (i = 0; i < CQM_MAX_CPU; i++) {
@@ -1761,10 +1799,11 @@ s32 cqm_cpu_port_get(struct cbm_cpu_port_data *data, u32 flags)
 		ptr->deq_port = i;
 		ptr->tx_ring_size = 1;
 		ptr->tx_b_credit = 0;
-		ptr->tx_ring_addr = (u32)(txpush + (TXPUSH_CMD_RX_EGP_1 * i)) &
-				     0x7FFFFFF;
+		ptr->tx_ring_addr = (u32)(cqm_ctrl->txpush +
+					  (TXPUSH_CMD_RX_EGP_1 * i)) &
+					  0x7FFFFF;
 		ptr->tx_ring_offset = TXPUSH_CMD_RX_EGP_1;
-		ptr->tx_pkt_credit = 2;
+		ptr->tx_pkt_credit = dqm_port_info[i].dq_txpush_num;
 	}
 	return CBM_SUCCESS;
 }
@@ -1801,10 +1840,10 @@ static void fill_dp_alloc_data(struct cbm_dp_alloc_data *data, int dp,
 		p_info->dma_ch = chan;
 		snprintf(p_info->dma_chan_str, DMA_CH_STR_LEN, "port%d", port);
 		data->dma_chan = p_info->dma_ch;
-		data->tx_pkt_credit = p_info->deq_info.num_desc;
+		data->tx_pkt_credit = p_info->dq_txpush_num;
 	} else {
 		data->flags |= CBM_PORT_PKT_CRDT_SET;
-		data->tx_pkt_credit = p_info->deq_info.num_desc;
+		data->tx_pkt_credit = p_info->dq_txpush_num;
 	}
 	data->flags |= CBM_PORT_RING_ADDR_SET |
 		       CBM_PORT_RING_SIZE_SET |
@@ -2050,8 +2089,12 @@ static s32 handle_dma_chnl_init(int port, u32 flags)
 static s32 dp_enable(struct module *owner, u32 port_id,
 		     struct cbm_dp_en_data *data, u32 flags, u32 alloc_flags)
 {
-	int port, type;
+	int port, type, i;
 	u32 val;
+	struct core_ops *ops;
+	GSW_PMAC_Glbl_Cfg_t glbl_cfg;
+
+	static bool set_bsl;
 	void *deq = cqm_ctrl->deq;
 
 	port = data->deq_port;
@@ -2073,9 +2116,9 @@ static s32 dp_enable(struct module *owner, u32 port_id,
 			pib_port_enable(port, 0);
 		} else {
 			val = CFG_PON_EGP_26_DQREQ_MASK |
-			      CFG_PON_EGP_26_BUFRTN_MASK |
-			      CFG_PON_EGP_26_BFBPEN_MASK |
-			      CFG_PON_EGP_26_DQPCEN_MASK;
+				CFG_PON_EGP_26_BUFRTN_MASK |
+				CFG_PON_EGP_26_BFBPEN_MASK |
+				CFG_PON_EGP_26_DQPCEN_MASK;
 			val |= ((data->deq_port << CFG_DMA_EGP_7_EPMAP_POS) &
 				CFG_DMA_EGP_7_EPMAP_MASK);
 			cbm_w32((deq + DQ_PON_PORT((port), cfg)),
@@ -2090,6 +2133,20 @@ static s32 dp_enable(struct module *owner, u32 port_id,
 		return CBM_FAILURE;
 	break;
 	}
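+	/* One-time setup: when the first interface comes up, program the
+	 * PMAC BSL thresholds and max jumbo length through the switch API
+	 * on both PMACs (id 0 and 1).
+	 */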
+	if (!set_bsl) {
+		ops = gsw_get_swcore_ops(0);
+		for (i = 0; i < 2; i++) {
+			glbl_cfg.nPmacId = i;
+			ops->gsw_pmac_ops.Pmac_Gbl_CfgGet(ops, &glbl_cfg);
+			glbl_cfg.nMaxJumboLen = get_buff_resv_bytes(0, 3);
+			glbl_cfg.nBslThreshold[0] = get_buff_resv_bytes(0, 0);
+			glbl_cfg.nBslThreshold[1] = get_buff_resv_bytes(0, 1);
+			glbl_cfg.nBslThreshold[2] = get_buff_resv_bytes(0, 2);
+			ops->gsw_pmac_ops.Pmac_Gbl_CfgSet(ops, &glbl_cfg);
+		}
+		set_bsl = true;
+	}
+
 	return CBM_SUCCESS;
 }
 
@@ -2718,6 +2775,7 @@ static int init_cqm_basic(struct platform_device *pdev)
 
 static int qos_init(struct platform_device *pdev)
 {
+#ifdef CONFIG_LTQ_PPV4_QOS_SLIM
 	u32 adj_size;
 	u32 adjusted_qos_ram_base;
 	u32 size = CONFIG_CQM_QMGR_NUM_DESC * 16;
@@ -2744,6 +2802,9 @@ static int qos_init(struct platform_device *pdev)
 			&adj_size,
 			64);
 	qos_config((void *)__pa(adjusted_qos_ram_base));
+#else
+#warning Updated "qos_init" implementation needed!
+#endif
 	return CBM_SUCCESS;
 }
 
@@ -2794,11 +2855,10 @@ static int pool_config(struct platform_device *pdev, int p, u32 buf_size,
 		panic("Total mem alloc exceeds CMA zone %d\n",
 		      cqm_ctrl->max_mem_alloc);
 	cqm_ctrl->bm_buf_base[p] = dma_alloc_attrs(&pdev->dev, size,
-						   &cqm_ctrl->dma_hndl_p[p],
-						   GFP_KERNEL,
-						   DMA_ATTR_NON_CONSISTENT);
-	dev_dbg(cqm_ctrl->dev, "qos bm buf 0x%p\n",
-		cqm_ctrl->bm_buf_base[p]);
+			&cqm_ctrl->dma_hndl_p[p],
+			GFP_KERNEL,
+			DMA_ATTR_NON_CONSISTENT);
+	dev_dbg(cqm_ctrl->dev, "qos bm buf 0x%p\n", cqm_ctrl->bm_buf_base[p]);
 	if (!cqm_ctrl->bm_buf_base[p]) {
 		dev_err(cqm_ctrl->dev, "Error in pool %d buffer allocation\n",
 			p);
@@ -2817,11 +2877,10 @@ static int pool_config(struct platform_device *pdev, int p, u32 buf_size,
 	return CBM_SUCCESS;
 }
 
-#if 1
 static int bm_init(struct platform_device *pdev)
 {
 	struct bmgr_pool_params p_params;
-	u8 i;
+	u8 i, j;
 	int result, pool = 0;
 	u32 buf_size;
 	u32 start_low;
@@ -2847,7 +2906,9 @@ static int bm_init(struct platform_device *pdev)
 		panic("pool %d allocation failed\n", pool);
 	pool++;
 
+#ifdef CONFIG_LTQ_PPV4_BM_SLIM
 	bmgr_driver_init();
+#endif
 	for (i = 0; i < CQM_FMX_NUM_BM_POOLS; i++) {
 		p_params.group_id = 0;
 		p_params.num_buffers = bm_pool_conf[i].buf_frm_num;
@@ -2867,12 +2928,14 @@ static int bm_init(struct platform_device *pdev)
 		bmgr_pool_configure(&p_params, &i);
 	}
 
-	for (i = 0; i < CQM_FMX_NUM_BM_POLICY; i++)
-		bmgr_policy_configure(&policy_params[i], &i);
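+	/* Use a separate loop counter here: the second argument looks like
+	 * an output (the assigned policy id), so it must not also be the
+	 * loop index.
+	 */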
+	for (j = 0; j < CQM_FMX_NUM_BM_POLICY; j++)
+		bmgr_policy_configure(&policy_params[j], &i);
+
+#ifdef CONFIG_LTQ_PPV4_BM_SLIM
 	new_init_bm();
+#endif
 	return CBM_SUCCESS;
 }
-#endif
 
 static int cbm_hw_init(struct platform_device *pdev)
 {
@@ -3226,6 +3289,7 @@ static int conf_deq_cpu_port(const struct dqm_cpu_port *cpu_ptr)
 	p_info->cbm_buf_free_base = deq + DQ_CPU_PORT(port, ptr_rtn_dw2);
 	p_info->num_free_entries = (port > 3) ? 32 : 1;
 	p_info->cpu_port_type = cpu_ptr->cpu_port_type;
+	p_info->dq_txpush_num = cpu_ptr->txpush_desc;
 	p_info->valid = 1;
 
 	dev_dbg(cqm_ctrl->dev, "0x%p %d %d %d 0x%p %d\n",
@@ -3277,6 +3341,7 @@ static int conf_deq_dma_port(const struct dqm_dma_port *dma_ptr)
 		p_info->deq_info.dma_tx_chan = dma_ptr->dma_chan + j;
 		p_info->cbm_buf_free_base = NULL;
 		p_info->num_free_entries = 0;
+		p_info->dq_txpush_num = dma_ptr->txpush_desc;
 		if (!get_matching_flag(&flags, index))
 			p_info->egp_type = flags;
 		p_info->dma_dt_ch = dma_ptr->dma_chan + j;
@@ -3401,33 +3466,6 @@ static void cqm_rst(struct cqm_data *lpp)
 	cqm_rst_deassert(lpp);
 }
 
-int get_buff_resv_bytes(int cbm_inst, int size)
-{
-	int bsl_thr_val = 0;
-
-	dev_info(cqm_ctrl->dev, "BSL thres %d\n", BSL_THRES);
-
-	switch (size) {
-	case 0:
-		bsl_thr_val = CONFIG_CQM_SIZE0_BUF_SIZE - BSL_THRES;
-		break;
-	case 1:
-		bsl_thr_val = CONFIG_CQM_SIZE1_BUF_SIZE - BSL_THRES;
-		break;
-	case 2:
-		bsl_thr_val = CONFIG_CQM_SIZE2_BUF_SIZE - BSL_THRES;
-		break;
-	case 3:
-		bsl_thr_val = CONFIG_CQM_SIZE3_BUF_SIZE - BSL_THRES;
-		break;
-	default:
-		dev_err(cqm_ctrl->dev, "%s: unsupported size %d\n", __func__,
-			size);
-		break;
-	}
-	return bsl_thr_val;
-}
-
 static const struct cbm_ops cqm_ops = {
 	.cbm_igp_delay_set = cqm_igp_delay_set,
 	.cbm_igp_delay_get = cqm_igp_delay_get,
@@ -3455,7 +3493,6 @@ static const struct cbm_ops cqm_ops = {
 	.pon_deq_cntr_get = pon_deq_cntr_get,
 	.set_lookup_qid_via_index = set_lookup_qid_via_index_fmx,
 	.get_lookup_qid_via_index = get_lookup_qid_via_idx_fmx,
-	.cbm_buff_resv_bytes_get = get_buff_resv_bytes,
 };
 
 static const struct of_device_id cqm_falconmx_match[] = {
diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.h b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.h
index ad1734fc7..892003a3e 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.h
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.h
@@ -472,6 +472,7 @@ struct cqm_dqm_port_info {
 	void		*cbm_buf_free_base;
 	u32	num_free_entries;/*!< Number of Free Port entries */
 	cbm_dq_info_t	deq_info;
+	u32 dq_txpush_num;
 	u32 egp_type;
 	char dma_chan_str[DMA_CH_STR_LEN];
 	u32 cpu_port_type;
diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.c b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.c
index 0490c322f..4a36f9fd5 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.c
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.c
@@ -79,25 +79,29 @@ const struct cqm_config falcon_cqm_config[] = {
 	.type = DQM_CPU_TYPE,
 	.data.dqm_cpu.port = 0,
 	.data.dqm_cpu.cpu_port_type = DP_F_DEQ_CPU,
-	.data.dqm_cpu.num_desc = 2
+	.data.dqm_cpu.num_desc = 2,
+	.data.dqm_cpu.txpush_desc = 2,
 	},
 	{
 	.type = DQM_CPU_TYPE,
 	.data.dqm_cpu.port = 4,
 	.data.dqm_cpu.cpu_port_type = 1,
-	.data.dqm_cpu.num_desc = 32
+	.data.dqm_cpu.num_desc = 32,
+	.data.dqm_cpu.txpush_desc = 32,
 	},
 	{
 	.type = DQM_CPU_TYPE,
 	.data.dqm_cpu.port = 5,
 	.data.dqm_cpu.cpu_port_type = 1,
 	.data.dqm_cpu.num_desc = 32,
+	.data.dqm_cpu.txpush_desc = 32,
 	},
 	{
 	.type = DQM_CPU_TYPE,
 	.data.dqm_cpu.port = 6,
 	.data.dqm_cpu.cpu_port_type = 1,
-	.data.dqm_cpu.num_desc = 32
+	.data.dqm_cpu.num_desc = 32,
+	.data.dqm_cpu.txpush_desc = 32,
 	},
 	{
 	.type = DQM_DMA_TYPE,
@@ -107,6 +111,7 @@ const struct cqm_config falcon_cqm_config[] = {
 	.data.dqm_dma.dma_chan = 0,
 	.data.dqm_dma.port_enable = 1,
 	.data.dqm_dma.num_desc = 8,
+	.data.dqm_dma.txpush_desc = 8,
 	},
 	{
 	.type = DQM_PON_TYPE,
@@ -116,6 +121,7 @@ const struct cqm_config falcon_cqm_config[] = {
 	.data.dqm_dma.dma_chan = 0,
 	.data.dqm_dma.port_enable = 1,
 	.data.dqm_dma.num_desc = 16,
+	.data.dqm_dma.txpush_desc = 8,
 	},
 	{
 	.type = EQM_CPU_TYPE,
diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.h b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.h
index 1e7196e96..b82f5f002 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.h
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm_config.h
@@ -1,7 +1,11 @@
 #ifndef __CQM_CONFIG_H__
 #define __CQM_CONFIG_H__
 
+#ifdef CONFIG_LTQ_PPV4_BM_SLIM
 #include <net/bm_drv_slim.h>
+#else
+#include "../../ppv4/bm/pp_bm_drv.h"
+#endif
 
 #define CQM_FMX_NUM_BM_POOLS	4
 #define CQM_FMX_NUM_POOLS	(CQM_FMX_NUM_BM_POOLS + 1)
@@ -50,6 +54,7 @@ struct dqm_dma_port {
 	u32 dma_chan;
 	u32 port_enable;
 	u32 num_desc;
+	u32 txpush_desc;
 };
 
 struct dqm_pon_port {
@@ -61,6 +66,7 @@ struct dqm_pon_port {
 	u32 dma_chan;
 	u32 port_enable;
 	u32 num_desc;
+	u32 txpush_desc;
 };
 
 struct dqm_cpu_port {
@@ -68,6 +74,7 @@ struct dqm_cpu_port {
 	u32 queue;
 	u32 cpu_port_type;
 	u32 num_desc;
+	u32 txpush_desc;
 };
 
 struct eqm_dma_port {
@@ -115,4 +122,5 @@ extern const struct cqm_config falcon_cqm_config[];
 extern struct cqm_ctrl g_fmx_ctrl;
 extern struct cqm_bm_pool_config bm_pool_conf[CQM_FMX_NUM_POOLS];
 extern struct bmgr_policy_params policy_params[CQM_FMX_NUM_BM_POLICY];
+
 #endif /* __CQM_CONFIG_H__ */
diff --git a/drivers/net/ethernet/lantiq/datapath/Kconfig b/drivers/net/ethernet/lantiq/datapath/Kconfig
index c431024c9..475af401a 100644
--- a/drivers/net/ethernet/lantiq/datapath/Kconfig
+++ b/drivers/net/ethernet/lantiq/datapath/Kconfig
@@ -9,8 +9,7 @@ menuconfig LTQ_DATAPATH
 	  Datapath Lib is to provide common rx/tx wrapper Lib without taking
 	  care of much HW knowledge and also provide common interface for legacy
 	  devices and different HW like to CBM or LRO.
-	  Take note:
-	    All devices need to register to datapath Lib first
+	  Take note: all devices need to register with the datapath Lib first
 
 if LTQ_DATAPATH
 config LTQ_DATAPATH_ACA_CSUM_WORKAROUND
@@ -28,61 +27,76 @@ config LTQ_DATAPATH_MANUAL_PARSE
 	depends on LTQ_DATAPATH
 	default y
 	---help---
-		Manual parse network protocol for tcp offloading
-		Only support limited tunnel yet
-		later need to enhance to support other tunnels
-		Also need to study to use network stack information
+	  Manually parse network protocols for TCP offloading.
+	  Only limited tunnel types are supported yet;
+	  support for other tunnels needs to be added later.
+	  Using information from the network stack should also be studied.
+
 config LTQ_DATAPATH_COPY_LINEAR_BUF_ONLY
-        bool "Datapath Copy linear buffer only for skb"
-        default n
+	bool "Datapath Copy linear buffer only for skb"
+	default n
 	depends on LTQ_DATAPATH
-        ---help---
-           Datapath Copy linear buffer only for skb if need to alloc new buffer.
-		   For TSO/GSO case, it will not consider
-		   Make sure TSO/GSO always with enough header room to insert pmac header
-		   need to enhance in the future
+	---help---
+	  Copy only the linear buffer of an skb when a new buffer must be allocated.
+	  The TSO/GSO case is not considered;
+	  make sure TSO/GSO skbs always have enough header room to insert the pmac header.
+	  This needs to be enhanced in the future.
+
 config LTQ_DATAPATH_DBG
 	bool "Datapath Debug Tool"
 	default y
 	depends on LTQ_DATAPATH
 	---help---
-	  Datapath Debug Tool is used to provide simple debug proc tool
-	  Each flag can be enabled/disabled easily
-	  Once this flag is enabled, the debugging information will be printed out
-	  otherwise, no debugging information for this flag will be printed
+	  Datapath Debug Tool provides a simple debug framework.
+	  All other debug tools are based on it;
+	  once it is disabled, all other datapath debug tools are disabled.
+	  It is best to keep it enabled by default.
+
+config LTQ_DATAPATH_LOOPETH
+	bool "Datapath Debug Tool to simulate datapath registration flow"
+	default y
+	depends on LTQ_DATAPATH_DBG
+	---help---
+	  Datapath Debug Tool to simulate different peripherals registering to datapath.
+	  For example, it can simulate Fast WIFI/Litepath/logical devices;
+	  later it can also simulate tcont and gem devices.
+	  It is best to keep it enabled by default.
+
 config LTQ_DATAPATH_DBG_PROTOCOL_PARSE
-        bool "Datapath Debug Tool for hw checksum's protocol parsing"
-        default n
-        depends on LTQ_DATAPATH_DBG
-        ---help---
-          Datapath Debug Tool for hw checksum's protocol parsing
-		  Only for debugging purpose
-		  By default it should be disabled.
+	bool "Datapath Debug Tool for hw checksum's protocol parsing"
+	default n
+	depends on LTQ_DATAPATH_DBG
+	---help---
+	  Datapath Debug Tool for hw checksum's protocol parsing.
+	  For debugging purposes only;
+	  by default it should be disabled.
+
 config LTQ_DATAPATH_EXTRA_DEBUG
 	bool "extra debugging support"
 	default n
 	depends on  LTQ_DATAPATH_DBG
 	---help---
-		This is to enable/disable extra strict debugging support.
-		This is useful during initial system bring up
-		It will affect performance
-		By default it should be disabled.
+	  This enables extra strict debugging support.
+	  It is useful during initial system bring-up,
+	  but it will affect performance;
+	  by default it should be disabled.
 
 config LTQ_DATAPATH_SWDEV_TEST
 	bool "Test Switchdev Event"
 	default n
 	depends on  LTQ_DATAPATH_DBG && LTQ_DATAPATH_SWITCHDEV
 	---help---
-		This is to force enable macro CONFIG_SOC_SWITCHDEV_TESTING 
-		in order to test switchdev event
-		without real switchdev handling
+	  This force-enables the macro CONFIG_SOC_SWITCHDEV_TESTING
+	  in order to test switchdev events
+	  without real switchdev handling.
+
 config LTQ_DATAPATH_SKB
-        bool "Datapath Skb Hack"
-        default n
-        depends on  LTQ_DATAPATH
-        ---help---
-          For Ethernet OAM and MPE FW purpose testing purpose, 
-	  It needs to hack SKB          
+	bool "Datapath Skb Hack"
+	default n
+	depends on  LTQ_DATAPATH
+	---help---
+	  For Ethernet OAM and MPE FW testing purposes,
+	  the SKB structure needs to be hacked.
+
 config LTQ_DATAPATH_MPE_FASTHOOK_TEST
 	bool "MPE Fast Hook Test"
 	default n
@@ -93,21 +107,30 @@ config LTQ_DATAPATH_MPE_FASTHOOK_TEST
 	  in order to support MPE FAST HOOK. The reason is that some network driver is
 	  pre-build out of this build system.
 	  The testing code by default is not checked in.
+
 config LTQ_DATAPATH_ETH_OAM
-        bool "ETH OAM SUPPORT"
-        default n
-        depends on  LTQ_DATAPATH_SKB
-        ---help---
-          Datapath Ethernet OAM support. Once it is enabled, it will add some fields in skb structure
-          in order to support MPE FAST HOOK. The reason is that some network driver is
-          pre-build out of this build system.
-          The testing code by default is not checked in.
+	bool "ETH OAM SUPPORT"
+	default n
+	depends on  LTQ_DATAPATH_SKB
+	---help---
+	  Datapath Ethernet OAM support. Once it is enabled, it will add some fields to the skb structure
+	  in order to support MPE FAST HOOK. The reason is that some network drivers are
+	  pre-built outside of this build system.
+	  The testing code is not checked in by default.
+
 config LTQ_DATAPATH_SWITCHDEV
-        bool "Switchdev Support"
-        default n
-        depends on  LTQ_DATAPATH && NET_SWITCHDEV
-        ---help---
-          Switchdev support for different switch in datapath
+	bool "Switchdev Support"
+	default n
+	depends on  LTQ_DATAPATH && NET_SWITCHDEV
+	---help---
+	  Switchdev support for different switches in datapath
+
+config LTQ_DATAPATH_DDR_SIMULATE_GSWIP31
+	bool "Force FALCON-MX SOC"
+	default n
+	depends on LTQ_DATAPATH
+	---help---
+	  Test the FALCON-MX HAL on GRX350 boards.
+
 source "drivers/net/ethernet/lantiq/datapath/gswip31/Kconfig"
 source "drivers/net/ethernet/lantiq/datapath/gswip30/Kconfig"
 endif
diff --git a/drivers/net/ethernet/lantiq/datapath/Makefile b/drivers/net/ethernet/lantiq/datapath/Makefile
index 452bbc36c..aa4b3716c 100644
--- a/drivers/net/ethernet/lantiq/datapath/Makefile
+++ b/drivers/net/ethernet/lantiq/datapath/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_LTQ_DATAPATH) = datapath_api.o datapath_proc_api.o datapath_proc.o  datapath_misc.o datapath_notifier.o datapath_logical_dev.o datapath_instance.o datapath_platform_dev.o datapath_soc.o datapath_qos.o
+obj-$(CONFIG_LTQ_DATAPATH) = datapath_api.o datapath_proc_api.o datapath_proc.o  datapath_misc.o datapath_notifier.o datapath_logical_dev.o datapath_instance.o datapath_platform_dev.o datapath_soc.o datapath_qos.o datapath_proc_qos.o
 ifneq ($(CONFIG_LTQ_DATAPATH_LOOPETH),)
 obj-$(CONFIG_LTQ_DATAPATH) += datapath_loopeth_dev.o
 endif
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath.h b/drivers/net/ethernet/lantiq/datapath/datapath.h
index 6ce2c277a..062b6edb4 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath.h
+++ b/drivers/net/ethernet/lantiq/datapath/datapath.h
@@ -17,8 +17,10 @@
 #include <linux/platform_device.h>
 #include <net/lantiq_cbm_api.h>
 
-#define PP_OLD_QOS_API
-#ifdef PP_OLD_QOS_API
+//#define CONFIG_LTQ_DATAPATH_DUMMY_QOS
+//#define DUMMY_PPV4_QOS_API_OLD
+
+#ifdef DUMMY_PPV4_QOS_API_OLD
 /*TODO:currently need to include both header file */
 #include <net/pp_qos_drv_slim.h>
 #include <net/pp_qos_drv.h>
@@ -269,6 +271,9 @@ enum DP_DBG_FLAG {
 	DP_DBG_FLAG_NOTIFY = BIT(16),
 	DP_DBG_FLAG_LOGIC = BIT(17),
 	DP_DBG_FLAG_GSWIP_API = BIT(18),
+	DP_DBG_FLAG_QOS = BIT(19),
+	DP_DBG_FLAG_QOS_DETAIL = BIT(20),
+	DP_DBG_FLAG_LOOKUP = BIT(21),
 
 	/*Note, once add a new entry here int the enum,
 	 *need to add new item in below macro DP_F_FLAG_LIST
@@ -276,29 +281,6 @@ enum DP_DBG_FLAG {
 	DP_DBG_FLAG_MAX = BIT(31)
 };
 
-enum {
-	NODE_LINK_ADD = 0,
-	NODE_LINK_GET,
-	NODE_LINK_EN_GET,
-	NODE_LINK_EN_SET,
-	NODE_UNLINK,
-	LINK_ADD,
-	LINK_GET,
-	LINK_PRIO_SET,
-	LINK_PRIO_GET,
-	QUEUE_CFG_SET,
-	QUEUE_CFG_GET,
-	SHAPER_SET,
-	SHAPER_GET,
-	NODE_ALLOC,
-	NODE_FREE,
-	DEQ_PORT_RES_GET,
-	COUNTER_MODE_SET,
-	COUNTER_MODE_GET,
-	QUEUE_MAP_GET,
-	QUEUE_MAP_SET
-};
-
 /*Note: per bit one variable */
 #define DP_DBG_FLAG_LIST {\
 	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_DBG, "dbg"), \
@@ -321,9 +303,40 @@ enum {
 	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_SWDEV, "swdev"), \
 	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_NOTIFY, "notify"), \
 	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_LOGIC, "logic"), \
-	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_GSWIP_API, "gswip") \
+	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_GSWIP_API, "gswip"), \
+	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_QOS, "qos"), \
+	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_QOS_DETAIL, "qos2"), \
+	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_LOOKUP, "lookup"), \
+	\
+	/* must be last one */\
+	DP_DBG_ENUM_OR_STRING(DP_DBG_FLAG_MAX, "")\
 }
 
+enum {
+	NODE_LINK_ADD = 0,
+	NODE_LINK_GET,
+	NODE_LINK_EN_GET,
+	NODE_LINK_EN_SET,
+	NODE_UNLINK,
+	LINK_ADD,
+	LINK_GET,
+	LINK_PRIO_SET,
+	LINK_PRIO_GET,
+	QUEUE_CFG_SET,
+	QUEUE_CFG_GET,
+	SHAPER_SET,
+	SHAPER_GET,
+	NODE_ALLOC,
+	NODE_FREE,
+	DEQ_PORT_RES_GET,
+	COUNTER_MODE_SET,
+	COUNTER_MODE_GET,
+	QUEUE_MAP_GET,
+	QUEUE_MAP_SET
+};
+
 struct dev_mib {
 	atomic_t rx_fn_rxif_pkt; /*! received packet counter */
 	atomic_t rx_fn_txif_pkt; /*! transmitted packet counter */
@@ -359,11 +372,13 @@ struct dp_subif_info {
 #ifdef CONFIG_NET_SWITCHDEV
 	struct switchdev_ops *old_swdev_ops;
 	struct switchdev_ops new_swdev_ops;
+	void *swdev_priv; /* to store ext vlan info */
 #endif
 	u16 qid;    /*! physical queue id */
 	u16 q_node; /*! logical queue node Id if applicable */
 	u16 qos_deq_port;
 	u16 cqm_deq_port;
+	u16 cqm_port_idx; /* like tcont id */
 };
 
 struct vlan_info {
@@ -440,12 +455,82 @@ struct subif_platform_data {
 	int act; /*Set by HAL subif_platform_set and used by DP lib */
 };
 
+#if 0
 struct vlan_prop {
 	u8 num;
 	u16 out_proto, out_vid;
 	u16 in_proto, in_vid;
 	struct net_device *base;
 };
+#endif
+
+#if 1
+struct vlan_info1 {
+	/* Changed this TPID field to a GSWIP API enum type.
+	 * We are not flexible for arbitrary TPIDs; only the following are supported:
+	 * 1. ignore (don't care)
+	 * 2. 0x8100
+	 * 3. the value configured in the VTE Type register
+	 */
+	GSW_ExtendedVlanFilterTpid_t tpid;  /* TPID like 0x8100, 0x8800 */
+	u16 vid;  /* VLAN ID */
+	/* note: user priority/CFI are both don't-care */
+	/* DSCP to priority value mapping is possible */
+};
+
+struct vlan1 {
+	int bp;  /*assigned bp for this single VLAN dev */
+	struct vlan_info1 outer_vlan; /*out vlan info */
+	/* Add Ethernet type with GSWIP API enum type.
+	 * Following types are supported:
+	 * 1. ignore	(don't care)
+	 * 2. IPoE  	(0x0800)
+	 * 3. PPPoE 	(0x8863 or 0x8864)
+	 * 4. ARP   	(0x0806)
+	 * 5. IPv6 IPoE	(0x86DD)
+	 */
+	GSW_ExtendedVlanFilterEthertype_t ether_type;
+};
+
+struct vlan2 {
+	int bp;  /*assigned bp for this double VLAN dev */
+	struct vlan_info1 outer_vlan;  /*out vlan info */
+	struct vlan_info1 inner_vlan;   /*in vlan info */
+	/* Add Ethernet type with GSWIP API enum type.
+	 * Following types are supported:
+	 * 1. ignore	(don't care)
+	 * 2. IPoE  	(0x0800)
+	 * 3. PPPoE 	(0x8863 or 0x8864)
+	 * 4. ARP   	(0x0806)
+	 * 5. IPv6 IPoE	(0x86DD)
+	 */
+	GSW_ExtendedVlanFilterEthertype_t ether_type;
+};
+#endif
+
+struct ext_vlan_info {
+	int subif_grp, logic_port; /* base subif group and logical port.
+				    * In DP it is subif
+				    */
+	int bp; /*default bp for this ctp */
+	int n_vlan1, n_vlan2; /*size of vlan1/2_list*/
+	int n_vlan1_drop, n_vlan2_drop; /*size of vlan1/2_drop_list */
+	struct vlan1 *vlan1_list; /* allowed single-vlan dev info list (auto);
+				   * bp is for egress VLAN setting
+				   */
+	struct vlan2 *vlan2_list; /* allowed double-vlan dev info list (auto);
+				   * bp is for egress VLAN setting
+				   */
+	struct vlan1 *vlan1_drop_list; /* drop single-vlan list (manual);
+					* bp not used
+					*/
+	struct vlan2 *vlan2_drop_list; /* drop double-vlan list (manual);
+					* bp not used
+					*/
+	/* Need to add other input/output information for deletion? */
+	/* private data stored by function set_gswip_ext_vlan */
+	void *priv;
+};
 
 /*port 0 is reserved*/
 extern int dp_inst_num;
@@ -495,6 +580,10 @@ void proc_mib_timer_read(struct seq_file *s);
 int mpe_fh_netfiler_install(void);
 int dp_coc_cpufreq_exit(void);
 int dp_coc_cpufreq_init(void);
+int qos_dump_start(void);
+int qos_dump(struct seq_file *s, int pos);
+ssize_t proc_qos_write(struct file *file, const char *buf,
+		       size_t count, loff_t *ppos);
 int update_coc_up_sub_module(enum ltq_cpufreq_state new_state,
 			     enum ltq_cpufreq_state old_state, uint32_t flag);
 void proc_coc_read(struct seq_file *s);
@@ -574,6 +663,8 @@ int dp_request_inst(struct dp_inst_info *info, u32 flag);
 int register_dp_cap(u32 flag);
 int print_symbol_name(unsigned long addr);
 typedef GSW_return_t(*dp_gsw_cb)(void *, void *);
+void falcon_test(void); /* defined in PP QoS driver */
 
 #endif /*DATAPATH_H */
 
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_api.c b/drivers/net/ethernet/lantiq/datapath/datapath_api.c
index 0ea926f34..c044db267 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_api.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_api.c
@@ -49,7 +49,7 @@ struct dma_rx_desc_1 dma_tx_desc_mask1;
 u32 dp_drop_all_tcp_err;
 u32 dp_pkt_size_check;
 
-u32 dp_dbg_flag;
+u32 dp_dbg_flag = DP_DBG_FLAG_QOS;
 EXPORT_SYMBOL(dp_dbg_flag);
 
 #ifdef CONFIG_LTQ_DATAPATH_MPE_FASTHOOK_TEST
@@ -90,6 +90,7 @@ char *dp_port_status_str[] = {
 	"Invalid"
 };
 
+static int try_walkaround;
 static int dp_init_ok;
 static DEFINE_SPINLOCK(dp_lock); /*datapath spinlock*/
 unsigned int dp_dbg_err = 1; /*print error */
@@ -398,8 +399,9 @@ static int32_t dp_alloc_port_private(int inst,
 	dp_port_info[inst][port_id].port_id = cbm_data.dp_port;
 	dp_port_info[inst][port_id].deq_port_base = cbm_data.deq_port;
 	dp_port_info[inst][port_id].deq_port_num = cbm_data.deq_port_num;
-	PR_ERR("cbm alloc dp_port:%d deq:%d deq_num:%d\n", cbm_data.dp_port,
-	       cbm_data.deq_port, cbm_data.deq_port_num);
+	DP_DEBUG(DP_DBG_FLAG_DBG,
+		 "cbm alloc dp_port:%d deq:%d deq_num:%d\n",
+		 cbm_data.dp_port, cbm_data.deq_port, cbm_data.deq_port_num);
 	if (cbm_data.flags & CBM_PORT_DMA_CHAN_SET)
 		dp_port_info[inst][port_id].dma_chan = cbm_data.dma_chan;
 	if (cbm_data.flags & CBM_PORT_PKT_CRDT_SET)
@@ -520,7 +522,8 @@ int32_t dp_register_subif_private(int inst, struct module *owner,
 				PR_ERR("subif_platform_set fail\n");
 				goto EXIT;
 			} else {
-				PR_ERR("subif_platform_set succeed\n");
+				DP_DEBUG(DP_DBG_FLAG_DBG,
+					 "subif_platform_set succeed\n");
 			}
 		} else {
 			PR_ERR("port info status fail for 0\n");
@@ -686,7 +689,8 @@ int32_t dp_deregister_subif_private(int inst, struct module *owner,
 		cbm_data.deq_port = port_info->deq_port_base;
 		if (cbm_dp_enable(owner, port_id, &cbm_data,
 				  CBM_PORT_F_DISABLE, port_info->alloc_flags)) {
-			DP_DEBUG(DP_DBG_FLAG_DBG, "cbm_dp_disable:port %d\n",
+			DP_DEBUG(DP_DBG_FLAG_DBG,
+				 "cbm_dp_disable:port %d\n",
 				 port_id);
 
 			return res;
@@ -724,9 +728,12 @@ int32_t dp_alloc_port_ext(int inst, struct module *owner,
 	int res;
 
 	if (unlikely(!dp_init_ok)) {
-		DP_LIB_LOCK(&dp_lock);
-		dp_probe(NULL); /*workaround to re-init */
-		DP_LIB_UNLOCK(&dp_lock);
+		spin_lock(&dp_lock);
+		if (!try_walkaround) {
+			try_walkaround = 1;
+			dp_probe(NULL); /*workaround to re-init */
+		}
+		spin_unlock(&dp_lock);
 		if (!dp_init_ok) {
 			PR_ERR("dp_alloc_port fail: datapath can't init\n");
 			return DP_FAILURE;
@@ -933,10 +940,10 @@ int32_t dp_register_subif_ext(int inst, struct module *owner,
 		return DP_FAILURE;
 	}
 
-	DP_LIB_LOCK(&dp_lock);
+	spin_lock(&dp_lock);
 	if (port_info->owner != owner) {
 		DP_DEBUG(DP_DBG_FLAG_DBG,
-			 "Unregister subif failed:Not matching:%p(%s)->%p(%s)\n",
+			 "Unregister subif fail:Not matching:%p(%s)->%p(%s)\n",
 			 owner, owner->name, port_info->owner,
 			 port_info->owner->name);
 		DP_LIB_UNLOCK(&dp_lock);
@@ -949,7 +956,7 @@ int32_t dp_register_subif_ext(int inst, struct module *owner,
 	else /*register */
 		res = dp_register_subif_private(inst, owner, dev, subif_name,
 						subif_id, data, flags);
-	DP_LIB_UNLOCK(&dp_lock);
+	spin_unlock(&dp_lock);
 	return res;
 }
 EXPORT_SYMBOL(dp_register_subif_ext);
@@ -2315,11 +2322,24 @@ int dp_basic_proc(void)
 	if (dp_init_ok) /*alredy init */
 		return 0;
 	register_notifier(0);
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS_VIA_FALCON_TEST
+	PR_INFO("\n\n--Falcon_test to simulate SLIM QOS drv---\n\n\n");
+	falcon_test();  /* Must be called before register_dp_cap
+			 * since it needs to do CPU path cfg
+			 */
+#endif /*CONFIG_LTQ_DATAPATH_DUMMY_QOS_VIA_FALCON_TEST*/
 	register_dp_cap(0);
 	if (request_dp(0)) /*register 1st dp instance */ {
 		PR_ERR("register_dp instance fail\n");
 		return -1;
 	}
+	PR_ERR("preempt_count=%x\n", preempt_count());
+	if (preempt_count() & HARDIRQ_MASK)
+		PR_ERR("HARDIRQ_MASK\n");
+	if (preempt_count() & SOFTIRQ_MASK)
+		PR_ERR("SOFTIRQ_MASK\n");
+	if (preempt_count() & NMI_MASK)
+		PR_ERR("NMI_MASK\n");
 	dp_init_ok = 1;
 	PR_INFO("datapath init done\n");
 	return res;
@@ -2349,3 +2369,13 @@ int dp_basic_proc(void)
 }
 
 MODULE_LICENSE("GPL");
+
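+/* Boot-time override of the datapath debug flags, e.g. dp_dbg=0x80000
+ * would enable DP_DBG_FLAG_QOS (BIT(19)) -- assuming dp_atoi accepts
+ * hex values.
+ */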
+static int __init dp_dbg_lvl_set(char *str)
+{
+	PR_INFO("\n\ndp_dbg=%s\n\n", str);
+	dp_dbg_flag = dp_atoi(str);
+
+	return 0;
+}
+early_param("dp_dbg", dp_dbg_lvl_set);
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_instance.c b/drivers/net/ethernet/lantiq/datapath/datapath_instance.c
index 02e4ca5d1..5b3aa6cb4 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_instance.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_instance.c
@@ -122,6 +122,7 @@ int dp_request_inst(struct dp_inst_info *info, u32 flag)
 	dp_port_prop[i].ops[1] = info->ops[1];
 	dp_port_prop[i].info = hw_cap_list[k].info;
 	dp_port_prop[i].cbm_inst = info->cbm_inst;
+	dp_port_prop[i].qos_inst = info->qos_inst;
 	dp_port_prop[i].valid = 1;
 	if (dp_port_prop[i].info.dp_platform_set(i, DP_PLATFORM_INIT) < 0) {
 		dp_port_prop[i].valid = 0;
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_logical_dev.c b/drivers/net/ethernet/lantiq/datapath/datapath_logical_dev.c
index e48f9544f..259d21b9a 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_logical_dev.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_logical_dev.c
@@ -198,7 +198,7 @@ int del_logic_dev(int inst, struct list_head *head, struct net_device *dev,
 	dp_port_prop[inst].info.subif_platform_set_unexplicit(inst,
 		logic_dev->ep,
 		logic_dev, flags);
-	dp_inst_del_dev(dev, NULL, inst, logic_dev->bp, logic_dev->ctp, 0);
+	dp_inst_del_dev(dev, NULL, inst, logic_dev->ep, logic_dev->ctp, 0);
 	list_del(&logic_dev->list);
 	kfree(logic_dev);
 
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_loopeth_dev.c b/drivers/net/ethernet/lantiq/datapath/datapath_loopeth_dev.c
index 2972b1123..70e32aa71 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_loopeth_dev.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_loopeth_dev.c
@@ -48,6 +48,13 @@
  *              Definition
  * ####################################
  */
+#define PROC_NAME	"loop"
+#define PROC_DBG	"dbg"
+#define PROC_DEV	"dev"
+#define PROC_MIB	"mib"
+#define PROC_DP		"directpath"
+#define PROC_CPUTX	"cputx"
+
 #define ENABLE_DEBUG                            1
 
 #define ENABLE_ASSERT                           1
@@ -75,7 +82,7 @@
 #define dbg(format, arg...) do { if ((g_dbg_enable &\
 	DBG_ENABLE_MASK_DEBUG_PRINT)) \
 	PR_INFO(KERN_WARNING ":%d:%s: " format "\n",\
-	        __LINE__, __func__, ##arg); } \
+	__LINE__, __func__, ##arg); } \
 	while (0)
 #else
 #if !defined(dbg)
@@ -912,7 +919,7 @@ static int proc_write_dbg(struct file *file, const char *buf, size_t count,
 	}
 
 	return count;
- help:
+help:
 	PR_INFO("echo <enable/disable> [");
 
 	for (i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++) {
@@ -1133,7 +1140,7 @@ static int proc_write_dev(struct file *file, const char *buf, size_t count,
 
 	return count;
 
- help:
+help:
 	PR_INFO("echo add [index] %s/dev\n", PROC_DIR);
 	PR_INFO("   example: echo add 1 > %s/dev\n", PROC_DIR);
 	PR_INFO("   example: echo add    > %s/dev\n", PROC_DIR);
@@ -1238,7 +1245,7 @@ static int proc_write_mib(struct file *file, const char *buf, size_t count,
 	}
 
 	return count;
- help:
+help:
 	PR_INFO("echo <clear> [all/device name] > %s/mib\n", PROC_DIR);
 	return count;
 }
@@ -1352,10 +1359,18 @@ int get_dev_type_index(char *flag_name)
 	return -1;
 }
 
+#define OPT_PORT  "[-e <dp_port>]"
+#define OPT_F "[-f explicit]"
+#define OPT_Q "[-q <num_resv_queue>]"
+#define OPT_SCH "[-s <num_resv_schedule>]"
+#define OPT_NUM  "[-p <num_resv_port>]"
+#define OPT_NO "[ -b <start_port_no>]"
+#define OPT_ALLOC_PORT (OPT_PORT OPT_F OPT_Q OPT_SCH OPT_NUM OPT_NO)
+
 static int proc_write_directpath(struct file *file, const char *buf,
 				 size_t count, loff_t *ppos)
 {
-	char str[300];
+	char str[150];
 	int len, rlen;
 	char *ifname = NULL;
 	dp_cb_t cb = {
@@ -1397,9 +1412,17 @@ static int proc_write_directpath(struct file *file, const char *buf,
 	if (((dp_strcmpi(param_list[0], "register") == 0) ||
 	     (dp_strcmpi(param_list[0], "reg") == 0)) &&
 	    (dp_strcmpi(param_list[1], "dev") == 0)) {
-		if (param_list_num < 5)
-			goto help;
-
+		PR_INFO("Not finished yet, need to continue here\n");
+		return count;
+#if 0
+		for (;;) {
+			c = dp_getopt(param_list, num, &start, &opt_arg,
+				      optstring);
+			if (c > 0) {
+				PR_INFO("c=%c opt_arg=%s next_offset=%d\n",
+					c, opt_arg, start);
+			} else if (c == 0) {
+				break;
+			} else {
+				PR_INFO("wrong format %s\n", param_list[start]);
+				break;
+			}
+		}
 		PR_INFO("Try to register dev %s to datapath\n", ifname);
 		dev_port_str = param_list[3];
 		flag_str = param_list[4];
@@ -1409,7 +1432,7 @@ static int proc_write_directpath(struct file *file, const char *buf,
 		PR_INFO("flag_str=%s\n", flag_str ? flag_str : "NULL");
 		PR_INFO("dp_port_str=%s\n",
 			dp_port_str ? dp_port_str : "NULL");
-
+#endif
 		dev_port = dp_atoi(dev_port_str);
 		flag_index = get_dev_type_index(flag_str);
 		dp_port_id = dp_atoi(dp_port_str);
@@ -1466,7 +1489,7 @@ static int proc_write_directpath(struct file *file, const char *buf,
 			if (param_list[3])
 				priv->dp_subif.subif = dp_atoi(param_list[3]);
 			else
-				priv->dp_subif.subif = -1;	/*dynamic */
+				priv->dp_subif.subif = -1;/*dynamic */
 
 			if (dp_register_subif
 			    (priv->owner, g_loop_eth_dev[i],
@@ -1556,15 +1579,15 @@ static int proc_write_directpath(struct file *file, const char *buf,
 		goto help;
 	}
 
- exit:
+exit:
 	return count;
- help:
+help:
 	/*   param_list[0]    [1]    [2]         [3]      [4]     [5]   */
-	PR_INFO("echo register dev <name> <dev_port> <type> [dp_port]>%s\n",
-		PROC_DIRECTPATH);
+	PR_INFO("echo register dev <name> <dev_port> <type>  %s > %s\n",
+		OPT_ALLOC_PORT, PROC_DIRECTPATH);
 	PR_INFO("echo unregister dev  <dev_name>  > %s\n",
 		PROC_DIRECTPATH);
-	PR_INFO("echo register subif <dev> [parent_dev_name]> %s\n",
+	PR_INFO("echo register subif <dev> -p [parent_dev_name] -i <idx> %s\n",
 		PROC_DIRECTPATH);
 	PR_INFO("Note: parent_dev_name is for register different subif\n");
 	PR_INFO("Device Type:\n");
@@ -1589,16 +1612,6 @@ static int proc_write_directpath(struct file *file, const char *buf,
 	return count;
 }
 
-/*###################################
- *           Global Function
- * ####################################
- */
-#define PROC_NAME		"loop"
-#define PROC_DBG		"dbg"
-#define PROC_READ_DEV		"dev"
-#define PROC_READ_MIB		"mib"
-#define PROC_READ_DP		"directpath"
-#define PROC_READ_CPUTX		"cputx"
 static struct dp_proc_entry proc_entries[] = {
 	/*name single_callback_t multi_callback_t multi_callback_start
 	 *write_callback_t
@@ -1606,9 +1619,9 @@ static struct dp_proc_entry proc_entries[] = {
 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
 	{PROC_DBG, proc_read_dbg, NULL, NULL, proc_write_dbg},
 #endif
-	{PROC_READ_DEV, NULL, proc_read_dev, NULL, proc_write_dev},
-	{PROC_READ_MIB, NULL, proc_read_mib, NULL, proc_write_mib},
-	{PROC_READ_DP, NULL, proc_read_dp, NULL, proc_write_directpath},
+	{PROC_DEV, NULL, proc_read_dev, NULL, proc_write_dev},
+	{PROC_MIB, NULL, proc_read_mib, NULL, proc_write_mib},
+	{PROC_DP, NULL, proc_read_dp, NULL, proc_write_directpath},
 
 	/*last one for place holder */
 	{NULL, NULL, NULL, NULL, NULL}
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_notifier.c b/drivers/net/ethernet/lantiq/datapath/datapath_notifier.c
index 094af53dc..761d7aa12 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_notifier.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_notifier.c
@@ -81,14 +81,14 @@ int dp_event(struct notifier_block *this, unsigned long event, void *ptr)
 			 "MAC:", addr[0], addr[1], addr[2],
 			 addr[3], addr[4], addr[5]);
 		prop = &dp_port_prop[inst];
-		prop->info.dp_mac_reset(port->subif_info[vap].bp,
+		prop->info.dp_mac_reset(0,
 					dp_dev->fid,
 					dp_dev->inst,
 					addr);
 	break;
 	case NETDEV_CHANGEUPPER:
 		port = &dp_port_info[inst][dp_dev->ep];
-		dp_port_prop[inst].info.dp_mac_reset(port->subif_info[vap].bp,
+		dp_port_prop[inst].info.dp_mac_reset(0,
 						     dp_dev->fid,
 						     dp_dev->inst,
 						     addr);
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_proc.c b/drivers/net/ethernet/lantiq/datapath/datapath_proc.c
index ece8cb1d8..acf5944eb 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_proc.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_proc.c
@@ -35,6 +35,8 @@
 #define PROC_INST_MOD "inst_mod"
 #define PROC_INST_HAL "inst_hal"
 #define PROC_INST "inst"
+#define PROC_TX_PKT "tx"
+#define PROC_QOS  "qos"
 
 static int tmp_inst;
 static ssize_t proc_port_write(struct file *file, const char *buf,
@@ -84,6 +86,13 @@ int proc_port_dump(struct seq_file *s, int pos)
 					   *port zero's member
 					   */
 				print_ctp_bp(s, tmp_inst, port, 0, 0);
+			i = 0;
+			seq_printf(s, "          : qid/node:    %d/%d\n",
+				   port->subif_info[i].qid,
+				   port->subif_info[i].q_node);
+			seq_printf(s, "          : port/node:    %d/%d\n",
+				   port->subif_info[i].cqm_deq_port,
+				   port->subif_info[i].qos_deq_port);
 
 		} else
 			seq_printf(s,
@@ -92,6 +101,8 @@ int proc_port_dump(struct seq_file *s, int pos)
 				   STATS_GET(port->rx_err_drop),
 				   STATS_GET(port->tx_err_drop));
 
 		goto EXIT;
 	}
 
@@ -126,18 +137,18 @@ int proc_port_dump(struct seq_file *s, int pos)
 		   (u32)port->cb.stop_fn);
 	seq_printf(s, "    cb->get_subifid_fn:0x%0x\n",
 		   (u32)port->cb.get_subifid_fn);
-	seq_printf(s, "    num_subif:         %02d\n", port->num_subif);
+	seq_printf(s, "    num_subif:         %d\n", port->num_subif);
 	seq_printf(s, "    vap_offset/mask:   %d/0x%x\n", port->vap_offset,
 		   port->vap_mask);
-	seq_printf(s, "    flag_other:         %02d\n", port->flag_other);
-	seq_printf(s, "    deq_port_base:    %02d\n", port->deq_port_base);
-	seq_printf(s, "    deq_port_num:         %02d\n", port->deq_port_num);
-	seq_printf(s, "    dma_chan:         %02d\n", port->dma_chan);
-	seq_printf(s, "    tx_pkt_credit:         %02d\n", port->tx_pkt_credit);
-	seq_printf(s, "    tx_b_credit:         %02d\n", port->tx_b_credit);
-	seq_printf(s, "    tx_ring_addr:         %02d\n", port->tx_ring_addr);
-	seq_printf(s, "    tx_ring_size:         %02d\n", port->tx_ring_size);
-	seq_printf(s, "    tx_ring_offset:         %02d\n",
+	seq_printf(s, "    flag_other:        0x%x\n", port->flag_other);
+	seq_printf(s, "    deq_port_base:     %d\n", port->deq_port_base);
+	seq_printf(s, "    deq_port_num:      %d\n", port->deq_port_num);
+	seq_printf(s, "    dma_chan:          0x%x\n", port->dma_chan);
+	seq_printf(s, "    tx_pkt_credit:     %d\n", port->tx_pkt_credit);
+	seq_printf(s, "    tx_b_credit:       %d\n", port->tx_b_credit);
+	seq_printf(s, "    tx_ring_addr:      0x%x\n", port->tx_ring_addr);
+	seq_printf(s, "    tx_ring_size:      %d\n", port->tx_ring_size);
+	seq_printf(s, "    tx_ring_offset:    %d(to next dequeue port)\n",
 		   port->tx_ring_offset);
 	for (i = 0; i < port->ctp_max; i++) {
 		if (!port->subif_info[i].flags)
@@ -172,10 +183,12 @@ int proc_port_dump(struct seq_file *s, int pos)
 			   STATS_GET(port->subif_info[i].mib.tx_hdr_room_pkt));
 		if (print_ctp_bp)
 			print_ctp_bp(s, tmp_inst, port, i, 0);
-		seq_printf(s, "          : qid:         %02d\n",
-			   port->subif_info[i].qid);
-		seq_printf(s, "          : q_node:         %02d\n",
+		seq_printf(s, "          : qid/node:    %d/%d\n",
+			   port->subif_info[i].qid,
 			   port->subif_info[i].q_node);
+		seq_printf(s, "          : port/node:    %d/%d\n",
+			   port->subif_info[i].cqm_deq_port,
+			   port->subif_info[i].qos_deq_port);
 	}
 	seq_printf(s, "    rx_err_drop=0x%08x  tx_err_drop=0x%08x\n",
 		   STATS_GET(port->rx_err_drop),
@@ -778,12 +791,17 @@ ssize_t proc_logical_dev_write(struct file *file, const char *buf,
 {
 	u16 len;
 	char str[64];
-	char *param_list[20 * 4];
+	static char *param_list[20 * 4];
 	unsigned int num;
 	struct net_device *dev = NULL, *base = NULL;
 	dp_subif_t *subif = NULL;
+
 	struct pmac_port_info *port_info;
+	static struct vlan_prop vlan_prop;
 
+	memset(param_list, 0, sizeof(param_list));
+	memset(&vlan_prop, 0, sizeof(vlan_prop));
 	len = (sizeof(str) > count) ? count : sizeof(str) - 1;
 	len -= copy_from_user(str, buf, len);
 	str[len] = 0;
@@ -796,8 +814,6 @@ ssize_t proc_logical_dev_write(struct file *file, const char *buf,
 		goto EXIT1;
 	}
 	if (dp_strcmpi(param_list[0], "check") == 0) {
-		struct vlan_prop vlan_prop = {0};
-
 		if (num != 2)
 			goto HELP;
 		dev = dev_get_by_name(&init_net, param_list[1]);
@@ -1167,6 +1183,250 @@ void print_device_tree_node(struct device_node *node, int depth)
 	kfree(indent);
 }
 
+static u8 ipv4_plain_udp[] = {
+	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, /*mac */
+	0x00, 0x10, 0x94, 0x00, 0x00, 0x02,
+	0x08, 0x00,	/*type */
+	0x45, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x11, /*ip hdr*/
+	0x3A, 0x56, 0xC0, 0x55, 0x01, 0x02, 0xC0, 0x00, 0x00, 0x01,
+	0x04, 0x00, 0x00, 0x00, 0x00, 0x2A, 0x7A, 0x41, 0x00, 0x00, /*udp hdr*/
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00
+};
+
+static u8 ipv4_plain_tcp[1514] = {
+	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, /*mac */
+	0x00, 0x10, 0x94, 0x00, 0x00, 0x02,
+	0x08, 0x00,	/*type */
+	0x45, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x06, /*ip hdr*/
+	0x3A, 0x61, 0xC0, 0x55, 0x01, 0x02, 0xC0, 0x00, 0x00, 0x01,
+	0x04, 0x00, 0x04, 0x00, 0x00, 0x01, 0xE2, 0x40, 0x00, 0x03, /*tcp hdr*/
+	0x94, 0x47, 0x50, 0x10, 0x10, 0x00, 0x9F, 0xD9, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*data */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00
+};
+
+/* flag: passed through to dp_xmit
+ * pool: 1 = CPU-allocated buffer, otherwise CQM/BM buffer
+ * discard: set discard bit in DMA descriptor so CQM HW drops the packet
+ * priority: for skb->priority (also written into the last dest-MAC byte)
+ * color: color field in DMA descriptor DW1
+ */
+static int dp_send_packet(u8 *pdata, int len, char *devname, u32 flag,
+			  u32 pool, u32 discard, int priority, int color)
+{
+	struct sk_buff *skb;
+	dp_subif_t subif = { 0 };
+	#define PAD  32
+
+	if (pool == 1) {
+		len += PAD;
+		if (len < ETH_ZLEN)
+			len = ETH_ZLEN;
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (skb)
+			skb_reserve(skb, PAD);
+		PR_INFO_ONCE("Allocate CPU buffer\n");
+	} else {
+		skb = cbm_alloc_skb(len + 8, GFP_ATOMIC);
+		PR_INFO_ONCE("Allocate bm buffer\n");
+	}
+
+	if (unlikely(!skb)) {
+		PR_ERR("allocate cbm buffer fail\n");
+		return -1;
+	}
+
+	skb->DW0 = 0;
+	skb->DW1 = 0;
+	skb->DW2 = 0;
+	skb->DW3 = 0;
+	if (discard) {
+		((struct dma_tx_desc_3 *)&skb->DW3)->field.dic = 1;
+		PR_INFO_ONCE("discard bit set in DW3\n");
+	}
+	((struct dma_tx_desc_1 *)&skb->DW1)->field.color = color;
+	memcpy(skb->data, pdata, len);
+	skb->data[5] = (char )priority;
+	skb->len = 0;
+	skb_put(skb, len);
+	skb->dev = dev_get_by_name(&init_net, devname);
+	skb->priority = priority;
+
+	if (dp_get_netif_subifid(skb->dev, skb, NULL, skb->data, &subif, 0)) {
+		PR_ERR("dp_get_netif_subifid failed for %s\n",
+		       skb->dev->name);
+		dev_kfree_skb_any(skb);
+		return -1;
+	}
+
+	((struct dma_tx_desc_1 *)&skb->DW1)->field.ep = subif.port_id;
+	((struct dma_tx_desc_0 *)&skb->DW0)->field.dest_sub_if_id =
+		subif.subif;
+
+	if (dp_xmit(skb->dev, &subif, skb, skb->len, flag))
+		return -1;
+
+	return 0;
+}
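+
+/* Illustrative call (not part of the driver API): send one UDP test frame
+ * from a CPU-allocated buffer with priority 0 and color 0; "eth0_1" is just
+ * an example device name.
+ *
+ *   dp_send_packet(ipv4_plain_udp, sizeof(ipv4_plain_udp), "eth0_1",
+ *                  0, 1, 0, 0, 0);
+ */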
+
+#define OPT_TX_DEV "[-i <dev_name>]"
+#define OPT_TX_NUM "[-n <pkt_num>]"
+#define OPT_TX_PROT "[-t <types: udp/tcp/raw>]"
+#define OPT_TX_POOL "[-p <pool:cpu/bm>]"
+#define OPT_TX_F_DISC "[-d]"
+#define OPT_TX_PRIO "[-c <class/prio range>]"
+#define OPT_TX_COLOR "[-o <color>]"
+#define OPT_TX1 (OPT_TX_DEV OPT_TX_NUM OPT_TX_PROT)
+#define OPT_TX2 (OPT_TX_POOL OPT_TX_F_DISC OPT_TX_PRIO OPT_TX_COLOR)
+#define OPT_TX (OPT_TX1 OPT_TX2)
+static u8 l2_hdr[] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x11,
+	0x12, 0x34
+};
+ssize_t proc_tx_pkt(struct file *file, const char *buf,
+		    size_t count, loff_t *ppos)
+{
+	int len = 0, c;
+	char data[100];
+	char *param_list[10];
+	int num, pkt_num = 1, times, pool = 0, disc = 0;
+	char *p = ipv4_plain_udp;
+	int size = sizeof(ipv4_plain_udp);
+	short prio_range = 1, color = 0;
+	int opt_offset;
+	char* optstring = "i:n:t:p:dc:o:";
+	char *optarg = 0;
+	char *dev_name = "eth1";
+
+	len = (count >= sizeof(data)) ? (sizeof(data) - 1) : count;
+	DP_DEBUG(DP_DBG_FLAG_DBG, "len=%d\n", len);
+
+	if (len <= 0) {
+		PR_ERR("Wrong len value (%d)\n", len);
+		return count;
+	}
+	if (copy_from_user(data, buf, len)) {
+		PR_ERR("copy_from_user fail");
+		return count;
+	}
+	data[len - 1] = 0; /* Make string */
+	num = dp_split_buffer(data, param_list, ARRAY_SIZE(param_list));
+	if (num <= 1)
+		goto help;
+	pkt_num = dp_atoi(param_list[1]);
+	if (pkt_num <= 0)
+		pkt_num = 1;
+
+	opt_offset = 0;
+	while ((c = dp_getopt(param_list, num,
+			      &opt_offset, &optarg, optstring)) > 0) {
+		if (optarg)
+			DP_DEBUG(DP_DBG_FLAG_DBG, "opt_offset=%d optarg=%s\n",
+				 opt_offset, optarg);
+		switch (c) {
+		case 'i':
+			dev_name = optarg;
+			DP_DEBUG(DP_DBG_FLAG_DBG, "dev_name=%s\n", dev_name);
+			break;
+		case 'n':
+			pkt_num = dp_atoi(optarg);
+			PR_INFO("pkt_num=%d\n", pkt_num);
+			break;
+		case 't':
+			if (dp_strcmpi(optarg, "tcp") == 0) {
+				p = ipv4_plain_tcp;
+				size = sizeof(ipv4_plain_tcp);
+			} else if (dp_strcmpi(optarg, "udp") == 0) {
+				p = ipv4_plain_udp;
+				size = sizeof(ipv4_plain_udp);
+			} else  {
+				PR_INFO("Wrong procol selected\n");
+				return count;
+			}
+			break;
+		case 'p':
+			if (dp_strcmpi(optarg, "cpu") == 0) {
+				pool = 1;
+			} else if (dp_strcmpi(optarg, "bm") == 0) {
+				pool = 0;
+			} else  {
+				PR_INFO("Wrong procol selected\n");
+				return count;
+			}
+			break;
+		case 'd':
+			disc = 1;
+			break;
+		case 'c':
+			prio_range = dp_atoi(optarg);
+			if (prio_range <= 0)
+				prio_range = 1;
+			PR_INFO("prio_range=%d\n", prio_range);
+			break;
+		case 'o':
+			color = dp_atoi(optarg);
+			if (color < 0)
+				color = 0;
+			else if (color > 3)
+				color = 3;
+			PR_INFO("color=%d\n", color);
+			break;
+		default:
+			PR_INFO("Wrong command\n");
+			goto help;
+		}
+	}
+	if (c < 0) { /*c == 0 means reach end of list */
+		PR_INFO("Wrong command\n");
+		goto help;
+	}
+
+	if (!dev_name) {
+		PR_INFO("Must provide dev_name\n");
+		return count;
+	}
+
+	times = 0;
+	while (pkt_num--) {
+		if (dp_send_packet(p, size, dev_name, 0, pool, disc,
+				   times % prio_range, color))
+			break;
+		times++;
+	}
+	PR_INFO("sent %d packet already\n", times);
+	return count;
+help:
+	PR_INFO("tx packet: echo %s\n", OPT_TX1);
+	PR_INFO("                %s\n", OPT_TX2);
+	return count;
+}
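+
+/* Illustrative usage (the proc file name comes from the PROC_TX_PKT entry
+ * below and may differ on a given build): send 5 UDP frames on eth0_1 from
+ * CPU buffers, cycling priority 0..3 with color 1:
+ *
+ *   echo "-i eth0_1 -n 5 -t udp -p cpu -c 4 -o 1" > /proc/dp/tx_pkt
+ */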
+
 static struct dp_proc_entry dp_proc_entries[] = {
 	/*name single_callback_t multi_callback_t/_start write_callback_t */
 #if defined(CONFIG_LTQ_DATAPATH_DBG) && CONFIG_LTQ_DATAPATH_DBG
@@ -1180,6 +1440,8 @@ static struct dp_proc_entry dp_proc_entries[] = {
 	{PROC_INST_MOD, NULL, proc_inst_mod_dump, proc_inst_mod_start, NULL},
 	{PROC_INST_HAL, NULL, proc_inst_hal_dump, NULL, NULL},
 	{PROC_INST, NULL, proc_inst_dump, NULL, NULL},
+	{PROC_TX_PKT, NULL, NULL, NULL, proc_tx_pkt},
+	{PROC_QOS, NULL, qos_dump, qos_dump_start, proc_qos_write},
 
 	/*the last place holder */
 	{NULL, NULL, NULL, NULL, NULL}
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_proc_api.c b/drivers/net/ethernet/lantiq/datapath/datapath_proc_api.c
index e39a2a12e..c44bc908b 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_proc_api.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_proc_api.c
@@ -105,9 +105,11 @@ EXPORT_SYMBOL(dp_replace_ch);
 static unsigned int btoi(char *str)
 {
 	unsigned int sum = 0;
-	u32 len = 0, i = 0;
+	int len = 0, i = 0;
 
 	len = strlen(str);
+	if (len == 0)
+		return 0;
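+	/* btoi appears to parse a binary digit string, e.g. "0110" -> 6 */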
 	len = len - 1;
 	while (len >= 0) {
 		if (*(str + len) == '1')
@@ -254,6 +256,55 @@ void set_start_end_id(unsigned int new_start, unsigned int new_end,
 }
 EXPORT_SYMBOL(set_start_end_id);
 
+/* Minimal getopt-style parser for proc command tokens.
+ * cmd: token list (already split, e.g. by dp_split_buffer)
+ * cmd_size: number of tokens in cmd
+ * cmd_offset: parse cursor, updated on each successful match
+ * optarg: set to the option's value for "x:" options, otherwise NULL
+ * optstring: option spec, e.g. "i:n:d" (':' means the option takes a value)
+ * Returns the matched option character, 0 at end of list, -1 on error.
+ */
+int dp_getopt(char *cmd[], int cmd_size, int *cmd_offset,
+	      char **optarg, const char* optstring)
+{
+	char *p;
+	int offset;
+	int i;
+
+	if (!cmd || !cmd_offset || !optstring || !optarg)
+		return -1;
+	if (*cmd_offset >= cmd_size)
+		return 0;
+	offset = *cmd_offset;
+	while (1) {
+		p = cmd[offset];
+		if (p[0] != '-')
+			return -1; /* wrong format */
+		for (i = 0; optstring[i]; i++) {
+			if (optstring[i] != p[1])
+				continue;
+			/* match */
+			if (optstring[i + 1] == ':') { /* opt + value */
+				if (offset + 1 >= cmd_size)
+					return -1;
+				*optarg = cmd[offset + 1];
+				offset += 2;
+			} else { /* no value */
+				*optarg = NULL;
+				offset += 1;
+			}
+			*cmd_offset = offset;
+			return (int)optstring[i];
+		}
+		return -1;
+	}
+	return -1;
+}
+EXPORT_SYMBOL(dp_getopt);
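+
+/* Illustrative caller loop (mirrors proc_tx_pkt above); "i:d" means -i takes
+ * a value and -d does not:
+ *
+ *   int offset = 0, c;
+ *   char *arg;
+ *
+ *   while ((c = dp_getopt(tokens, n_tokens, &offset, &arg, "i:d")) > 0) {
+ *           switch (c) {
+ *           case 'i': ...use arg...; break;
+ *           case 'd': ...flag only, arg is NULL...; break;
+ *           }
+ *   }
+ *   c == 0: end of list reached; c < 0: malformed input.
+ */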
+
 static void *dp_seq_start(struct seq_file *s, loff_t *pos)
 {
 	struct dp_proc_file_entry *p = s->private;
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_proc_qos.c b/drivers/net/ethernet/lantiq/datapath/datapath_proc_qos.c
new file mode 100644
index 000000000..ad4c2d1cd
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_proc_qos.c
@@ -0,0 +1,842 @@
+#include <net/datapath_api.h>
+#include "datapath.h"
+
+#define PROC_MAX_BOX_LVL (DP_MAX_SCH_LVL + 1) /*Sched/Port both map to a box*/
+#define PROC_DP_MAX_CHILD_PER_SCH_PORT 8 /* max direct children per scheduler/port */
+#define PROC_MAX_Q_PER_PORT 16 /*max queues per port */
+#define PROC_DP_MAX_SCH_PER_PORT 4 /*max schedulers per port */
+
+struct location {
+	int x1, y1; /*!< start axis */
+	int x2, y2; /*!< end axis */
+};
+
+struct box_info;
+
+struct child_node {
+	int filled;
+	enum dp_node_type type;
+	union {
+		int qid;
+		struct box_info *box;
+	} box_qid;
+	struct location l;
+};
+
+struct box_info {
+	int filled;
+	int id;  /*!< physical id if available, otherwise -1 */
+	int node; /*!< node id if available, otherwise -1 */
+	int pir, cir, pbs, cbs;
+	int prn_leaf; /* PPV4 has no leaf notion; generated here for printing */
+
+	int box_x, box_y, box_height; /*!< axis (x,y) for box */
+	int size;
+	int n_q, n_sch; /*direct child queue/scheduler*/
+	struct box_info *p_box; /*parent box*/
+	struct child_node child[PROC_DP_MAX_CHILD_PER_SCH_PORT];
+	struct location l; /*!< location */
+};
+
+struct q_print_info {
+	int q_num;
+	int q_id[PROC_MAX_Q_PER_PORT]; /*!< physical queue id, else -1 */
+	int q_node_id[PROC_MAX_Q_PER_PORT]; /*!< queue node id, else -1 */
+	int q_prn_leaf[PROC_MAX_Q_PER_PORT]; /* generated leaf idx for printing */
+	int sch_lvl[PROC_MAX_Q_PER_PORT];
+
+	struct box_info *sch_box[PROC_MAX_Q_PER_PORT][PROC_MAX_BOX_LVL];
+	struct box_info port_box;
+	int box_num;
+	struct box_info box[PROC_DP_MAX_SCH_PER_PORT]; /*!<need kmalloc/kfree */
+};
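+
+/* Relationship sketch: each queue row points through up to PROC_MAX_BOX_LVL
+ * scheduler boxes to the single port box, e.g.
+ *
+ *   q[i] -> sch_box[i][0] -> sch_box[i][1] -> ... -> port_box
+ *
+ * sch_box[i][j] entries alias the shared box[] pool, so a scheduler feeding
+ * several queues is only drawn once.
+ */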
+
+static char *get_node_stat(int node_id, int type);
+struct box_info *find_box_via_nodeid(struct box_info *box,
+		 int box_num, int sch_node_id)
+{
+	int i;
+
+	for (i = 0; i < box_num; i++) {
+		if (box[i].node == sch_node_id)
+			return &box[i];
+	}
+	return NULL;
+}
+
+struct box_info *find_child_box(
+	struct child_node child_list[PROC_DP_MAX_CHILD_PER_SCH_PORT],
+	struct box_info *curr_box)
+{
+	int i = 0;
+
+	while (child_list[i].filled) {
+		if (child_list[i].type != DP_NODE_SCH) {
+			i++;
+			continue;
+		}
+		if (child_list[i].box_qid.box == curr_box)
+			return curr_box;
+		i++;
+	}
+	return NULL;
+}
+
+void set_child_per_box(struct q_print_info *q_info)
+{
+	int i, j, idx;
+	struct box_info *p_box, *c_box;
+
+	/* Get child number and info */
+	for (i = 0; i < q_info->q_num; i++) { /*queue */
+		if (!q_info->sch_box[i][0]) {/*queue to port case*/
+			p_box = &q_info->port_box;
+			idx = p_box->n_q + p_box->n_sch;
+
+			if (idx >= PROC_DP_MAX_CHILD_PER_SCH_PORT) {
+				PR_ERR("too many child: should <%d\n",
+					PROC_DP_MAX_CHILD_PER_SCH_PORT);
+				return;					
+			}
+			p_box->child[idx].filled = 1;
+			p_box->child[idx].box_qid.qid = q_info->q_id[i];
+			p_box->child[idx].type = DP_NODE_QUEUE;
+			q_info->q_prn_leaf[i] = idx;
+			p_box->n_q++;
+			continue;
+		}
+		/*queue to 1st scheduler */
+		p_box = q_info->sch_box[i][0];
+		idx = p_box->n_q + p_box->n_sch;
+
+		if (idx >= PROC_DP_MAX_CHILD_PER_SCH_PORT) {
+			PR_ERR("too many child: should <%d\n",
+				PROC_DP_MAX_CHILD_PER_SCH_PORT);
+			return;					
+		}
+		p_box->child[idx].filled = 1;
+		p_box->child[idx].box_qid.qid = q_info->q_id[i];
+		p_box->child[idx].type = DP_NODE_QUEUE;
+		q_info->q_prn_leaf[i] = idx;
+		p_box->n_q++;
+
+		/* scheduler to scheduler/port */
+		for (j = 0; j < q_info->sch_lvl[i]; j++) {
+			if (j < (q_info->sch_lvl[i]-1))
+				p_box = q_info->sch_box[i][j+1];
+			else
+				p_box = &q_info->port_box;
+			c_box = q_info->sch_box[i][j];
+			idx = p_box->n_q + p_box->n_sch;
+			c_box->p_box = p_box;
+			if (find_child_box(p_box->child,  c_box))
+				 	continue;
+			if (idx >= PROC_DP_MAX_CHILD_PER_SCH_PORT - 1) {
+				PR_ERR("too many child: should <%d\n",
+					PROC_DP_MAX_CHILD_PER_SCH_PORT);
+				return;					
+			}
+			p_box->child[idx].filled = 1;
+			p_box->child[idx].box_qid.box = c_box;
+			p_box->child[idx].type = DP_NODE_SCH;
+			c_box->prn_leaf = idx;
+			p_box->n_sch++;
+		}
+	}
+}
+#define PREFIX_SIZE_PER_BOX 1 /* for opening ----- */
+#define INFO_SIZE_PER_BOX   2 /* for box info, like node id and others */
+#define SUFFIX_SIZE_PER_BOX 1 /* for closing ----- */
+#define EXTRA_SIZE_PER_BOX (PREFIX_SIZE_PER_BOX + INFO_SIZE_PER_BOX + \
+			    SUFFIX_SIZE_PER_BOX)
+#define PADDING_BETWEEN_BOX_X 4 /*x */
+#define PADDING_BETWEEN_BOX_Y 1
+#define SIZE_PER_QUEUE      3
+#define BOX_WIDTH           20
+#define Q_WIDTH             18
+#define PORT_OTHER_INFO     20
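+
+/* Worked example with the values above: a box at box_x=0 occupies columns
+ * x1 = 18 + 0 * (20 + 4) = 18 to x2 = 38; its parent at box_x=1 starts at
+ * x1 = 42, leaving PADDING_BETWEEN_BOX_X = 4 columns for the '-' connector.
+ */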
+
+int set_location_size(struct box_info *box, int y)
+{
+	int i, y2 = 0, size = 0;
+
+	box->l.x1 = Q_WIDTH + box->box_x * (BOX_WIDTH + PADDING_BETWEEN_BOX_X);
+	box->l.x2 = box->l.x1 + BOX_WIDTH;	
+	box->l.y1 = y;
+	y2 = box->l.y1 + PREFIX_SIZE_PER_BOX + INFO_SIZE_PER_BOX;
+	for (i = 0;
+	     (i < (box->n_q + box->n_sch)) && box->child[i].filled;
+	     i++) {
+		if (box->child[i].type == DP_NODE_QUEUE) {
+			box->child[i].l.x2 = box->l.x1;
+			box->child[i].l.y2 = y2;
+			size += SIZE_PER_QUEUE;
+			y2 += SIZE_PER_QUEUE;
+		} else if (box->child[i].type == DP_NODE_SCH) {
+			set_location_size(box->child[i].box_qid.box, y2);
+			box->child[i].l.x2 = box->l.x1;
+			box->child[i].l.y2 = y2;
+			size += box->child[i].box_qid.box->size;
+			y2 += box->child[i].box_qid.box->size;
+		}
+	}
+	y2 += SUFFIX_SIZE_PER_BOX;
+	size += EXTRA_SIZE_PER_BOX;
+	box->l.y2 = y2;
+	box->size = size;
+	return 0;
+}
+
+int check_location(struct q_print_info *q_info)
+{
+	int i;
+
+	for (i = 0; i < q_info->box_num; i++) {		
+		if ((q_info->box[i].l.x2 - q_info->box[i].l.x1) != BOX_WIDTH) {
+			PR_ERR("sched[%d] x1/x2: %d - %d should equal%d\n",
+					   q_info->box[i].node,
+					   q_info->box[i].l.x2,
+					   q_info->box[i].l.x1, 
+					   q_info->box[i].l.x2 - q_info->box[i].l.x1);
+			return -1;
+		}
+		if (!q_info->box[i].p_box)
+			continue;
+		if ((q_info->box[i].p_box->l.x1 - q_info->box[i].l.x2) !=
+			PADDING_BETWEEN_BOX_X) {
+			PR_ERR("sched[%d]<->sched[%d]: %d - %d should equal%d\n",
+				   q_info->box[i].node, q_info->box[i].p_box->node,
+				   q_info->box[i].p_box->l.x2,
+				   q_info->box[i].l.x1, 
+				   q_info->box[i].p_box->l.x2 - q_info->box[i].l.x1);
+			return -1;
+				
+		}
+			
+	}
+	return 0;
+}
+
+void virtual_print_box(struct box_info *box,
+		       struct box_info *p_box,
+		       char *buf, int rows, int cols)
+{
+	char *p;
+	int i, len;
+	char *stat = NULL;
+
+	/*The format like below
+	 *         -----------------------
+	 *        |sched[%03d]            |
+	 *        |                       |
+	 *        |leaf[%2d]:kbps         |
+	 *        | cir/pir:%5d/%5d       |  20
+	 *        | cbs/pbs:%5d/%5d       |
+	 *        | ....                  |
+	 *        |                       |
+	 *         -----------------------
+	 */
+	p = &buf[cols * box->l.y1];
+	for (i = box->l.x1; i < box->l.x2; i++)
+		p[i] = '-';
+	p = &buf[cols * (box->l.y2 - 1)];
+	for (i = box->l.x1; i < box->l.x2; i++)
+		p[i] = '-';
+
+	for (i = 0; i < INFO_SIZE_PER_BOX; i++) {
+		p = &buf[cols * (box->l.y1 + 1 + i)];
+		p += box->l.x1 + 1;
+		if (i == 0) { /*print 1st info of box*/
+			if (!p_box) { /*port box*/
+				len = snprintf(p, BOX_WIDTH - 3,
+					       "port[%d/%d]",
+					       box->id, box->node);
+				stat = get_node_stat(box->id, DP_NODE_PORT);
+				p[len] = ' ';
+			} else { /*sched box */
+				len = snprintf(p, BOX_WIDTH - 3,
+					       "sched[/%d]",  box->node);
+				stat = get_node_stat(box->node, DP_NODE_SCH);
+				p[len] = ' ';
+			}
+		} else if (i == 1) {
+			len = snprintf(p, BOX_WIDTH - 3,
+				       " stat:%s", stat ? stat : "NULL");
+			p[len] = ' ';
+		}
+	}
+
+	for (i = box->l.y1 + 1; i < box->l.y2 - 1; i++) {
+		p = &buf[cols * i];
+		p[box->l.x1] = '|';
+	}
+	for (i = box->l.y1 + 1; i < box->l.y2 - 1; i++) {
+		p = &buf[cols * i];
+		p[box->l.x2 - 1] = '|';
+	}
+	if (!p_box)
+		return;
+
+	p = &buf[cols * ((box->l.y1 + box->l.y2) / 2)];
+	for (i = box->l.x2; i < p_box->l.x1; i++)
+		p[i] = '-';
+}
+
+void virtual_print_queues(struct q_print_info *q_info,
+			  char *buf, int rows, int cols)
+{
+	int i, j;
+	struct box_info *box;
+	int len, idx;
+	char *p;
+	char *stat = NULL;
+
+	for (i = 0; i < q_info->q_num; i++) {
+		if (q_info->sch_box[i][0])
+			box = q_info->sch_box[i][0];
+		else
+			box = &q_info->port_box;
+		idx = q_info->q_prn_leaf[i];
+		stat = get_node_stat(q_info->q_id[i], DP_NODE_QUEUE);
+		p = &buf[cols * box->child[idx].l.y2];
+		len = snprintf(p, Q_WIDTH - 1, "q[%4d/%4d] %s",
+					  q_info->q_id[i], q_info->q_node_id[i],
+					  stat ? stat : "NULL");
+		for (j = len; j < box->l.x1; j++)
+			p[j] = '-';
+
+		/*print leaf info in the parent box:sched/port */
+		p += box->l.x1 + 1; /*skip '|' */
+		for (j = 0; j < SIZE_PER_QUEUE; j++) {
+			if (j == 0) {
+				len = snprintf(p, BOX_WIDTH - 3, /*skip: | and | and null */
+						   "prn_leaf[%d]", q_info->q_prn_leaf[i]);
+				p[len] = ' ';
+			} else if ( j == 1 ) {
+				len = snprintf(p, BOX_WIDTH - 3,
+						   " C/P:%5d/%5d", 0, 0);
+				p[len] = ' '; 
+			} else if (j == 2) {
+				len = snprintf(p, BOX_WIDTH - 3,
+						   " c/p:%5d/%5d", 0, 0);
+				p[len] = ' '; 
+			}
+			/*move to next row */
+			p += cols;
+		}
+	}
+}
+
+void print_all(struct seq_file *s, struct q_print_info *q_info)
+{
+	int cols = q_info->port_box.l.x2 + 1;
+	int rows = q_info->port_box.l.y2 + 1;
+	int i;
+	char *buf;
+	char *p;
+
+	buf = kmalloc(cols * rows + 1, GFP_KERNEL);
+	if (!buf) {
+		PR_ERR("fail to alloc %d bytes\n", cols * rows + 1);
+		return;
+	}
+	memset(buf, ' ', cols * rows);
+	buf[cols * rows] = 0;
+	seq_printf(s, "allocte buffer: %d bytes(%d * %d)\n", 
+		cols * rows, cols, rows);
+
+	p = buf;
+	for (i=0; i<rows; i++) {
+		p += cols;
+		*(p - 1) = 0;
+	}
+
+	/*print port box*/
+	virtual_print_box(&q_info->port_box, NULL, buf, rows, cols);
+	for (i = 0; i < q_info->box_num; i++)
+		virtual_print_box(&q_info->box[i], q_info->box[i].p_box, buf, rows, cols);
+	/*print queue */
+	virtual_print_queues(q_info, buf, rows, cols);
+	p = buf;
+	for (i=0; i<rows; i++) {
+		seq_printf(s, "%s\n", p);
+		p += cols;
+	}	
+	kfree(buf);
+}
+
+/* print_box_lvl must be 1 bigger than sch_lvl */
+struct q_print_info *collect_info(struct seq_file *s,
+				  struct dp_dequeue_res *res,
+				  int print_box_lvl)
+{
+	int i, j, size, curr_box_y = 0, curr_box_x;
+	struct q_print_info *q_info = NULL;
+	struct box_info *box;
+	char f_new_box;
+
+	if (!res || !res->num_q || (res->num_deq_ports < 1)) {
+		goto ERR_EXIT;
+	}
+	if (res->num_q >= PROC_MAX_Q_PER_PORT) {
+		seq_printf(s, "too many queues in one port:%d should <%d\n",
+			res->num_q, PROC_MAX_Q_PER_PORT);
+		return NULL;
+	}
+	for (i = 0; i < res->num_q; i++) {
+		if (print_box_lvl < res->q_res[i].sch_lvl + 1) {
+			PR_ERR("wrong print_box_lvl(%d) < queue's real sch_lvl(%d)\n",
+				print_box_lvl,
+				res->q_res[i].sch_lvl + 1);
+		}
+	}
+
+	size = sizeof(*q_info);
+	q_info = kzalloc(size, GFP_KERNEL);
+	if (!q_info) {
+		PR_ERR("fail to alloc %d bytes\n", size);
+		return NULL;
+	}
+
+	q_info->port_box.filled = 1;
+	q_info->port_box.id = res->q_res[0].cqm_deq_port;
+	q_info->port_box.node = res->q_res[0].qos_deq_port;
+	q_info->port_box.box_x = print_box_lvl - 1;
+	q_info->port_box.box_y = 0;
+
+	for (i = 0; i < res->num_q; i++) { /*q loop */
+		q_info->q_id[i] = res->q_res[i].q_id;
+		q_info->q_node_id[i] =  res->q_res[i].q_node;
+		if (!res->q_res[i].sch_lvl)
+			continue;
+		f_new_box = 0;
+		curr_box_x = print_box_lvl - res->q_res[i].sch_lvl - 1;
+		for (j = 0; j < res->q_res[i].sch_lvl; j++) { /*each sched */
+			box = find_box_via_nodeid(q_info->box,
+						  q_info->box_num,
+						  res->q_res[i].sch_id[j]);
+			if (box) {
+				q_info->sch_box[i][j] = box;
+				continue;
+			}
+			/*create a new box */
+			q_info->box[q_info->box_num].filled = 1;
+			q_info->box[q_info->box_num].node = res->q_res[i].sch_id[j];
+			q_info->box[q_info->box_num].id = -1; /*not valid */
+			q_info->box[q_info->box_num].box_x = curr_box_x + j ;
+			q_info->box[q_info->box_num].box_y = curr_box_y;
+			q_info->sch_box[i][j] = &q_info->box[q_info->box_num];
+			q_info->box_num++;
+			f_new_box = 1;
+		}
+		q_info->sch_lvl[i] = res->q_res[i].sch_lvl;
+		if (f_new_box)
+			curr_box_y++;
+	}
+	q_info->q_num = res->num_q;
+	return q_info;
+ERR_EXIT:
+	kfree(q_info); /* kfree(NULL) is a no-op */
+	return NULL;
+}
+
+static struct dp_queue_res q_res[PROC_MAX_Q_PER_PORT * 4];
+static int qos_layout_inst;
+static int qos_layout_max_lvl = PROC_MAX_BOX_LVL;
+static int max_tconf_idx;
+static struct dp_dequeue_res tmp_res;
+static struct dp_queue_res tmp_q_res[PROC_MAX_Q_PER_PORT];
+
+static char *get_node_stat(int id, int type)
+{
+	struct dp_node_link_enable node_en;
+	static char stat[20];
+
+	node_en.inst = qos_layout_inst;
+	node_en.id.q_id = id;
+	node_en.type = (enum dp_node_type)type;
+	stat[0] = 0;
+	PR_INFO("inst=%d id=%d type=%d\n",
+		node_en.inst, node_en.id.q_id, node_en.type);
+	if (dp_node_link_en_get(&node_en, 0))
+		strcpy(stat, "?");
+	else {
+		if (node_en.en & DP_NODE_DIS)
+			strcat(stat, "B");
+		if (node_en.en & DP_NODE_SUSPEND)
+			strcat(stat, "S");
+	}
+
+	return stat;
+}
+
+int get_num_deq_ports(int inst, int dp_port)
+{
+	if (inst != 0)
+		return 0;
+	memset(&tmp_res, 0, sizeof(tmp_res));
+	tmp_res.inst = inst;
+	tmp_res.dp_port = dp_port;
+	tmp_res.q_res = NULL;
+	tmp_res.cqm_deq_idx = DEQ_PORT_OFFSET_ALL;
+	if(dp_deq_port_res_get(&tmp_res, 0)) {
+		return -1;
+	}
+
+	return tmp_res.num_deq_ports;
+}
+
+int get_res(int inst, int dp_port, int tconf_idx)
+{
+	memset(&tmp_res, 0, sizeof(tmp_res));
+	tmp_res.inst = inst;
+	tmp_res.dp_port = dp_port;
+	tmp_res.cqm_deq_idx = tconf_idx;
+	tmp_res.q_res = q_res;
+	tmp_res.q_res_size = ARRAY_SIZE(q_res);
+	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+		 "get_res: dp_port=%d tconf_idx=%d\n",
+		 tmp_res.dp_port, tmp_res.cqm_deq_idx);
+	if(dp_deq_port_res_get(&tmp_res, 0)) {
+		PR_ERR("dp_deq_port_res_get fail: inst=%d dp_port=%d, tconf_idx=%d\n",
+		       qos_layout_inst, tmp_res.dp_port, tconf_idx);
+		return -1;
+	}
+	return 0;
+}
+
+int dump_q_info_dbg(struct seq_file *s, struct dp_dequeue_res *res)
+{
+	struct dp_queue_res *q_res = res->q_res;
+	int i, j;
+
+	for (i = 0; i < res->num_q; i++) {
+		seq_printf(s, "q[%d]-", q_res[i].q_id);
+		for (j = 0; j < q_res[i].sch_lvl; j++)
+			seq_printf(s, "sched[%d]-", q_res[i].sch_id[j]);
+		seq_printf(s, "p[%d/%d]\n", q_res[i].cqm_deq_port, q_res[i].qos_deq_port);	
+	}
+	return 0;	
+}
+
+int qos_dump(struct seq_file *s, int pos)
+{
+	struct q_print_info *q_info;
+	int i;
+
+	if (pos == 0) {
+		seq_printf(s, "Note:\n");
+		seq_printf(s, "  x/y: physical node/logical node\n");
+		seq_printf(s, "     : cqm dequeue port/ppv4 logical node\n");
+		seq_printf(s, "  B: blocked, ie, new incoming packet will be dropped\n");
+		seq_printf(s, "  S: suspended, ie, packet in queue will not be trasmitted\n");
+		seq_printf(s, "\n");
+	}
+	max_tconf_idx = get_num_deq_ports(qos_layout_inst, pos);
+	if (max_tconf_idx <= 0)
+		goto NEXT_DP_PORT;
+	for (i = 0; i < max_tconf_idx; i++) {
+		/*get qos resource and store into tmp_res */
+		if (get_res(qos_layout_inst, pos, i)) {
+			PR_ERR("get_res fail\n");
+			return -1;
+		}
+		if (!tmp_res.num_q)
+			continue;
+		dump_q_info_dbg(s, &tmp_res);
+		q_info = collect_info(s, &tmp_res, qos_layout_max_lvl);
+		if (!q_info) {
+			PR_ERR("failed \n");
+			pos = -1;
+			return pos;
+		}
+		set_child_per_box(q_info);
+		set_location_size(&q_info->port_box, 0);
+		if (check_location(q_info))
+			continue;
+
+		print_all(s, q_info);
+		kfree(q_info);
+	}
+NEXT_DP_PORT:
+	pos++;
+	if (pos >= 16)
+		pos = -1;
+
+	return pos;
+}
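+
+/* The rendered layout looks roughly like this (illustrative ids only):
+ *
+ *                  --------------------     --------------------
+ * q[  17/ 128]-----|sched[/200]       |-----|port[5/25]        |
+ *                  | stat:            |     | stat:            |
+ *                  --------------------     --------------------
+ */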
+
+int qos_dump_start(void)
+{
+	qos_layout_inst = 0;
+	/* currently just use the maximum; later this should check the real
+	 * QoS configuration and derive its maximum hierarchy depth
+	 */
+	qos_layout_max_lvl = PROC_MAX_BOX_LVL;
+	max_tconf_idx = 0;
+	return 0;
+}
+
+ssize_t proc_qos_write(struct file *file, const char *buf,
+		       size_t count, loff_t *ppos)
+{
+	/* TODO: QoS configuration via this proc write is not implemented;
+	 * the legacy TMU-based handler was dropped with the PPV4 migration.
+	 */
+	return count;
+}
+
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_qos.c b/drivers/net/ethernet/lantiq/datapath/datapath_qos.c
index bb4592e23..d3d10e05a 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_qos.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_qos.c
@@ -125,6 +125,28 @@ EXPORT_SYMBOL(dp_node_free);
 
 int dp_deq_port_res_get(struct dp_dequeue_res *res, int flag)
 {
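+	/* Illustrative usage, resolving resources by net_device instead of
+	 * dp_port (similar to get_num_deq_ports() in datapath_proc_qos.c):
+	 *
+	 *   struct dp_dequeue_res r = {0};
+	 *
+	 *   r.inst = 0;
+	 *   r.dev = dev;                          dp_port filled from dev
+	 *   r.cqm_deq_idx = DEQ_PORT_OFFSET_ALL;
+	 *   r.q_res = NULL;                       only counts are wanted
+	 *   if (dp_deq_port_res_get(&r, 0) == DP_SUCCESS)
+	 *           ... use r.num_deq_ports / r.num_q ...
+	 */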
+	dp_subif_t *subif;
+
+	if (!res)
+		return DP_FAILURE;
+	if (res->dev) { /* fill dp_port if dev is provided */
+		subif = kzalloc(sizeof(*subif), GFP_KERNEL);
+		if (!subif)
+			return DP_FAILURE;
+		dp_get_netif_subifid(res->dev, NULL, NULL, NULL, subif, 0);
+		if (!subif->subif_num) {
+			PR_ERR("Not found dev %s\n", res->dev->name);
+			kfree(subif);
+			return DP_FAILURE;
+		}
+		res->dp_port = subif->port_id;
+		kfree(subif);
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+		 "dp_deq_port_res_get: dp_port=%d tconf_idx=%d\n",
+		 res->dp_port, res->cqm_deq_idx);
 	dp_port_prop[res->inst].info.
 		dp_qos_platform_set(DEQ_PORT_RES_GET, res, flag);
 	return DP_SUCCESS;
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_soc.c b/drivers/net/ethernet/lantiq/datapath/datapath_soc.c
index e1ba835f7..407558d7d 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_soc.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_soc.c
@@ -39,6 +39,7 @@ int request_dp(u32 flag)
 	info.ops[1] = gsw_get_swcore_ops(1);
 #endif
 	info.cbm_inst = 0;
+	info.qos_inst = 0;
 	if (dp_request_inst(&info, flag)) {
 		PR_ERR("dp_request_inst failed\n");
 		return -1;
diff --git a/drivers/net/ethernet/lantiq/datapath/datapath_swdev.c b/drivers/net/ethernet/lantiq/datapath/datapath_swdev.c
index 9a4d97e4f..f9b6de7f8 100644
--- a/drivers/net/ethernet/lantiq/datapath/datapath_swdev.c
+++ b/drivers/net/ethernet/lantiq/datapath/datapath_swdev.c
@@ -61,28 +61,27 @@ struct br_info *dp_swdev_bridge_entry_lookup(char *br_name,
 	struct hlist_head *tmp;
 
 	idx = dp_swdev_cal_hash(br_name);
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s hash index:%d\n", __func__, idx);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "hash index:%d\n", idx);
 	tmp = (&g_bridge_id_entry_hash_table[inst][idx]);
 	hlist_for_each_entry(br_item, tmp, br_hlist) {
 		if (br_item) {
 			if (strcmp(br_name, br_item->br_device_name) == 0) {
 				DP_DEBUG(DP_DBG_FLAG_SWDEV,
-					 "%s hash entry found(%s)\n",
-					 __func__, br_name);
+					 "hash entry found(%s)\n",
+					 br_name);
 				return br_item;
 			}
 		}
 	}
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s No hash entry found(%s)\n",
-		 __func__, br_name);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "No hash entry found(%s)\n",
+		 br_name);
 	return NULL;
 }
 
-static void dp_swdev_remove_bridge_id_entry(struct br_info
-					   *br_item)
+static void dp_swdev_remove_bridge_id_entry(struct br_info *br_item)
 {
 	/*TODO reset switch bridge configurations*/
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s hash del\n", __func__);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "hash del\n");
 	hlist_del(&br_item->br_hlist);
 	kfree(br_item);
 }
@@ -96,8 +95,8 @@ static void dp_swdev_insert_bridge_id_entry(struct br_info
 	idx = dp_swdev_cal_hash(br_item->br_device_name);
 	tmp = (&g_bridge_id_entry_hash_table[br_item->inst][idx]);
 	hlist_add_head(&br_item->br_hlist, tmp);
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s hash added idx:%d bridge(%s)\n",
-		 __func__, idx, br_item->br_device_name);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "hash added idx:%d bridge(%s)\n",
+		 idx, br_item->br_device_name);
 }
 
 int dp_swdev_bridge_id_entry_free(int instance)
@@ -188,8 +187,7 @@ static int dp_swdev_clr_gswip_cfg(struct bridge_id_entry_item *br_item,
 
 	if (br_item->flags == BRIDGE_NO_ACTION) {
 		DP_DEBUG(DP_DBG_FLAG_SWDEV,
-			 "%s bport not added so no action required\n",
-			 __func__);
+			 "bport not added so no action required\n");
 		return 0;
 	}
 	br_info = dp_swdev_bridge_entry_lookup(br_item->br_device_name,
@@ -206,16 +204,84 @@ static int dp_swdev_clr_gswip_cfg(struct bridge_id_entry_item *br_item,
 				swdev_free_brcfg(br_item->inst, br_item->fid);
 			dp_swdev_remove_bridge_id_entry(br_info);
 			DP_DEBUG(DP_DBG_FLAG_SWDEV,
-				 "%s rem bport(%d),bridge(%s)\n",
-				 __func__, br_item->portid,
+				 "rem bport(%d),bridge(%s)\n",
+				 br_item->portid,
 				 br_item->br_device_name);
 		}
-		DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s rem bport(%d)\n",
-			 __func__, br_item->portid);
+		DP_DEBUG(DP_DBG_FLAG_SWDEV, "rem bport(%d)\n",
+			 br_item->portid);
 		return 0;
 	}
 	DP_DEBUG(DP_DBG_FLAG_SWDEV,
-		 "%s No configuration,Pls check!!\n", __func__);
+		 "No configuration,Pls check!!\n");
+	return 0;
+}
+
+static int dp_swdev_cfg_vlan(struct bridge_id_entry_item *br_item,
+			     struct net_device *dev)
+{
+	struct dp_dev *dp_dev;
+	u32 idx;
+	int vap;
+
+	if (br_item->flags & LOGIC_DEV_REGISTER) {
+		idx = dp_dev_hash(dev, NULL);
+		dp_dev = dp_dev_lookup(&dp_dev_list[idx], dev, NULL, 0);
+		if (!dp_dev) {
+			PR_ERR("dp_dev_lookup failed for %s\n", dev->name);
+			return -1;
+		}
+		vap = GET_VAP(dp_dev->ctp,
+			      dp_port_info[br_item->inst][dp_dev->ep].vap_offset,
+			      dp_port_info[br_item->inst][dp_dev->ep].vap_mask);
+		DP_DEBUG(DP_DBG_FLAG_SWDEV, "vap=%d ep=%d bp=%d\n",
+			 vap, dp_dev->ep, dp_dev->bp);
+		dp_port_prop[br_item->inst].info.dp_cfg_vlan(br_item->inst,
+							     vap, dp_dev->ep);
+	}
+	return 0;
+}
+
+static int dp_swdev_filter_vlan(struct net_device *dev,
+				const struct switchdev_obj *obj,
+				struct switchdev_trans *trans,
+				struct net_device *br_dev)
+{
+	struct br_info *br_info;
+	struct bridge_id_entry_item *br_item;
+	dp_subif_t subif = {0};
+
+	if (switchdev_trans_ph_prepare(trans)) {
+		/*Get current BPORT ID,instance from DP*/
+		if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) {
+			PR_ERR("%s dp_get_netif_subifid failed for %s\n",
+			       __func__, dev->name);
+			return -EOPNOTSUPP;
+		}
+		br_item = kzalloc(sizeof(*br_item), GFP_KERNEL);
+		if (!br_item)
+			/* TODO: need to check dequeue if no memory */
+			return -ENOMEM;
+		br_item->inst = subif.inst;
+		/* current bridge member port*/
+		br_item->portid = subif.bport;
+		swdev_lock();
+		br_info = dp_swdev_bridge_entry_lookup(br_dev->name,
+						       subif.inst);
+		if (br_info) {
+			strcpy(br_item->br_device_name,
+			       br_info->br_device_name);
+			br_item->fid = br_info->fid;
+		}
+		switchdev_trans_item_enqueue(trans, br_item,
+					     kfree, &br_item->tritem);
+		swdev_unlock();
+		return 0;
+	}
+	br_item = switchdev_trans_item_dequeue(trans);
+	if (br_item)
+		dp_swdev_cfg_vlan(br_item, dev);
 	return 0;
 }
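+
+/* Note: like other switchdev object handlers, dp_swdev_filter_vlan() runs
+ * twice. In the prepare phase it only validates and queues a br_item via
+ * switchdev_trans_item_enqueue(); in the commit phase it dequeues that item
+ * and applies the VLAN configuration, since commit must not fail.
+ */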
 
@@ -223,8 +289,7 @@ static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, u8 *addr)
 {
 	struct br_info *br_info;
 
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s britem flags:%x\n", __func__,
-		 br_item->flags);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "britem flags:%x\n", br_item->flags);
 	if (br_item->flags & ADD_BRENTRY) {
 		DP_DEBUG(DP_DBG_FLAG_SWDEV, "Add br entry %s\n",
 			 br_item->br_device_name);
@@ -245,11 +310,11 @@ static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, u8 *addr)
 			br_info->inst = br_item->inst;
 			br_info->cpu_port = ENABLE;
 			/* Logic dev flag added to verify if SWDEV registered
 			 * the logical i.e. VLAN device. Helpful during
 			 * br/bport delete
 			 */
 			if (br_item->flags & LOGIC_DEV_REGISTER) {
-				br_item->flags &= ~LOGIC_DEV_REGISTER;
+				//br_item->flags &= ~LOGIC_DEV_REGISTER;
 				br_info->flag = LOGIC_DEV_REGISTER;
 			}
 			strcpy(br_info->br_device_name,
@@ -264,8 +329,8 @@ static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, u8 *addr)
 							  br_item->portid);
 			br_item->flags &= ~ADD_BRENTRY;
 			DP_DEBUG(DP_DBG_FLAG_SWDEV,
-				 "%s added bport(%d),bridge(%s)\n",
-				 __func__, br_item->portid,
+				 "added bport(%d),bridge(%s)\n",
+				 br_item->portid,
 				 br_info->br_device_name);
 			return 0;
 		}
@@ -274,6 +339,7 @@ static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, u8 *addr)
 						       br_item->inst);
 		if (!br_info)
 			return 0;
+		br_info->flag = 0;
 		if (br_item->flags & LOGIC_DEV_REGISTER)
 			br_info->flag = LOGIC_DEV_REGISTER;
 		dp_swdev_add_bport_to_list(br_info, br_item->portid);
@@ -281,12 +347,11 @@ static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, u8 *addr)
 			swdev_bridge_port_cfg_set(br_info,
 						  br_item->inst,
 						  br_item->portid);
-		DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s added bport(%d)\n",
-			 __func__, br_item->portid);
+		DP_DEBUG(DP_DBG_FLAG_SWDEV, "added bport(%d)\n",
+			 br_item->portid);
 		return 0;
 	}
-	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s No configuration,Pls check!!\n",
-		 __func__);
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "No configuration,Pls check!!\n");
 	return 0;
 }
 
@@ -308,8 +373,8 @@ static int dp_swdev_add_if(struct net_device *dev,
 	if (switchdev_trans_ph_prepare(trans)) {
 		/*Get current BPORT ID,instance from DP*/
 		if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) {
-			PR_ERR("%s dp_get_netif_subifid failed for %s\n",
-			       __func__, dev->name);
+			PR_ERR("dp_get_netif_subifid failed for %s\n",
+			       dev->name);
 			/*Check bridge port exists otherwise register
 			 *device with datapath  i.e. only in case of new
 			 *VLAN interface
@@ -341,8 +406,8 @@ static int dp_swdev_add_if(struct net_device *dev,
 				}
 				flag = LOGIC_DEV_REGISTER;
 				DP_DEBUG(DP_DBG_FLAG_SWDEV,
-					 "registered subif:%d\n",
-					 subif.bport);
+					 "registered subif,bp=%d port=%d\n",
+					 subif.bport, subif.port_id);
 			} else {
 				return -EOPNOTSUPP;
 			}
@@ -388,8 +453,11 @@ static int dp_swdev_add_if(struct net_device *dev,
 	/*configure switch in commit phase and it cannot return failure*/
 	swdev_lock();
 	br_item = switchdev_trans_item_dequeue(trans);
-	if (br_item)
+	if (br_item) {
 		dp_swdev_cfg_gswip(br_item, addr);
+		if (br_item->flags & LOGIC_DEV_REGISTER)
+			dp_swdev_cfg_vlan(br_item, dev);/*only for vlan flag*/
+	}
 	swdev_unlock();
 	return 0;
 }
@@ -415,9 +483,10 @@ static int dp_swdev_del_if(struct net_device *dev,
 		/*Get current BR_PORT ID from DP*/
 		if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) {
 			DP_DEBUG(DP_DBG_FLAG_SWDEV,
-				 "%s dp_get_netif_subifid failed for %s\n",
-				 __func__, dev->name);
-			return -EINVAL;
+				 "dp_get_netif_subifid failed for %s\n",
+				 dev->name);
+			if (!is_vlan_dev(dev))
+				return -EINVAL;
 		}
 		br_item = kmalloc(sizeof(*br_item), GFP_KERNEL);
 		if (!br_item)
@@ -471,6 +540,11 @@ static int dp_swdev_del_if(struct net_device *dev,
 				 base ? base->name : "NULL");
 			if (!base)
 				base = dev;
+			/*the previous sequence was running into a deadlock in
+			 *taking the swdev_lock
+			 */
+			dp_swdev_cfg_vlan(br_item, dev);
+			swdev_unlock();
 			if (dp_get_netif_subifid(base, NULL, NULL,
 						 NULL, &subif, 0)) {
 				PR_ERR("dp_get_netif_subifid fail:%s\n",
@@ -483,9 +557,9 @@ static int dp_swdev_del_if(struct net_device *dev,
 			port = subif.port_id;
 			inst = subif.inst;
 			if (dp_register_subif(dp_port_info[inst][port]
 					      .owner,
 					      dev, dev->name, &subif,
 					      DP_F_DEREGISTER)) {
 				PR_ERR("dp_register_subif fail: %s\n",
 				       dev->name);
 				/*Cannot Return -EOPNOTSUPP
@@ -493,6 +567,7 @@ static int dp_swdev_del_if(struct net_device *dev,
 				 */
 				return 0;
 			}
+			swdev_lock();
 		}
 	}
 	swdev_unlock();
@@ -644,14 +719,13 @@ static int dp_swdev_port_attr_get(struct net_device *dev,
 		if (!br_info)
 			return 0;
 		if (br_info->fid < 0)
-			return 0;
-		snprintf(attr->u.ppid.id, sizeof(attr->u.ppid.id), "%d",
-			 br_info->fid);
-		attr->u.ppid.id_len = strlen(attr->u.ppid.id) + 1;
-		DP_DEBUG(DP_DBG_FLAG_SWDEV,
-			 "SWITCHDEV_ATTR_ID_PORT_PARENT_ID:%s fid:%d attr:%s\n",
-			 attr->orig_dev ? attr->orig_dev->name : "NULL",
-			 br_info->fid, attr->u.ppid.id);
+			return -EOPNOTSUPP;
+		attr->u.ppid.id_len = sizeof(br_info->fid);
+		memcpy(&attr->u.ppid.id, &br_info->fid, attr->u.ppid.id_len);
+		PR_INFO("SWITCHDEV_ATTR_ID_PORT_PARENT_ID:%s fid=%d\n",
+			attr->orig_dev ? attr->orig_dev->name : "NULL",
+			br_info->fid);
+		//err = 0;
 		break;
 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 		break;
@@ -666,6 +740,7 @@ static int dp_swdev_port_obj_add(struct net_device *dev,
 				 struct switchdev_trans *trans)
 {
 	int err = -EOPNOTSUPP;
+	struct net_device *br_dev;
 #ifdef CONFIG_LTQ_DATAPATH_SWDEV_TEST
 	{
 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
@@ -678,7 +753,6 @@ static int dp_swdev_port_obj_add(struct net_device *dev,
 		 br_dev ? br_dev->name : "Null");
 		return 0;
 	}
-#endif
 	return err; //TODO
 
 	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s id:%d flags:%d dev name:%s\r\n",
@@ -690,8 +764,16 @@ static int dp_swdev_port_obj_add(struct net_device *dev,
 	if (trans->ph_prepare == 1)
 		DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s ph->prepare:%d\r\n",
 			 __func__, trans->ph_prepare);
+#endif
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s id:%d flags:%d dev name:%s\r\n",
+		 __func__, obj->id,
+		 obj->flags, dev->name);
+	br_dev = netdev_master_upper_dev_get(obj->orig_dev);
+	if (!br_dev)
+		return err;
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		dp_swdev_filter_vlan(obj->orig_dev, obj, trans, br_dev);
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_FDB:
 		break;
@@ -874,6 +956,7 @@ int dp_notif_br_alloc(struct net_device *br_dev)
 			br_info->fid = br_id;
 			br_info->inst = 0;
 			br_info->cpu_port = ENABLE;
+			br_info->flag = 0;
 			strcpy(br_info->br_device_name, br_dev->name);
 			INIT_LIST_HEAD(&br_info->bp_list);
 			dp_swdev_insert_bridge_id_entry(br_info);
@@ -896,17 +979,23 @@ void dp_port_register_switchdev(struct dp_dev  *dp_dev,
 {
 	if (dp_port_prop[dp_dev->inst].info.swdev_flag == 1) {
 		/*backup ops*/
-		SET_OPS(dp_dev->old_dev_ops,
-			(struct net_device_ops *)dp_port->netdev_ops);
-		SET_OPS(dp_dev->old_swdev_ops,
-			(struct switchdev_ops *)dp_port->switchdev_ops);
+		dp_dev->old_dev_ops = NULL;
+		dp_dev->old_swdev_ops = NULL;
+		if (dp_port->netdev_ops)
+			SET_OPS(dp_dev->old_dev_ops,
+				(struct net_device_ops *)dp_port->netdev_ops);
+		if (dp_dev->old_swdev_ops)
+			SET_OPS(dp_dev->old_swdev_ops,
+				(struct switchdev_ops *)dp_port->switchdev_ops);
 		/*create new one */
 		/* Current comment it out
 		 *SET_OPS(sub_info->new_dev_ops, dp_port->netdev_ops);
 		 *SET_OPS(sub_info->new_swdev_ops, dp_port->switchdev_ops);
 		 */
-		dp_dev->new_dev_ops = *dp_port->netdev_ops;
-		dp_dev->new_swdev_ops = *dp_port->switchdev_ops;
+		if (dp_port->netdev_ops)
+			dp_dev->new_dev_ops = *dp_port->netdev_ops;
+		if (dp_port->switchdev_ops)
+			dp_dev->new_swdev_ops = *dp_port->switchdev_ops;
 		/*tune new ops */
 		dp_dev->new_swdev_ops.switchdev_port_attr_get =
 							dp_swdev_port_attr_get;
@@ -954,7 +1043,7 @@ void dp_port_deregister_switchdev(struct dp_dev *dp_dev,
 	struct net_device *br_dev;
 
 	/* Workaround for ethernet ifconfig down case
-	 * to remove port from switchdev as dev is de-register
+	 * to remove port from switchdev as dev is de-registered
 	 * from DP lib
 	 */
 	if (netif_is_bridge_port(dev)) {
@@ -968,8 +1057,8 @@ void dp_port_deregister_switchdev(struct dp_dev *dp_dev,
 		}
 	}
 	if (dp_dev->old_swdev_ops && dp_dev->old_dev_ops) {
-	dev->switchdev_ops = dp_dev->old_swdev_ops;
-	dev->netdev_ops = dp_dev->old_dev_ops;
+		dev->switchdev_ops = dp_dev->old_swdev_ops;
+		dev->netdev_ops = dp_dev->old_dev_ops;
 	}
 }
 
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip30/Kconfig b/drivers/net/ethernet/lantiq/datapath/gswip30/Kconfig
index c510e8415..161052dc9 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip30/Kconfig
+++ b/drivers/net/ethernet/lantiq/datapath/gswip30/Kconfig
@@ -1,26 +1,25 @@
 menuconfig LTQ_DATAPATH_HAL_GSWIP30
-        bool "Datapath HAL_GSWIP30"
-        default y
-        depends on LTQ_DATAPATH
-        ---help---
-          Datapath Lib is to provide common rx/tx wrapper Lib without taking
-          care of much HW knowledge and also provide common interface for legacy
-          devices and different HW like to CBM or LRO.
-          Take note:
-            All devices need to register to datapath Lib first
+	bool "Datapath HAL_GSWIP30"
+	default y
+	depends on LTQ_DATAPATH
+	---help---
+	  Datapath Lib is to provide common rx/tx wrapper Lib without taking
+	  care of much HW knowledge and also provide common interface for legacy
+	  devices and different HW like to CBM or LRO.
+	  Take note: All devices need to register to datapath Lib first
 
 if LTQ_DATAPATH_HAL_GSWIP30
 config LTQ_DATAPATH_HAL_GSWIP30_MIB
-        bool "Datapath aggregated mib support"
-        depends on LTQ_DATAPATH_HAL_GSWIP30 && SOC_GRX500 && LTQ_TMU && LTQ_PPA_TMU_MIB_SUPPORT
-        default y
-        ---help---
-           It is to aggregate GSWIP-L/R, TMU and driver's MIB counter
-config LTQ_DATAPATH_HAL_GSWIP30_CPUFREQ
-        bool "Datapath DFS(COC) support"
-        depends on LTQ_DATAPATH && LTQ_CPUFREQ && LTQ_ETHSW_API
-        default y
-        ---help---
-           It is to support DFS(COC) in Datapath
+	bool "Datapath aggregated mib support"
+	depends on LTQ_DATAPATH_HAL_GSWIP30 && SOC_GRX500 && LTQ_TMU && LTQ_PPA_TMU_MIB_SUPPORT
+	default y
+	---help---
+	  It is to aggregate GSWIP-L/R, TMU and driver's MIB counter
 
+config LTQ_DATAPATH_HAL_GSWIP30_CPUFREQ
+	bool "Datapath DFS(COC) support"
+	depends on LTQ_DATAPATH && LTQ_CPUFREQ && LTQ_ETHSW_API
+	default y
+	---help---
+	  It is to support DFS(COC) in Datapath
 endif
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_lookup_proc.c b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_lookup_proc.c
index e875617c1..39c99c099 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_lookup_proc.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_lookup_proc.c
@@ -192,7 +192,7 @@ static void lookup_pattern_match(int tmp_index)
 	int t[LOOKUP_FIELD_BITS] = { 0 };
 	int index;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "trying with tmp_index=0x%x with lookup_match_num=%d\n",
 		 tmp_index, lookup_match_num);
 	pattern_match_flag = PATTERN_MATCH_INIT;
@@ -202,12 +202,12 @@ static void lookup_pattern_match(int tmp_index)
 		index = tmp_index;
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
 			index |= (t[i] << i);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "don't care[14]=");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "don't care[14]=");
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
-			DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", t[i]);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", t[i]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
-		DP_DEBUG(DP_DBG_FLAG_DBG, "don't care index=%x\n", index);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "don't care index=%x\n", index);
 
 		if (lookup_tbl_flags[index] == ENTRY_USED) {
 			pattern_match_flag = PATTERN_MATCH_FAIL;
@@ -221,7 +221,7 @@ static void lookup_pattern_match(int tmp_index)
 			first_qid = qid;
 		} else if (first_qid != qid) {
 			pattern_match_flag = PATTERN_MATCH_FAIL;
-			DP_DEBUG(DP_DBG_FLAG_DBG, "first_qid(%d) != qid(%d)\n",
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "first_qid(%d) != qid(%d)\n",
 				 first_qid, qid);
 			goto END;
 		}
@@ -250,7 +250,7 @@ static void lookup_pattern_match(int tmp_index)
 			if (lookup_mask1[i])
 				lookup_match_mask[lookup_match_num] |= 1 << i;
 		lookup_match_num++;
-		DP_DEBUG(DP_DBG_FLAG_DBG,
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 			 "left_n=%d lookup_mask_n=%d. Need reduce=%d\n",
 			 left_n, lookup_mask_n, (1 << lookup_mask_n));
 		left_n -= (1 << lookup_mask_n);
@@ -270,11 +270,11 @@ static int list_care_combination(int tmp_index)
 		index = tmp_index;
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
 			index |= (t[i] << i);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "care index=%x\n", index);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "care t[14]=");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "care index=%x\n", index);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "care t[14]=");
 		for (k = 0; k < LOOKUP_FIELD_BITS; k++)
-			DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", t[k]);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", t[k]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 		lookup_pattern_match(index);
 	}
 
@@ -291,19 +291,19 @@ static int check_pattern(int *data, int r)
 	int i;
 
 	memset(lookup_mask1, 0, sizeof(lookup_mask1));
-	DP_DEBUG(DP_DBG_FLAG_DBG, "data:");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "data:");
 	for (i = 0; i < r; i++) {
-		DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", data[i]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", data[i]);
 		lookup_mask1[data[i]] = CARE_NOT_FLAG;
 	}
 	lookup_mask_n = r;
 	pattern_match_flag = 0;
-	DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
-	DP_DEBUG(DP_DBG_FLAG_DBG, "Don't care flag: ");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "Don't care flag: ");
 	for (i = 0; i < LOOKUP_FIELD_BITS; i++)
-		DP_DEBUG(DP_DBG_FLAG_DBG, "%c ", lookup_mask1[i] ? 'x' : '0');
-	DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%c ", lookup_mask1[i] ? 'x' : '0');
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
 	list_care_combination(tmp_pattern_port_id << 4);
 	return 0;
@@ -332,7 +332,7 @@ int find_pattern(int port_id, struct seq_file *s, int qid)
 		if (left_n <= 0)
 			break;
 		c_not_care_walkthrought(arr, n, r);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "left_n=%d\n", left_n);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "left_n=%d\n", left_n);
 		if (!left_n)
 			break;
 	}
@@ -397,7 +397,7 @@ ssize_t proc_get_qid_via_index30(struct file *file, const char *buf,
 	int num;
 
 	len = (count >= sizeof(data)) ? (sizeof(data) - 1) : count;
-	DP_DEBUG(DP_DBG_FLAG_DBG, "len=%d\n", len);
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "len=%d\n", len);
 
 	if (len <= 0) {
 		err = -EFAULT;
@@ -494,7 +494,7 @@ void lookup_table_via_qid(int qid)
 {
 	u32 index, tmp, i, j, k, f = 0;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "Try to find all lookup flags mapped to qid %d\n", qid);
 	for (i = 0; i < 16; i++) {	/*ep */
 		for (j = 0; j < 16; j++) {	/*class */
@@ -518,7 +518,7 @@ void lookup_table_remap(int old_q, int new_q)
 {
 	u32 index, tmp, i, j, k, f = 0;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "Try to remap lookup flags mapped from old_q %d to new_q %d\n",
 		 old_q, new_q);
 	for (i = 0; i < 16; i++) {	/*ep */
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_mib.c b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_mib.c
index 5b7cc3a85..53cea28eb 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_mib.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_mib.c
@@ -220,7 +220,9 @@ static u64 wraparound(u64 curr, u64 last, u32 size)
 	if ((size > 4) || /*for 8 bytes(64bit mib),no need to do wraparound*/
 	    (curr >= last))
 		return curr - last;
-
+	PR_INFO("Wraparound happen: \n");
+	PR_INFO("  current mib: 0x%x\n", curr);
+	PR_INFO("  last    mib: 0x%x\n", last);
 	return ((u64)WRAPAROUND_MAX_32) + (u64)curr - last;
 }
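+
+/* Example, assuming WRAPAROUND_MAX_32 is 2^32: a 32-bit counter that wrapped
+ * from last=0xFFFFFFF0 to curr=0x10 yields 2^32 + 0x10 - 0xFFFFFFF0 = 0x20.
+ */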
 
@@ -245,7 +247,7 @@ static int port_mib_wraparound(u32 ep, struct mibs_low_lvl_port *curr,
 		last_tmp = &last->r;
 	}
 	/*First handle common RMON mib */
-	spin_lock_bh(&dp_mib_lock);
+	//spin_lock_bh(&dp_mib_lock);
 	aggregate_mib[ep].curr.rx_good_bytes +=
 	    RMON_PORT_WRAP(curr_tmp, last_tmp, nRxGoodBytes);
 	aggregate_mib[ep].curr.rx_bad_bytes +=
@@ -318,7 +320,7 @@ static int port_mib_wraparound(u32 ep, struct mibs_low_lvl_port *curr,
 
 		/*save */
 		*last = *curr;
-		spin_unlock_bh(&dp_mib_lock);
+		//spin_unlock_bh(&dp_mib_lock);
 		return 0;
 	}
 
@@ -383,7 +385,7 @@ static int port_mib_wraparound(u32 ep, struct mibs_low_lvl_port *curr,
 	}
 	/*save */
 	*last = *curr;
-	spin_unlock_bh(&dp_mib_lock);
+	//spin_unlock_bh(&dp_mib_lock);
 	return 0;
 }
 
@@ -411,7 +413,7 @@ static int vap_mib_wraparound(dp_subif_t *subif,
 	if ((ep <= 0) ||
 	    (ep >= PMAC_MAX_NUM))
 		return -1;
-	spin_lock_bh(&dp_mib_lock);
+	//spin_lock_bh(&dp_mib_lock);
 	aggregate_mib[ep].curr_vap[vap].rx_pkts_itf +=
 	    VAP_RMON_WRAP_ITF(curr, last, nRxPktsCount);
 	aggregate_mib[ep].curr_vap[vap].rx_disc_pkts_itf +=
@@ -433,7 +435,7 @@ static int vap_mib_wraparound(dp_subif_t *subif,
 	    VAP_RMON_WRAP_DRV(curr, last, tx_drop_pkts);
 	/*save */
 	*last = *curr;
-	spin_unlock_bh(&dp_mib_lock);
 	return 0;
 }
 
@@ -1050,6 +1052,7 @@ int dp_get_port_vap_mib_30(dp_subif_t *subif, void *priv,
 	    (subif->port_id >= PMAC_MAX_NUM) ||
 	    !net_mib)
 		return -1;
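+	/* hold dp_mib_lock across the whole aggregate_mib[] read */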
+	spin_lock_bh(&dp_mib_lock);
 	port_id = subif->port_id;
 	vap = GET_VAP(subif->subif,
 		      PORT_INFO(0, subif->port_id, vap_offset),
@@ -1081,7 +1084,7 @@ int dp_get_port_vap_mib_30(dp_subif_t *subif, void *priv,
 		    aggregate_mib[port_id].curr_vap[vap].tx_disc_pkts_drv;
 		net_mib->tx_packets =
 		    aggregate_mib[port_id].curr_vap[vap].tx_pkts_itf;
-
+		spin_unlock_bh(&dp_mib_lock);
 		return 0;
 	}
 
@@ -1548,7 +1551,7 @@ int dp_get_port_vap_mib_30(dp_subif_t *subif, void *priv,
 			 aggregate_mib[port_id].curr.rx_good_bytes,
 			 aggregate_mib[port_id].curr.rx_bad_bytes);
 	}
-
+	spin_unlock_bh(&dp_mib_lock);
 	return 0;
 }
 
@@ -1608,8 +1611,9 @@ int dp_clear_netif_mib_30(dp_subif_t *subif, void *priv, u32 flag)
 
 	gsw_l = dp_port_prop[inst].ops[GSWIP_L];
 	gsw_r = dp_port_prop[inst].ops[GSWIP_R];
 
 	if (!subif) { /*clear all */
+		spin_lock_bh(&dp_mib_lock);
 		gsw_mib_reset_30(0, 0);
 		gsw_mib_reset_30(1, 0);
 		tmu_reset_mib_all(flag);
@@ -1632,17 +1636,20 @@ int dp_clear_netif_mib_30(dp_subif_t *subif, void *priv, u32 flag)
 		memset(aggregate_mib_r, 0, sizeof(aggregate_mib_r));
 		memset(&last, 0, sizeof(last));
 		memset(&last_vap, 0, sizeof(last_vap));
+		spin_unlock_bh(&dp_mib_lock);
 		return 0;
 	}
 
 	if ((subif->port_id <= 0) ||
 	    subif->port_id >= PMAC_MAX_NUM)
 		return -1;
+
 	port_id = subif->port_id;
 	vap = GET_VAP(subif->subif,
 		      PORT_INFO(0, port_id, vap_offset),
 		      PORT_INFO(0, port_id, vap_mask));
 	if ((flag & DP_F_STATS_SUBIF)) {
+		spin_lock_bh(&dp_mib_lock);
 		/*clear the specific subif mib counter */
 		clear_gsw_itf_mib(subif, flag);
 		dp_clear_mib(subif, 0);
@@ -1663,9 +1670,10 @@ int dp_clear_netif_mib_30(dp_subif_t *subif, void *priv, u32 flag)
 		memset(&last_vap[port_id][vap], 0,
 		       sizeof(last_vap[port_id][vap]));
 		/*how about last[] & last_vap[]*/
+		spin_unlock_bh(&dp_mib_lock);
 		return 0;
 	}
-
+	spin_lock_bh(&dp_mib_lock);
 	/*Clear port based RMON mib */
 	DP_DEBUG(DP_DBG_FLAG_MIB,
 		 "dp_clear_netif_mib port %u mib flag=0x%x\n", port_id, flag);
@@ -1739,6 +1747,7 @@ int dp_clear_netif_mib_30(dp_subif_t *subif, void *priv, u32 flag)
 	subif->subif = -1;
 	clear_gsw_itf_mib(subif, 0);
 	memset(&last[port_id], 0, sizeof(last[port_id]));
+	spin_unlock_bh(&dp_mib_lock);
 	return 0;
 }
 
@@ -2060,7 +2069,8 @@ ssize_t proc_mib_vap_write(struct file *file, const char *buf, size_t count,
 int mib_wraparound_thread(void *data)
 {
 	while (1) {
-		mib_wraparound_timer_poll(0);
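+		/* poll_interval == 0 pauses wraparound polling */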
+		if (poll_interval)
+			mib_wraparound_timer_poll(0);
 		msleep(poll_interval / HZ * 1000 / PMAC_MAX_NUM / 2);
 		DP_DEBUG(DP_DBG_FLAG_MIB, "mib_wraparound_thread\n");
 	}
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_proc.c b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_proc.c
index e27dc6285..7a732d8cb 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_proc.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip30/datapath_proc.c
@@ -176,10 +176,11 @@ static ssize_t proc_parser_write(struct file *file, const char *buf,
 	char *param_list[20];
 	s8 cpu = 0, mpe1 = 0, mpe2 = 0, mpe3 = 0, flag = 0;
 	static int pce_rule_id;
-	GSW_PCE_rule_t pce;
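+	/* static: GSW_PCE_rule_t is large; kept off the stack, cleared below */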
+	static GSW_PCE_rule_t pce;
 	int inst = 0;
 	struct core_ops *gsw_handle;
 
+	memset(&pce, 0, sizeof(pce));
 	gsw_handle = dp_port_prop[inst].ops[GSWIP_R];
 	len = (sizeof(str) > count) ? count : sizeof(str) - 1;
 	len -= copy_from_user(str, buf, len);
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/Kconfig b/drivers/net/ethernet/lantiq/datapath/gswip31/Kconfig
index df86a9a1a..d5c8cbac2 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/Kconfig
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/Kconfig
@@ -1,33 +1,49 @@
 menuconfig LTQ_DATAPATH_HAL_GSWIP31
-        bool "Datapath HAL_GSWIP31"
-        default y
-        depends on LTQ_DATAPATH
-        ---help---
-          Datapath Lib is to provide common rx/tx wrapper Lib without taking
-          care of much HW knowledge and also provide common interface for legacy
-          devices and different HW like to CBM or LRO.
-          Take note:
-            All devices need to register to datapath Lib first
+	bool "Datapath HAL_GSWIP31"
+	default y
+	depends on LTQ_DATAPATH
+	---help---
+	  Datapath Lib provides a common rx/tx wrapper library without
+	  requiring much HW knowledge, and a common interface for legacy
+	  devices and different HW such as CBM or LRO.
+	  Take note: all devices need to register with the Datapath Lib first.
 
 if LTQ_DATAPATH_HAL_GSWIP31
 config LTQ_DATAPATH_HAL_GSWIP31_MIB
-        bool "Datapath aggregated mib support"
-        depends on LTQ_DATAPATH_HAL_GSWIP30 && SOC_GRX500 && LTQ_TMU && LTQ_PPA_TMU_MIB_SUPPORT
-        default y
-        ---help---
-           It is to aggregate GSWIP-L/R, TMU and driver's MIB counter
+	bool "Datapath aggregated mib support"
+	depends on LTQ_DATAPATH_HAL_GSWIP30 && SOC_GRX500 && LTQ_TMU && LTQ_PPA_TMU_MIB_SUPPORT
+	default y
+	---help---
+	  Aggregates the GSWIP-L/R, TMU and driver MIB counters.
+
 config LTQ_DATAPATH_HAL_GSWIP30_CPUFREQ
-        bool "Datapath DFS(COC) support"
-        depends on LTQ_DATAPATH && LTQ_CPUFREQ && LTQ_ETHSW_API
-        default y
-        ---help---
-           It is to support DFS(COC) in Datapath
+	bool "Datapath DFS(COC) support"
+	depends on LTQ_DATAPATH && LTQ_CPUFREQ && LTQ_ETHSW_API
+	default y
+	---help---
+	  Supports DFS (CoC) in the Datapath.
+
 config LTQ_DATAPATH_DDR_SIMULATE_GSWIP31
-        bool "Datapath Simulation GSWIP3.1 based on GRX500 board"
-        default n
-        depends on LTQ_DATAPATH_DBG
-        ---help---
-          Datapath Debug Tool for GSWIP DDR simulation
-                  Only for debugging purpose
-                  By default it should be disabled.
+	bool "Datapath Simulation GSWIP3.1 based on GRX500 board"
+	default n
+	depends on LTQ_DATAPATH_DBG
+	---help---
+	  Datapath Debug Tool for GSWIP DDR simulation
+	  Only for debugging purpose
+	  By default it should be disabled.
+
+config LTQ_DATAPATH_DUMMY_QOS
+	bool "Datapath dummy QoS based on the slim QoS driver or the real QoS driver's falcon_test API"
+	default y
+	depends on LTQ_PPV4_QOS_SLIM || LTQ_PPV4_QOS || LTQ_PPV4
+
+config LTQ_DATAPATH_DUMMY_QOS_VIA_FALCON_TEST
+	bool "Datapath dummy QoS via the PPv4 QoS driver's falcon_test API, like the slim driver"
+	default y
+	depends on (LTQ_PPV4_QOS || LTQ_PPV4) && !LTQ_PPV4_QOS_SLIM && LTQ_DATAPATH_DUMMY_QOS
+
+config LTQ_DATAPATH_QOS_HAL
+	bool "Datapath QoS HAL"
+	default n
+	depends on (LTQ_PPV4_QOS || LTQ_PPV4) && !LTQ_DATAPATH_DUMMY_QOS_VIA_FALCON_TEST
 endif
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/Makefile b/drivers/net/ethernet/lantiq/datapath/gswip31/Makefile
index 4473af94f..c9a605e07 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/Makefile
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/Makefile
@@ -2,7 +2,9 @@ ifneq ($(CONFIG_LTQ_DATAPATH_DDR_SIMULATE_GSWIP31),)
 obj-$(CONFIG_LTQ_DATAPATH) += datapath_gswip_simulate.o
 endif
 
-obj-$(CONFIG_LTQ_DATAPATH) += datapath_misc.o datapath_gswip.o datapath_proc.o datapath_ppv4.o datapath_lookup_proc.o datapath_ppv4_api.o 
+obj-$(CONFIG_LTQ_DATAPATH) += datapath_misc.o datapath_gswip.o datapath_proc.o
+obj-$(CONFIG_LTQ_DATAPATH) += datapath_debugfs.o datapath_ppv4.o
+obj-$(CONFIG_LTQ_DATAPATH) += datapath_lookup_proc.o datapath_ppv4_api.o
 
 ifneq ($(CONFIG_LTQ_DATAPATH_HAL_GSWIP31_MIB),)
 obj-$(CONFIG_LTQ_DATAPATH) += datapath_mib.o
@@ -13,5 +15,5 @@ obj-$(CONFIG_LTQ_DATAPATH) += datapath_coc.o
 endif
 
 ifneq ($(CONFIG_LTQ_DATAPATH_SWITCHDEV),)
-obj-$(CONFIG_LTQ_DATAPATH) += datapath_switchdev.o
+obj-$(CONFIG_LTQ_DATAPATH) += datapath_switchdev.o datapath_ext_vlan.o
 endif
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_debugfs.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_debugfs.c
new file mode 100644
index 000000000..d2bbea0be
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_debugfs.c
@@ -0,0 +1,166 @@
+#include <linux/klogging.h>
+#include <net/datapath_proc_api.h>	/*for proc api */
+#include <net/datapath_api.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "../datapath.h"
+#include "datapath_misc.h"
+
+#include "../datapath_swdev.h"
+
+#define PRINTK  LOGF_KLOG_CONT
+#define DP_MIB_Q "qos_mib"
+#define PRINT_Q_MIB(i, c, x, y, z)\
+	PR_INFO("Q[%03d]:0x%08x 0x%08x 0x%08x 0x%08x\n",\
+		i, c, x, y, z)
+
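+/*
+ * Write handler for the qos_mib debugfs file:
+ *   echo q <start qid> <end qid>      - dump queue occupancy and MIB counters
+ *   echo p <start port> <end port>    - dump port green/yellow byte counters
+ */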
+static ssize_t dp_qos_write(struct file *file, const char __user *buf,
+		      size_t count, loff_t *ppos)
+{
+	int len;
+	char str[64];
+	char *p = str;
+	char *param_list[5] = { 0 };
+	int num = 0;
+	int start, end, i;
+	int inst = 0;
+	u32 c;
+	u32 total_accept_pkt, total_drop_pkt, red_drop_pkt;
+	u32 green_b, yellow_b;
+
+	len = (sizeof(str) > count) ? count : sizeof(str) - 1;
+	len -= copy_from_user(str, buf, len);
+	str[len] = 0;
+	if (!len)
+		return count;
+
+	num = dp_split_buffer(p, param_list, ARRAY_SIZE(param_list));
+	if ((num == 1) && (strncmp(param_list[0], "help", 4) == 0)) {
+		goto help;
+	} else if (num >= 1) {
+		if (dp_strcmpi(param_list[0], "q") == 0) {
+			start = 0;
+			end = MAX_QUEUE;
+			if (param_list[1])
+				start = dp_atoi(param_list[1]);
+			if (param_list[2])
+				end = dp_atoi(param_list[2]);
+			if (start < 0)
+				start = 0;
+			if (end <= start)
+				end = start + 1;
+			if (end > MAX_QUEUE)
+				end = MAX_QUEUE;
+			PR_INFO("%5s:%10s %10s  %10s  %10s (%d-%d)\n",
+				"QID", "qocc(b)", "accept(p)", "drop(p)", "red_drop(p)",
+				start, end);
+			for (i = start; i < end; i++) {
+				get_q_qocc(inst, i, &c);
+				get_q_mib(inst, i, &total_accept_pkt,
+					  &total_drop_pkt, &red_drop_pkt);
+				PRINT_Q_MIB(i, c, total_accept_pkt,
+					    total_drop_pkt, red_drop_pkt);
+			}
+
+		} else if (dp_strcmpi(param_list[0], "p") == 0) {
+			start = 0;
+			end = MAX_QUEUE;
+			if (param_list[1])
+				start = dp_atoi(param_list[1]);
+			if (param_list[2])
+				end = dp_atoi(param_list[2]);
+			if (start < 0)
+				start = 0;
+			if (end <= start)
+				end = start + 1;
+			if (end > MAX_QUEUE)
+				end = MAX_QUEUE;
+			PR_INFO("Port Id :green(b)   yellow(b)  (%d_%d)\n",
+				start, end);
+			for (i = start; i < end; i++) {
+				get_p_mib(inst, i, &green_b, &yellow_b);
+				PR_INFO("P[%03d]: 0x%08x 0x%08x\n",
+					i, green_b, yellow_b);
+			}
+		}
+	}
+	return count;
+help:	/* param_list: [0]=cmd [1]=start [2]=end */
+	PR_INFO("queue mib: echo q <start qid> <end qid> > %s\n",
+		"/sys/kernel/debug/datapath/" DP_MIB_Q);
+	PR_INFO("port  mib: echo p <start port_id> <end port_id> > %s\n",
+		"/sys/kernel/debug/datapath/" DP_MIB_Q);
+	return count;
+}
+
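+/* Read handler: print only queues and ports with non-zero counters */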
+static int dp_qos_seq_read(struct seq_file *s, void *v)
+{
+	int start, end, i;
+	int inst = 0;
+	u32 c;
+	u32 total_accept_pkt, total_drop_pkt, red_drop_pkt;
+	u32 green_b, yellow_b;
+
+	start = 0;
+	end = MAX_QUEUE;
+	seq_printf(s, "%5s:%10s %10s  %10s  %10s (%d-%d)\n",
+		"QID", "qocc(b)", "accept(p)", "drop(p)", "red_drop(p)",
+		start, end);
+	for (i = start; i < end; i++) {
+		get_q_qocc(inst, i, &c);
+		get_q_mib(inst, i, &total_accept_pkt,
+			  &total_drop_pkt, &red_drop_pkt);
+		if (c || total_accept_pkt || total_drop_pkt ||
+		    red_drop_pkt)
+			seq_printf(s, "Q[%03d]:0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   i, c, total_accept_pkt, total_drop_pkt,
+				   red_drop_pkt);
+	}
+	seq_printf(s, "Port Id :green(b)   yellow(b)  (%d_%d)\n",
+		start, end);
+	for (i = start; i < end; i++) {
+		get_p_mib(inst, i, &green_b, &yellow_b);
+		if (green_b || yellow_b)
+			seq_printf(s, "P[%03d]: 0x%08x 0x%08x\n",
+				   i, green_b, yellow_b);
+	}
+
+	return 0;
+}
+
+static int dp_qos_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dp_qos_seq_read, inode->i_private);
+}
+
+static const struct file_operations dp_qos_fops = {
+	.open = dp_qos_open,
+	.read = seq_read,
+	.write = dp_qos_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
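+/* Create the "<name>" debugfs directory and the qos_mib file under it */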
+int datapath_debugfs_init(struct datapath_ctrl *pctrl)
+{
+	char datapath_dir[64] = {0};
+	struct dentry *file;
+
+	strlcpy(datapath_dir, pctrl->name, sizeof(datapath_dir));
+	pctrl->debugfs = debugfs_create_dir(datapath_dir, NULL);
+	if (!pctrl->debugfs)
+		return -ENOMEM;
+
+	file = debugfs_create_file("qos_mib", 0644, pctrl->debugfs,
+		   pctrl, &dp_qos_fops);
+	if (!file)
+		goto err;
+
+	return 0;
+err:
+	debugfs_remove_recursive(pctrl->debugfs);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(datapath_debugfs_init);
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ext_vlan.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ext_vlan.c
new file mode 100644
index 000000000..07489b874
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ext_vlan.c
@@ -0,0 +1,666 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/datapath_api.h>
+#include "../datapath.h"
+
+/*
+ * Structure passed in by user
+ */
+#if 0
+struct vlan_info1 {
+	/* Changed this TPID field to GSWIP API enum type.
+	 * It is not flexible for arbitrary TPIDs; only the following are supported:
+	 * 1. ignore (don't care)
+	 * 2. 0x8100
+	 * 3. Value configured in the VTE Type register
+	 */
+	GSW_ExtendedVlanFilterTpid_t tpid;  /* TPID like 0x8100, 0x8800 */
+	u16 vid;  /*VLAN ID*/
+	/*note: user priority/CFI are both don't care */
+	/* DSCP to Priority value mapping is possible */
+};
+
+struct vlan1 {
+	int bp;  /*assigned bp for this single VLAN dev */
+	struct vlan_info1 outer_vlan; /*out vlan info */
+	/* Add Ethernet type with GSWIP API enum type.
+	 * Following types are supported:
+	 * 1. ignore	(don't care)
+	 * 2. IPoE  	(0x0800)
+	 * 3. PPPoE 	(0x8863 or 0x8864)
+	 * 4. ARP   	(0x0806)
+	 * 5. IPv6 IPoE	(0x86DD)
+	 */
+	GSW_ExtendedVlanFilterEthertype_t ether_type;
+};
+
+struct vlan2 {
+	int bp;  /*assigned bp for this double VLAN dev */
+	struct vlan_info1 outer_vlan;  /*out vlan info */
+	struct vlan_info1 inner_vlan;   /*in vlan info */
+	/* Add Ethernet type with GSWIP API enum type.
+	 * Following types are supported:
+	 * 1. ignore	(don't care)
+	 * 2. IPoE  	(0x0800)
+	 * 3. PPPoE 	(0x8863 or 0x8864)
+	 * 4. ARP   	(0x0806)
+	 * 5. IPv6 IPoE	(0x86DD)
+	 */
+	GSW_ExtendedVlanFilterEthertype_t ether_type;
+};
+#endif
+
+/*
+ * Structure only used by function set_gswip_ext_vlan
+ */
+struct priv_ext_vlan {
+	/* number of bridge ports that have a VLAN */
+	u32 num_bp;
+	/* bridge port list (vlan1_list + vlan2_list) */
+	u32 bp[1];
+};
+
+/*
+ * Supporting Functions
+ */
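+/*
+ * Program the three fall-through entries (index 0..2): traffic matching
+ * no explicit VLAN rule is reassigned to the default bridge port "bp".
+ */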
+static int update_vlan0(struct core_ops *ops, u32 bp,
+			GSW_EXTENDEDVLAN_config_t *vcfg)
+{
+	GSW_return_t ret;
+	u32 block = vcfg->nExtendedVlanBlockId;
+
+	memset(vcfg, 0, sizeof(*vcfg));
+	vcfg->nExtendedVlanBlockId = block;
+	vcfg->nEntryIndex = 0;
+	vcfg->sFilter.sOuterVlan.eType = GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG;
+	vcfg->sFilter.eEtherType = GSW_EXTENDEDVLAN_FILTER_ETHERTYPE_NO_FILTER;
+	vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+	vcfg->sTreatment.nNewBridgePortId = bp;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	memset(vcfg, 0, sizeof(*vcfg));
+	vcfg->nExtendedVlanBlockId = block;
+	vcfg->nEntryIndex = 1;
+	vcfg->sFilter.sOuterVlan.eType = GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT;
+	vcfg->sFilter.sInnerVlan.eType = GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG;
+	vcfg->sFilter.eEtherType = GSW_EXTENDEDVLAN_FILTER_ETHERTYPE_NO_FILTER;
+	vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+	vcfg->sTreatment.nNewBridgePortId = bp;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	memset(vcfg, 0, sizeof(*vcfg));
+	vcfg->nExtendedVlanBlockId = block;
+	vcfg->nEntryIndex = 2;
+	vcfg->sFilter.sInnerVlan.eType = GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT;
+	vcfg->sFilter.eEtherType = GSW_EXTENDEDVLAN_FILTER_ETHERTYPE_NO_FILTER;
+	vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+	vcfg->sTreatment.nNewBridgePortId = bp;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	return 0;
+}
+
+static int update_vlan1(struct core_ops *ops, int base, int num,
+			struct vlan1 *vlan_list, int drop,
+			GSW_EXTENDEDVLAN_config_t *vcfg)
+{
+	GSW_return_t ret;
+	u32 block = vcfg->nExtendedVlanBlockId;
+	int i;
+
+	for (i = 0; i < num; i++, base += 2) {
+		memset(vcfg, 0, sizeof(*vcfg));
+		vcfg->nExtendedVlanBlockId = block;
+		vcfg->nEntryIndex = base;
+
+		vcfg->sFilter.sOuterVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NORMAL;
+		vcfg->sFilter.sOuterVlan.bPriorityEnable = LTQ_FALSE;
+		vcfg->sFilter.sOuterVlan.bVidEnable = LTQ_TRUE;
+		vcfg->sFilter.sOuterVlan.nVidVal = vlan_list[i].outer_vlan.vid;
+		vcfg->sFilter.sOuterVlan.eTpid = vlan_list[i].outer_vlan.tpid;
+		vcfg->sFilter.sOuterVlan.eDei =
+			GSW_EXTENDEDVLAN_FILTER_DEI_NO_FILTER;
+
+		vcfg->sFilter.sInnerVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG;
+
+		vcfg->sFilter.eEtherType = vlan_list[i].ether_type;
+
+		if (drop) {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_DISCARD_UPSTREAM;
+		} else {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_REMOVE_1_TAG;
+			vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+			vcfg->sTreatment.nNewBridgePortId = vlan_list[i].bp;
+		}
+
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+		if (ret != GSW_statusOk)
+			return -EIO;
+
+		memset(vcfg, 0, sizeof(*vcfg));
+		vcfg->nExtendedVlanBlockId = block;
+		vcfg->nEntryIndex = base + 1;
+
+		vcfg->sFilter.sOuterVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NORMAL;
+		vcfg->sFilter.sOuterVlan.bPriorityEnable = LTQ_FALSE;
+		vcfg->sFilter.sOuterVlan.bVidEnable = LTQ_TRUE;
+		vcfg->sFilter.sOuterVlan.nVidVal = vlan_list[i].outer_vlan.vid;
+		vcfg->sFilter.sOuterVlan.eTpid = vlan_list[i].outer_vlan.tpid;
+		vcfg->sFilter.sOuterVlan.eDei =
+			GSW_EXTENDEDVLAN_FILTER_DEI_NO_FILTER;
+
+		vcfg->sFilter.sInnerVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER;
+
+		vcfg->sFilter.eEtherType = vlan_list[i].ether_type;
+
+		if (drop) {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_DISCARD_UPSTREAM;
+		} else {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_REMOVE_1_TAG;
+			vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+			vcfg->sTreatment.nNewBridgePortId = vlan_list[i].bp;
+		}
+
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+		if (ret != GSW_statusOk)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int update_vlan2(struct core_ops *ops, int base, int num,
+			struct vlan2 *vlan_list, int drop,
+			GSW_EXTENDEDVLAN_config_t *vcfg)
+{
+	GSW_return_t ret;
+	u32 block = vcfg->nExtendedVlanBlockId;
+	int i;
+
+	for (i = 0; i < num; i++, base++) {
+		memset(vcfg, 0, sizeof(*vcfg));
+		vcfg->nExtendedVlanBlockId = block;
+		vcfg->nEntryIndex = base;
+
+		vcfg->sFilter.sOuterVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NORMAL;
+		vcfg->sFilter.sOuterVlan.bPriorityEnable = LTQ_FALSE;
+		vcfg->sFilter.sOuterVlan.bVidEnable = LTQ_TRUE;
+		vcfg->sFilter.sOuterVlan.nVidVal = vlan_list[i].outer_vlan.vid;
+		vcfg->sFilter.sOuterVlan.eTpid = vlan_list[i].outer_vlan.tpid;
+		vcfg->sFilter.sOuterVlan.eDei =
+			GSW_EXTENDEDVLAN_FILTER_DEI_NO_FILTER;
+
+		vcfg->sFilter.sInnerVlan.eType =
+			GSW_EXTENDEDVLAN_FILTER_TYPE_NORMAL;
+		vcfg->sFilter.sInnerVlan.bPriorityEnable = LTQ_FALSE;
+		vcfg->sFilter.sInnerVlan.bVidEnable = LTQ_TRUE;
+		vcfg->sFilter.sInnerVlan.nVidVal = vlan_list[i].inner_vlan.vid;
+		vcfg->sFilter.sInnerVlan.eTpid = vlan_list[i].inner_vlan.tpid;
+		vcfg->sFilter.sInnerVlan.eDei =
+			GSW_EXTENDEDVLAN_FILTER_DEI_NO_FILTER;
+
+		vcfg->sFilter.eEtherType = vlan_list[i].ether_type;
+
+		if (drop) {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_DISCARD_UPSTREAM;
+		} else {
+			vcfg->sTreatment.eRemoveTag =
+				GSW_EXTENDEDVLAN_TREATMENT_REMOVE_2_TAG;
+			vcfg->sTreatment.bReassignBridgePort = LTQ_TRUE;
+			vcfg->sTreatment.nNewBridgePortId = vlan_list[i].bp;
+		}
+
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+		if (ret != GSW_statusOk)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int update_ctp(struct core_ops *ops, struct ext_vlan_info *vlan)
+{
+	static GSW_CTP_portConfig_t ctp;
+	static GSW_EXTENDEDVLAN_config_t vcfg;
+	GSW_return_t ret;
+	ltq_bool_t enable;
+	u32 block;
+	GSW_EXTENDEDVLAN_alloc_t alloc;
+	int i;
+
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "bp=%d v1=%d v1_d=%d v2=%d v2_d=%d\n",
+		 vlan->bp, vlan->n_vlan1, vlan->n_vlan1_drop,
+		 vlan->n_vlan2, vlan->n_vlan2_drop);
+
+	memset(&ctp, 0, sizeof(ctp));
+	memset(&alloc, 0, sizeof(GSW_EXTENDEDVLAN_alloc_t));
+
+	ctp.nLogicalPortId = vlan->logic_port;
+	ctp.nSubIfIdGroup = vlan->subif_grp;
+	ctp.eMask = GSW_CTP_PORT_CONFIG_MASK_INGRESS_VLAN;
+	ret = ops->gsw_ctp_ops.CTP_PortConfigGet(ops, &ctp);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	enable = ctp.bIngressExtendedVlanEnable;
+	block = ctp.nIngressExtendedVlanBlockId;
+
+	/* Disable the previous VLAN block operation, if enabled, and free
+	 * the previously allocated VLAN block so that a new block can be
+	 * allocated and configured on the CTP.
+	 */
+	if (enable) {
+		ctp.bIngressExtendedVlanEnable = LTQ_FALSE;
+		ret = ops->gsw_ctp_ops.CTP_PortConfigSet(ops, &ctp);
+		if (ret != GSW_statusOk) {
+			PR_INFO("ERROR: failed to disable ingress VLAN operation in CTP\n");
+			return -EIO;
+		}
+		PR_INFO("ingress VLAN operation disabled in CTP\n");
+
+		alloc.nExtendedVlanBlockId = block;
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+		if (ret != GSW_statusOk) {
+			PR_INFO("VLAN Free fail\n");
+			return -EIO;
+		}
+		PR_INFO("VLAN Free success\n");
+	}
+	memset(&alloc, 0, sizeof(GSW_EXTENDEDVLAN_alloc_t));
+
+	alloc.nNumberOfEntries += vlan->n_vlan1 * 2;
+	alloc.nNumberOfEntries += vlan->n_vlan2;
+	alloc.nNumberOfEntries += vlan->n_vlan1_drop * 2;
+	alloc.nNumberOfEntries += vlan->n_vlan2_drop;
+	if (alloc.nNumberOfEntries == 0) {
+		PR_INFO("nNumberOfEntries == 0 , returning to caller\n");
+		return 0;
+	}
+
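+	/* reserve 3 extra entries for the update_vlan0() fall-through rules */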
+	alloc.nNumberOfEntries += 3;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Alloc(ops, &alloc);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	vcfg.nExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+	ret = update_vlan0(ops, vlan->bp, &vcfg);
+
+	if (ret < 0)
+		goto UPDATE_ERROR;
+
+	i = 3;
+	ret = update_vlan2(ops, i, vlan->n_vlan2, vlan->vlan2_list, 0, &vcfg);
+
+	if (ret < 0)
+		goto UPDATE_ERROR;
+
+	i += vlan->n_vlan2;
+	ret = update_vlan2(ops, i, vlan->n_vlan2_drop, vlan->vlan2_drop_list, 1, &vcfg);
+
+	if (ret < 0)
+		goto UPDATE_ERROR;
+
+	i += vlan->n_vlan2_drop;
+	ret = update_vlan1(ops, i, vlan->n_vlan1, vlan->vlan1_list, 0, &vcfg);
+
+	if (ret < 0)
+		goto UPDATE_ERROR;
+
+	i += vlan->n_vlan1;
+	ret = update_vlan1(ops, i, vlan->n_vlan1_drop, vlan->vlan1_drop_list, 1, &vcfg);
+
+	if (ret < 0)
+		goto UPDATE_ERROR;
+
+	ctp.bIngressExtendedVlanEnable = LTQ_TRUE;
+	ctp.nIngressExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+	ctp.nIngressExtendedVlanBlockSize = 0;
+	ret = ops->gsw_ctp_ops.CTP_PortConfigSet(ops, &ctp);
+
+	if (ret != GSW_statusOk) {
+		PR_INFO("Enable ingress VLAN in CTP fail\n");
+		return -EIO;
+	}
+	PR_INFO("Enable ingress VLAN in CTP success\n");
+	return 0;
+UPDATE_ERROR:
+	ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+	return ret;
+}
+
+static int bp_add_vlan1(struct core_ops *ops, struct vlan1 *vlan,
+			GSW_BRIDGE_portConfig_t *bpcfg, GSW_EXTENDEDVLAN_config_t *vcfg)
+{
+	int ret;
+	GSW_EXTENDEDVLAN_alloc_t alloc = {0};
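+	/* (outer, inner) filter-type pairs; one egress rule is set per pair */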
+	GSW_ExtendedVlanFilterType_t types[6] = {
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT
+	};
+	u32 i;
+
+	memset(bpcfg, 0, sizeof(*bpcfg));
+	bpcfg->nBridgePortId = vlan->bp;
+	bpcfg->eMask = GSW_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigGet(ops, bpcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	/* This bridge port should have no egress VLAN configured yet */
+	if (bpcfg->bEgressExtendedVlanEnable != LTQ_FALSE)
+		return -EINVAL;
+
+	alloc.nNumberOfEntries = ARRAY_SIZE(types) / 2;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Alloc(ops, &alloc);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	for (i = 0; i < alloc.nNumberOfEntries; i++) {
+		memset(vcfg, 0, sizeof(*vcfg));
+		vcfg->nExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+		vcfg->nEntryIndex = i;
+		vcfg->sFilter.sOuterVlan.eType = types[i * 2];
+		vcfg->sFilter.sInnerVlan.eType = types[i * 2 + 1];
+		vcfg->sFilter.eEtherType = vlan->ether_type; /* filter ether_type as ingress */
+
+		vcfg->sTreatment.bAddOuterVlan = LTQ_TRUE;
+		vcfg->sTreatment.sOuterVlan.ePriorityMode =
+			GSW_EXTENDEDVLAN_TREATMENT_PRIORITY_VAL;
+		vcfg->sTreatment.sOuterVlan.ePriorityVal = 0;
+		vcfg->sTreatment.sOuterVlan.eVidMode =
+			GSW_EXTENDEDVLAN_TREATMENT_VID_VAL;
+		vcfg->sTreatment.sOuterVlan.eVidVal = vlan->outer_vlan.vid;
+		vcfg->sTreatment.sOuterVlan.eTpid = vlan->outer_vlan.tpid;
+		vcfg->sTreatment.sOuterVlan.eDei =
+			GSW_EXTENDEDVLAN_TREATMENT_DEI_0;
+
+		vcfg->sTreatment.bAddInnerVlan = LTQ_FALSE;
+
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+		if (ret != GSW_statusOk) {
+			PR_INFO("Failed in updating Extended VLAN entry (%u, %u).\n",
+				alloc.nExtendedVlanBlockId, i);
+			ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+			return -EIO;
+		}
+	}
+
+	bpcfg->bEgressExtendedVlanEnable = LTQ_TRUE;
+	bpcfg->nEgressExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+	bpcfg->nEgressExtendedVlanBlockSize = 0;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigSet(ops, bpcfg);
+
+	if (ret != GSW_statusOk) {
+		PR_INFO("Failed in attaching Extended VLAN to Bridge Port.\n");
+		ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int bp_add_vlan2(struct core_ops *ops, struct vlan2 *vlan,
+			GSW_BRIDGE_portConfig_t *bpcfg, GSW_EXTENDEDVLAN_config_t *vcfg)
+{
+	int ret;
+	GSW_EXTENDEDVLAN_alloc_t alloc = {0};
+	GSW_ExtendedVlanFilterType_t types[6] = {
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_TAG,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_NO_FILTER,
+		GSW_EXTENDEDVLAN_FILTER_TYPE_DEFAULT
+	};
+	u32 i;
+
+	memset(bpcfg, 0, sizeof(*bpcfg));
+	bpcfg->nBridgePortId = vlan->bp;
+	bpcfg->eMask = GSW_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigGet(ops, bpcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	/* This bridge port should have no egress VLAN configured yet */
+	if (bpcfg->bEgressExtendedVlanEnable != LTQ_FALSE)
+		return -EINVAL;
+
+	alloc.nNumberOfEntries = ARRAY_SIZE(types) / 2;
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Alloc(ops, &alloc);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	for (i = 0; i < alloc.nNumberOfEntries; i++) {
+		memset(vcfg, 0, sizeof(*vcfg));
+		vcfg->nExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+		vcfg->nEntryIndex = i;
+		vcfg->sFilter.sOuterVlan.eType = types[i * 2];
+		vcfg->sFilter.sInnerVlan.eType = types[i * 2 + 1];
+		vcfg->sFilter.eEtherType = vlan->ether_type; /* filter ether_type as ingress */
+
+		vcfg->sTreatment.bAddOuterVlan = LTQ_TRUE;
+		vcfg->sTreatment.sOuterVlan.ePriorityMode =
+			GSW_EXTENDEDVLAN_TREATMENT_PRIORITY_VAL;
+		vcfg->sTreatment.sOuterVlan.ePriorityVal = 0;
+		vcfg->sTreatment.sOuterVlan.eVidMode =
+			GSW_EXTENDEDVLAN_TREATMENT_VID_VAL;
+		vcfg->sTreatment.sOuterVlan.eVidVal = vlan->outer_vlan.vid;
+		vcfg->sTreatment.sOuterVlan.eTpid = vlan->outer_vlan.tpid;
+		vcfg->sTreatment.sOuterVlan.eDei =
+			GSW_EXTENDEDVLAN_TREATMENT_DEI_0;
+
+		vcfg->sTreatment.bAddInnerVlan = LTQ_TRUE;
+		vcfg->sTreatment.sInnerVlan.ePriorityMode =
+			GSW_EXTENDEDVLAN_TREATMENT_PRIORITY_VAL;
+		vcfg->sTreatment.sInnerVlan.ePriorityVal = 0;
+		vcfg->sTreatment.sInnerVlan.eVidMode =
+			GSW_EXTENDEDVLAN_TREATMENT_VID_VAL;
+		vcfg->sTreatment.sInnerVlan.eVidVal = vlan->inner_vlan.vid;
+		vcfg->sTreatment.sInnerVlan.eTpid = vlan->inner_vlan.tpid;
+		vcfg->sTreatment.sInnerVlan.eDei =
+			GSW_EXTENDEDVLAN_TREATMENT_DEI_0;
+
+		ret = ops->gsw_extvlan_ops.ExtendedVlan_Set(ops, vcfg);
+
+		if (ret != GSW_statusOk) {
+			PR_INFO("Failed in updating Extended VLAN entry (%u, %u).\n",
+				alloc.nExtendedVlanBlockId, i);
+			ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+			return -EIO;
+		}
+	}
+
+	bpcfg->bEgressExtendedVlanEnable = LTQ_TRUE;
+	bpcfg->nEgressExtendedVlanBlockId = alloc.nExtendedVlanBlockId;
+	bpcfg->nEgressExtendedVlanBlockSize = 0;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigSet(ops, bpcfg);
+
+	if (ret != GSW_statusOk) {
+		PR_INFO("Failed in attaching Extended VLAN to Bridge Port.\n");
+		ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int bp_add_vlan(struct core_ops *ops, struct ext_vlan_info *vlan,
+		       int idx, GSW_BRIDGE_portConfig_t *bpcfg)
+{
+	static GSW_EXTENDEDVLAN_config_t vcfg;
+
+	if (idx < vlan->n_vlan1)
+		return bp_add_vlan1(ops, vlan->vlan1_list + idx, bpcfg, &vcfg);
+	else
+		return bp_add_vlan2(ops,
+				    vlan->vlan2_list + (idx - vlan->n_vlan1),
+				    bpcfg, &vcfg);
+}
+
+static int bp_rm_vlan(struct core_ops *ops, u32 bp,
+		      GSW_BRIDGE_portConfig_t *bpcfg)
+{
+	int ret;
+	GSW_EXTENDEDVLAN_alloc_t alloc = {0};
+
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "bp=%d\n", bp);
+
+	memset(bpcfg, 0, sizeof(*bpcfg));
+	bpcfg->nBridgePortId = bp;
+	bpcfg->eMask = GSW_BRIDGE_PORT_CONFIG_MASK_EGRESS_VLAN;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigGet(ops, bpcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	if (bpcfg->bEgressExtendedVlanEnable == LTQ_FALSE)
+		return 0;
+
+	alloc.nExtendedVlanBlockId = bpcfg->nEgressExtendedVlanBlockId;
+	bpcfg->bEgressExtendedVlanEnable = LTQ_FALSE;
+	ret = ops->gsw_brdgport_ops.BridgePort_ConfigSet(ops, bpcfg);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	ret = ops->gsw_extvlan_ops.ExtendedVlan_Free(ops, &alloc);
+
+	if (ret != GSW_statusOk)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Function for VLAN configure
+ */
+int set_gswip_ext_vlan(struct core_ops *ops, struct ext_vlan_info *vlan,
+		       int flag)
+{
+	static GSW_BRIDGE_portConfig_t bpcfg;
+
+	int ret;
+	struct priv_ext_vlan *old_priv, *new_priv;
+	size_t new_priv_size;
+	int i, j;
+
+	/* Assumptions:
+	 * 1. Every time this function is called, one and only one "vlan" is
+	 *    added or removed. No replacement happens.
+	 * 2. The content of 2 vlan_list (not vlan_drop_list) keeps sequence.
+	 *    Only one new item is inserted or removed. No re-ordering happens.
+	 * 3. Bridge ports are not freed before this function is called. This
+	 *    is not compulsary. Just in case any resource free function
+	 *    such as free VLAN block, disable VLAN from bridge port, fails.
+	 * 4. In egress, assume packet at bridge port has no VLAN before
+	 *    Extended VLAN processing to save 2 Extended VLAN entries. This
+	 *    can be changed later if required.
+	 */
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "ext vlan info bp=%d logic=%d subif=%d\n",
+		vlan->bp, vlan->logic_port, vlan->subif_grp);
+	ret = update_ctp(ops, vlan);
+
+	if (ret < 0)
+		return ret;
+
+	/* prepare new private data */
+	new_priv_size = sizeof(*new_priv);
+	new_priv_size += sizeof(new_priv->bp[0])
+			 * (vlan->n_vlan1 + vlan->n_vlan2);
+	new_priv = kmalloc(new_priv_size, GFP_KERNEL);
+
+	if (!new_priv)
+		return -ENOMEM;
+
+	new_priv->num_bp = (u32)(vlan->n_vlan1 + vlan->n_vlan2);
+	new_priv->bp[0] = vlan->bp;
+
+	for (i = 0, j = 1; i < vlan->n_vlan1; i++, j++)
+		new_priv->bp[j] = vlan->vlan1_list[i].bp;
+
+	for (i = 0; i < vlan->n_vlan2; i++, j++)
+		new_priv->bp[j] = vlan->vlan2_list[i].bp;
+
+	/* remember pointer to old private data */
+	old_priv = vlan->priv;
+
+	if (old_priv == NULL) {
+		/* no old private data, vlan is added */
+		ret = bp_add_vlan(ops, vlan, 0, &bpcfg);
+	} else if (old_priv->num_bp < new_priv->num_bp) {
+		/* vlan added */
+		for (i = 0;
+		     (u32)i < old_priv->num_bp
+		     && old_priv->bp[i] == new_priv->bp[i];
+		     i++);
+
+		ret = bp_add_vlan(ops, vlan, i, &bpcfg);
+	} else if (old_priv->num_bp > new_priv->num_bp) {
+		/* vlan removed */
+		for (i = 0;
+		     (u32)i < new_priv->num_bp
+		     && old_priv->bp[i] == new_priv->bp[i];
+		     i++);
+
+		bp_rm_vlan(ops, old_priv->bp[i], &bpcfg);
+		ret = 0;
+	} else
+		ret = 0;
+
+	/* return new private data in vlan structure */
+	vlan->priv = new_priv;
+
+	/* free old private data if any; kfree(NULL) is a no-op */
+	kfree(old_priv);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_gswip.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_gswip.c
index 826ce56b2..545f6856a 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_gswip.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_gswip.c
@@ -608,7 +608,8 @@ struct gsw_itf *ctp_port_assign(int inst, u8 ep, int bp_default,
 		return NULL;
 	}
 
-	PR_ERR("assign ep=%d with eMode=%d\n", ep, assign->emode);
+	DP_DEBUG(DP_DBG_FLAG_DBG, "assign ep=%d with eMode=%d\n",
+		 ep, assign->emode);
 	itf_assign[ep].mode = assign->emode;
 	itf_assign[ep].n = ctp_assign.nNumberOfCtpPort;
 	itf_assign[ep].start = ctp_assign.nFirstCtpPortId;
@@ -701,7 +702,7 @@ int alloc_bridge_port(int inst, int port_id, int subif, int fid, int bp_member)
  */
 int free_bridge_port(int inst, int bp)
 {
-	GSW_BRIDGE_portConfig_t tmp = {0}, tmp2 = {0};
+	GSW_BRIDGE_portConfig_t *tmp = NULL, *tmp2 = NULL;
 	int i, j;
 	GSW_return_t ret;
 	struct core_ops *gsw_handle;
@@ -709,40 +710,44 @@ int free_bridge_port(int inst, int bp)
 	gsw_handle = dp_port_prop[inst].ops[GSWIP_L];
 	if (bp == CPU_BP)
 		return 0;
+
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	tmp2 = kzalloc(sizeof(*tmp2), GFP_KERNEL);
+	if (!tmp || !tmp2) {
+		kfree(tmp);
+		kfree(tmp2);
+		return -ENOMEM;
+	}
+
 	/*read out this delting bridge port's member*/
-	memset(&tmp, 0, sizeof(tmp));
-	tmp.nBridgePortId = bp;
-	tmp.eMask = GSW_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP;
+	tmp->nBridgePortId = bp;
+	tmp->eMask = GSW_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP;
 	ret = gsw_core_api((dp_gsw_cb)gsw_handle->gsw_brdgport_ops
-			   .BridgePort_ConfigGet, gsw_handle, &tmp);
+			   .BridgePort_ConfigGet, gsw_handle, tmp);
 	if (ret != GSW_statusOk) {
 		PR_ERR("Failed GSW_BRIDGE_PORT_CONFIG_GET: %d\n", bp);
 		goto EXIT;
 	}
 	/*remove this delting bridgeport from other bridge port's member*/
-	memset(&tmp2, 0, sizeof(tmp2));
-	for (i = 0; i < ARRAY_SIZE(tmp.nBridgePortMap); i++) {
+	for (i = 0; i < ARRAY_SIZE(tmp->nBridgePortMap); i++) {
 		for (j = 0; j < 16 /*u16*/; j++) {
-			if (!(tmp.nBridgePortMap[i] & (1 << j)))
+			if (!(tmp->nBridgePortMap[i] & (1 << j)))
 				continue; /*not memmber bit set */
-			memset(tmp2.nBridgePortMap, 0,
-			       sizeof(tmp2.nBridgePortMap));
-			tmp2.eMask =
+			memset(tmp2->nBridgePortMap, 0,
+			       sizeof(tmp2->nBridgePortMap));
+			tmp2->eMask =
 				GSW_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP;
-			tmp2.nBridgePortId = i * 16 + j;
+			tmp2->nBridgePortId = i * 16 + j;
 			ret = gsw_core_api((dp_gsw_cb)gsw_handle
 					   ->gsw_brdgport_ops
 					   .BridgePort_ConfigGet, gsw_handle,
-					   &tmp2);
+					   tmp2);
 			if (ret != GSW_statusOk) {
 				PR_ERR("Failed GSW_BRIDGE_PORT_CONFIG_GET\n");
 				goto EXIT;
 			}
-			UNSET_BP_MAP(tmp2.nBridgePortMap, bp);
+			UNSET_BP_MAP(tmp2->nBridgePortMap, bp);
 			ret = gsw_core_api((dp_gsw_cb)gsw_handle
 					   ->gsw_brdgport_ops
 					   .BridgePort_ConfigSet, gsw_handle,
-					   &tmp2);
+					   tmp2);
 			if (ret != GSW_statusOk) {
 				PR_ERR("Failed GSW_BRIDGE_PORT_CONFIG_SET\n");
 				goto EXIT;
@@ -751,15 +756,18 @@ int free_bridge_port(int inst, int bp)
 	}
 EXIT:
 	/*FRee thie bridge port */
-	memset(&tmp, 0, sizeof(tmp));
-	tmp.nBridgePortId = bp;
-	tmp.eMask = GSW_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP;
+	memset(tmp, 0, sizeof(*tmp));
+	tmp->nBridgePortId = bp;
+	tmp->eMask = GSW_BRIDGE_PORT_CONFIG_MASK_BRIDGE_PORT_MAP;
 	ret = gsw_core_api((dp_gsw_cb)gsw_handle->gsw_brdgport_ops
-			   .BridgePort_Free, gsw_handle, &tmp);
-	if (ret != GSW_statusOk) {
+			   .BridgePort_Free, gsw_handle, tmp);
+	if (ret != GSW_statusOk)
 		PR_ERR("Failed to GSW_BRIDGE_PORT_FREE:%d\n", bp);
-		return -1;
-	}
+	kfree(tmp);
+	kfree(tmp2);
 	return 0;
 }
 
@@ -814,3 +822,30 @@ int dp_gswip_mac_entry_del(int bport, int fid, int inst, u8 *addr)
 	}
 	return 0;
 }
+
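+/* Disable PCP egress remarking on the CPU port (port 0) so the switch does
+ * not rewrite the VLAN priority of CPU-injected frames.
+ */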
+int cpu_vlan_mod_dis(int inst)
+{
+	GSW_QoS_portRemarkingCfg_t cfg = {0};
+	struct core_ops *ops;
+	int ret;
+
+	ops = dp_port_prop[inst].ops[GSWIP_L];
+
+	cfg.nPortId = 0;
+	ret = gsw_core_api((dp_gsw_cb)ops->gsw_qos_ops.
+			   QoS_PortRemarkingCfgGet, ops, &cfg);
+	if (ret != GSW_statusOk) {
+		PR_ERR("QoS_PortRemarkingCfgGet failed\n");
+		return -1;
+	}
+
+	cfg.bPCP_EgressRemarkingEnable = LTQ_FALSE;
+	ret = gsw_core_api((dp_gsw_cb)ops->gsw_qos_ops.
+			   QoS_PortRemarkingCfgSet, ops, &cfg);
+	if (ret != GSW_statusOk) {
+		PR_ERR("QoS_PortRemarkingCfgSet failed\n");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_lookup_proc.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_lookup_proc.c
index 62fcf97a5..9ab991884 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_lookup_proc.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_lookup_proc.c
@@ -192,7 +192,7 @@ static void lookup_pattern_match(int tmp_index)
 	int t[LOOKUP_FIELD_BITS] = { 0 };
 	int index;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "trying with tmp_index=0x%x with lookup_match_num=%d\n",
 		 tmp_index, lookup_match_num);
 	pattern_match_flag = PATTERN_MATCH_INIT;
@@ -202,12 +202,12 @@ static void lookup_pattern_match(int tmp_index)
 		index = tmp_index;
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
 			index |= (t[i] << i);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "don't care[14]=");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "don't care[14]=");
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
-			DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", t[i]);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", t[i]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
-		DP_DEBUG(DP_DBG_FLAG_DBG, "don't care index=%x\n", index);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "don't care index=%x\n", index);
 
 		if (lookup_tbl_flags[index] == ENTRY_USED) {
 			pattern_match_flag = PATTERN_MATCH_FAIL;
@@ -221,13 +221,13 @@ static void lookup_pattern_match(int tmp_index)
 			first_qid = qid;
 		} else if (first_qid != qid) {
 			pattern_match_flag = PATTERN_MATCH_FAIL;
-			DP_DEBUG(DP_DBG_FLAG_DBG, "first_qid(%d) != qid(%d)\n",
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "first_qid(%d) != qid(%d)\n",
 				 first_qid, qid);
 			goto END;
 		}
 	}
 
- END:
+END:
 	/*save the result if necessary here */
 	if (pattern_match_flag == PATTERN_MATCH_START) {
 		/*pass since still not fail yet */
@@ -250,7 +250,7 @@ static void lookup_pattern_match(int tmp_index)
 			if (lookup_mask1[i])
 				lookup_match_mask[lookup_match_num] |= 1 << i;
 		lookup_match_num++;
-		DP_DEBUG(DP_DBG_FLAG_DBG,
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 			 "left_n=%d lookup_mask_n=%d. Need reduce=%d\n",
 			 left_n, lookup_mask_n, (1 << lookup_mask_n));
 		left_n -= (1 << lookup_mask_n);
@@ -270,11 +270,11 @@ static int list_care_combination(int tmp_index)
 		index = tmp_index;
 		for (i = 0; i < LOOKUP_FIELD_BITS; i++)
 			index |= (t[i] << i);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "care index=%x\n", index);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "care t[14]=");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "care index=%x\n", index);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "care t[14]=");
 		for (k = 0; k < LOOKUP_FIELD_BITS; k++)
-			DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", t[k]);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", t[k]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 		lookup_pattern_match(index);
 	}
 
@@ -291,19 +291,19 @@ static int check_pattern(int *data, int r)
 	int i;
 
 	memset(lookup_mask1, 0, sizeof(lookup_mask1));
-	DP_DEBUG(DP_DBG_FLAG_DBG, "data:");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "data:");
 	for (i = 0; i < r; i++) {
-		DP_DEBUG(DP_DBG_FLAG_DBG, "%d ", data[i]);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d ", data[i]);
 		lookup_mask1[data[i]] = CARE_NOT_FLAG;
 	}
 	lookup_mask_n = r;
 	pattern_match_flag = 0;
-	DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
-	DP_DEBUG(DP_DBG_FLAG_DBG, "Don't care flag: ");
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "Don't care flag: ");
 	for (i = 0; i < LOOKUP_FIELD_BITS; i++)
-		DP_DEBUG(DP_DBG_FLAG_DBG, "%c ", lookup_mask1[i] ? 'x' : '0');
-	DP_DEBUG(DP_DBG_FLAG_DBG, "\n");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%c ", lookup_mask1[i] ? 'x' : '0');
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
 	list_care_combination(tmp_pattern_port_id << 4);
 	return 0;
@@ -333,7 +333,7 @@ int find_pattern(int port_id, struct seq_file *s, int qid)
 		if (left_n <= 0)
 			break;
 		c_not_care_walkthrought(arr, n, r);
-		DP_DEBUG(DP_DBG_FLAG_DBG, "left_n=%d\n", left_n);
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "left_n=%d\n", left_n);
 		if (!left_n)
 			break;
 	}
@@ -398,7 +398,7 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 	int num;
 
 	len = (count >= sizeof(data)) ? (sizeof(data) - 1) : count;
-	DP_DEBUG(DP_DBG_FLAG_DBG, "len=%d\n", len);
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP, "len=%d\n", len);
 
 	if (len <= 0) {
 		err = -EFAULT;
@@ -412,7 +412,7 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 		return count;
 	}
 
-	data[len - 1] = 0;	/* Make string */
+	data[len - 1] = 0; /* Make string */
 	num = dp_split_buffer(data, param_list, ARRAY_SIZE(param_list));
 
 	if (num <= 1)
@@ -429,16 +429,15 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 			return count;
 		}
 		qid = dp_atoi(param_list[2]);
-
 		/*workaround for mask support */
 		if (get_dont_care_lookup(param_list[1]) == 0) {
 			lookup_table_recursive(LOOKUP_FIELD_BITS - 1, 0, 1,
 					       qid);
 			return count;
 		}
+		PR_INFO("Set to queue[%u] done\n", qid);
 		set_lookup_qid_via_index(lookup_index, qid);
-		PR_INFO("Set lookup[%u 0x%x] ->     queue[%u]\n",
-			lookup_index, lookup_index, qid);
+		PR_INFO("Set to queue[%u] done\n", qid);
+		return count;
 	} else if ((dp_strcmpi(param_list[0], "get") == 0) ||
 		   (dp_strcmpi(param_list[0], "read") == 0)) {
 		if (get_dont_care_lookup(param_list[1]) == 0) {
@@ -449,6 +448,7 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 		qid = get_lookup_qid_via_index(lookup_index);
 		PR_INFO("Get lookup[%05u 0x%04x] ->     queue[%u]\n",
 			lookup_index, lookup_index, qid);
+		return count;
 	} else if (dp_strcmpi(param_list[0], "find") == 0) {
 		/*read out its all flags for specified qid */
 		int i;
@@ -467,11 +467,13 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 		int new_q = dp_atoi(param_list[2]);
 
 		lookup_table_remap(old_q, new_q);
+		PR_INFO("remap queue[%d] to queue[%d] done\n",
+			old_q, new_q);
 		return count;
 	}
 
 	goto help;
- help:
+help:
 	PR_INFO("Usage: echo set lookup_flags queue_id > /proc/dp/lookup\n");
 	PR_INFO("     : echo get lookup_flags > /proc/dp/lookup\n");
 	PR_INFO("     : echo find  <x> > /proc/dp/lookup\n");
@@ -495,7 +497,7 @@ void lookup_table_via_qid(int qid)
 {
 	u32 index, tmp, i, j, k, f = 0;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "Try to find all lookup flags mapped to qid %d\n", qid);
 	for (i = 0; i < 16; i++) {	/*ep */
 		for (j = 0; j < 16; j++) {	/*class */
@@ -519,7 +521,7 @@ void lookup_table_remap(int old_q, int new_q)
 {
 	u32 index, tmp, i, j, k, f = 0;
 
-	DP_DEBUG(DP_DBG_FLAG_DBG,
+	DP_DEBUG(DP_DBG_FLAG_LOOKUP,
 		 "Try to remap lookup flags mapped from old_q %d to new_q %d\n",
 		 old_q, new_q);
 	for (i = 0; i < 16; i++) {	/*ep */
@@ -531,9 +533,10 @@ void lookup_table_remap(int old_q, int new_q)
 					continue;
 				set_lookup_qid_via_index(index, new_q);
 				f = 1;
-				PR_INFO("Remap lookup[%05u 0x%04x] %s[%d]\n",
-					index, index,
-					"->     queue", new_q);
+				DP_DEBUG(DP_DBG_FLAG_LOOKUP,
+					 "Remap lookup[%05u 0x%04x] %s[%d]\n",
+					 index, index,
+					 "->     queue", new_q);
 			}
 		}
 	}
@@ -568,7 +571,6 @@ int get_dont_care_lookup(char *s)
 
 	if (len >= LOOKUP_FIELD_BITS + 1)
 		len = LOOKUP_FIELD_BITS + 1;
-	PR_INFO("len=%d\n", len);
 	for (i = len - 1, j = 0; i >= 1; i--, j++) {
 		if ((s[i] == 'x') || (s[i] == 'X')) {
 			lookup_mask2[j] = 1;
@@ -584,14 +586,15 @@ int get_dont_care_lookup(char *s)
 		}
 	}
 	if (flag) {
-		PR_INFO("\nGet lookup flag: ");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\nGet lookup flag: ");
 		for (i = LOOKUP_FIELD_BITS - 1; i >= 0; i--) {
 			if (lookup_mask2[i])
-				PR_INFO("x");
+				DP_DEBUG(DP_DBG_FLAG_LOOKUP, "x");
 			else
-				PR_INFO("%d", lookup_flags2[i]);
+				DP_DEBUG(DP_DBG_FLAG_LOOKUP, "%d",
+					lookup_flags2[i]);
 		}
-		PR_INFO("\n");
+		DP_DEBUG(DP_DBG_FLAG_LOOKUP, "\n");
 
 		return 0;
 	} else {
@@ -606,11 +609,12 @@ void lookup_table_recursive(int k, int tmp_index, int set_flag, int qid)
 	if (k < 0) {	/*finish recursive and start real read/set action */
 		if (set_flag) {
 			set_lookup_qid_via_index(tmp_index, qid);
-			PR_INFO("Set lookup[%05u 0x%04x] ->     queue[%d]\n",
-				tmp_index, tmp_index, qid);
+			DP_DEBUG(DP_DBG_FLAG_LOOKUP,
+				 "Set lookup[%05u/0x%04x] ->     queue[%d]\n",
+				 tmp_index, tmp_index, qid);
 		} else {
 			qid = get_lookup_qid_via_index(tmp_index);
-			PR_INFO("Get lookup[%05u 0x%04x] ->     queue[%d]\n",
+			PR_INFO("Get lookup[%05u/0x%04x] ->     queue[%d]\n",
 				tmp_index, tmp_index, qid);
 		}
 		return;
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.c
index b5992cdfa..b50adf05d 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.c
@@ -44,6 +44,8 @@
 #include "datapath_switchdev.h"
 #endif
 
+static struct datapath_ctrl dp_ctrl;
+
 static void init_dma_desc_mask(void)
 {
 	/*mask 0: to remove the bit, 1 -- keep the bit */
@@ -235,7 +237,8 @@ void dump_rx_dma_desc(struct dma_rx_desc_0 *desc_0,
 	PR_INFO(" DMA Descripotr:D0=0x%08x D1=0x%08x D2=0x%08x D3=0x%08x\n",
 		*(u32 *)desc_0, *(u32 *)desc_1,
 		*(u32 *)desc_2, *(u32 *)desc_3);
-	PR_INFO("  DW0:res=%d tunl_id=%d flow_id=%d eth_type=%d subif=0x%04x\n",
+	PR_INFO("  DW0:redir=%d res=%d tunl=%d flow=%d ether=%d subif=0x%04x\n",
+		desc_0->field.redir,
 		desc_0->field.resv, desc_0->field.tunnel_id,
 		desc_0->field.flow_id, desc_0->field.eth_type,
 		desc_0->field.dest_sub_if_id);
@@ -272,7 +275,8 @@ void dump_tx_dma_desc(struct dma_tx_desc_0 *desc_0,
 	PR_INFO(" DMA Descripotr:D0=0x%08x D1=0x%08x D2=0x%08x D3=0x%08x\n",
 		*(u32 *)desc_0, *(u32 *)desc_1,
 		*(u32 *)desc_2, *(u32 *)desc_3);
-	PR_INFO("  DW0:res=%d tunl_id=%d flow_id=%d eth_type=%d subif=0x%04x\n",
+	PR_INFO("  DW0:redir=%d res=%d tunl=%d flow=%d ether=%d subif=0x%04x\n",
+		desc_0->field.redir,
 		desc_0->field.resv, desc_0->field.tunnel_id,
 		desc_0->field.flow_id, desc_0->field.eth_type,
 		desc_0->field.dest_sub_if_id);
@@ -280,9 +284,9 @@ void dump_tx_dma_desc(struct dma_tx_desc_0 *desc_0,
 		desc_1->field.session_id, desc_1->field.tcp_err,
 		desc_1->field.nat, desc_1->field.dec, desc_1->field.enc,
 		desc_1->field.mpe2, desc_1->field.mpe1);
-	PR_INFO("  color=%02d ep=%02d ip=%02d\n",
+	PR_INFO("  color=%02d ep=%02d ip=%02d class=%d\n",
 		desc_1->field.color, desc_1->field.ep,
-		desc_1->field.ip);
+		desc_1->field.ip, desc_1->field.classid);
 	PR_INFO("  DW2:data_ptr=0x%08x\n", desc_2->field.data_ptr);
 	PR_INFO("  DW3:own=%d c=%d sop=%d eop=%d dic=%d pdu_type=%d\n",
 		desc_3->field.own, desc_3->field.c, desc_3->field.sop,
@@ -437,41 +441,51 @@ void dp_sys_mib_reset_31(u32 flag)
 #endif
 }
 
+#ifndef CONFIG_LTQ_DATAPATH_QOS_HAL
 int alloc_q_to_port(struct ppv4_q_sch_port *info, u32 flag)
 {
 	struct ppv4_queue q;
 	struct ppv4_port port;
-	int i;
 	int inst = info->inst;
-	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
+	struct hal_priv *priv = HAL(inst);
 
-	if (!priv)
+	if (!priv) {
 		PR_ERR("why priv NULL ???\n");
+		return -1;
+	}
 
-	if (priv->deq_port_stat[info->cqe_deq].flag != PP_NODE_ALLOC) {
-		port.qos_deq_port = info->cqe_deq;
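+	/* first user of this CQM dequeue port: create the QoS port node */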
+	if (priv->deq_port_stat[info->cqe_deq].flag == PP_NODE_FREE) {
+		port.cqm_deq_port = info->cqe_deq;
 		port.tx_pkt_credit = info->tx_pkt_credit;
 		port.tx_ring_addr = info->tx_ring_addr;
 		port.tx_ring_size = info->tx_ring_size;
+		port.inst = inst;
+		port.dp_port = info->dp_port;
 		if (dp_pp_alloc_port(&port)) {
 			PR_ERR("%s fail for deq_port=%d qos_deq_port=%d\n",
 			       "dp_pp_alloc_port",
-			       port.qos_deq_port, port.qos_deq_port);
+			       port.cqm_deq_port, port.qos_deq_port);
 			return -1;
 		}
-		priv->deq_port_stat[info->cqe_deq].node_id = port.node_id;
-		priv->deq_port_stat[info->cqe_deq].flag = PP_NODE_ALLOC;
+		info->port_node = port.node_id;
 		info->f_deq_port_en = 1;
 	} else {
 		port.node_id = priv->deq_port_stat[info->cqe_deq].node_id;
 	}
 	q.qid = -1;
 	q.parent = port.node_id;
+	q.inst = inst;
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+	q.dq_port = info->cqe_deq; /*for qos slim driver only */
+#endif
 	if (dp_pp_alloc_queue(&q)) {
 		PR_ERR("%s fail\n",
 		       "dp_pp_alloc_queue");
 		return -1;
 	}
+#ifdef DP_TMP_DEL
 	/*Save queue/scheduler/port information */
 	for (i = 0; i < MAX_PP_CHILD_PER_NODE; i++) {
 		if (priv->deq_port_stat[info->cqe_deq].child[i].flag ==
@@ -483,20 +497,59 @@ int alloc_q_to_port(struct ppv4_q_sch_port *info, u32 flag)
 		PR_ERR("Failed to find one free child ??\n");
 		return -1;
 	}
-	PR_ERR("Get qid=%d for cqe_deq=%d\n", q.qid, info->cqe_deq);
+	DP_DEBUG(DP_DBG_FLAG_DBG,
+		 "Get qid=%d for cqe_deq=%d\n", q.qid, info->cqe_deq);
 	priv->deq_port_stat[info->cqe_deq].child[i].flag = PP_NODE_ALLOC;
 	priv->deq_port_stat[info->cqe_deq].child[i].node_id = q.node_id;
 	priv->deq_port_stat[info->cqe_deq].child[i].type = DP_NODE_QUEUE;
 	priv->deq_port_stat[info->cqe_deq].flag = PP_NODE_ALLOC;
+#endif
 	PORT_VAP(info->inst, info->dp_port, info->ctp, qid) = q.qid;
 	PORT_VAP(info->inst, info->dp_port, info->ctp, q_node) = q.node_id;
 	info->qid = q.qid;
+	info->q_node = q.node_id;
 	priv->qos_queue_stat[q.qid].deq_port = info->cqe_deq;
 	priv->qos_queue_stat[q.qid].node_id = q.node_id;
 	priv->qos_queue_stat[q.qid].flag = PP_NODE_ALLOC;
-	PR_ERR("alloc_q_to_port done\n");
+	DP_DEBUG(DP_DBG_FLAG_DBG, "alloc_q_to_port done\n");
 	return 0;
 }
+#else
+int alloc_q_to_port(struct ppv4_q_sch_port *info, u32 flag)
+{
+	struct dp_node_link link = {0};
+	
+
+	link.dp_port = info->dp_port; /*in case for qos node alloc resv pool*/
+	link.inst = info->inst;
+	link.node_id.q_id = DP_NODE_AUTO_ID;
+	link.node_type = DP_NODE_QUEUE;
+	link.p_node_id.cqm_deq_port = info->cqe_deq;
+	link.p_node_type = DP_NODE_PORT;
+
+#ifdef XXX_TEST /*testing only */
+	{
+		static int kk;
+		info->qid = ++kk;
+		info->q_node = kk * 2;
+		info->port_node = kk * 2 + 1;
+
+		return 0;
+	}
+#endif
+
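+	/* attach a new auto-ID queue directly to the CQM dequeue port */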
+	if (dp_node_link_add(&link, 0)) {
+		PR_ERR("dp_node_link_add_31 fail: cqm_deq_port=%d\n",
+		       info->cqe_deq);
+		return -1;
+	}
+	info->f_deq_port_en = 1;
+	info->qid = link.node_id.q_id;
+	PR_INFO("qid=%d p_node_id=%d for cqm port=%d\n",
+		link.node_id.q_id, link.p_node_id.cqm_deq_port, info->cqe_deq);
+	return 0;
+}
+#endif /*CONFIG_LTQ_DATAPATH_QOS_HAL*/
 
 int dp_platform_queue_set(int inst, u32 flag)
 {
@@ -508,7 +561,9 @@ int dp_platform_queue_set(int inst, u32 flag)
 	u8 f_cpu_q = 0;
 	struct cbm_dp_en_data en_data = {0};
 	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
+	struct pmac_port_info *port_info;
 
+	port_info = &dp_port_info[inst][0]; /*CPU*/
 	if ((flag & DP_PLATFORM_DE_INIT) == DP_PLATFORM_DE_INIT) {
 		PR_ERR("Need to free resoruce in the future\n");
 		return 0;
@@ -517,6 +572,7 @@ int dp_platform_queue_set(int inst, u32 flag)
 	/*Allocate a drop queue*/
 	if (priv->ppv4_drop_q < 0) {
 		q.parent = 0;
+		q.inst = inst;
 		if (dp_pp_alloc_queue(&q)) {
 			PR_ERR("%s fail to alloc a drop queue ??\n",
 			       "dp_pp_alloc_queue");
@@ -524,7 +580,7 @@ int dp_platform_queue_set(int inst, u32 flag)
 		}
 		priv->ppv4_drop_q = q.qid;
 	} else {
-		PR_ERR("Hardcoded drop queue: %d\n", priv->ppv4_drop_q);
+		PR_INFO("drop queue: %d\n", priv->ppv4_drop_q);
 	}
 	/*Map all lookup entry to drop queue at the beginning*/
 	lookup.mode = CQE_LU_MODE0;
@@ -551,26 +607,65 @@ int dp_platform_queue_set(int inst, u32 flag)
 	/*Alloc queue/scheduler/port per CPU port */
 	cpu_data.dp_inst = inst;
 	cpu_data.cbm_inst = dp_port_prop[inst].cbm_inst;
+#if IS_ENABLED(CONFIG_LTQ_DATAPATH_DDR_SIMULATE_GSWIP31)
+	cpu_data.dq_tx_push_info[0].deq_port = 0;
+	cpu_data.dq_tx_push_info[1].deq_port = -1;
+	cpu_data.dq_tx_push_info[2].deq_port = -1;
+	cpu_data.dq_tx_push_info[3].deq_port = -1;
+#else
 	ret = cbm_cpu_port_get(&cpu_data, 0);
+#endif
 	if (ret == -1) {
 		PR_ERR("%s fail for CPU Port. Why ???\n",
 		       "cbm_cpu_port_get");
 		return -1;
 	}
+	port_info->deq_port_base = 0;
+	port_info->deq_port_num = 0;
 	for (i = 0; i < ARRAY_SIZE(cpu_data.dq_tx_push_info); i++) {
 		if (cpu_data.dq_tx_push_info[i].deq_port == (u32)-1)
 			continue;
-		PR_ERR("cpu(%d) deq_port=%d", i,
-		       cpu_data.dq_tx_push_info[i].deq_port);
+		DP_DEBUG(DP_DBG_FLAG_DBG, "cpu(%d) deq_port=%d",
+			 i, cpu_data.dq_tx_push_info[i].deq_port);
 		q_port.cqe_deq = cpu_data.dq_tx_push_info[i].deq_port;
 		q_port.tx_pkt_credit = cpu_data.dq_tx_push_info[i].
 							tx_pkt_credit;
 		q_port.tx_ring_addr = cpu_data.dq_tx_push_info[i].tx_ring_addr;
 		q_port.tx_ring_size = cpu_data.dq_tx_push_info[i].tx_ring_size;
+
+		/* Store ring info */
+		priv->deq_port_info[q_port.cqe_deq].tx_pkt_credit =
+			cpu_data.dq_tx_push_info[i].tx_pkt_credit;
+		priv->deq_port_info[q_port.cqe_deq].tx_ring_addr =
+			cpu_data.dq_tx_push_info[i].tx_ring_addr;
+		priv->deq_port_info[q_port.cqe_deq].tx_ring_size =
+			cpu_data.dq_tx_push_info[i].tx_ring_size;
+		PR_INFO("Store CPU ring info\n");
+		PR_INFO("  ring_address[%d]=0x%x\n",
+			q_port.cqe_deq,
+			priv->deq_port_info[q_port.cqe_deq].tx_ring_addr);
+		PR_INFO("  ring_size[%d]=%d\n",
+			q_port.cqe_deq,
+			priv->deq_port_info[q_port.cqe_deq].tx_ring_size);
+		PR_INFO("  credit[%d]=%d\n",
+			q_port.cqe_deq,
+			priv->deq_port_info[q_port.cqe_deq].tx_pkt_credit);
 		q_port.inst = inst;
 		q_port.dp_port = PMAC_CPU_ID;
+		PR_INFO("CPU[%d] ring addr=%x\n", i,
+			cpu_data.dq_tx_push_info[i].tx_ring_addr);
 		q_port.ctp = i; /*fake CTP for CPU port to store its qid*/
-		alloc_q_to_port(&q_port, 0);
+		PR_INFO("alloc_q_to_port...\n");
+		if (alloc_q_to_port(&q_port, 0)) {
+			PR_ERR("alloc_q_to_port fail for dp_port=%d\n",
+			       q_port.dp_port);
+			return -1;
+		}
+		port_info->deq_port_num++;
+		port_info->subif_info[i].qid = q_port.qid;
+		port_info->subif_info[i].q_node = q_port.q_node;
+		port_info->subif_info[i].qos_deq_port = q_port.port_node;
+		port_info->subif_info[i].cqm_deq_port = q_port.cqe_deq;
 		if (!f_cpu_q) {
 			f_cpu_q = 1;
 			/*Map all CPU port's lookup to its 1st queue only */
@@ -617,7 +712,6 @@ static int dp_platform_set(int inst, u32 flag)
 		priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
 		priv->ppv4_drop_q = MAX_QUEUE - 1; /*Need change later */
 		gsw_handle = dp_port_prop[inst].ops[0];
-		PR_ERR("dp_platform_set\n");
 		if (!inst)/*only inst zero need DMA descriptor */
 			init_dma_desc_mask();
 		if (!dp_port_prop[inst].ops[0] ||
@@ -627,13 +721,17 @@ static int dp_platform_set(int inst, u32 flag)
 		}
 		if (!inst)
 			dp_sub_proc_install_31();
+		init_qos_fn();
+		dp_ctrl.name = "datapath";
+		datapath_debugfs_init(&dp_ctrl);
 		/*just for debugging purpose */
 		dp_port_info[inst][0].subif_info[0].bp = CPU_BP;
 		INIT_LIST_HEAD(&dp_port_info[inst][0].subif_info[0].logic_dev);
 
 		priv->bp_def = alloc_bridge_port(inst, CPU_PORT, CPU_SUBIF,
 						 CPU_FID, CPU_BP);
-		PR_ERR("bp_def[%d]=%d\n", inst, priv->bp_def);
+		DP_DEBUG(DP_DBG_FLAG_DBG, "bp_def[%d]=%d\n",
+			 inst, priv->bp_def);
 
 		if (!inst) /*only inst zero will support mib feature */
 			mib_init(0);
@@ -658,7 +756,19 @@ static int dp_platform_set(int inst, u32 flag)
 			return -1;
 		}
 
-		dp_platform_queue_set(inst, flag);
+		if (init_ppv4_qos(inst, flag)) {
+			PR_ERR("init_ppv4_qos fail\n");
+			return -1;
+		}
+		if (dp_platform_queue_set(inst, flag)) {
+			PR_ERR("dp_platform_queue_set fail\n");
+
+			return -1;
+		}
+		if (cpu_vlan_mod_dis(inst)) {
+			PR_ERR("cpu_vlan_mod_dis fail\n");
+			return -1;
+		}
 		return 0;
 	}
 
@@ -666,6 +776,7 @@ static int dp_platform_set(int inst, u32 flag)
 	priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
 	if (priv->bp_def)
 		free_bridge_port(inst, priv->bp_def);
+	init_ppv4_qos(inst, flag); /*de-initialize qos */
 	kfree(priv);
 	dp_port_prop[inst].priv_hal = NULL;
 	return 0;
@@ -673,12 +784,42 @@ static int dp_platform_set(int inst, u32 flag)
 
 static int port_platform_set(int inst, u8 ep, u32 flags)
 {
+	int idx, i;
 	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
-
 	struct gsw_itf *itf = ctp_port_assign(inst, ep, priv->bp_def, flags);
+	struct pmac_port_info *port_info = &dp_port_info[inst][ep];
 	/*reset_gsw_itf(ep); */
 	dp_port_info[inst][ep].itf_info = itf;
+	if (flags & DP_F_DEREGISTER)
+		return 0;
 
+	PR_INFO("priv=%p deq_port_stat=%p qdev=%p\n",
+		priv,
+		priv ? priv->deq_port_stat : NULL,
+		priv ? priv->qdev : NULL);
+	idx = port_info->deq_port_base;
+
+	for (i = 0; i < port_info->deq_port_num; i++) {
+		priv->deq_port_info[i + idx].tx_ring_addr =
+			port_info->tx_ring_addr +
+			(port_info->tx_ring_offset * i);
+		priv->deq_port_info[i + idx].tx_ring_size =
+			port_info->tx_ring_size;
+		priv->deq_port_info[i + idx].tx_pkt_credit =
+			port_info->tx_pkt_credit;
+	}
+#if IS_ENABLED(CONFIG_LTQ_DATAPATH_DBG)
+	if (DP_DBG_FLAG_DBG & dp_dbg_flag) {
+		for (i = 0; i < port_info->deq_port_num; i++) {
+			PR_INFO("cqm[%d]: addr=%x credit=%d size=%d\n",
+				i + idx,
+				priv->deq_port_info[i + idx].tx_ring_addr,
+				priv->deq_port_info[i + idx].tx_pkt_credit,
+				priv->deq_port_info[i + idx].tx_ring_size);
+		}
+	}
+#endif
 	return 0;
 }
 
@@ -727,7 +868,7 @@ static int subif_hw_set(int inst, int portid, int subif_ix,
 			struct subif_platform_data *data, u32 flags)
 {
 	struct ppv4_q_sch_port q_port = {0};
-	cbm_queue_map_entry_t lookup = {0};
+	static cbm_queue_map_entry_t lookup = {0};
 	u32 lookup_f = CBM_QUEUE_MAP_F_FLOWID_L_DONTCARE |
 		CBM_QUEUE_MAP_F_FLOWID_H_DONTCARE |
 		CBM_QUEUE_MAP_F_EN_DONTCARE |
@@ -735,9 +876,14 @@ static int subif_hw_set(int inst, int portid, int subif_ix,
 		CBM_QUEUE_MAP_F_MPE1_DONTCARE |
 		CBM_QUEUE_MAP_F_MPE2_DONTCARE |
 		CBM_QUEUE_MAP_F_TC_DONTCARE;
-	int subif;
+	int subif, deq_port_idx = 0;
 	struct pmac_port_info *port_info;
+	struct hal_priv *priv = HAL(inst);
 
+	if (!data) {
+		PR_ERR("data NULL\n");
+		return -1;
+	}
 	port_info = &dp_port_info[inst][portid];
 	subif = SET_VAP(subif_ix, port_info->vap_offset,
 			port_info->vap_mask);
@@ -750,33 +896,56 @@ static int subif_hw_set(int inst, int portid, int subif_ix,
 		PR_ERR("need more for logical dev??\n");
 		return 0;
 	}
-	PR_ERR("inst=%d portid=%d dp numsubif=%d subif_ix=%d\n", inst, portid,
-	       port_info->num_subif, subif_ix);
+	DP_DEBUG(DP_DBG_FLAG_DBG,
+		 "inst=%d portid=%d dp numsubif=%d subif_ix=%d\n", inst, portid,
+		 port_info->num_subif, subif_ix);
+	if (data->subif_data)
+		deq_port_idx = data->subif_data->deq_port_idx;
+	if (port_info->deq_port_num < deq_port_idx + 1) {
+		PR_ERR("Wrong deq_port_idx(%d), should < %d\n",
+		       deq_port_idx, port_info->deq_port_num);
+		return -1;
+	}
 	/*1st subif need config queue*/
 	if (!port_info->num_subif)
 		goto QUEUE_CFG;
-	if (!data || !data->subif_data ||
+	if (!data->subif_data ||
 	    !(data->subif_data->flag_ops & DP_SUBIF_Q_PER_CTP))
 		return 0;
-
+
 QUEUE_CFG:
-	q_port.cqe_deq = port_info->deq_port_base;
-	if ((port_info->deq_port_num > 1) &&
-	    (data && data->subif_data && data->subif_data))
-		q_port.cqe_deq += data->subif_data->deq_port_idx;
-	PR_ERR("cqe_deq=%d\n", q_port.cqe_deq);
-	q_port.tx_pkt_credit = port_info->tx_pkt_credit;
-	q_port.tx_ring_addr = port_info->tx_ring_addr;
-	q_port.tx_ring_size = port_info->tx_ring_size;
+	PR_INFO("QUEUE_CFG\n");
+	q_port.cqe_deq = port_info->deq_port_base + deq_port_idx;
+	DP_DEBUG(DP_DBG_FLAG_DBG, "cqe_deq=%d\n", q_port.cqe_deq);
+	if (!priv) {
+		PR_ERR("priv NULL\n");
+		return -1;
+	}
+	PR_INFO("priv=%p deq_port_stat=%p qdev=%p\n",
+		priv,
+		priv ? priv->deq_port_stat : NULL,
+		priv ? priv->qdev : NULL);
+	PR_INFO("cqe_deq=%d inst=%d\n", q_port.cqe_deq, inst);
+	q_port.tx_pkt_credit =
+		priv->deq_port_info[q_port.cqe_deq].tx_pkt_credit;
+	q_port.tx_ring_addr =
+		priv->deq_port_info[q_port.cqe_deq].tx_ring_addr;
+	q_port.tx_ring_size =
+		priv->deq_port_info[q_port.cqe_deq].tx_ring_size;
+
 	q_port.inst = inst;
 	q_port.dp_port = portid;
 	q_port.ctp = subif_ix;
-
-	alloc_q_to_port(&q_port, 0);
+	if (alloc_q_to_port(&q_port, 0)) {
+		PR_ERR("alloc_q_to_port fail for dp_port=%d\n",
+			q_port.dp_port);
+		return -1;
+	}
 	port_info->subif_info[subif_ix].qid = q_port.qid;
 	port_info->subif_info[subif_ix].q_node = q_port.q_node;
-	port_info->subif_info[subif_ix].qos_deq_port = q_port.cqe_deq;
+	port_info->subif_info[subif_ix].qos_deq_port = q_port.port_node;
 	port_info->subif_info[subif_ix].cqm_deq_port = q_port.cqe_deq;
+	port_info->subif_info[subif_ix].cqm_port_idx = deq_port_idx;
 
 	/*Set CPU port to Mode0 only*/
 	lookup.mode = dp_port_info[inst][portid].cqe_lu_mode;
@@ -796,8 +965,9 @@ QUEUE_CFG:
 			  q_port.qid,
 			  &lookup,
 			  lookup_f);
-	PR_ERR("cbm_queue_map_set qid=%d for dp_port=%d\n",
-	       q_port.qid, lookup.ep);
+	DP_DEBUG(DP_DBG_FLAG_DBG,
+		 "cbm_queue_map_set qid=%d for dp_port=%d\n",
+		 q_port.qid, lookup.ep);
 	if (q_port.f_deq_port_en)
 		data->act = TRIGGER_CQE_DP_ENABLE;
 	return 0;
@@ -806,17 +976,31 @@ QUEUE_CFG:
 static int subif_hw_reset(int inst, int portid, int subif_ix,
 			  struct subif_platform_data *data, u32 flags)
 {
+#ifndef CONFIG_LTQ_DATAPATH_QOS_HAL
 	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
+#endif /*CONFIG_LTQ_DATAPATH_QOS_HAL*/
 	struct pmac_port_info *port_info = &dp_port_info[inst][portid];
+	struct dp_node_alloc node;
 
 	reset_ctp_bp(inst, subif_ix, portid,
 		     dp_port_info[inst][portid].subif_info[subif_ix].bp);
 	free_bridge_port(inst,
 			 dp_port_info[inst][portid].subif_info[subif_ix].bp);
-	pp_qos_queue_remove(NULL, port_info->subif_info[subif_ix].qid);
-	pp_qos_port_remove(NULL, port_info->subif_info[subif_ix].qos_deq_port);
-	priv->deq_port_stat[port_info->subif_info[subif_ix].qos_deq_port].flag =
+#ifdef CONFIG_LTQ_DATAPATH_QOS_HAL
+	node.inst = inst;
+	node.dp_port = portid;
+	node.type = DP_NODE_QUEUE;
+	node.id.q_id = port_info->subif_info[subif_ix].qid;
+	dp_node_free(&node, 0);
+#else
+	qos_queue_flush(priv->qdev, port_info->subif_info[subif_ix].q_node);
+	qos_queue_remove(priv->qdev, port_info->subif_info[subif_ix].q_node);
+	qos_port_remove(priv->qdev,
+			port_info->subif_info[subif_ix].qos_deq_port);
+	priv->deq_port_stat[port_info->subif_info[subif_ix].cqm_deq_port].flag =
 		PP_NODE_FREE;
+#endif /* CONFIG_LTQ_DATAPATH_QOS_HAL */
+
 	return 0;
 }
 
@@ -961,6 +1145,7 @@ int register_dp_cap_gswip31(int flag)
 	cap.info.swdev_bridge_port_cfg_set = dp_swdev_bridge_port_cfg_set;
 	cap.info.dp_mac_set = dp_gswip_mac_entry_add;
 	cap.info.dp_mac_reset = dp_gswip_mac_entry_del;
+	cap.info.dp_cfg_vlan = dp_gswip_ext_vlan;
 #endif
 	cap.info.cap.tx_hw_chksum = 0;
 	cap.info.cap.rx_hw_chksum = 0;
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.h b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.h
index 504df73a0..c0aae97fd 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.h
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_misc.h
@@ -13,6 +13,8 @@
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
 #include "datapath_ppv4.h"
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #define PMAC_MAX_NUM  16
 #define PAMC_LAN_MAX_NUM 7
@@ -76,14 +78,28 @@ struct resv_info {
 	struct resv_sch *resv_sched; /*!< reserved schedulers info */
 };
 
+struct pp_qos_dev;
 struct hal_priv {
 	struct cqm_deq_stat deq_port_stat[MAX_CQM_DEQ];
 	struct pp_queue_stat qos_queue_stat[MAX_QUEUE];
 	struct pp_sch_stat qos_sch_stat[QOS_MAX_NODES];
+	struct cqm_port_info deq_port_info[MAX_CQM_DEQ];
 
 	struct resv_info resv[MAX_DP_PORTS];
 	int bp_def;
-	s32 ppv4_drop_q;
+	struct pp_qos_dev *qdev; /*ppv4 qos dev */
+	s32 ppv4_drop_q;  /*drop queue: physical id */
+	int cqm_drop_p;
+	unsigned int ppv4_drop_p; /*drop qos port: workaround for
+				   *PPV4 API issue
+				   */
+	unsigned int ppv4_tmp_p; /*workaround for ppv4 queue allocate
+				  *to get physical queue id
+				  */
+};
+
+struct datapath_ctrl {
+	struct dentry *debugfs;
+	const char *name;
 };
 
 #define SET_PMAC_IGP_EGP(pmac, port_id) ((pmac)->igp_egp = (port_id) & 0xF)
@@ -106,11 +122,27 @@ int proc_print_ctp_bp_info(struct seq_file *s, int inst,
 			   struct pmac_port_info *port,
 			   int subif_index, u32 flag);
 
+#if IS_ENABLED(CONFIG_LTQ_DATAPATH_SWITCHDEV)
 int dp_gswip_mac_entry_add(int bport, int fid, int inst, u8 *addr);
 int dp_gswip_mac_entry_del(int bport, int fid, int inst, u8 *addr);
+int set_gswip_ext_vlan(struct core_ops *ops, struct ext_vlan_info *vlan,
+		       int flag);
+#endif
 int qos_platform_set(int cmd_id, void *node, int flag);
 int dp_node_alloc_31(struct dp_node_alloc *node, int flag);
 int dp_node_free_31(struct dp_node_alloc *node, int flag);
+int dp_deq_port_res_get_31(struct dp_dequeue_res *res, int flag);
+int dp_node_link_en_get_31(struct dp_node_link_enable *en, int flag);
+int dp_node_link_en_set_31(struct dp_node_link_enable *en, int flag);
+int dp_qos_link_prio_set_31(struct dp_node_prio *info, int flag);
+int dp_qos_link_prio_get_31(struct dp_node_prio *info, int flag);
+int dp_node_link_add_31(struct dp_node_link *info, int flag);
+int dp_link_add_31(struct dp_qos_link *cfg, int flag);
+int dp_link_get_31(struct dp_qos_link *cfg, int flag);
+int dp_node_unlink_31(struct dp_node_link *info, int flag);
+int dp_node_link_get_31(struct dp_node_link *info, int flag);
+int dp_queue_conf_set_31(struct dp_queue_conf *cfg, int flag);
+int dp_queue_conf_get_31(struct dp_queue_conf *cfg, int flag);
 
 #if IS_ENABLED(CONFIG_LTQ_DATAPATH_DDR_SIMULATE_GSWIP31)
 GSW_return_t gsw_core_api_ddr_simu31(dp_gsw_cb func, void *ops, void *param);
@@ -156,4 +188,14 @@ ssize_t proc_get_qid_via_index31(struct file *file, const char *buf,
 				 size_t count, loff_t *ppos);
 ssize_t proc_get_qid_via_index(struct file *file, const char *buf,
 			       size_t count, loff_t *ppos);
+int datapath_debugfs_init(struct datapath_ctrl *pctrl);
+int get_q_qocc(int inst, int qid, u32 *c);
+int get_q_mib(int inst, int qid,
+	      u32 *total_accept,
+	      u32 *total_drop,
+	      u32 *red_drop);
+int get_p_mib(int inst, int pid,
+	      u32 *green /*green bytes*/,
+	      u32 *yellow /*yellow bytes*/);
+int cpu_vlan_mod_dis(int inst);
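+
+/* Usage sketch for the MIB getters above (illustrative only; assumes
+ * they return 0 on success like the other helpers here):
+ *
+ *	u32 qocc, acc, drop, red;
+ *
+ *	if (!get_q_qocc(inst, qid, &qocc) &&
+ *	    !get_q_mib(inst, qid, &acc, &drop, &red))
+ *		PR_INFO("q%d: occ=%u acc=%u drop=%u red=%u\n",
+ *			qid, qocc, acc, drop, red);
+ */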
 #endif
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.c
index 01dd09a51..9ce537fe4 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.c
@@ -11,44 +11,83 @@
 #include "../datapath.h"
 #include "datapath_misc.h"
 
-#define DUMMY_PPV4_QOS
-#ifdef DUMMY_PPV4_QOS
+int (*qos_queue_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+int (*qos_queue_allocate)(struct pp_qos_dev *qos_dev, unsigned int *id);
+int (*qos_queue_info_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_info *info);
+int (*qos_port_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+int (*qos_sched_allocate)(struct pp_qos_dev *qos_dev, unsigned int *id);
+int (*qos_sched_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+int (*qos_port_allocate)(struct pp_qos_dev *qos_dev,
+			 unsigned int physical_id,
+			 unsigned int *id);
+int (*qos_port_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+		    const struct pp_qos_port_conf *conf);
+void (*qos_port_conf_set_default)(struct pp_qos_port_conf *conf);
+void (*qos_queue_conf_set_default)(struct pp_qos_queue_conf *conf);
+int (*qos_queue_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_queue_conf *conf);
+void (*qos_sched_conf_set_default)(struct pp_qos_sched_conf *conf);
+int (*qos_sched_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_sched_conf *conf);
+int (*qos_queue_conf_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_conf *conf);
+int (*qos_queue_flush)(struct pp_qos_dev *qos_dev, unsigned int id);
+int (*qos_sched_conf_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_sched_conf *conf);
+int (*qos_sched_get_queues)(struct pp_qos_dev *qos_dev, unsigned int id,
+			    uint16_t *queue_ids, unsigned int size,
+			    unsigned int *queues_num);
+int (*qos_port_get_queues)(struct pp_qos_dev *qos_dev, unsigned int id,
+			   uint16_t *queue_ids, unsigned int size,
+			   unsigned int *queues_num);
+int (*qos_port_conf_get)(struct pp_qos_dev *qdev, unsigned int id,
+			 struct pp_qos_port_conf *conf);
+int (*qos_port_info_get)(struct pp_qos_dev *qdev, unsigned int id,
+			 struct pp_qos_port_info *info);
+struct pp_qos_dev *(*qos_dev_open)(unsigned int id);
+int (*qos_dev_init)(struct pp_qos_dev *qos_dev,
+		    struct pp_qos_init_param *conf);
+
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
 struct fixed_q_port {
-	int deq_port;
-	int queue_id;
-	int port_node;
-	int queue_node;
-	int q_used;
+	int deq_port; /*cqm dequeue port */
+	int queue_id; /*queue physical id*/
+	int port_node; /*qos dequeue port node id */
+	int queue_node; /*queue node id */
+	int q_used;    /*flag to indicate used or free*/
 };
 
 struct fixed_q_port q_port[] = {
-	{0, 14, 1, 2, 0},
-	{0, 74, 3, 4, 0},
-	{0, 30, 5, 6, 0},
-	{0, 87, 7, 8, 0},
-	{7, 235, 9, 10, 0},
-	{7, 42, 11, 12, 0},
-	{7, 242, 13, 14, 0},
-	{26, 190, 15, 16, 0},
-	{25, 119, 17, 18, 0},
-	{26, 103, 19, 20, 0}
+	{0, 14, 0, 2, 0},
+	{0, 74, 0, 4, 0},
+	{0, 30, 0, 6, 0},
+	{0, 87, 0, 8, 0},
+	{7, 235, 7, 10, 0},
+	{7, 42, 7, 12, 0},
+	{7, 242, 7, 14, 0},
+	{26, 190, 26, 16, 0},
+	{26, 119, 26, 18, 0},
+	{26, 103, 26, 20, 0}
 };
 
-int pp_qos_queue_remove(struct pp_qos_dev *qos_dev, int id)
+static struct pp_qos_dev qdev;
+
+int test_qos_queue_remove(struct pp_qos_dev *qos_dev, unsigned int id)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(q_port); i++) {
 		if (q_port[i].queue_id == id) {
 			q_port[i].q_used = PP_NODE_FREE;
-			PR_INFO("to free qid=%d\n", id);
-			break;
+			DP_DEBUG(DP_DBG_FLAG_DBG, "to free qid=%d\n", id);
+			return 0;
 		}
 	}
-	return 0;
+	return -1;
 }
 
-int pp_qos_queue_allocate(struct pp_qos_dev *qos_dev, int *id)
+int test_qos_queue_allocate(struct pp_qos_dev *qos_dev, unsigned int *id)
 {
 	int i;
 
@@ -56,23 +95,25 @@ int pp_qos_queue_allocate(struct pp_qos_dev *qos_dev, int *id)
 		if ((q_port[i].deq_port == qos_dev->dq_port) &&
 		    (q_port[i].q_used == PP_NODE_FREE)) {
 			q_port[i].q_used = PP_NODE_ALLOC;
-			PR_INFO("allocate qnode_id=%d\n", q_port[i].queue_node);
+			DP_DEBUG(DP_DBG_FLAG_DBG, "allocate qnode_id=%d\n",
+				 q_port[i].queue_node);
 			*id = q_port[i].queue_node;
-			break;
+			return 0;
 		}
 	}
-	return 0;
+	return -1;
 }
 
-int pp_qos_queue_info_get(struct pp_qos_dev *qos_dev, int id,
+int test_qos_queue_info_get(struct pp_qos_dev *qos_dev, unsigned int id,
 			  struct pp_qos_queue_info *info)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(q_port); i++) {
-		if ((q_port[i].queue_node == id) &&
-		    (q_port[i].q_used == PP_NODE_ALLOC)) {
-			PR_INFO("allocate qphy_id=%d\n", q_port[i].queue_id);
+		if ((q_port[i].queue_node == id) /*&&
+		    (q_port[i].q_used == PP_NODE_ALLOC)*/) {
+			DP_DEBUG(DP_DBG_FLAG_DBG, "q[%d]'s qid=%d\n",
+				 id, q_port[i].queue_id);
 			info->physical_id = q_port[i].queue_id;
 			return 0;
 		}
@@ -80,21 +121,167 @@ int pp_qos_queue_info_get(struct pp_qos_dev *qos_dev, int id,
 	return -1;
 }
 
-int pp_qos_port_remove(struct pp_qos_dev *qos_dev, int id)
+int test_qos_port_remove(struct pp_qos_dev *qos_dev, unsigned int id)
+{
+	return 0;
+}
+
+int test_qos_sched_allocate(struct pp_qos_dev *qos_dev, unsigned int *id)
+{
+	return 0;
+}
+
+int test_qos_sched_remove(struct pp_qos_dev *qos_dev, unsigned int id)
+{
+	return 0;
+}
+
+int test_qos_port_allocate(struct pp_qos_dev *qos_dev, unsigned int physical_id,
+			 unsigned int *id)
+{
+	int i;
+
+	if (!id)
+		return -1;
+
+	for (i = 0; i < ARRAY_SIZE(q_port); i++) {
+		if (physical_id == q_port[i].deq_port) {
+			*id = q_port[i].port_node;
+			DP_DEBUG(DP_DBG_FLAG_DBG,
+				 "Ok: Deq_port/Node_id=%d/%d\n",
+				 physical_id, *id);
+			return 0;
+		}
+	}
+	return -1;
+}
+
+int test_qos_port_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		    const struct pp_qos_port_conf *conf)
+{
+	return 0;
+}
+
+void test_qos_port_conf_set_default(struct pp_qos_port_conf *conf)
+{
+}
+
+void test_qos_queue_conf_set_default(struct pp_qos_queue_conf *conf)
+{
+}
+
+int test_qos_queue_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_queue_conf *conf)
+{
+	return 0;
+}
+
+void test_qos_sched_conf_set_default(struct pp_qos_sched_conf *conf)
+{
+}
+
+int test_qos_sched_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_sched_conf *conf)
+{
+	return 0;
+}
+
+int test_qos_queue_conf_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_conf *conf)
+{
+	int i;
+
+	if (!conf)
+		return -1;
+	memset(conf, 0, sizeof(*conf));
+	for (i = 0; i < ARRAY_SIZE(q_port); i++) {
+		if (id == q_port[i].queue_node) {
+			conf->queue_child_prop.parent = q_port[i].port_node;
+			conf->common_prop.bandwidth_limit = 0;
+			conf->common_prop.suspended = 0;
+			conf->blocked = 0;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+int test_qos_queue_flush(struct pp_qos_dev *qos_dev, unsigned int id)
+{
+	return 0;
+}
+
+int test_qos_sched_conf_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_sched_conf *conf)
+{
+	return -1;
+}
+
+int test_qos_sched_get_queues(struct pp_qos_dev *qos_dev, unsigned int id,
+			    uint16_t *queue_ids, unsigned int size,
+			    unsigned int *queues_num)
+{
+	return 0;
+}
+
+int test_qos_port_get_queues(struct pp_qos_dev *qos_dev, unsigned int id,
+			   uint16_t *queue_ids, unsigned int size,
+			   unsigned int *queues_num)
+{
+	int i, num = 0;
+
+	for (i = 0; i < ARRAY_SIZE(q_port); i++) {
+		if (q_port[i].port_node != id)
+			continue;
+		if (queue_ids && (num < size)) {
+			queue_ids[num] = q_port[i].queue_node;
+			DP_DEBUG(DP_DBG_FLAG_QOS,
+				 "saved[%d] qid[%d/%d] for cqm[%d/%d]\n",
+				 num,
+				 q_port[i].queue_id,
+				 q_port[i].queue_node,
+				 q_port[i].deq_port,
+				 q_port[i].port_node);
+		} else {
+			DP_DEBUG(DP_DBG_FLAG_QOS,
+				 "unsaved[%d]: qid[%d/%d] for cqm[%d/%d]\n",
+				 num,
+				 q_port[i].queue_id,
+				 q_port[i].queue_node,
+				 q_port[i].deq_port,
+				 q_port[i].port_node);
+		}
+		num++;
+	}
+	if (queues_num)
+		*queues_num = num;
+	return 0;
+}
+
+int test_qos_port_conf_get(struct pp_qos_dev *qdev, unsigned int id,
+			   struct pp_qos_port_conf *conf)
 {
 	return 0;
 }
 
-int pp_qos_sched_allocate(struct pp_qos_dev *qos_dev, int *id)
+int test_qos_port_info_get(struct pp_qos_dev *qdev, unsigned int id,
+			   struct pp_qos_port_info *info)
 {
 	return 0;
 }
 
-int pp_qos_sched_remove(struct pp_qos_dev *qos_dev, int id)
+/*this test code only supports one instance */
+struct pp_qos_dev *test_qos_dev_open(unsigned int id)
+{
+	return &qdev;
+}
+
+int test_qos_dev_init(struct pp_qos_dev *qos_dev,
+		      struct pp_qos_init_param *conf)
 {
 	return 0;
 }
 
+#ifdef DUMMY_PPV4_QOS_API_OLD
 s32 qos_node_config(struct qos_node_api_param *param)
 {
 	int i;
@@ -107,9 +294,10 @@ s32 qos_node_config(struct qos_node_api_param *param)
 		for (i = 0; i < ARRAY_SIZE(q_port); i++) {
 			if (param->deq_port == q_port[i].deq_port) {
 				param->out_param.node_id = q_port[i].port_node;
-				PR_ERR("Ok: Deq_port/Node_id=%d/%d\n",
-				       param->deq_port,
-				       param->out_param.node_id);
+				DP_DEBUG(DP_DBG_FLAG_DBG,
+					 "Ok: Deq_port/Node_id=%d/%d\n",
+					 param->deq_port,
+					 param->out_param.node_id);
 				return 0;
 			}
 		}
@@ -127,12 +315,13 @@ s32 qos_node_config(struct qos_node_api_param *param)
 				param->out_param.node_id = q_port[i].queue_node;
 				param->out_param.queue_id = q_port[i].queue_id;
 				q_port[i].q_used = 1;
-				PR_ERR("Ok: Deq_port/Node_id=%d/%d %s=%d/%d\n",
-				       q_port[i].deq_port,
-				       param->node_conf.parent_node_id,
-				       "Queue_id/Node",
-				       param->out_param.queue_id,
-				       param->out_param.node_id);
+				DP_DEBUG(DP_DBG_FLAG_DBG,
+					 "Ok:Deq_port/Node_id=%d/%d %s=%d/%d\n",
+					 q_port[i].deq_port,
+					 param->node_conf.parent_node_id,
+					 "Queue_id/Node",
+					 param->out_param.queue_id,
+					 param->out_param.node_id);
 				return 0;
 			}
 		}
@@ -141,8 +330,65 @@ s32 qos_node_config(struct qos_node_api_param *param)
 	       param->node_conf.parent_node_id);
 	return -1;
 }
-#endif
+#endif /*DUMMY_PPV4_QOS_API_OLD*/
 
+#endif /*CONFIG_LTQ_DATAPATH_DUMMY_QOS*/
+
+void init_qos_fn(void)
+{
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+	qos_queue_remove = test_qos_queue_remove;
+	qos_queue_allocate = test_qos_queue_allocate;
+	qos_queue_info_get = test_qos_queue_info_get;
+	qos_port_remove = test_qos_port_remove;
+	qos_sched_allocate = test_qos_sched_allocate;
+	qos_sched_remove = test_qos_sched_remove;
+	qos_port_allocate = test_qos_port_allocate;
+	qos_port_set = test_qos_port_set;
+	qos_port_conf_set_default = test_qos_port_conf_set_default;
+	qos_queue_conf_set_default = test_qos_queue_conf_set_default;
+	qos_queue_set = test_qos_queue_set;
+	qos_sched_conf_set_default = test_qos_sched_conf_set_default;
+	qos_sched_set = test_qos_sched_set;
+	qos_queue_conf_get = test_qos_queue_conf_get;
+	qos_queue_flush = test_qos_queue_flush;
+	qos_sched_conf_get = test_qos_sched_conf_get;
+	qos_sched_get_queues = test_qos_sched_get_queues;
+	qos_port_get_queues = test_qos_port_get_queues;
+	qos_port_conf_get = test_qos_port_conf_get;
+	qos_dev_open = test_qos_dev_open;
+	qos_dev_init = test_qos_dev_init;
+#elif defined(CONFIG_LTQ_PPV4_QOS)
+	qos_queue_remove = pp_qos_queue_remove;
+	qos_queue_allocate = pp_qos_queue_allocate;
+	qos_queue_info_get = pp_qos_queue_info_get;
+	qos_port_remove = pp_qos_port_remove;
+	qos_sched_allocate = pp_qos_sched_allocate;
+	qos_sched_remove = pp_qos_sched_remove;
+	qos_port_allocate = pp_qos_port_allocate;
+	qos_port_set = pp_qos_port_set;
+	qos_port_conf_set_default = pp_qos_port_conf_set_default;
+	qos_queue_conf_set_default = pp_qos_queue_conf_set_default;
+	qos_queue_set = pp_qos_queue_set;
+	qos_sched_conf_set_default = pp_qos_sched_conf_set_default;
+	qos_sched_set = pp_qos_sched_set;
+	qos_queue_conf_get = pp_qos_queue_conf_get;
+	qos_queue_flush = pp_qos_queue_flush;
+	qos_sched_conf_get = pp_qos_sched_conf_get;
+	qos_sched_get_queues = pp_qos_sched_get_queues;
+	qos_port_get_queues = pp_qos_port_get_queues;
+	qos_port_conf_get = pp_qos_port_conf_get;
+	qos_dev_open = pp_qos_dev_open;
+	qos_dev_init = pp_qos_dev_init;
+#else
+	/*all NULL function pointer */
+	PR_INFO("all QoS function pointers set to NULL\n");
+#endif /*CONFIG_LTQ_DATAPATH_DUMMY_QOS*/
+}
+
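+/* Minimal sketch of how the indirection set up by init_qos_fn() is
+ * consumed (illustrative only):
+ *
+ *	struct pp_qos_dev *qdev = qos_dev_open(dp_port_prop[inst].qos_inst);
+ *	unsigned int q;
+ *
+ *	if (qdev && !qos_queue_allocate(qdev, &q))
+ *		qos_queue_remove(qdev, q);
+ *
+ * The same call sites run against either the dummy test_* backend or
+ * the real pp_qos_* driver, selected at build time.
+ */
+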
+#ifdef DUMMY_PPV4_QOS_API_OLD
 /*Port Add
  * Input
  *    Operation Type: Add
@@ -171,6 +417,7 @@ s32 qos_node_config(struct qos_node_api_param *param)
 int dp_pp_alloc_port(struct ppv4_port *info)
 {
 	struct qos_node_api_param param = {0};
+	struct hal_priv *priv = HAL(info->inst);
 
 	param.op_type = QOS_OP_ADD;
 	param.node_conf_flags = NODE_PARAM_FLAG_BW_LIMIT_SET |
@@ -189,12 +436,14 @@ int dp_pp_alloc_port(struct ppv4_port *info)
 	param.port_conf.port_tx_packets_credit = info->tx_pkt_credit;
 	param.port_conf.port_tx_ring_address = info->tx_ring_addr;
 	param.port_conf.port_tx_ring_size = info->tx_ring_size;
-	param.deq_port = info->qos_deq_port;
+	param.deq_port = info->cqm_port;
 
 	if (qos_node_config(&param)) {
 		PR_ERR("Failed to alloc QoS for deq_port=%d\n", param.deq_port);
 		return -1;
 	}
+	priv->deq_port_stat[info->cqm_port].flag = PP_NODE_ALLOC;
+	priv->deq_port_stat[info->cqm_port].node_id = param.out_param.node_id;
 	info->node_id = param.out_param.node_id;
 	return 0;
 }
@@ -313,3 +562,232 @@ int dp_pp_alloc_queue(struct ppv4_queue *info)
 	return 0;
 }
 
+#else /*DUMMY_PPV4_QOS_API_OLD*/
+
+int dp_pp_alloc_port(struct ppv4_port *info)
+{
+	unsigned int qos_p_id = 0;
+	struct pp_qos_port_conf conf;
+	struct hal_priv *priv = HAL(info->inst);
+	struct pp_qos_dev *qos_dev = priv->qdev;
+
+	if (qos_port_allocate(qos_dev,
+			      info->cqm_deq_port,
+			      &qos_p_id)) {
+		PR_ERR("Failed to alloc QoS for deq_port=%d\n",
+		       info->cqm_deq_port);
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS,
+		 "qos_port_allocate ok with port(cqm/qos)=%d/%d\n",
+		 info->cqm_deq_port, qos_p_id);
+
+	qos_port_conf_set_default(&conf);
+	conf.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	conf.ring_address = (void *)info->tx_ring_addr;
+	conf.ring_size = info->tx_ring_size;
+	conf.packet_credit_enable = 1;
+	conf.credit = info->tx_pkt_credit;
+#if IS_ENABLED(CONFIG_LTQ_DATAPATH_DBG)
+	if (dp_dbg_flag & DP_DBG_FLAG_QOS) {
+		DP_DEBUG(DP_DBG_FLAG_QOS,
+			 "qos_port_set info for p[%d/%d] dp_port=%d:\n",
+			 info->cqm_deq_port, qos_p_id,
+			 info->dp_port);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  arbitration=%d\n",
+			 conf.port_parent_prop.arbitration);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  ring_address=0x%x\n",
+			 (unsigned int)conf.ring_address);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  ring_size=%d\n",
+			 conf.ring_size);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  packet_credit_enable=%d\n",
+			 conf.packet_credit_enable);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  credit=%d\n",
+			 conf.credit);
+	}
+#endif
+	if (qos_port_set(qos_dev, qos_p_id, &conf)) {
+		PR_ERR("qos_port_set fail for port(cqm/qos) %d/%d\n",
+		       info->cqm_deq_port, qos_p_id);
+		qos_port_remove(qos_dev, qos_p_id);
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS,
+		 "qos_port_set ok for port(cqm/qos) %d/%d\n",
+		 info->cqm_deq_port, qos_p_id);
+	priv->deq_port_stat[info->cqm_deq_port].flag = PP_NODE_ALLOC;
+	priv->deq_port_stat[info->cqm_deq_port].node_id = qos_p_id;
+	info->node_id = qos_p_id;
+	return 0;
+}
+
+int dp_pp_alloc_queue(struct ppv4_queue *info)
+{
+	struct pp_qos_queue_conf conf;
+	unsigned int q_node_id;
+	struct pp_qos_queue_info q_info;
+	struct hal_priv *priv = HAL(info->inst);
+	struct pp_qos_dev *qos_dev = priv->qdev;
+
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+	qos_dev->dq_port = info->dq_port;
+#endif
+	if (qos_queue_allocate(qos_dev, &q_node_id)) {
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+		PR_ERR("qos_queue_allocate fail for dq_port %d\n",
+		       info->dq_port);
+#else
+		PR_ERR("qos_queue_allocate fail\n");
+#endif
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS, "qos_queue_allocate ok q_node=%d\n",
+		 q_node_id);
+
+	qos_queue_conf_set_default(&conf);
+	conf.wred_enable = 0;
+	conf.queue_wred_max_allowed = 0x400; /*max qocc in pkt */
+	conf.queue_child_prop.parent = info->parent;
+	if (qos_queue_set(qos_dev, q_node_id, &conf)) {
+		PR_ERR("qos_queue_set fail for node_id=%d to parent=%d\n",
+		       q_node_id, info->parent);
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS, "To attach q_node=%d to parent_node=%d\n",
+		 q_node_id, conf.queue_child_prop.parent);
+	if (qos_queue_info_get(qos_dev, q_node_id, &q_info)) {
+		PR_ERR("qos_queue_info_get fail for queue node_id=%d\n",
+		       q_node_id);
+		return -1;
+	}
+	info->qid = q_info.physical_id;
+	info->node_id = q_node_id;
+	DP_DEBUG(DP_DBG_FLAG_QOS, "Attached q[%d/%d] to parent_node=%d\n",
+		 q_info.physical_id, q_node_id,
+		 info->parent);
+	return 0;
+}
+#endif /*DUMMY_PPV4_QOS_API_OLD*/
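+
+/* Illustrative call sequence for the two allocators above (a sketch,
+ * not driver code; assumes init_ppv4_qos() has already set priv->qdev):
+ * create a QoS port for a CQM dequeue port, then hang a queue off it.
+ *
+ *	struct ppv4_port p = {0};
+ *	struct ppv4_queue q = {0};
+ *
+ *	p.inst = inst;
+ *	p.cqm_deq_port = cqe_deq;
+ *	p.tx_pkt_credit = credit;
+ *	if (!dp_pp_alloc_port(&p)) {
+ *		q.inst = inst;
+ *		q.parent = p.node_id;
+ *		dp_pp_alloc_queue(&q);
+ *	}
+ */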
+
+int init_ppv4_qos(int inst, int flag)
+{
+	union {
+		struct pp_qos_port_conf p_conf;
+		struct pp_qos_queue_conf q_conf;
+		struct pp_qos_queue_info q_info;
+		struct pp_qos_init_param param;
+	} t = {0};
+	int res, i;
+	struct hal_priv *priv = HAL(inst);
+#ifdef CONFIG_LTQ_DATAPATH_QOS_HAL
+	unsigned int q;
+	int drop_cqm_port = 26 + 63;
+#endif
+	if (!(flag & DP_PLATFORM_INIT)) {
+		PR_INFO("need to implement de-initialization for qos later\n");
+		priv->qdev = NULL;
+		return DP_SUCCESS;
+	}
+	priv->qdev = qos_dev_open(dp_port_prop[inst].qos_inst);
+	PR_INFO("qos_dev_open qdev=%p\n", priv->qdev);
+	if (!priv->qdev) {
+		PR_ERR("Could not open qos instance %d\n",
+		       dp_port_prop[inst].qos_inst);
+		return DP_FAILURE;
+	}
+	memset(&t.param, 0, sizeof(t.param));
+	t.param.wred_total_avail_resources = 0x10000;
+	t.param.wred_p_const = 512;
+	t.param.wred_max_q_size = 10000;
+	/*reserve all ppv4 ports for a 1:1 sequential mapping to cqm ports */
+	for (i = 0; i < ARRAY_SIZE(t.param.reserved_ports); i++)
+		t.param.reserved_ports[i] = 1;
+	res = qos_dev_init(priv->qdev, &t.param);
+	if (res) {
+		PR_ERR("qos_dev_init fail for qos inst %d\n",
+		       dp_port_prop[inst].qos_inst);
+		return DP_FAILURE;
+	}
+	PR_INFO("qos_dev_init done\n");
+#ifdef CONFIG_LTQ_DATAPATH_QOS_HAL
+	PR_INFO("Hard-coded last port as drop port: %d\n",
+		drop_cqm_port);
+	PR_INFO("priv=%p deq_port_stat=%p q_dev=%p\n",
+		priv, priv ? priv->deq_port_stat : NULL,
+		priv ? priv->qdev : NULL);
+	if (qos_port_allocate(priv->qdev,
+			      drop_cqm_port,
+			      &priv->ppv4_drop_p)) {
+		PR_ERR("Failed to alloc qos drop port=%d\n",
+		       drop_cqm_port);
+		return -1;
+	}
+	qos_port_conf_set_default(&t.p_conf);
+	t.p_conf.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	t.p_conf.ring_address = (void *)0x24590; /*dummy address*/
+	t.p_conf.ring_size = 1;
+	t.p_conf.packet_credit_enable = 0;
+	t.p_conf.credit = 2;
+	t.p_conf.common_prop.suspended = 1;
+
+#if IS_ENABLED(CONFIG_LTQ_DATAPATH_DBG)
+	if (dp_dbg_flag & DP_DBG_FLAG_QOS) {
+		DP_DEBUG(DP_DBG_FLAG_QOS,
+			 "qos_port_set param: %d/%d for drop port:\n",
+			 drop_cqm_port, priv->ppv4_drop_p);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  arbitration=%d\n",
+			 t.p_conf.port_parent_prop.arbitration);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  ring_address=0x%x\n",
+			 (unsigned int)t.p_conf.ring_address);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  ring_size=%d\n",
+			 t.p_conf.ring_size);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  packet_credit_enable=%d\n",
+			 t.p_conf.packet_credit_enable);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  credit=%d\n",
+			 t.p_conf.credit);
+		DP_DEBUG(DP_DBG_FLAG_QOS, "  suspend=%d\n",
+			 t.p_conf.common_prop.suspended);
+	}
+#endif
+	if (qos_port_set(priv->qdev, priv->ppv4_drop_p, &t.p_conf)) {
+		PR_ERR("qos_port_set fail for port(cqm/qos) %d/%d\n",
+		       drop_cqm_port, priv->ppv4_drop_p);
+		qos_port_remove(priv->qdev, priv->ppv4_drop_p);
+		return -1;
+	}
+
+	if (qos_queue_allocate(priv->qdev, &q)) {
+		PR_ERR("qos_queue_allocate fail\n");
+		qos_port_remove(priv->qdev, priv->ppv4_drop_p);
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS, "ppv4_drop_q alloc ok q_node=%d\n", q);
+
+	qos_queue_conf_set_default(&t.q_conf);
+	t.q_conf.blocked = 1; /*drop mode */
+	t.q_conf.wred_enable = 0;
+	t.q_conf.queue_wred_max_allowed = 0x400; /*max qocc in pkt */
+	t.q_conf.queue_child_prop.parent = priv->ppv4_drop_p;
+	if (qos_queue_set(priv->qdev, q, &t.q_conf)) {
+		PR_ERR("qos_queue_set fail for node_id=%d to parent=%d\n",
+		       q, t.q_conf.queue_child_prop.parent);
+		return -1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS, "To attach q_node=%d to parent_node=%d\n",
+		 q, priv->ppv4_drop_p);
+	if (qos_queue_info_get(priv->qdev, q, &t.q_info)) {
+		PR_ERR("qos_queue_info_get fail for queue node_id=%d\n",
+		       q);
+		return -1;
+	}
+	priv->ppv4_drop_q = t.q_info.physical_id;
+	priv->cqm_drop_p = drop_cqm_port;
+	DP_DEBUG(DP_DBG_FLAG_QOS, "Drop queue q[%d/%d] to parent=%d/%d\n",
+		 priv->ppv4_drop_q, q,
+		 drop_cqm_port,
+		 priv->ppv4_drop_p);
+#endif
+	PR_INFO("init_ppv4_qos done\n");
+	return res;
+}
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.h b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.h
index 5f9fc09d8..6fcd963fe 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.h
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4.h
@@ -10,13 +10,34 @@
 #ifndef DATAPATH_PPV4_H
 #define DATAPATH_PPV4_H
 
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+struct pp_qos_dev {
+	int dq_port;
+};
+#endif
+
+#define MAX_CQM_DEQ 128 /*Need further check*/
+#define MAX_QUEUE   256 /*Need further check*/
+#define MAX_PP_CHILD_PER_NODE  8 /*Maximum queue per scheduler*/
+#define MAX_Q_PER_PORT 32 /* Maximum queue per port */
+#define QOS_MAX_NODES  2048 /*Maximum PPV4 nodes */
+
+#define HAL(inst) ((struct hal_priv *)dp_port_prop[inst].priv_hal)
+#define PARENT(x) ((x).queue_child_prop.parent)
+#define PARENT_S(x) ((x).sched_child_prop.parent)
+
 struct ppv4_queue {
+	int inst;  /* dp instance */
 	u16 qid;  /*-1 means dynammic, otherwise already configured*/
 	u16 node_id; /*output */
 	u16 sch;  /*-1 means dynammic, otherwise already configured*/
 	u16 parent; /*-1 means no parent.
 		     *it is used for shared dropping queueu purpose
 		     */
+
+#ifdef CONFIG_LTQ_DATAPATH_DUMMY_QOS
+	int dq_port; /*cqm dequeue port for qos slim driver queue alloc only */
+#endif
 };
 
 struct ppv4_scheduler {
@@ -26,9 +47,10 @@ struct ppv4_scheduler {
 };
 
 struct ppv4_port {
+	int inst;
 	u16 dp_port;
-	u16 qos_deq_port; /*-1 means dynammic, otherwise already specified*/
-	u16 cqm_deq_port;
+	u16 qos_deq_port; /*-1 means dynamic, otherwise already specified.
+			   *Remove in new datapath lib
+			   */
+	u16 cqm_deq_port; /*rename in new datapath lib*/
 	u16 node_id; /*output */
 
 	u32 tx_pkt_credit;  /*PP port tx bytes credit */
@@ -51,15 +73,7 @@ struct ppv4_q_sch_port {
 	u32 q_node;
 	u32 schd_node;
 	u32 port_node;
-	u32 f_deq_port_en:1; /*Need enable dequee port later */
-};
-
-struct pp_q_list {
-	u32 flag:1; /*0: valid 1-used 2-reserved*/
-	u16 qid;  /*physical queue id */
-	u16 node;
-	u16 parent_type;  /*scheduler/port*/
-	u16 parent;
+	u32 f_deq_port_en:1; /*flag to trigger cbm_dp_enable */
 };
 
 struct pp_sch_list {
@@ -83,21 +97,11 @@ enum PP_NODE_STAT {
 };
 
 struct pp_node {
-	enum PP_NODE_STAT flag; /*0: valid 1-used 2-reserved*/
-	u16 type;
-	u16 node_id;
-	struct pp_node *child[MAX_PP_CHILD_PER_NODE];
+	enum PP_NODE_STAT flag; /*0: FREE 1-used/alloc*/
+	u16 type; /*node type*/
+	u16 node_id;  /*node id */
 };
 
-struct cqm_deq_stat {
-	enum PP_NODE_STAT flag; /*0: valid 1-used 2-reserved*/
-	u16 deq_id; /*qos dequeue port physical id. Maybe no need */
-	u16 node_id; /* qos dequeue port's node id */
-	struct pp_node child[MAX_PP_CHILD_PER_NODE];
-};
-
-#define QOS_MAX_NODES 2048
-
 struct pp_queue_stat {
 	enum PP_NODE_STAT flag; /*0: valid 1-used 2-reserved*/
 	u16 deq_port; /*cqm dequeue port id */
@@ -106,15 +110,78 @@ struct pp_queue_stat {
 };
 
 struct pp_sch_stat {
-	enum PP_NODE_STAT flag; /*0: valid 1-used 2-reserved*/
+	enum PP_NODE_STAT c_flag; /*sch flag linked to child*/
+	enum PP_NODE_STAT p_flag; /*sch flag linked to parent*/
 	u16 resv_idx; /* index of reserve table */
+	struct pp_node child[MAX_PP_CHILD_PER_NODE];
+	u16 child_num; /* Number of child */
+	int type; /*Node type for queue/sch/port:
+		   *the sched table is not only for schedulers but also
+		   *for queues/ports; its index is the logical node id,
+		   *not just the scheduler id
+		   */
+	struct pp_node parent; /*valid for node type queue/sch*/
+};
+
+struct cqm_port_info {
+	u32 tx_pkt_credit;  /*PP port tx packet credit */
+	u32 tx_ring_addr;  /*PP port ring address. should follow HW definition*/
+	u32 tx_ring_size; /*PP port ring size */
 };
 
+struct cqm_deq_stat {
+	enum PP_NODE_STAT flag; /*0: valid 1-used 2-reserved*/
+	u16 deq_id; /*qos dequeue port physical id. Maybe no need */
+	u16 node_id; /* qos dequeue port's node id */
+	struct pp_node child[MAX_PP_CHILD_PER_NODE];
+	u16 child_num; /* Number of child */
+};
+
+void init_qos_fn(void);
+extern int (*qos_queue_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+extern int (*qos_queue_allocate)(struct pp_qos_dev *qos_dev, unsigned int *id);
+extern int (*qos_queue_info_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+				 struct pp_qos_queue_info *info);
+extern int (*qos_port_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+extern int (*qos_sched_allocate)(struct pp_qos_dev *qos_dev, unsigned int *id);
+extern int (*qos_sched_remove)(struct pp_qos_dev *qos_dev, unsigned int id);
+extern int (*qos_port_allocate)(struct pp_qos_dev *qos_dev,
+				unsigned int physical_id,
+				unsigned int *id);
+extern int (*qos_port_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+		    const struct pp_qos_port_conf *conf);
+extern void (*qos_port_conf_set_default)(struct pp_qos_port_conf *conf);
+extern void (*qos_queue_conf_set_default)(struct pp_qos_queue_conf *conf);
+extern int (*qos_queue_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+			    const struct pp_qos_queue_conf *conf);
+extern void (*qos_sched_conf_set_default)(struct pp_qos_sched_conf *conf);
+extern int (*qos_sched_set)(struct pp_qos_dev *qos_dev, unsigned int id,
+			    const struct pp_qos_sched_conf *conf);
+extern int (*qos_queue_conf_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+				 struct pp_qos_queue_conf *conf);
+extern int (*qos_queue_flush)(struct pp_qos_dev *qos_dev, unsigned int id);
+extern int (*qos_sched_conf_get)(struct pp_qos_dev *qos_dev, unsigned int id,
+			         struct pp_qos_sched_conf *conf);
+extern int (*qos_sched_get_queues)(struct pp_qos_dev *qos_dev, unsigned int id,
+				   uint16_t *queue_ids, unsigned int size,
+				   unsigned int *queues_num);
+extern int (*qos_port_get_queues)(struct pp_qos_dev *qos_dev, unsigned int id,
+				  uint16_t *queue_ids, unsigned int size,
+				  unsigned int *queues_num);
+extern int (*qos_port_conf_get)(struct pp_qos_dev *qdev, unsigned int id,
+				   struct pp_qos_port_conf *conf);
+extern int (*qos_port_info_get)(struct pp_qos_dev *qdev, unsigned int id,
+				struct pp_qos_port_info *info);
+extern struct pp_qos_dev *(*qos_dev_open)(unsigned int id);
+extern int (*qos_dev_init)(struct pp_qos_dev *qos_dev,
+			   struct pp_qos_init_param *conf);
+
 int dp_pp_alloc_port(struct ppv4_port *info);
 int dp_pp_alloc_sched(struct ppv4_scheduler *info);
 int dp_pp_alloc_queue(struct ppv4_queue *info);
 int alloc_q_to_port(struct ppv4_q_sch_port *info, u32 flag);
 extern struct cqm_deq_stat deq_port_stat[MAX_CQM_DEQ];
 extern struct pp_queue_stat qos_queue_stat[MAX_QUEUE];
+int init_ppv4_qos(int inst, int flag);
 #endif /*DATAPATH_PPV4_H*/
 
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4_api.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4_api.c
index afabc1047..0b9065c1f 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4_api.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_ppv4_api.c
@@ -1,32 +1,70 @@
 #include <net/datapath_api.h>
 #include <net/datapath_api_qos.h>
+#include <net/pp_qos_drv.h>
 #include "../datapath.h"
 #include "datapath_misc.h"
+#include <net/lantiq_cbm_api.h>
+
+enum flag {
+	DP_NODE_DEC = BIT(0), /*flag to reduce node counter*/
+	DP_NODE_INC = BIT(1), /*flag to increase node counter*/
+	DP_NODE_RST = BIT(2), /*flag to reset node counter*/
+	C_FLAG = BIT(8), /*sch flag linked to node*/
+	P_FLAG = BIT(9) /*sch flag linked to parent*/
+};
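+
+/* The DP_NODE_* bits select the counter operation and C_FLAG/P_FLAG say
+ * which side of a scheduler is touched; callers OR them together, e.g.
+ * (as dp_link_unset() below does):
+ *
+ *	node_stat_update(inst, node_id, DP_NODE_DEC | P_FLAG);
+ */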
 
 int qos_platform_set(int cmd_id, void *node, int flag)
 {
+	struct dp_node_link *node_link = (struct dp_node_link *)node;
+	int inst;
+	struct hal_priv *priv;
+
+	if (!node)
+		return DP_FAILURE;
+	inst = node_link->inst;
+	priv = HAL(inst);
+	if (!priv || !priv->qdev) {
+		PR_ERR("qdev NULL with inst=%d\n", inst);
+		return DP_FAILURE;
+	}
+
 	switch (cmd_id) {
 	case NODE_LINK_ADD:
+		dp_node_link_add_31((struct dp_node_link *)node, flag);
 		break;
 	case NODE_LINK_GET:
+		dp_node_link_get_31((struct dp_node_link *)node, flag);
 		break;
 	case NODE_LINK_EN_GET:
+		dp_node_link_en_get_31((struct dp_node_link_enable *)node,
+				       flag);
 		break;
 	case NODE_LINK_EN_SET:
+		dp_node_link_en_set_31((struct dp_node_link_enable *)node,
+				       flag);
 		break;
 	case NODE_UNLINK:
+		dp_node_unlink_31((struct dp_node_link *)node, flag);
 		break;
 	case LINK_ADD:
+		dp_link_add_31((struct dp_qos_link *)node, flag);
 		break;
 	case LINK_GET:
+		dp_link_get_31((struct dp_qos_link *)node, flag);
 		break;
 	case LINK_PRIO_SET:
+		dp_qos_link_prio_set_31((struct dp_node_prio *)node,
+					flag);
 		break;
 	case LINK_PRIO_GET:
+		dp_qos_link_prio_get_31((struct dp_node_prio *)node,
+					flag);
 		break;
 	case QUEUE_CFG_SET:
+		dp_queue_conf_set_31((struct dp_queue_conf *)node, flag);
 		break;
 	case QUEUE_CFG_GET:
+		dp_queue_conf_get_31((struct dp_queue_conf *)node, flag);
 		break;
 	case SHAPER_SET:
 		break;
@@ -39,6 +77,7 @@ int qos_platform_set(int cmd_id, void *node, int flag)
 		dp_node_free_31((struct dp_node_alloc *)node, flag);
 		break;
 	case DEQ_PORT_RES_GET:
+		dp_deq_port_res_get_31((struct dp_dequeue_res *)node, flag);
 		break;
 	case COUNTER_MODE_SET:
 		break;
@@ -54,191 +93,2182 @@ int qos_platform_set(int cmd_id, void *node, int flag)
 	return DP_SUCCESS;
 }
 
-int dp_node_alloc_resv_31(struct dp_node_alloc *node, int flag)
+/* get_qid_by_node API
+ * looks up the queue with the given node id
+ * on success returns the physical queue id,
+ * else returns DP_FAILURE
+ */
+static int get_qid_by_node(int inst, int node_id, int flag)
+{
+	int i;
+	struct hal_priv *priv = HAL(inst);
+
+	for (i = 0; i < MAX_QUEUE; i++) {
+		if (node_id == priv->qos_queue_stat[i].node_id)
+			return i;
+	}
+	return DP_FAILURE;
+}
+
+/* get_cqm_deq_port_by_node API
+ * looks up the qos dequeue port with the given node id
+ * on success returns the physical cqm_deq_port id,
+ * else returns DP_FAILURE
+ */
+static int get_cqm_deq_port_by_node(int inst, int node_id, int flag)
+{
+	int i;
+	struct hal_priv *priv = HAL(inst);
+
+	for (i = 0; i < MAX_CQM_DEQ; i++) {
+		if (node_id == priv->deq_port_stat[i].node_id)
+			return i;
+	}
+	return DP_FAILURE;
+}
+
+/* get_node_type_by_node_id API
+ * looks up node_id in the global sch table
+ * returns the node_type of node_id
+ */
+static int get_node_type_by_node_id(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+
+	return priv->qos_sch_stat[node_id].type;
+}
+
+/* get_free_child_idx API
+ * check free flag for child in parent's table and return index
+ * else return DP_FAILURE
+ */
+static int get_free_child_idx(int inst, int node_id, int flag)
+{
+	int i;
+	struct hal_priv *priv = HAL(inst);
+
+	for (i = 0; i < MAX_PP_CHILD_PER_NODE; i++) {
+		if (priv->qos_sch_stat[node_id].child[i].flag == PP_NODE_FREE)
+			return i;
+	}
+	return DP_FAILURE;
+}
+
+/* get_parent_node API
+ * checks the parent flag in the node's global table; if active,
+ * returns the parent id, else returns DP_FAILURE
+ */
+static int get_parent_node(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+
+	if (priv->qos_sch_stat[node_id].parent.flag)
+		return priv->qos_sch_stat[node_id].parent.node_id;
+	return DP_FAILURE;
+}
+
+/* get_child_idx_node_id API
+ * finds node_id's index in its parent's child table
+ * else returns DP_FAILURE
+ */
+static int get_child_idx_node_id(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int i, p_id;
+
+	p_id = priv->qos_sch_stat[node_id].parent.node_id;
+
+	for (i = 0; i < MAX_PP_CHILD_PER_NODE; i++) {
+		if (node_id == priv->qos_sch_stat[p_id].child[i].node_id)
+			return i;
+	}
+	return DP_FAILURE;
+}
+
+/* node_queue_dec API
+ * for queue id = node_id, flag = DP_NODE_DEC
+ * Set Queue flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
+ * else return DP_FAILURE
+ */
+static int node_queue_dec(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_qid_by_node(inst, node_id, flag);
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_qid_by_node failed\n");
+		return DP_FAILURE;
+	}
+
+	if (!(priv->qos_queue_stat[phy_id].flag & PP_NODE_ACTIVE))
+		PR_ERR("Wrong Q[%d] Stat(%d):Expect ACTIVE\n", phy_id,
+		       priv->qos_queue_stat[phy_id].flag);
+	if (!priv->qos_sch_stat[node_id].parent.flag)
+		PR_ERR("Wrong Q[%d]'s Parent Stat(%d):Expect ACTIVE\n", node_id,
+		       priv->qos_sch_stat[node_id].parent.flag);
+	priv->qos_sch_stat[node_id].parent.node_id = 0;
+	priv->qos_sch_stat[node_id].parent.type = 0;
+	priv->qos_sch_stat[node_id].parent.flag = 0;
+	priv->qos_queue_stat[phy_id].flag = PP_NODE_ALLOC;
+	return DP_SUCCESS;
+}
+
+/* node_queue_inc API
+ * for queue id = node_id, flag = DP_NODE_INC
+ * Set Queue flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
+ * else return DP_FAILURE
+ */
+static int node_queue_inc(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_qid_by_node(inst, node_id, flag);
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_qid_by_node failed\n");
+		return DP_FAILURE;
+	}
+
+	if (!(priv->qos_queue_stat[phy_id].flag & PP_NODE_ALLOC))
+		PR_ERR("Wrong Q[%d] Stat(%d):Expect ALLOC\n", phy_id,
+		       priv->qos_queue_stat[phy_id].flag);
+	priv->qos_queue_stat[phy_id].flag = PP_NODE_ACTIVE;
+	return DP_SUCCESS;
+}
+
+/* node_queue_rst API
+ * for queue id = node_id, flag = DP_NODE_RST
+ * Set Queue flag from PP_NODE_ALLOC to PP_NODE_FREE
+ * Set allocated memory free
+ * else return DP_FAILURE
+ */
+static int node_queue_rst(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_qid_by_node(inst, node_id, flag);
+	int resv_idx;
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_qid_by_node failed\n");
+		return DP_FAILURE;
+	}
+	resv_idx = priv->qos_queue_stat[phy_id].resv_idx;
+
+	if (!(priv->qos_queue_stat[phy_id].flag & PP_NODE_ALLOC))
+		PR_ERR("Wrong Q[%d] Stat(%d):Expect ALLOC\n", phy_id,
+		       priv->qos_queue_stat[phy_id].flag);
+	/* check resv_idx if valid then remove resource from resv table*/
+	if (priv->qos_queue_stat[phy_id].flag & PP_NODE_RESERVE) {
+		if (priv->resv[resv_idx].resv_q->flag != PP_NODE_ALLOC)
+			PR_ERR("Wrong Q[%d] Stat(%d):Expect ALLOC\n", phy_id,
+			       priv->resv[resv_idx].resv_q->flag);
+		/*reset flag in reserved table*/
+		priv->resv[resv_idx].resv_q->flag = PP_NODE_FREE;
+	}
+	/*Remove resource from global table*/
+	memset(&priv->qos_queue_stat[phy_id], 0,
+	       sizeof(priv->qos_queue_stat[phy_id]));
+	memset(&priv->qos_sch_stat[node_id], 0,
+	       sizeof(priv->qos_sch_stat[node_id]));
+	priv->qos_queue_stat[phy_id].resv_idx = -1;
+	return DP_SUCCESS;
+}
+
+/* node_sched_dec API
+ * for scheduler id = node_id, flag = DP_NODE_DEC
+ * Set Sched flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
+ * else return DP_FAILURE
+ */
+static int node_sched_dec(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int idx = get_child_idx_node_id(inst, node_id, 0);
+
+	if (idx == DP_FAILURE) {
+		PR_ERR("get_child_idx_node_id failed\n");
+		return DP_FAILURE;
+	}
+
+	if (flag & C_FLAG) {
+		if (!priv->qos_sch_stat[node_id].child_num ||
+		    !(priv->qos_sch_stat[node_id].c_flag & PP_NODE_ACTIVE)) {
+			PR_ERR("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
+			       node_id, priv->qos_sch_stat[node_id].c_flag,
+			       priv->qos_sch_stat[node_id].child_num,
+			       "Expect ACTIVE Or non-zero child_num");
+			return DP_FAILURE;
+		}
+		if (priv->qos_sch_stat[node_id].child[idx].flag) {
+			priv->qos_sch_stat[node_id].child[idx].node_id = 0;
+			priv->qos_sch_stat[node_id].child[idx].type = 0;
+			priv->qos_sch_stat[node_id].child[idx].flag = 0;
+		}
+		priv->qos_sch_stat[node_id].child_num--;
+		if (!priv->qos_sch_stat[node_id].child_num)
+			priv->qos_sch_stat[node_id].c_flag = PP_NODE_ALLOC;
+		return DP_SUCCESS;
+	} else if (flag & P_FLAG) {
+		if (!(priv->qos_sch_stat[node_id].p_flag & PP_NODE_ACTIVE))
+			PR_ERR("Wrong Sch[%d] Stat(%d):Expect ACTIVE\n",
+			       node_id, priv->qos_sch_stat[node_id].p_flag);
+		if (!priv->qos_sch_stat[node_id].parent.flag)
+			PR_ERR("Wrong Sch[%d]'s Parent Stat(%d):Expect ACTIVE\n",
+			       node_id,
+			       priv->qos_sch_stat[node_id].parent.flag);
+		priv->qos_sch_stat[node_id].parent.node_id = 0;
+		priv->qos_sch_stat[node_id].parent.type = 0;
+		priv->qos_sch_stat[node_id].parent.flag = 0;
+		priv->qos_sch_stat[node_id].p_flag = PP_NODE_ALLOC;
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
+
+/* node_sched_inc API
+ * for scheduler id = node_id, flag = DP_NODE_INC
+ * Set Sched flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
+ * else return DP_FAILURE
+ */
+static int node_sched_inc(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+
+	if (flag & C_FLAG) {
+		int idx;
+		int child_type;
+
+		idx = get_free_child_idx(inst, node_id, 0);
+		if (idx == DP_FAILURE) {
+			PR_ERR("get_free_child_idx failed\n");
+			return DP_FAILURE;
+		}
+
+		if (priv->qos_sch_stat[node_id].child_num &&
+		    !(priv->qos_sch_stat[node_id].c_flag & PP_NODE_ACTIVE)) {
+			PR_ERR("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
+			       node_id, priv->qos_sch_stat[node_id].c_flag,
+			       priv->qos_sch_stat[node_id].child_num,
+			       "Expect ACTIVE And Non-Zero child_num");
+			return DP_FAILURE;
+		}
+		if (!priv->qos_sch_stat[node_id].child_num &&
+		    !(priv->qos_sch_stat[node_id].c_flag & PP_NODE_ALLOC)) {
+			PR_ERR("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
+			       node_id, priv->qos_sch_stat[node_id].c_flag,
+			       priv->qos_sch_stat[node_id].child_num,
+			       "Expect ALLOC And zero child_num");
+			return DP_FAILURE;
+		}
+		child_type = get_node_type_by_node_id(inst, node_id, 0);
+		priv->qos_sch_stat[node_id].child[idx].type = child_type;
+		priv->qos_sch_stat[node_id].child[idx].node_id = node_id;
+		priv->qos_sch_stat[node_id].child[idx].flag = PP_NODE_ACTIVE;
+		priv->qos_sch_stat[node_id].child_num++;
+		priv->qos_sch_stat[node_id].c_flag = PP_NODE_ACTIVE;
+		return DP_SUCCESS;
+	} else if (flag & P_FLAG) {
+		if (!(priv->qos_sch_stat[node_id].p_flag & PP_NODE_ALLOC)) {
+			PR_ERR("Wrong Sch[%d] Stat(%d):Expect ALLOC\n",
+			       node_id, priv->qos_sch_stat[node_id].p_flag);
+			return DP_FAILURE;
+		}
+		priv->qos_sch_stat[node_id].p_flag = PP_NODE_ACTIVE;
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
+
+/* node_sched_rst API
+ * for scheduler id = node_id, flag = DP_NODE_RST
+ * sanity check for child_num and both c and p_flag in alloc state
+ * then reset whole sched
+ * Set Sched flag from PP_NODE_ALLOC to PP_NODE_FREE
+ * else return DP_FAILURE
+ */
+static int node_sched_rst(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int resv_idx = priv->qos_sch_stat[node_id].resv_idx;
+
+	/*sanity check for child_num and both c and p_flag in alloc state
+	 *then reset whole sched
+	 */
+	if (priv->qos_sch_stat[node_id].child_num ||
+	    !(priv->qos_sch_stat[node_id].c_flag & PP_NODE_ALLOC) ||
+	    !(priv->qos_sch_stat[node_id].p_flag & PP_NODE_ALLOC)) {
+		PR_ERR("Wrong Sch[%d] c_flag(%d)/p_flag(%d)/child_num(%d):%s\n",
+		       node_id, priv->qos_sch_stat[node_id].c_flag,
+		       priv->qos_sch_stat[node_id].p_flag,
+		       priv->qos_sch_stat[node_id].child_num,
+		       "Expect c_flag OR p_flag ALLOC OR Non-zero child_num");
+		return DP_FAILURE;
+	}
+	/*Free Reserved Resource*/
+	if (resv_idx >= 0 &&
+	    priv->resv[resv_idx].resv_sched->flag == PP_NODE_ALLOC) {
+		priv->resv[resv_idx].resv_sched->flag = PP_NODE_FREE;
+		priv->qos_sch_stat[node_id].resv_idx = -1;
+	}
+	/*Free Global Resource*/
+	memset(&priv->qos_sch_stat[node_id], 0,
+	       sizeof(priv->qos_sch_stat[node_id]));
+	priv->qos_sch_stat[node_id].resv_idx = -1;
+	return DP_SUCCESS;
+}
+
+/* node_port_dec API
+ * Check for child_num and active flag
+ * for port logical node_id, flag = DP_NODE_DEC
+ * Set Port flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
+ * else return DP_FAILURE
+ */
+static int node_port_dec(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_cqm_deq_port_by_node(inst, node_id, flag);
+	int idx = get_child_idx_node_id(inst, node_id, 0);
+
+	if (idx == DP_FAILURE) {
+		PR_ERR("get_child_idx_node_id failed\n");
+		return DP_FAILURE;
+	}
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_cqm_deq_port_by_node failed\n");
+		return DP_FAILURE;
+	}
+
+	if (!priv->deq_port_stat[phy_id].child_num ||
+	    !(priv->deq_port_stat[phy_id].flag & PP_NODE_ACTIVE)) {
+		PR_ERR("Wrong P[%d] Stat(%d)/child_num(%d):%s\n",
+		       phy_id, priv->deq_port_stat[phy_id].flag,
+		       priv->deq_port_stat[phy_id].child_num,
+		       "Expect ACTIVE Or non-zero child_num");
+		return DP_FAILURE;
+	}
+	if (priv->qos_sch_stat[node_id].child[idx].flag) {
+		priv->qos_sch_stat[node_id].child[idx].node_id = 0;
+		priv->qos_sch_stat[node_id].child[idx].type = 0;
+		priv->qos_sch_stat[node_id].child[idx].flag = 0;
+	}
+	priv->deq_port_stat[phy_id].child_num--;
+	if (!priv->deq_port_stat[phy_id].child_num)
+		priv->deq_port_stat[phy_id].flag = PP_NODE_ALLOC;
+	return DP_SUCCESS;
+}
+
+/* node_port_inc API
+ * for port logical node_id, flag = DP_NODE_INC
+ * Set Port flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
+ * else return DP_FAILURE
+ */
+static int node_port_inc(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_cqm_deq_port_by_node(inst, node_id, flag);
+	int child_type = get_node_type_by_node_id(inst, node_id, 0);
+	int idx = get_free_child_idx(inst, node_id, 0);
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_cqm_deq_port_by_node failed\n");
+		return DP_FAILURE;
+	}
+
+	if (idx == DP_FAILURE) {
+		PR_ERR("get_free_child_idx failed\n");
+		return DP_FAILURE;
+	}
+
+	if (priv->deq_port_stat[phy_id].child_num &&
+	    !(priv->deq_port_stat[phy_id].flag & PP_NODE_ACTIVE)) {
+		PR_ERR("Wrong P[%d] Stat(%d)/child_num(%d):%s\n", phy_id,
+		       priv->deq_port_stat[phy_id].flag,
+		       priv->deq_port_stat[phy_id].child_num,
+		       "Expect ACTIVE And Non-Zero child_num");
+		return DP_FAILURE;
+	}
+	if (!priv->deq_port_stat[phy_id].child_num &&
+	    !(priv->deq_port_stat[phy_id].flag & PP_NODE_ALLOC)) {
+		PR_ERR("Wrong P[%d] Stat(%d)/child_num(%d):%s\n", phy_id,
+		       priv->deq_port_stat[phy_id].flag,
+		       priv->deq_port_stat[phy_id].child_num,
+		       "Expect ALLOC And Zero child_num");
+		return DP_FAILURE;
+	}
+	priv->qos_sch_stat[node_id].child[idx].type = child_type;
+	priv->qos_sch_stat[node_id].child[idx].node_id = node_id;
+	priv->qos_sch_stat[node_id].child[idx].flag = PP_NODE_ACTIVE;
+	priv->qos_sch_stat[node_id].child_num++;
+	priv->deq_port_stat[phy_id].child_num++;
+	priv->deq_port_stat[phy_id].flag = PP_NODE_ACTIVE;
+	return DP_SUCCESS;
+}
+
+/* node_port_rst API
+ * Check for child_num and alloc flag
+ * for port logical node_id, flag = DP_NODE_RST
+ * Set Port flag from PP_NODE_ALLOC to PP_NODE_FREE
+ * else return DP_FAILURE
+ */
+static int node_port_rst(int inst, int node_id, int flag)
+{
+	struct hal_priv *priv = HAL(inst);
+	int phy_id = get_cqm_deq_port_by_node(inst, node_id, flag);
+
+	if (phy_id == DP_FAILURE) {
+		PR_ERR("get_cqm_deq_port_by_node failed\n");
+		return DP_FAILURE;
+	}
+
+	if (priv->deq_port_stat[phy_id].child_num ||
+	    !(priv->deq_port_stat[phy_id].flag & PP_NODE_ALLOC)) {
+		PR_ERR("Wrong P[%d] Stat(%d)/child_num(%d):%s\n", phy_id,
+		       priv->deq_port_stat[phy_id].flag,
+		       priv->deq_port_stat[phy_id].child_num,
+		       "Expect ALLOC Or non-zero child_num");
+		return DP_FAILURE;
+	}
+	memset(&priv->deq_port_stat[phy_id], 0,
+	       sizeof(priv->deq_port_stat[phy_id]));
+	memset(&priv->qos_sch_stat[node_id], 0,
+	       sizeof(priv->qos_sch_stat[node_id]));
+	return DP_SUCCESS;
+}
+
+/* node_stat_update API
+ * node_id is logical node id
+ * if flag = DP_NODE_DEC
+ *           update flag PP_NODE_ACTIVE to PP_NODE_ALLOC if needed
+ *           update child info
+ * else if flag = DP_NODE_INC
+ *           update flag PP_NODE_ALLOC to PP_NODE_ACTIVE
+ * else if flag = DP_NODE_RST
+ *           update flag PP_NODE_ALLOC to PP_NODE_FREE
+ *           reset table info
+ * else return DP_FAILURE
+ */
+static int node_stat_update(int inst, int node_id, int flag)
+{
+	int node_type = get_node_type_by_node_id(inst, node_id, 0);
+
+	if (flag & DP_NODE_DEC) {
+		if (node_type == DP_NODE_QUEUE)
+			return node_queue_dec(inst, node_id, flag);
+		else if (node_type == DP_NODE_SCH)
+			return node_sched_dec(inst, node_id, flag);
+		else if (node_type == DP_NODE_PORT)
+			return node_port_dec(inst, node_id, flag);
+		return DP_FAILURE;
+	} else if (flag & DP_NODE_INC) {
+		if (node_type == DP_NODE_QUEUE)
+			return node_queue_inc(inst, node_id, flag);
+		else if (node_type == DP_NODE_SCH)
+			return node_sched_inc(inst, node_id, flag);
+		else if (node_type == DP_NODE_PORT)
+			return node_port_inc(inst, node_id, flag);
+		return DP_FAILURE;
+	} else if (flag & DP_NODE_RST) {
+		if (node_type == DP_NODE_QUEUE)
+			return node_queue_rst(inst, node_id, flag);
+		else if (node_type == DP_NODE_SCH)
+			return node_sched_rst(inst, node_id, flag);
+		else if (node_type == DP_NODE_PORT)
+			return node_port_rst(inst, node_id, flag);
+		return DP_FAILURE;
+	}
+	return DP_FAILURE;
+}
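+
+/* Illustrative sketch (comment only, not compiled): the bookkeeping
+ * sequence used when a queue is torn down (see dp_node_free_31 below).
+ * q_node and parent are hypothetical logical node ids.
+ *
+ *	node_stat_update(inst, q_node, DP_NODE_DEC);           q -> ALLOC
+ *	node_stat_update(inst, parent, DP_NODE_DEC | C_FLAG);  child_num--
+ *	node_stat_update(inst, q_node, DP_NODE_RST);           q -> FREE
+ */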
+
+/* dp_link_unset API
+ * call node_stat_update with DP_NODE_DEC flag
+ * upon success unlinks the node from its parent and returns DP_SUCCESS
+ * else return DP_FAILURE
+ */
+static int dp_link_unset(struct dp_node_link *info, int flag)
+{
+	int node_id, p_id;
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct hal_priv *priv = HAL(info->inst);
+
+	if (info->node_type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[info->node_id.q_id].node_id;
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+			PR_ERR("failed to qos_queue_conf_get\n");
+			return DP_FAILURE;
+		}
+		if (queue_cfg.queue_child_prop.parent ==
+						*(int *)&info->p_node_id) {
+			if (qos_queue_remove(priv->qdev, node_id)) {
+				PR_ERR("failed to qos_queue_remove\n");
+				return DP_FAILURE;
+			}
+		}
+		goto EXIT;
+	} else if (info->node_type == DP_NODE_SCH) {
+		if (qos_sched_conf_get(priv->qdev, info->node_id.sch_id,
+					  &sched_cfg)) {
+			PR_ERR("failed to qos_queue_conf_get\n");
+			return DP_FAILURE;
+		}
+		if (sched_cfg.sched_child_prop.parent ==
+						*(int *)&info->p_node_id) {
+			if (qos_sched_remove(priv->qdev,
+						info->node_id.sch_id)) {
+				PR_ERR("failed to qos_sched_remove\n");
+				return DP_FAILURE;
+			}
+		}
+		goto EXIT;
+	}
+	return DP_FAILURE;
+
+EXIT:
+	/*Child flag update after unlink*/
+	node_stat_update(info->inst, *(int *)&info->node_id,
+			 DP_NODE_DEC | P_FLAG);
+	/*reduce child_num in parent's global table;
+	 *info->p_node_id is valid on both the queue and the sched path
+	 */
+	if (info->p_node_type == DP_NODE_SCH) {
+		node_stat_update(info->inst, *(int *)&info->p_node_id,
+				 DP_NODE_DEC | C_FLAG);
+	} else { /*convert parent to cqm_deq_port*/
+		p_id = get_cqm_deq_port_by_node(info->inst,
+						*(int *)&info->p_node_id,
+						flag);
+		node_stat_update(info->inst, p_id, DP_NODE_DEC);
+	}
+	return DP_SUCCESS;
+}
+
+/* dp_node_alloc_resv_pool API
+ * Checks the flag and the input node
+ * upon success allocates a resource from the reserved table
+ * otherwise returns failure
+ */
+static int dp_node_alloc_resv_pool(struct dp_node_alloc *node, int flag)
+{
+	int i, cnt, phy_id, node_id;
+	struct hal_priv *priv;
+	struct resv_q *resv_q;
+
+	if (!node) {
+		PR_ERR("node NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(node->inst);
+	if (!priv) {
+		PR_ERR("priv NULL\n");
+		return DP_FAILURE;
+	}
+	resv_q = priv->resv[node->dp_port].resv_q;
+	PR_INFO("inst=%d dp_port=%d num_resv_q=%d num_resv_sched=%d\n",
+		node->inst,
+		node->dp_port,
+		priv->resv[node->dp_port].num_resv_q,
+		priv->resv[node->dp_port].num_resv_sched);
+	if (node->type == DP_NODE_QUEUE) {
+		cnt = priv->resv[node->dp_port].num_resv_q;
+		if (!cnt)
+			return DP_FAILURE;
+		PR_INFO("try to look for resv sche...\n");
+		for (i = 0; i < cnt; i++) {
+			if (resv_q[i].flag != PP_NODE_FREE)
+				continue;
+			phy_id = resv_q[i].physical_id;
+			node_id = resv_q[i].id;
+			resv_q[i].flag = PP_NODE_ALLOC;
+			priv->qos_queue_stat[phy_id].flag = PP_NODE_RESERVE |
+								PP_NODE_ALLOC;
+			priv->qos_queue_stat[phy_id].node_id = node_id;
+			priv->qos_queue_stat[phy_id].resv_idx = i;
+			priv->qos_sch_stat[node_id].type = DP_NODE_QUEUE;
+			node->id = (union dp_node_id)phy_id;
+			return DP_SUCCESS;
+		}
+	} else if (node->type == DP_NODE_SCH) {
+		struct resv_sch *resv_sched;
+
+		cnt = priv->resv[node->dp_port].num_resv_sched;
+		if (!cnt)
+			return DP_FAILURE;
+		resv_sched = priv->resv[node->dp_port].resv_sched;
+		for (i = 0; i < cnt; i++) {
+			if (resv_sched[i].flag != PP_NODE_FREE)
+				continue;
+			PR_INFO("try to look for resv sche...\n");
+			node_id = resv_sched[i].id;
+			resv_sched[i].flag = PP_NODE_ALLOC;
+			priv->qos_sch_stat[node_id].c_flag = PP_NODE_RESERVE |
+								PP_NODE_ALLOC;
+			priv->qos_sch_stat[node_id].p_flag = PP_NODE_RESERVE |
+								PP_NODE_ALLOC;
+			priv->qos_sch_stat[node_id].resv_idx = i;
+			priv->qos_sch_stat[node_id].child_num = 0;
+			priv->qos_sch_stat[node_id].type = DP_NODE_SCH;
+			node->id = (union dp_node_id)node_id;
+			return DP_SUCCESS;
+		}
+	}
+	return DP_FAILURE;
+}
+
+/* dp_node_alloc_global_pool API
+ * Checks the flag and the input node
+ * upon success allocates a resource from the global table
+ * otherwise returns failure
+ */
+static int dp_node_alloc_global_pool(struct dp_node_alloc *node, int flag)
+{
+	int id, phy_id;
+	struct pp_qos_queue_info *qos_queue_info;
+	struct pp_qos_queue_conf *q_conf;
+	struct hal_priv *priv = HAL(node->inst);
+
+	qos_queue_info = kzalloc(sizeof(*qos_queue_info), GFP_KERNEL);
+	q_conf = kzalloc(sizeof(*q_conf), GFP_KERNEL);
+	if (!qos_queue_info || !q_conf)
+		goto EXIT;
+	if (node->type == DP_NODE_QUEUE) {
+		if (qos_queue_allocate(priv->qdev, &id)) {
+			PR_ERR("qos_queue_allocate failed\n");
+			goto EXIT;
+		}
+		PR_INFO("qos_queue_allocate: %d\n", id);
+#define WORKAROUND_PORT_SET
+//#define WORKAROUND_DROP_PORT
+#ifdef WORKAROUND_PORT_SET /*workaround to get physical queue id */
+		/*workaround: to get a queue's physical queue id,
+		 *pp_qos_queue_set must be called first;
+		 *qos_queue_info_get then returns the physical queue id
+		 */
+		qos_queue_conf_set_default(q_conf);
+		q_conf->wred_enable = 0;
+		q_conf->queue_wred_max_allowed = 0x400; /*max qocc in pkt */
+#ifdef WORKAROUND_DROP_PORT /*use drop port */
+		q_conf->queue_child_prop.parent = priv->ppv4_drop_p;
+#else
+		q_conf->queue_child_prop.parent = priv->ppv4_tmp_p;
+#endif /*WORKAROUND_DROP_PORT*/
+		if (qos_queue_set(priv->qdev, id, q_conf)) {
+			PR_ERR("qos_queue_set fail for queue=%d to parent=%d\n",
+			       id, q_conf->queue_child_prop.parent);
+			qos_queue_remove(priv->qdev, id);
+			goto EXIT;
+		}
+		PR_INFO("Workaround queue(/%d)-> tmp parent(/%d)\n",
+			id, q_conf->queue_child_prop.parent);
+		/*workaround end ---- */
+#endif /*WORKAROUND_PORT_SET */
+		if (qos_queue_info_get(priv->qdev, id, qos_queue_info)) {
+			qos_queue_remove(priv->qdev, id);
+			PR_ERR("qos_queue_info_get: %d\n", id);
+			goto EXIT;
+		}
+
+		phy_id = qos_queue_info->physical_id;
+		PR_INFO("queue info: %d/%d to parent(/%d)\n", phy_id, id,
+			q_conf->queue_child_prop.parent);
+		priv->qos_queue_stat[phy_id].flag = PP_NODE_ALLOC;
+		priv->qos_queue_stat[phy_id].node_id = id;
+		priv->qos_queue_stat[phy_id].resv_idx = -1;
+		priv->qos_sch_stat[id].type = DP_NODE_QUEUE;
+		node->id = (union dp_node_id)phy_id;
+		kfree(q_conf);
+		kfree(qos_queue_info);
+		return DP_SUCCESS;
+	} else if (node->type == DP_NODE_SCH) {
+		if (qos_sched_allocate(priv->qdev, &id)) {
+			PR_ERR("failed to qos_sched_allocate\n");
+			goto EXIT;
+		}
+		priv->qos_sch_stat[id].c_flag = PP_NODE_ALLOC;
+		priv->qos_sch_stat[id].p_flag = PP_NODE_ALLOC;
+		priv->qos_sch_stat[id].resv_idx = -1;
+		priv->qos_sch_stat[id].child_num = 0;
+		priv->qos_sch_stat[id].type = DP_NODE_SCH;
+		node->id = (union dp_node_id)id;
+		kfree(q_conf);
+		kfree(qos_queue_info);
+		return DP_SUCCESS;
+	}
+
+EXIT:
+	kfree(q_conf);
+	kfree(qos_queue_info);
+	return DP_FAILURE;
+}
+
+/* dp_alloc_qos_port API
+ * upon success returns the allocated qos dequeue port node id
+ * otherwise returns failure
+ */
+static int dp_alloc_qos_port(struct dp_node_alloc *node, int flag)
+{
+	unsigned int qos_port;
+	int cqm_deq_port;
+	struct pp_qos_port_conf  port_cfg;
+	struct hal_priv *priv = HAL(node->inst);
+
+	if (!priv) {
+		PR_ERR("priv NULL\n");
+		goto EXIT;
+	}
+	cqm_deq_port = node->id.cqm_deq_port;
+	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+		 "inst=%d dp_port=%d cqm_deq_port=%d\n",
+		 node->inst, node->dp_port, cqm_deq_port);
+	if (cqm_deq_port == DP_NODE_AUTO_ID) {
+		PR_ERR("cqm_deq_port wrong: %d\n", cqm_deq_port);
+		goto EXIT;
+	}
+	if (priv->deq_port_stat[cqm_deq_port].flag != PP_NODE_FREE) {
+		PR_INFO("cqm_deq_port[%d] alread init:\n", cqm_deq_port);
+		return priv->deq_port_stat[cqm_deq_port].node_id;
+	}
+	if (qos_port_allocate(priv->qdev, cqm_deq_port, &qos_port)) {
+		PR_ERR("failed to qos_port_allocate\n");
+		goto EXIT;
+	}
+	PR_INFO("qos_port_alloc succeed: %d/%d\n", cqm_deq_port, qos_port);
+	/*Configure QOS dequeue port*/
+	qos_port_conf_set_default(&port_cfg);
+	port_cfg.ring_address =
+		(void *)priv->deq_port_info[cqm_deq_port].tx_ring_addr;
+	port_cfg.ring_size = priv->deq_port_info[cqm_deq_port].tx_ring_size;
+	port_cfg.credit = priv->deq_port_info[cqm_deq_port].tx_pkt_credit;
+	if (port_cfg.credit)
+		port_cfg.packet_credit_enable = 1;
+
+	PR_INFO("qos_port_set parameter: %d/%d\n", cqm_deq_port, qos_port);
+	PR_INFO("  ring_address=0x%p\n", port_cfg.ring_address);
+	PR_INFO("  ring_size=%d\n", port_cfg.ring_size);
+	PR_INFO("  credit=%d\n", port_cfg.credit);
+	PR_INFO("  packet_credit_enable=%d\n", port_cfg.packet_credit_enable);
+	PR_INFO("priv->qdev=0x%p\n", priv->qdev);
+
+	if (qos_port_set(priv->qdev, qos_port, &port_cfg)) {
+		PR_ERR("qos_port_set fail for port %d/%d\n",
+		       cqm_deq_port, qos_port);
+		qos_port_remove(priv->qdev, qos_port);
+		goto EXIT;
+	}
+	priv->deq_port_stat[cqm_deq_port].flag = PP_NODE_ALLOC;
+	priv->qos_sch_stat[qos_port].type = DP_NODE_PORT;
+	priv->deq_port_stat[cqm_deq_port].node_id = qos_port;
+	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+		 "dp_alloc_qos_port ok: port=%d/%d for dp_port=%d\n",
+		 cqm_deq_port, qos_port, node->dp_port);
+	return qos_port;
+EXIT:
+	return DP_FAILURE;
+}
+
+/* dp_node_alloc_31 API
+ * Checks the flag and the input node type
+ * upon success allocates a node from the global/reserved resources
+ * otherwise returns failure
+ */
+int dp_node_alloc_31(struct dp_node_alloc *node, int flag)
+{
+	struct hal_priv *priv = HAL(node->inst);
+
+	if (!priv) {
+		PR_ERR("priv is NULL cannot proceed!!\n");
+		return DP_FAILURE;
+	}
+
+	if (flag & DP_ALLOC_RESV_ONLY) {
+		/* if it can get from its reserved resource, return
+		 * DP_SUCCESS; otherwise return DP_FAILURE
+		 */
+		return dp_node_alloc_resv_pool(node, flag);
+	}
+	if (flag & DP_ALLOC_GLOBAL_ONLY) {
+		/* if it can get from the global pool, return DP_SUCCESS;
+		 * otherwise return DP_FAILURE
+		 */
+		return dp_node_alloc_global_pool(node, flag);
+	}
+	if (flag & DP_ALLOC_GLOBAL_FIRST) {
+		/* if it can get from the global pool, return DP_SUCCESS;
+		 * if it can get from its reserved resource, return DP_SUCCESS;
+		 * return DP_FAILURE
+		 */
+		if (dp_node_alloc_global_pool(node, flag) == DP_SUCCESS)
+			return DP_SUCCESS;
+		return dp_node_alloc_resv_pool(node, flag);
+	}
+	/* default order: reserved pool first
+	 * if it can get from its reserved resource, return DP_SUCCESS
+	 * if it can get from the global pool, return DP_SUCCESS
+	 * return DP_FAILURE
+	 */
+	if (dp_node_alloc_resv_pool(node, flag) == DP_SUCCESS)
+		return DP_SUCCESS;
+	return dp_node_alloc_global_pool(node, flag);
+}
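+
+/* Illustrative sketch (comment only, not compiled): allocate one queue
+ * for a hypothetical inst/dp_port, preferring the global pool.
+ *
+ *	struct dp_node_alloc node = {0};
+ *
+ *	node.inst = 0;
+ *	node.dp_port = 1;
+ *	node.type = DP_NODE_QUEUE;
+ *	if (dp_node_alloc_31(&node, DP_ALLOC_GLOBAL_FIRST) == DP_SUCCESS)
+ *		PR_INFO("got queue: physical id %d\n", node.id.q_id);
+ */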
+
+/* dp_node_free_31 API
+ * if (this node is not unlinked yet)
+ *	unlink it
+ * if (this node is a reserved node)
+ *	return this node to this device's reserved node table
+ *	mark this node as Free in this device's reserved node table
+ * else
+ *	return this node to the system global table
+ *	mark this node free in the system global table
+ */
+int dp_node_free_31(struct dp_node_alloc *node, int flag)
+{
+	int sch_id, phy_id, node_id, parent_id, p_type;
+	struct hal_priv *priv = HAL(node->inst);
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+
+	if (!priv) {
+		PR_ERR("priv is NULL cannot proceed!!\n");
+		return DP_FAILURE;
+	}
+
+	if (node->type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[node->id.q_id].node_id;
+
+		if (priv->qos_queue_stat[node->id.q_id].flag & PP_NODE_FREE) {
+			PR_ERR("Node Q[%d] is already Free Stat\n",
+			       node->id.q_id);
+			return DP_FAILURE;
+		}
+
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+			if (node_stat_update(node->inst, node_id, DP_NODE_RST))
+				PR_ERR("node_stat_update failed\n");
+			return DP_SUCCESS;
+		}
+		parent_id = queue_cfg.queue_child_prop.parent;
+		p_type = get_node_type_by_node_id(node->inst, parent_id, flag);
+		/*Remove Queue link*/
+		if (qos_queue_remove(priv->qdev, node_id)) {
+			PR_ERR("failed to qos_queue_remove\n");
+			return DP_FAILURE;
+		}
+		PR_INFO("Queue[%d/%d] removed\n", node->id.q_id, node_id);
+		if (node_stat_update(node->inst, node_id, DP_NODE_DEC)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		/*call node_stat_update to update parent status*/
+		if (node_stat_update(node->inst, parent_id,
+				     DP_NODE_DEC | C_FLAG)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		/*call node_Stat_update to free the node*/
+		if (node_stat_update(node->inst, node_id, DP_NODE_RST)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		PR_INFO("Queue[%d/%d] removed updated\n",
+			 node->id.q_id, node_id);
+		return DP_SUCCESS;
+	} else if (node->type == DP_NODE_SCH) {
+		sch_id = node->id.sch_id;
+
+		if (priv->qos_sch_stat[sch_id].child_num)
+			return DP_FAILURE;
+
+		if (priv->qos_sch_stat[sch_id].p_flag & PP_NODE_FREE) {
+			PR_ERR("Node Sch[%d] is already Free Stat\n", sch_id);
+			return DP_FAILURE;
+		}
+
+		if (qos_sched_conf_get(priv->qdev, sch_id, &sched_cfg)) {
+			if (node_stat_update(node->inst, sch_id,
+					     DP_NODE_RST | P_FLAG))
+				PR_ERR("node_stat_update failed\n");
+			return DP_SUCCESS;
+		}
+		parent_id = sched_cfg.sched_child_prop.parent;
+		p_type = get_node_type_by_node_id(node->inst, parent_id, flag);
+		/*Remove Sched link*/
+		if (qos_sched_remove(priv->qdev, sch_id)) {
+			PR_ERR("failed to qos_sched_remove\n");
+			return DP_FAILURE;
+		}
+		if (node_stat_update(node->inst, sch_id,
+				     DP_NODE_DEC | P_FLAG)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		/*call node_stat_update to update parent status*/
+		if (node_stat_update(node->inst, parent_id,
+				     DP_NODE_DEC | C_FLAG)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		/*call node_stat_update to free the node*/
+		if (node_stat_update(node->inst, sch_id,
+				     DP_NODE_RST | P_FLAG)) {
+			PR_ERR("node_stat_update failed\n");
+			return DP_FAILURE;
+		}
+		return DP_SUCCESS;
+	} else if (node->type == DP_NODE_PORT) {
+		phy_id = node->id.cqm_deq_port;
+		node_id = priv->deq_port_stat[phy_id].node_id;
+
+		if (priv->deq_port_stat[phy_id].child_num)
+			return DP_FAILURE;
+		if (priv->deq_port_stat[phy_id].flag & PP_NODE_ACTIVE) {
+			if (node_stat_update(node->inst, node_id, DP_NODE_DEC))
+				PR_ERR("node_stat_update failed\n");
+		}
+		if (priv->deq_port_stat[phy_id].flag & PP_NODE_ALLOC) {
+			/*call node_stat_update to free the node*/
+			if (node_stat_update(node->inst, node_id, DP_NODE_RST))
+				PR_ERR("node_stat_update failed\n");
+			return DP_SUCCESS;
+		}
+		return DP_FAILURE;
+	}
+	return DP_FAILURE;
+}
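+
+/* Illustrative sketch (comment only, not compiled): free the queue
+ * allocated in the sketch above; node still holds the inst/dp_port/
+ * type/id filled at allocation time.
+ *
+ *	if (dp_node_free_31(&node, 0) == DP_SUCCESS)
+ *		PR_INFO("queue %d returned to its pool\n", node.id.q_id);
+ */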
+
+/* dp_qos_parent_chk API
+ * checks for parent type
+ * upon Success
+ *    return parent node id
+ * else return DP_FAILURE
+ */
+static int dp_qos_parent_chk(struct dp_node_link *info, int flag)
+{
+	struct dp_node_alloc node;
+
+	if (info->p_node_type == DP_NODE_SCH) {
+		if (info->p_node_id.sch_id == DP_NODE_AUTO_ID) {
+			node.inst = info->inst;
+			node.type = info->p_node_type;
+			node.dp_port = info->dp_port;
+
+			if ((dp_node_alloc_31(&node, flag)) == DP_FAILURE) {
+				PR_ERR("dp_node_alloc_31 queue alloc fail\n");
+				return DP_FAILURE;
+			}
+			info->p_node_id = node.id;
+		}
+		return info->p_node_id.sch_id;
+	} else if (info->p_node_type == DP_NODE_PORT) {
+		node.inst = info->inst;
+		node.id = info->cqm_deq_port;
+		node.type = info->p_node_type;
+		node.dp_port = info->dp_port;
+		return dp_alloc_qos_port(&node, flag);
+	}
+	return DP_FAILURE;
+}
+
+/* dp_node_link_get_31 API
+ * upon success fills in the node link info and returns DP_SUCCESS
+ * else returns DP_FAILURE
+ */
+int dp_node_link_get_31(struct dp_node_link *info, int flag)
+{
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct hal_priv *priv = HAL(info->inst);
+	int node_id;
+
+	if (info->node_type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[info->node_id.q_id].node_id;
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+			PR_ERR("failed to qos_queue_conf_get\n");
+			return DP_FAILURE;
+		}
+		if (!queue_cfg.queue_child_prop.parent)
+			return DP_FAILURE;
+		if (priv->qos_queue_stat[info->node_id.q_id].flag !=
+		    PP_NODE_ACTIVE) {
+			return DP_FAILURE;
+		}
+		info->p_node_id.sch_id =
+			queue_cfg.queue_child_prop.parent;
+		info->p_node_type = get_node_type_by_node_id(info->inst,
+				queue_cfg.queue_child_prop.parent, flag);
+		info->prio_wfq = queue_cfg.queue_child_prop.priority;
+		info->arbi = 0;
+		info->leaf = 0;
+		return DP_SUCCESS;
+	} else if (info->node_type == DP_NODE_SCH) {
+		if (qos_sched_conf_get(priv->qdev, info->node_id.sch_id,
+				       &sched_cfg)) {
+			PR_ERR("failed to qos_sched_conf_get\n");
+			return DP_FAILURE;
+		}
+		if (!sched_cfg.sched_child_prop.parent)
+			return DP_FAILURE;
+		if (priv->qos_sch_stat[info->node_id.sch_id].c_flag !=
+		    PP_NODE_ACTIVE) {
+			return DP_FAILURE;
+		}
+		info->p_node_id.sch_id = sched_cfg.sched_child_prop.parent;
+		info->p_node_type = get_node_type_by_node_id(info->inst,
+				sched_cfg.sched_child_prop.parent, flag);
+		info->prio_wfq = sched_cfg.sched_child_prop.priority;
+		info->arbi = sched_cfg.sched_parent_prop.arbitration;
+		info->leaf = 0;
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
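+
+/* Illustrative sketch (comment only, not compiled): query which parent
+ * a (hypothetical) physical queue 5 is linked to.
+ *
+ *	struct dp_node_link link = {0};
+ *
+ *	link.inst = 0;
+ *	link.node_type = DP_NODE_QUEUE;
+ *	link.node_id.q_id = 5;
+ *	if (dp_node_link_get_31(&link, 0) == DP_SUCCESS)
+ *		PR_INFO("parent node %d, type %d\n",
+ *			link.p_node_id.sch_id, link.p_node_type);
+ */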
+
+/* dp_link_set API
+ * upon success links node to parent and returns DP_SUCCESS
+ * else return DP_FAILURE
+ */
+static int dp_link_set(struct dp_node_link *info, int parent_node, int flag)
+{
+	int node_id;
+	int res = DP_FAILURE;
+	int node_flag = DP_NODE_INC;
+	struct hal_priv *priv = HAL(info->inst);
+	struct pp_qos_queue_conf *queue_cfg;
+	struct pp_qos_sched_conf *sched_cfg;
+
+	queue_cfg = kzalloc(sizeof(*queue_cfg), GFP_KERNEL);
+	sched_cfg = kzalloc(sizeof(*sched_cfg), GFP_KERNEL);
+	if (!queue_cfg || !sched_cfg)
+		goto EXIT;
+
+	if (info->node_type == DP_NODE_QUEUE) {
+		qos_queue_conf_set_default(queue_cfg);
+		queue_cfg->queue_child_prop.parent = parent_node;
+		queue_cfg->wred_enable = 0;
+		queue_cfg->queue_wred_max_allowed = 0x400;
+		queue_cfg->queue_child_prop.priority = info->prio_wfq;
+		/* convert q_id to logical node id and pass it to
+		 * low level api
+		 */
+		node_id = priv->qos_queue_stat[info->node_id.q_id].node_id;
+		PR_INFO("Try to link Q[%d/%d] to parent %d/%d]\n",
+			info->node_id.q_id,
+			node_id,
+			info->cqm_deq_port.cqm_deq_port,
+			queue_cfg->queue_child_prop.parent);
+		if (qos_queue_set(priv->qdev, node_id, queue_cfg)) {
+			PR_ERR("failed to qos_queue_set\n");
+			qos_queue_remove(priv->qdev, node_id);
+			goto ERROR_EXIT;
+		}
+		goto EXIT;
+	} else if (info->node_type == DP_NODE_SCH) {
+		qos_sched_conf_set_default(sched_cfg);
+		sched_cfg->sched_child_prop.parent = parent_node;
+		sched_cfg->sched_child_prop.priority = info->prio_wfq;
+		sched_cfg->sched_parent_prop.arbitration = info->arbi;
+		node_id = info->node_id.sch_id;
+
+		if (qos_sched_set(priv->qdev, node_id, sched_cfg)) {
+			PR_ERR("failed to qos_sched_set\n");
+			qos_sched_remove(priv->qdev, node_id);
+			goto ERROR_EXIT;
+		}
+		node_flag |= P_FLAG;
+		goto EXIT;
+	}
+	goto ERROR_EXIT;
+EXIT:
+	res = DP_SUCCESS;
+	/*fill parent info in child's global table*/
+	priv->qos_sch_stat[node_id].parent.node_id = parent_node;
+	priv->qos_sch_stat[node_id].parent.type = info->p_node_type;
+	priv->qos_sch_stat[node_id].parent.flag = PP_NODE_ACTIVE;
+	/*increase child_num in parent's global table and status*/
+	PR_INFO("node_stat_update after dp_link_set start\n");
+	node_stat_update(info->inst, node_id, node_flag);
+	node_stat_update(info->inst, parent_node, DP_NODE_INC | C_FLAG);
+	PR_INFO("node_stat_update after dp_link_set end\n");
+ERROR_EXIT:
+	kfree(queue_cfg);
+	kfree(sched_cfg);
+	return res;
+}
+
+/* dp_qos_link_prio_set_31 API
+ * set node_prio struct for link and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_qos_link_prio_set_31(struct dp_node_prio *info, int flag)
+{
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct pp_qos_port_conf port_cfg;
+	struct hal_priv *priv;
+	int node_id;
+
+	if (!info) {
+		PR_ERR("info cannot be NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(info->inst);
+
+	if (info->type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[info->id.q_id].node_id;
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+			PR_ERR("failed to qos_queue_conf_get\n");
+			return DP_FAILURE;
+		}
+		queue_cfg.queue_child_prop.priority = info->prio_wfq;
+		if (qos_queue_set(priv->qdev, node_id, &queue_cfg)) {
+			PR_ERR("failed to qos_queue_set\n");
+			return DP_FAILURE;
+		}
+		return DP_SUCCESS;
+	} else if (info->type == DP_NODE_SCH) {
+		if (qos_sched_conf_get(priv->qdev, info->id.sch_id,
+					  &sched_cfg)) {
+			PR_ERR("failed to qos_sched_conf_get\n");
+			return DP_FAILURE;
+		}
+		sched_cfg.sched_parent_prop.arbitration = info->arbi;
+		sched_cfg.sched_child_prop.priority = info->prio_wfq;
+		if (qos_sched_set(priv->qdev, node_id, &sched_cfg)) {
+			PR_ERR("failed to qos_sched_set\n");
+			return DP_FAILURE;
+		}
+		return DP_SUCCESS;
+	} else if (info->type == DP_NODE_PORT) {
+		node_id = priv->deq_port_stat[info->id.cqm_deq_port].node_id;
+		if (qos_port_conf_get(priv->qdev, node_id, &port_cfg)) {
+			PR_ERR("failed to qos_port_conf_get\n");
+			return DP_FAILURE;
+		}
+		port_cfg.port_parent_prop.arbitration = info->arbi;
+		if (qos_port_set(priv->qdev, node_id, &port_cfg)) {
+			PR_ERR("failed to qos_port_set\n");
+			return DP_FAILURE;
+		}
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
+
+/* dp_qos_link_prio_get_31 API
+ * get node_prio struct for link and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_qos_link_prio_get_31(struct dp_node_prio *info, int flag)
+{
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct pp_qos_port_conf port_cfg;
+	struct hal_priv *priv;
+	int node_id;
+
+	if (!info) {
+		PR_ERR("info cannot be NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(info->inst);
+
+	if (info->type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[info->id.q_id].node_id;
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+			PR_ERR("failed to qos_queue_conf_get\n");
+			return DP_FAILURE;
+		}
+		info->arbi = 0;
+		info->prio_wfq = queue_cfg.queue_child_prop.priority;
+		return DP_SUCCESS;
+	} else if (info->type == DP_NODE_SCH) {
+		if (qos_sched_conf_get(priv->qdev, info->id.sch_id,
+					  &sched_cfg)) {
+			PR_ERR("failed to qos_sched_conf_get\n");
+			return DP_FAILURE;
+		}
+		info->arbi = sched_cfg.sched_parent_prop.arbitration;
+		info->prio_wfq = sched_cfg.sched_child_prop.priority;
+		return DP_SUCCESS;
+	} else if (info->type == DP_NODE_PORT) {
+		node_id = priv->deq_port_stat[info->id.cqm_deq_port].node_id;
+		if (qos_port_conf_get(priv->qdev, node_id, &port_cfg)) {
+			PR_ERR("failed to qos_port_conf_get\n");
+			return DP_FAILURE;
+		}
+		info->arbi = port_cfg.port_parent_prop.arbitration;
+		info->prio_wfq = 0;
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
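+
+/* Illustrative sketch (comment only, not compiled): read-modify-write of
+ * a queue's priority via the prio get/set pair; q_id 5 is hypothetical.
+ *
+ *	struct dp_node_prio prio = {0};
+ *
+ *	prio.inst = 0;
+ *	prio.type = DP_NODE_QUEUE;
+ *	prio.id.q_id = 5;
+ *	if (dp_qos_link_prio_get_31(&prio, 0) == DP_SUCCESS) {
+ *		prio.prio_wfq = 3;
+ *		dp_qos_link_prio_set_31(&prio, 0);
+ *	}
+ */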
+
+/* dp_deq_port_res_get_31 API
+ * Collect the queues attached to a dequeue port and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_deq_port_res_get_31(struct dp_dequeue_res *res, int flag)
+{
+	union local_t {
+		struct pp_qos_port_info p_info;
+		struct pp_qos_queue_info q_info;
+		struct pp_qos_queue_conf q_conf;
+		struct pp_qos_sched_conf sched_conf;
+	};
+
+	struct hal_priv *priv;
+	static union local_t t;
+	uint16_t q_ids[MAX_Q_PER_PORT] = {0};
+	unsigned int q_num;
+	unsigned int q_size = MAX_Q_PER_PORT;
+	int i, j, k;
+	int port_num = 1;
+	int p_id, idx;
+	struct pmac_port_info *port_info;
+
+	if (!res) {
+		PR_ERR("res cannot be NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(res->inst);
+	port_info = &dp_port_info[res->inst][res->dp_port];
+	if (!port_info->deq_port_num)
+		return DP_FAILURE;
+	if (res->cqm_deq_idx == DEQ_PORT_OFFSET_ALL) {
+		port_num = port_info->deq_port_num;
+		res->cqm_deq_port = port_info->deq_port_base;
+		res->num_deq_ports = port_info->deq_port_num;
+	} else {
+		res->cqm_deq_port = port_info->deq_port_base + res->cqm_deq_idx;
+		res->num_deq_ports = 1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+		 "dp_deq_port_res_get_31:dp_port=%d cqm_deq_port=(%d ~%d) %d\n",
+		 res->dp_port,
+		 res->cqm_deq_port, res->cqm_deq_port + port_num - 1,
+		 port_info->deq_port_num);
+	res->num_q = 0;
+	idx = 0;
+	for (k = res->cqm_deq_port;
+	     k < (res->cqm_deq_port + port_num);
+	     k++) {
+		if (priv->deq_port_stat[k].flag == PP_NODE_FREE)
+			continue;
+		q_num = 0;
+		if (qos_port_get_queues(priv->qdev,
+		    priv->deq_port_stat[k].node_id,
+		    q_ids, q_size, &q_num)) {
+			PR_ERR("qos_port_get_queues: port[%d/%d]\n",
+			       k,
+			       priv->deq_port_stat[k].node_id);
+			return DP_FAILURE;
+		}
+		res->num_q += q_num;
+		if (!res->q_res)
+			continue;
+		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "port[%d/%d] queue list\n",
+			 k, priv->deq_port_stat[k].node_id);
+		for (i = 0; i < q_num; i++)
+			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
+				 "  q[/%d]\n", q_ids[i]);
+		for (i = 0; (i < q_num) && (idx < res->q_res_size); i++) {
+			memset(&t.q_info, 0, sizeof(t.q_info));
+			if (qos_queue_info_get(priv->qdev,
+			    q_ids[i], &t.q_info)) {
+				PR_ERR("qos_port_info_get fail:q[/%d]\n",
+				       q_ids[i]);
+				continue;
+			}
+			res->q_res[idx].q_id = t.q_info.physical_id;
+			res->q_res[idx].q_node = q_ids[i];
+			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "q[%d/%d]\n",
+				 t.q_info.physical_id, q_ids[i]);
+			res->q_res[idx].sch_lvl = 0;
+			memset(&t.q_conf, 0, sizeof(t.q_conf));
+			if (qos_queue_conf_get(priv->qdev,
+			    q_ids[i], &t.q_conf)) {
+				PR_ERR("qos_queue_conf_get fail:q[/%d]\n",
+				       q_ids[i]);
+				continue;
+			}
+			p_id = t.q_conf.queue_child_prop.parent;
+			j = 0;
+			do {
+#if 0  /*currently no scheduler yet. So parent should be port */
+				if (priv->qos_sch_stat[p_id].type !=
+				    DP_NODE_PORT) {
+					PR_ERR("why parent=%d not port type\n",
+					       p_id);
+					/*workaround */
+					priv->qos_sch_stat[p_id].type =
+						DP_NODE_PORT;
+				}
+#endif
+				if (priv->qos_sch_stat[p_id].type ==
+				    DP_NODE_SCH) {
+					res->q_res[idx].sch_id[j] = p_id;
+					j++;
+					res->q_res[idx].sch_lvl = j;
+				} else if (priv->qos_sch_stat[p_id].type ==
+					   DP_NODE_PORT) { /*port*/
+					res->q_res[idx].qos_deq_port = p_id;
+					res->q_res[idx].cqm_deq_port = k;
+					break;
+				} else {
+					PR_ERR("wrong p[/%d] type:%d\n",
+					       p_id,
+					       priv->qos_sch_stat[p_id].type);
+					break;
+				}
+				/*get next parent */
+				if (qos_sched_conf_get(priv->qdev,
+				    p_id, &t.sched_conf)) {
+				    PR_ERR("qos_sched_conf_get fail:sch[/%d]\n",
+					   p_id);
+					break;
+				}
+				p_id = t.sched_conf.sched_child_prop.parent;
+				idx ++;
+			} while (1);
+		}
+		return DP_SUCCESS;
+	}
+	return DP_FAILURE;
+}
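+
+/* Illustrative sketch (comment only, not compiled): the two-pass pattern
+ * this API supports. With q_res == NULL only num_q is accumulated; a
+ * second call fills the caller-provided q_res array. Ids are made up.
+ *
+ *	struct dp_dequeue_res res = {0};
+ *
+ *	res.inst = 0;
+ *	res.dp_port = 1;
+ *	res.cqm_deq_idx = DEQ_PORT_OFFSET_ALL;
+ *	dp_deq_port_res_get_31(&res, 0);   first pass: counts res.num_q
+ *	res.q_res = q_res_buf;             caller array, res.num_q entries
+ *	res.q_res_size = res.num_q;
+ *	dp_deq_port_res_get_31(&res, 0);   second pass: fills the entries
+ */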
+
+/* dp_node_unlink_31 API
+ * blocks the queue(s) under the given node, flushes them,
+ * and returns DP_SUCCESS
+ * else returns DP_FAILURE
+ */
+int dp_node_unlink_31(struct dp_node_link *info, int flag)
+{
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct hal_priv *priv = HAL(info->inst);
+	u16 queue_buf[MAX_Q_PER_PORT] = {0};
+	int queue_size = MAX_Q_PER_PORT;
+	int queue_num = 0;
+	int i, node_id;
+
+	if (info->node_type == DP_NODE_QUEUE) {
+		node_id = priv->qos_queue_stat[info->node_id.q_id].node_id;
+		/*Need to check ACTIVE Flag*/
+		if (priv->qos_queue_stat[info->node_id.q_id].flag !=
+		    PP_NODE_ACTIVE)
+			PR_ERR("Wrong Queue[%d] Stat(%d):Expect ACTIVE\n",
+			       info->node_id.q_id,
+			       priv->qos_queue_stat[info->node_id.q_id].flag);
+		if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg) == 0) {
+			if (queue_cfg.blocked == 0) {
+				queue_cfg.blocked = 1;/*temp disable*/
+				if (qos_queue_set(priv->qdev, node_id,
+						     &queue_cfg)) {
+					PR_ERR("qos_queue_set fail\n");
+					return DP_FAILURE;
+				}
+			}
+			/* Remap lookup table*/
+			if (qos_queue_flush(priv->qdev, node_id)) {
+				PR_ERR("qos_queue_flush fail\n");
+				return DP_FAILURE;
+			}
+		}
+	} else if (info->node_type == DP_NODE_SCH) {
+		if (priv->qos_sch_stat[info->node_id.sch_id].c_flag !=
+								PP_NODE_ACTIVE)
+			PR_ERR("Wrong Sched FLAG Expect ACTIVE\n");
+		if (qos_sched_conf_get(priv->qdev, info->node_id.sch_id,
+					  &sched_cfg))
+			return DP_FAILURE;
+		if (qos_sched_get_queues(priv->qdev, info->node_id.sch_id,
+					    queue_buf, queue_size, &queue_num))
+			return DP_FAILURE;
+		if (!queue_num)
+			PR_ERR("DP_NODE_SCH has no queues\n");
+		if (queue_num && flag != DP_NODE_SMART_UNLINK) {
+			/*unlink current parent and child:
+			 *queue_buf[queue_num - 1] is the last valid entry
+			 */
+			if (qos_queue_conf_get(priv->qdev,
+					       queue_buf[queue_num - 1],
+					       &queue_cfg))
+				return DP_FAILURE;
+			queue_cfg.blocked = 1;/*temp disable*/
+			if (qos_queue_set(priv->qdev,
+					  queue_buf[queue_num - 1],
+					  &queue_cfg)) {
+				PR_ERR("qos_queue_set fail\n");
+				return DP_FAILURE;
+			}
+			/* Remap lookup table*/
+			if (qos_queue_flush(priv->qdev,
+					    queue_buf[queue_num - 1])) {
+				PR_ERR("qos_queue_flush fail\n");
+				return DP_FAILURE;
+			}
+		}
+		for (i = 0; i < queue_num; i++) {
+			if (qos_queue_conf_get(priv->qdev, queue_buf[i],
+						  &queue_cfg))
+				continue;
+			if (queue_cfg.blocked)
+				continue;
+			queue_cfg.blocked = 1;/*temp disable*/
+			if (qos_queue_set(priv->qdev, queue_buf[i],
+					     &queue_cfg)) {
+				PR_ERR("qos_queue_set fail\n");
+				continue;
+			}
+			/* Remap lookup table*/
+			if (qos_queue_flush(priv->qdev, queue_buf[i])) {
+				PR_ERR("qos_queue_flush fail\n");
+				continue;
+			}
+		}
+	}
+	return DP_SUCCESS;
+}
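+
+/* Illustrative sketch (comment only, not compiled): block and flush a
+ * queue before moving it elsewhere; q_id 5 is hypothetical.
+ *
+ *	struct dp_node_link link = {0};
+ *
+ *	link.inst = 0;
+ *	link.node_type = DP_NODE_QUEUE;
+ *	link.node_id.q_id = 5;
+ *	dp_node_unlink_31(&link, 0);	queue is left blocked and flushed
+ */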
+
+/* dp_node_link_add_31 API
+ * checks the parent type and allocates the parent node if needed
+ * then checks the child type and allocates the child node if needed
+ * then calls dp_link_set to link the child to the parent
+ * upon success links the node to the given parent and returns DP_SUCCESS
+ * else returns DP_FAILURE
+ */
+int dp_node_link_add_31(struct dp_node_link *info, int flag)
 {
+	struct local_var {
+		struct pp_qos_queue_conf queue_cfg;
+		struct pp_qos_sched_conf sched_cfg;
+		struct dp_node_alloc node;
+		u16 queue_buf[MAX_Q_PER_PORT];
+		int q_orig_block[MAX_Q_PER_PORT];
+		int queue_size ;
+		int queue_num;
+		int node_id;
+		struct pp_node parent;
+		int f_child_free;
+		int f_parent_free;
+		int f_sch_auto_id;
+	};
 	int i;
-	int inst = node->inst;
-	int cnt;
-	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
-	int phy_id;
-	int node_id;
-	int port = node->dp_port;
-	struct resv_q *resv_q = priv->resv[port].resv_q;
+	int res = DP_SUCCESS;
+	struct hal_priv *priv;
+	struct local_var *t = NULL;
 
-	if (node->type == DP_NODE_QUEUE) {
-		cnt = priv->resv[node->dp_port].num_resv_q;
-		for (i = 0; i <= cnt; i++) {
-			if (resv_q[i].flag != PP_NODE_FREE)
-				continue;
-			phy_id = resv_q[i].physical_id;
-			node_id = resv_q[i].id;
-			resv_q[i].flag = PP_NODE_ALLOC;
-			priv->qos_queue_stat[phy_id].flag = PP_NODE_RESERVE |
-								PP_NODE_ALLOC;
-			priv->qos_queue_stat[phy_id].node_id = node_id;
-			priv->qos_queue_stat[phy_id].resv_idx = i;
-			node->id = (union dp_node_id)phy_id;
-			return DP_SUCCESS;
+	if (!info) {
+		PR_ERR("info NULL\n");
+		goto EXIT_ERR;
+	}
+	priv = HAL(info->inst);
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t) {
+		PR_ERR("fail to alloc %d bytes\n", sizeof(*t));
+		goto EXIT_ERR;
+	}
+	for (i = 0; i < ARRAY_SIZE(t->q_orig_block); i++)
+		t->q_orig_block[i] = -1;
+	t->queue_size = MAX_Q_PER_PORT;
+
+	DP_DEBUG(DP_DBG_FLAG_QOS,
+		 "inst=%d dp_port=%d\n",
+		 info->inst, info->dp_port);
+	/*Get Parent node_id after sanity check*/
+	if (info->p_node_type == DP_NODE_SCH &&
+	    info->p_node_id.sch_id == DP_NODE_AUTO_ID)
+		t->f_sch_auto_id = 1;
+	i = dp_qos_parent_chk(info, flag);
+	if (i == DP_FAILURE) {
+		PR_ERR("dp_qos_parent_chk fail\n");
+		goto EXIT_ERR;
+	}
+	t->parent.node_id = i;
+	t->parent.type = info->p_node_type;
+	t->parent.flag = 1;
+	PR_INFO("dp_qos_parent_chk succeed: parent node %d\n",
+		t->parent.node_id);
+	/*workaround to pass the parent to the queue allocate api*/
+	priv->ppv4_tmp_p = t->parent.node_id;
+	if (t->f_sch_auto_id)
+		t->f_parent_free = 1;
+
+	/*Get Child node after sanity check*/
+	if (info->node_type == DP_NODE_QUEUE) {
+		if (info->node_id.q_id == DP_NODE_AUTO_ID) {
+			t->node.inst = info->inst;
+			t->node.dp_port = info->dp_port;
+			t->node.type = info->node_type;
+			if ((dp_node_alloc_31(&t->node, flag)) == DP_FAILURE) {
+				PR_ERR("dp_node_alloc_31 queue alloc fail\n");
+				goto EXIT_ERR;
+			}
+			info->node_id = t->node.id;
+			t->f_child_free = 1;
+		}
+		/*add check for free flag and error*/
+		if (priv->qos_queue_stat[info->node_id.q_id].flag ==
+		    PP_NODE_FREE) {
+			PR_ERR("Invalid Queue ID:%d\n", info->node_id.q_id);
+			goto EXIT_ERR;
+		}
+		/* convert q_id to logical node id and pass it to
+		 * low level api
+		 */
+		t->node_id = priv->qos_queue_stat[info->node_id.q_id].node_id;
+		if ((qos_queue_conf_get(priv->qdev, t->node_id,
+		    &t->queue_cfg) == 0) &&
+		    (t->queue_cfg.queue_child_prop.parent ==
+		     priv->ppv4_drop_p)){
+			if (t->queue_cfg.blocked == 0) {
+				t->queue_cfg.blocked = 1;/*temp disable*/
+				t->q_orig_block[0] = 0;/*flag for queue restore*/
+				t->queue_num = 1;/*queue to restore*/
+				t->queue_buf[0] = t->node_id;
+				if (qos_queue_set(priv->qdev, t->node_id,
+						  &t->queue_cfg)) {
+					PR_ERR("qos_queue_set fail\n");
+					goto EXIT_ERR;
+				}
+				PR_INFO("block queue[%d/%d]\n",
+					info->node_id.q_id, t->node_id);
+			}
+#if 0  /*tmp remove q flush */
+			DP_DEBUG(DP_DBG_FLAG_QOS,
+				 "qos_queue_flush queue[%d]\n", t->node_id);
+			if (qos_queue_flush(priv->qdev, t->node_id)) {
+				PR_ERR("qos_queue_flush fail\n");
+				goto EXIT_ERR;
+			}
+#endif
+			/*Child flag update before link*/
+			DP_DEBUG(DP_DBG_FLAG_QOS,
+				 "node_stat_update for queue[%d]\n",
+				 t->node_id);
+			if (node_stat_update(info->inst, t->node_id,
+					     DP_NODE_DEC)) {
+				PR_ERR("node_stat_update fail\n");
+				goto EXIT_ERR;
+			}
+			/*reduce child_num in parent's global table*/
+			DP_DEBUG(DP_DBG_FLAG_QOS,
+				 "node_stat_update parent %d for q[%d]\n",
+				 PARENT(t->queue_cfg), t->node_id);
+			if (node_stat_update(info->inst, PARENT(t->queue_cfg),
+					     DP_NODE_DEC | C_FLAG)) {
+				PR_ERR("node_stat_update fail\n");
+				goto EXIT_ERR;
+			}
 		}
+		/*link set*/
+		/*if parent is same, but need to fill in other parameters for
+		 *parents hence commenting below code
+		 */
+		/*if (info->p_node_id.sch_id == parent.node_id ||
+		 *    info->p_node_id.cqm_deq_port == parent.node_id)
+		 *	goto EXIT_ERR;
+		 */
+		if (dp_link_set(info, t->parent.node_id, flag)) {
+			PR_ERR("dp_link_set fail to link to parent\n");
+			goto EXIT_ERR;
+		}
+	} else if (info->node_type == DP_NODE_SCH) {
+		if (info->node_id.sch_id == DP_NODE_AUTO_ID) {
+			t->node.inst = info->inst;
+			t->node.dp_port = info->dp_port;
+			t->node.type = info->node_type;
+
+			if ((dp_node_alloc_31(&t->node, flag)) == DP_FAILURE) {
+				PR_ERR("dp_node_alloc_31 sched alloc fail\n");
+				goto EXIT_ERR;
+			}
+			info->node_id = t->node.id;
+			t->f_child_free = 1;
+		}
+		/*add check for free flag and error*/
+		if (priv->qos_sch_stat[info->node_id.sch_id].p_flag ==
+		    PP_NODE_FREE) {
+			PR_ERR("Invalid Sched ID:%d\n", info->node_id.sch_id);
+			goto EXIT_ERR;
+		}
+		if (qos_sched_conf_get(priv->qdev, info->node_id.sch_id,
+					  &t->sched_cfg) == 0) {
+			qos_sched_get_queues(priv->qdev, info->node_id.sch_id,
+						t->queue_buf, t->queue_size,
+						&t->queue_num);
+			for (i = 0; i < t->queue_num; i++) {
+				if (qos_queue_conf_get(priv->qdev,
+						       t->queue_buf[i],
+						       &t->queue_cfg))
+					continue;
+				if (t->queue_cfg.blocked)
+					continue;
+				t->queue_cfg.blocked = 1;/*temp disable*/
+				t->q_orig_block[i] = 0;/*flag for queue restore*/
+				if (qos_queue_set(priv->qdev, t->queue_buf[i],
+						  &t->queue_cfg)) {
+					PR_ERR("qos_queue_set fail\n");
+					goto EXIT_ERR;
+				}
+				if (qos_queue_flush(priv->qdev, t->queue_buf[i])) {
+					PR_ERR("qos_queue_flush fail\n");
+					goto EXIT_ERR;
+				}
+			}
+			/*update flag for sch node*/
+			if (node_stat_update(info->inst, info->node_id.sch_id,
+					     DP_NODE_DEC | P_FLAG)) {
+				PR_ERR("node_stat_update fail\n");
+				goto EXIT_ERR;
+			}
+			/*reduce child_num in parent's global table*/
+			if (node_stat_update(info->inst, PARENT_S(t->sched_cfg),
+					     DP_NODE_DEC | C_FLAG)) {
+				PR_ERR("node_stat_update fail\n");
+				goto EXIT_ERR;
+			}
+		}
+		/*if parent is same, but need to fill in other parameters for
+		 *parents hence commenting below code
+		 */
+		/*if (info->p_node_id.sch_id == parent.node_id ||
+		 *    info->p_node_id.cqm_deq_port == parent.node_id)
+		 *	goto EXIT_ERR;
+		 */
+		if (dp_link_set(info, t->parent.node_id, flag)) {
+			PR_ERR("dp_link_set failed to link to parent\n");
+			goto EXIT_ERR;
+		}
+	}
+
+EXIT:
+	if (t) {
+		for (i = 0; i < t->queue_num; i++) {
+			if (t->q_orig_block[i] < 0)
+				continue;
+			if (qos_queue_conf_get(priv->qdev, t->queue_buf[i],
+					       &t->queue_cfg))
+				continue;
+			t->queue_cfg.blocked = t->q_orig_block[i];/*restore*/
+			qos_queue_set(priv->qdev, t->queue_buf[i],
+				      &t->queue_cfg);
+		}
+		kfree(t);
+	}
+	return res;
+
+EXIT_ERR:
+	if (t && t->f_child_free) {
+		if (t->node.type == DP_NODE_QUEUE)
+			qos_queue_remove(priv->qdev, t->node_id);
+		else
+			qos_sched_remove(priv->qdev, t->node.id.sch_id);
+	}
+	if (t && t->f_parent_free) {
+		if (info->p_node_type == DP_NODE_PORT)/* this condition is
+						       * not met for a port
+						       */
+			qos_port_remove(priv->qdev, t->parent.node_id);
+		else
+			qos_sched_remove(priv->qdev, t->parent.node_id);
+	}
+	res = DP_FAILURE;
+	goto EXIT; /*EXIT restores blocked queues and frees t*/
+}
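+
+/* Illustrative sketch (comment only, not compiled): auto-allocate a
+ * queue and link it straight to a cqm dequeue port; ids are made up.
+ *
+ *	struct dp_node_link link = {0};
+ *
+ *	link.inst = 0;
+ *	link.dp_port = 1;
+ *	link.node_type = DP_NODE_QUEUE;
+ *	link.node_id.q_id = DP_NODE_AUTO_ID;
+ *	link.p_node_type = DP_NODE_PORT;
+ *	link.p_node_id.cqm_deq_port = 7;
+ *	link.cqm_deq_port.cqm_deq_port = 7;
+ *	if (dp_node_link_add_31(&link, 0) == DP_SUCCESS)
+ *		PR_INFO("queue %d on port 7\n", link.node_id.q_id);
+ */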
+
+/* dp_queue_conf_set_31 API
+ * Set Current Queue config and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_queue_conf_set_31(struct dp_queue_conf *cfg, int flag)
+{
+	struct pp_qos_queue_conf conf;
+	struct hal_priv *priv = HAL(cfg->inst);
+	int node_id;
+
+	if (cfg->q_id < 0 || cfg->q_id > MAX_QUEUE ||
+	    priv->qos_queue_stat[cfg->q_id].flag == PP_NODE_FREE) {
+		PR_ERR("Invalid Queue ID:%d\n", cfg->q_id);
 		return DP_FAILURE;
-	} else if (node->type == DP_NODE_SCH) {
-		struct resv_sch *resv_sched = priv->resv[port].resv_sched;
+	}
+	node_id = priv->qos_queue_stat[cfg->q_id].node_id;
 
-		cnt = priv->resv[node->dp_port].num_resv_sched;
-		for (i = 0; i <= cnt; i++) {
-			if (resv_sched[i].flag != PP_NODE_FREE)
-				continue;
-			node_id = resv_sched[i].id;
-			resv_sched[i].flag = PP_NODE_ALLOC;
-			priv->qos_sch_stat[node_id].flag = PP_NODE_RESERVE |
-								PP_NODE_ALLOC;
-			priv->qos_sch_stat[node_id].resv_idx = i;
-			node->id = (union dp_node_id)node_id;
+	if (qos_queue_conf_get(priv->qdev, node_id, &conf)) {
+		PR_ERR("qos_queue_conf_get fail:%d\n", cfg->q_id);
+		return DP_FAILURE;
+	}
+	if (cfg->act & DP_NODE_DIS)
+		conf.blocked = 1;
+	else if (cfg->act & DP_NODE_EN)
+		conf.blocked = 0;
+	if (cfg->act & DP_NODE_SUSPEND)
+		conf.common_prop.suspended = 1;
+	else if (cfg->act & DP_NODE_RESUME)
+		conf.common_prop.suspended = 0;
+	if (cfg->drop == DP_QUEUE_DROP_WRED) {
+		conf.wred_enable = 1;
+		conf.queue_wred_max_avg_green = cfg->max_size[0];
+		conf.queue_wred_max_avg_yellow = cfg->max_size[1];
+		conf.queue_wred_min_avg_green = cfg->min_size[0];
+		conf.queue_wred_min_avg_yellow = cfg->min_size[1];
+		conf.queue_wred_slope_green = cfg->wred_slope[0];
+		conf.queue_wred_slope_yellow = cfg->wred_slope[1];
+		conf.queue_wred_min_guaranteed = cfg->wred_min_guaranteed;
+		conf.queue_wred_max_allowed = cfg->wred_max_allowed;
+	} else if (cfg->drop == DP_QUEUE_DROP_TAIL) {
+		PR_ERR("Further check PPv4 Tail Drop Capability.\n");
+		conf.wred_enable = 0;
+		conf.queue_wred_min_avg_green = cfg->min_size[0];
+		conf.queue_wred_min_avg_yellow = cfg->min_size[1];
+		conf.queue_wred_min_guaranteed = cfg->wred_min_guaranteed;
+		conf.queue_wred_max_allowed = cfg->wred_max_allowed;
+	}
+	if (qos_queue_set(priv->qdev, node_id, &conf)) {
+		PR_ERR("failed to qos_queue_set:%d\n", cfg->q_id);
+		return DP_FAILURE;
+	}
+	return DP_SUCCESS;
+}
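+
+/* Illustrative sketch (comment only, not compiled): enable WRED on a
+ * (hypothetical) queue 5 with made-up thresholds.
+ *
+ *	struct dp_queue_conf qcfg = {0};
+ *
+ *	qcfg.inst = 0;
+ *	qcfg.q_id = 5;
+ *	qcfg.drop = DP_QUEUE_DROP_WRED;
+ *	qcfg.min_size[0] = 0x100;	green min avg
+ *	qcfg.max_size[0] = 0x400;	green max avg
+ *	qcfg.wred_slope[0] = 50;
+ *	qcfg.wred_max_allowed = 0x800;
+ *	dp_queue_conf_set_31(&qcfg, 0);
+ */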
+
+/* dp_queue_conf_get_31 API
+ * Get Current Queue config and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_queue_conf_get_31(struct dp_queue_conf *cfg, int flag)
+{
+	int node_id;
+	struct pp_qos_queue_conf conf;
+	struct hal_priv *priv = HAL(cfg->inst);
+
+	if (cfg->q_id < 0 || cfg->q_id > MAX_QUEUE ||
+	    priv->qos_queue_stat[cfg->q_id].flag == PP_NODE_FREE) {
+		PR_ERR("Invalid Queue ID:%d\n", cfg->q_id);
+		return DP_FAILURE;
+	}
+	node_id = priv->qos_queue_stat[cfg->q_id].node_id;
+
+	if (qos_queue_conf_get(priv->qdev, node_id, &conf)) {
+		PR_ERR("qos_queue_conf_get fail\n");
+		return DP_FAILURE;
+	}
+	if (conf.blocked && conf.common_prop.suspended)
+		cfg->act = DP_NODE_SUSPEND;
+	else if (!conf.blocked && conf.common_prop.suspended)
+		cfg->act = DP_NODE_RESUME;
+	else if (conf.blocked)
+		cfg->act = DP_NODE_DIS;
+	else
+		cfg->act = DP_NODE_EN;
+
+	if (conf.wred_enable) {
+		cfg->drop = DP_QUEUE_DROP_WRED;
+		cfg->wred_slope[0] = conf.queue_wred_slope_green;
+		cfg->wred_slope[1] = conf.queue_wred_slope_yellow;
+		cfg->wred_slope[2] = 0;
+		cfg->wred_max_allowed = conf.queue_wred_max_allowed;
+		cfg->wred_min_guaranteed = conf.queue_wred_min_guaranteed;
+		cfg->min_size[0] = conf.queue_wred_min_avg_green;
+		cfg->min_size[1] = conf.queue_wred_min_avg_yellow;
+		cfg->min_size[2] = 0;
+		cfg->max_size[0] = conf.queue_wred_max_avg_green;
+		cfg->max_size[1] = conf.queue_wred_max_avg_yellow;
+		cfg->max_size[2] = 0;
+		/*cfg->unit = conf.max_burst;*/
+		return DP_SUCCESS;
+	}
+	cfg->drop = DP_QUEUE_DROP_TAIL;
+	cfg->min_size[0] = conf.queue_wred_min_avg_green;
+	cfg->min_size[1] = conf.queue_wred_min_avg_yellow;
+	cfg->max_size[0] = conf.queue_wred_max_avg_green;
+	cfg->max_size[1] = conf.queue_wred_max_avg_yellow;
+	/*cfg->unit = conf.max_burst;*/
+	return DP_SUCCESS;
+}
+
+/* dp_node_link_en_set_31 API
+ * Enable current link node and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_node_link_en_set_31(struct dp_node_link_enable *en, int flag)
+{
+	struct hal_priv *priv;
+	int phy_id;
+
+	if (!en) {
+		PR_ERR("en info cannot be NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(en->inst);
+
+	if (en->type == DP_NODE_QUEUE) {
+		phy_id = get_qid_by_node(en->inst, en->id.q_id, 0);
+		if ((priv->qos_queue_stat[phy_id].flag &
+		    PP_NODE_ACTIVE) &&
+		    (priv->qos_sch_stat[en->id.q_id].parent.flag &
+		    PP_NODE_ACTIVE)) {
+			en->en = DP_NODE_EN;
+			return DP_SUCCESS;
+		}
+	} else if (en->type == DP_NODE_SCH) {
+		if ((priv->qos_sch_stat[en->id.sch_id].p_flag &
+		    PP_NODE_ACTIVE) &&
+		    (priv->qos_sch_stat[en->id.sch_id].parent.flag &
+		    PP_NODE_ACTIVE)) {
+			en->en = DP_NODE_EN;
+			return DP_SUCCESS;
+		}
+	} else if (en->type == DP_NODE_PORT) {
+		phy_id = get_qid_by_node(en->inst, en->id.cqm_deq_port, 0);
+		if (priv->deq_port_stat[phy_id].flag & PP_NODE_ACTIVE) {
+			en->en = DP_NODE_EN;
 			return DP_SUCCESS;
 		}
-		return DP_FAILURE;
 	}
 	return DP_FAILURE;
 }
 
-int dp_node_alloc_global_31(struct dp_node_alloc *node, int flag)
+/* dp_node_link_en_get_31 API
+ * Get status of link node and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_node_link_en_get_31(struct dp_node_link_enable *en, int flag)
 {
-	int id;
-	int inst = node->inst;
-	struct pp_qos_dev *qos_dev;
-	struct pp_qos_queue_info *qos_queue_info;
-	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
-	int phy_id;
+	struct hal_priv *priv;
+	static struct pp_qos_queue_conf q_conf;
+	static struct pp_qos_sched_conf sched_conf;
+	static struct pp_qos_port_conf p_conf;
 
-	if (node->type == DP_NODE_QUEUE) {
-		pp_qos_queue_allocate(qos_dev, &id);
-		if (pp_qos_queue_info_get(qos_dev, id, qos_queue_info)) {
-			pp_qos_queue_remove(qos_dev, id);
+	if (!en) {
+		PR_ERR("en info NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(en->inst);
+	if (!priv || !priv->qdev) {
+		PR_ERR("priv or priv->qdev NULL\n");
+		return DP_FAILURE;
+	}
+	PR_INFO("en->id.q_id=%d\n", en->id.q_id);
+	en->en = 0;
+	if (en->type == DP_NODE_QUEUE) {
+		memset(&q_conf, 0, sizeof(q_conf));
+		if (qos_queue_conf_get(priv->qdev,
+		    priv->qos_queue_stat[en->id.q_id].node_id,
+		    &q_conf)) {
+			PR_ERR("qos_queue_conf_get fail: q[%d]\n",
+			       en->id.q_id);
 			return DP_FAILURE;
 		}
-		phy_id = qos_queue_info->physical_id;
-		priv->qos_queue_stat[phy_id].flag = PP_NODE_ALLOC;
-		priv->qos_queue_stat[phy_id].node_id = id;
-		priv->qos_queue_stat[phy_id].resv_idx = -1;
-		node->id = (union dp_node_id)phy_id;
+		if (q_conf.blocked)
+			en->en |= DP_NODE_DIS;
+		if (q_conf.common_prop.suspended)
+			en->en |= DP_NODE_SUSPEND;
+	} else if (en->type == DP_NODE_SCH) {
+		memset(&sched_conf, 0, sizeof(sched_conf));
+		if (qos_sched_conf_get(priv->qdev,
+		    en->id.sch_id, &sched_conf)) {
+			PR_ERR("qos_sched_conf_get fail: sched[/%d]\n",
+			       en->id.sch_id);
+			return DP_FAILURE;
+		}
+		if (sched_conf.common_prop.suspended)
+			en->en |= DP_NODE_SUSPEND;
+	} else if (en->type == DP_NODE_PORT) {
+		memset(&p_conf, 0, sizeof(p_conf));
+		if (qos_port_conf_get(priv->qdev,
+		    priv->deq_port_stat[en->id.cqm_deq_port].node_id,
+		    &p_conf)) {
+			PR_ERR("qos_queue_conf_get fail: port[%d]\n",
+			       en->id.cqm_deq_port);
+			return DP_FAILURE;
+		}
+		if (p_conf.common_prop.suspended)
+			en->en |= DP_NODE_SUSPEND;
+	}
+	return DP_SUCCESS;
+}
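+
+/* Illustrative sketch (comment only, not compiled): check whether a
+ * (hypothetical) queue 5 is blocked or suspended.
+ *
+ *	struct dp_node_link_enable en = {0};
+ *
+ *	en.inst = 0;
+ *	en.type = DP_NODE_QUEUE;
+ *	en.id.q_id = 5;
+ *	if (dp_node_link_en_get_31(&en, 0) == DP_SUCCESS &&
+ *	    (en.en & DP_NODE_DIS))
+ *		PR_INFO("queue 5 is blocked\n");
+ */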
+
+/* dp_link_get_31 API
+ * get full link based on queue and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_link_get_31(struct dp_qos_link *cfg, int flag)
+{
+	struct pp_qos_queue_conf queue_cfg;
+	struct pp_qos_sched_conf sched_cfg;
+	struct hal_priv *priv;
+	int i;
+	int node_id;
+
+	if (!cfg) {
+		PR_ERR("cfg cannot be NULL\n");
+		return DP_FAILURE;
+	}
+	priv = HAL(cfg->inst);
+	node_id = priv->qos_queue_stat[cfg->q_id].node_id;
+
+	if (priv->qos_queue_stat[cfg->q_id].flag != PP_NODE_ACTIVE) {
+		PR_ERR("Incorrect queue:%d state:expect ACTIV\n", cfg->q_id);
+		return DP_FAILURE;
+	}
+
+	if (qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
+		PR_ERR("failed to qos_queue_conf_get\n");
+		return DP_FAILURE;
+	}
+
+	if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_PORT) {
+		cfg->q_arbi = 0;
+		cfg->q_leaf = 0;
+		cfg->n_sch_lvl = 0;
+		cfg->q_prio_wfq = queue_cfg.queue_child_prop.priority;
+		cfg->cqm_deq_port = priv->qos_sch_stat[node_id].parent.node_id;
 		return DP_SUCCESS;
-	} else if (node->type == DP_NODE_SCH) {
-		pp_qos_sched_allocate(qos_dev, &id);
-		priv->qos_sch_stat[id].flag = PP_NODE_ALLOC;
-		priv->qos_sch_stat[id].resv_idx = -1;
-		node->id = (union dp_node_id)id;
+	} else if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_SCH) {
+		cfg->q_arbi = 0;
+		cfg->q_leaf = 0;
+		cfg->q_prio_wfq = queue_cfg.queue_child_prop.priority;
+		cfg->sch[0].id = priv->qos_sch_stat[node_id].parent.node_id;
+		/*walk up the scheduler chain until the dequeue port*/
+		for (i = 0; i < DP_MAX_SCH_LVL; i++) {
+			if (qos_sched_conf_get(priv->qdev, cfg->sch[i].id,
+					       &sched_cfg)) {
+				PR_ERR("failed to qos_sched_conf_get\n");
+				return DP_FAILURE;
+			}
+			cfg->sch[i].arbi =
+					sched_cfg.sched_parent_prop.arbitration;
+			cfg->sch[i].prio_wfq =
+					sched_cfg.sched_child_prop.priority;
+			cfg->sch[i].leaf = 0;
+			cfg->n_sch_lvl = i + 1;
+			if (priv->qos_sch_stat[cfg->sch[i].id].parent.type !=
+			    DP_NODE_SCH || (i + 1) == DP_MAX_SCH_LVL)
+				break;
+			cfg->sch[i + 1].id =
+			     priv->qos_sch_stat[cfg->sch[i].id].parent.node_id;
+		}
+		cfg->cqm_deq_port = priv->qos_sch_stat
+			[cfg->sch[cfg->n_sch_lvl - 1].id].parent.node_id;
 		return DP_SUCCESS;
 	}
 	return DP_FAILURE;
 }
 
-int dp_node_alloc_31(struct dp_node_alloc *node, int flag)
+/* dp_link_add_31 API
+ * configure end to end link and return DP_SUCCESS
+ * else return DP_FAILURE
+ */
+int dp_link_add_31(struct dp_qos_link *cfg, int flag)
 {
-	int inst = node->inst;
-	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
+	struct dp_node_link nl = {0};
+	struct dp_node_link *info = &nl;
+	struct dp_node_alloc node = {0};
+	int i;
+	int f_q_free = 0;
+	int f_q_auto = 0; /*flag if node is DP_NODE_AUTO_ID*/
+	struct f {
+		u16 flag;
+		u16 sch_id;
+		u16 f_auto; /*flag if node is DP_NODE_AUTO_ID*/
+	};
+	struct f f_sch_free[DP_MAX_SCH_LVL] = {0};
 
-	if (!priv) {
-		PR_ERR("priv is NULL cannot proceed!!\n");
+	if (!cfg) {
+		PR_ERR("cfg cannot be NULL\n");
 		return DP_FAILURE;
 	}
 
-	if (flag  && DP_ALLOC_RESV_ONLY) {
-	/* if can get from its reserved resource and return DP_SUCCESS.
-	 *	Otherwise return DP_FAIL;
-	 */
-		return dp_node_alloc_resv_31(node, flag);
+	if (cfg->n_sch_lvl > DP_MAX_SCH_LVL) {
+		PR_ERR("Incorrect sched_lvl:%s(%d) > %s(%d)\n",
+		       "cfg->n_sch_lvl", cfg->n_sch_lvl,
+		       "DP_MAX_SCH_LVL", DP_MAX_SCH_LVL);
+		return DP_FAILURE;
 	}
-	if (flag  && DP_ALLOC_GLOBAL_ONLY) {
-	/* if can get from global pool and return DP_SUCCESS.
-	 *	Otherwise return DP_FAILURE;
-	 */
-		return dp_node_alloc_global_31(node, flag);
+
+	info->inst = cfg->inst;
+	info->dp_port = cfg->dp_port;
+	info->node_id.q_id = cfg->q_id;
+	info->cqm_deq_port.cqm_deq_port = cfg->cqm_deq_port;
+
+	if (cfg->q_id == DP_NODE_AUTO_ID)
+		f_q_auto = 1;
+
+	if (cfg->n_sch_lvl) {
+		/*link sched to port*/
+		if (cfg->sch[cfg->n_sch_lvl - 1].id == DP_NODE_AUTO_ID)
+			f_sch_free[cfg->n_sch_lvl - 1].f_auto = 1;
+		info->node_id.sch_id = cfg->sch[cfg->n_sch_lvl - 1].id;
+		info->node_type = DP_NODE_SCH;
+		info->p_node_id.cqm_deq_port = cfg->cqm_deq_port;
+		info->p_node_type = DP_NODE_PORT;
+		info->arbi = cfg->sch[cfg->n_sch_lvl - 1].arbi;
+		info->leaf = cfg->sch[cfg->n_sch_lvl - 1].leaf;
+		info->prio_wfq = cfg->sch[cfg->n_sch_lvl - 1].prio_wfq;
+		f_sch_free[cfg->n_sch_lvl - 1].flag = 1;
+		f_sch_free[cfg->n_sch_lvl - 1].sch_id =
+						cfg->sch[cfg->n_sch_lvl - 1].id;
+
+		if (dp_node_link_add_31(info, flag)) {
+			PR_ERR("Failed to link Sch:%d to Port:%d\n",
+			       cfg->sch[cfg->n_sch_lvl - 1].id,
+			       cfg->cqm_deq_port);
+			goto EXIT;
+		}
+
+		/*link sched to sched*/
+		for (i = (cfg->n_sch_lvl - 2); i >= 0; i--) {
+			if (cfg->sch[i].id == DP_NODE_AUTO_ID)
+				f_sch_free[i].f_auto = 1;
+			info->node_id.sch_id = cfg->sch[i].id;
+			info->node_type = DP_NODE_SCH;
+			info->p_node_id.sch_id = cfg->sch[i + 1].id;
+			info->p_node_type = DP_NODE_SCH;
+			info->arbi = cfg->sch[i].arbi;
+			info->leaf = cfg->sch[i].leaf;
+			info->prio_wfq = cfg->sch[i].prio_wfq;
+			f_sch_free[i].flag = 1;
+			f_sch_free[i].sch_id = cfg->sch[i].id;
+
+			if (dp_node_link_add_31(info, flag)) {
+				PR_ERR("Failed to link Sch:%d to Sch:%d\n",
+				       cfg->sch[i].id, cfg->sch[i + 1].id);
+				goto EXIT;
+			}
+		}
+		/*link Queue to sched*/
+		info->node_type = DP_NODE_QUEUE;
+		info->p_node_id.sch_id = cfg->sch[0].id;
+		info->p_node_type = DP_NODE_SCH;
+		info->arbi = cfg->sch[0].arbi;
+		info->leaf = cfg->sch[0].leaf;
+		info->prio_wfq = cfg->sch[0].prio_wfq;
+		f_q_free = 1;
+
+		if (dp_node_link_add_31(info, flag)) {
+			PR_ERR("Failed to link Q:%d to Sch:%d\n",
+			       cfg->q_id, cfg->sch[0].id);
+			goto EXIT;
+		}
+	} else {
+		/*link Queue to Port*/
+		info->node_type = DP_NODE_QUEUE;
+		info->p_node_id.cqm_deq_port = cfg->cqm_deq_port;
+		info->p_node_type = DP_NODE_PORT;
+		info->arbi = cfg->q_arbi;
+		info->leaf = cfg->q_leaf;
+		info->prio_wfq = cfg->q_prio_wfq;
+		f_q_free = 1;
+
+		if (dp_node_link_add_31(info, flag)) {
+			PR_ERR("Failed to link Q:%d to Port:%d\n",
+			       cfg->q_id, cfg->cqm_deq_port);
+			goto EXIT;
+		}
 	}
-	if (flag  && DP_ALLOC_GLOBAL_FIRST) {
-	/* if can get from the global pool, return DP_SUCCESS;
-	 * if can get from its reserved resource and return DP_SUCCESS;
-	 * return DP_FAILURE;
-	 */
-		if (dp_node_alloc_global_31(node, flag) == DP_SUCCESS)
-			return DP_SUCCESS;
-		return dp_node_alloc_resv_31(node, flag);
+
+	return DP_SUCCESS;
+
+EXIT:
+	for (i = (cfg->n_sch_lvl - 1); i >= 0; i--) {
+		if (!f_sch_free[i].flag)
+			continue;
+		if (f_sch_free[i].f_auto) {
+			/*sch is auto_alloc: move it to FREE*/
+			node.inst = cfg->inst;
+			node.dp_port = cfg->dp_port;
+			node.type = DP_NODE_SCH;
+			node.id.sch_id = f_sch_free[i].sch_id;
+			dp_node_free_31(&node, flag);
+		} else {
+			/*sch provided by caller: move it back to ALLOC*/
+			node_stat_update(info->inst, f_sch_free[i].sch_id,
+					 DP_NODE_DEC);
+		}
+	}
 	}
-	/*default order: reserved pool, */
-	/* if can get from its reserved resource and return DP_SUCCESS.
-	 * if can get from the global pool, return DP_SUCCESS
-	 * return DP_FAILURE
-	 */
-	if (dp_node_alloc_resv_31(node, flag) == DP_SUCCESS)
-		return DP_SUCCESS;
-	return dp_node_alloc_global_31(node, flag);
+	if (f_q_free) {
+		if (f_q_auto) {
+			/*queue is auto_alloc: move it to FREE*/
+			node.inst = cfg->inst;
+			node.dp_port = cfg->dp_port;
+			node.type = DP_NODE_QUEUE;
+			node.id.q_id = cfg->q_id;
+			dp_node_free_31(&node, flag);
+		} else {
+			/*queue provided by caller: move it back to ALLOC*/
+			node_stat_update(info->inst, cfg->q_id, DP_NODE_DEC);
+		}
+	}
+	return DP_FAILURE;
 }
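+
+/* Illustrative sketch (comment only, not compiled): build a full
+ * queue -> sched -> port link in one call; ids are hypothetical.
+ *
+ *	struct dp_qos_link lcfg = {0};
+ *
+ *	lcfg.inst = 0;
+ *	lcfg.dp_port = 1;
+ *	lcfg.q_id = DP_NODE_AUTO_ID;
+ *	lcfg.cqm_deq_port = 7;
+ *	lcfg.n_sch_lvl = 1;
+ *	lcfg.sch[0].id = DP_NODE_AUTO_ID;
+ *	lcfg.sch[0].prio_wfq = 0;
+ *	if (dp_link_add_31(&lcfg, 0) == DP_SUCCESS)
+ *		PR_INFO("queue->sched->port link ready\n");
+ */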
 
-int dp_node_free_31(struct dp_node_alloc *node, int flag)
-{
-/* if (this node is not unlinked yet)
- * unlink it
- * If (this node is a  reserved node)
- * return this node to this device’s reserved node table
- * mark this node as Free in this device’s reserved node table
- * else
- * return this node to the system global table
- * mark this node free in system global table
+/* dp_link_free_31 API
+ * Remove link of attached nodes and return DP_SUCCESS
+ * else return DP_FAILURE
  */
-	int sch_id;
-	struct pp_qos_dev *qos_dev;
-	int inst = node->inst;
-	struct hal_priv *priv = (struct hal_priv *)dp_port_prop[inst].priv_hal;
-	int port = node->dp_port;
-	int phy_id;
-	int node_id;
-	int resv_idx;
+static int dp_link_free_31(struct dp_qos_link *cfg, int flag)
+{
+	struct dp_node_link nl = {0};
+	struct dp_node_link *info = &nl;
 
-	if (!priv) {
-		PR_ERR("priv is NULL cannot proceed!!\n");
+	if (!cfg) {
+		PR_ERR("cfg cannot be NULL\n");
 		return DP_FAILURE;
 	}
 
-	if (node->type == DP_NODE_QUEUE) {
-		struct resv_q *resv_q = priv->resv[port].resv_q;
-
-		phy_id = node->id.q_id;
-		node_id = priv->qos_queue_stat[phy_id].node_id;
-		if (!(priv->qos_queue_stat[phy_id].flag & PP_NODE_RESERVE)) {
-			/* Free to global resource */
-			pp_qos_queue_remove(qos_dev, node_id);
-			priv->qos_queue_stat[phy_id].flag = PP_NODE_FREE;
-			priv->qos_queue_stat[phy_id].node_id = 0;
-			priv->qos_queue_stat[phy_id].resv_idx = -1;
-			return DP_SUCCESS;
-		}
-		/* Return to reserved resource */
-		resv_idx = priv->qos_queue_stat[phy_id].resv_idx;
-		resv_q[resv_idx].flag = PP_NODE_FREE;
-		priv->qos_queue_stat[phy_id].flag = PP_NODE_FREE;
-		priv->qos_queue_stat[phy_id].node_id = 0;
-		priv->qos_queue_stat[phy_id].resv_idx = -1;
-		return DP_SUCCESS;
-	} else if (node->type == DP_NODE_SCH) {
-		struct resv_sch *resv_sched = priv->resv[port].resv_sched;
+	if (cfg->n_sch_lvl > DP_MAX_SCH_LVL) {
+		PR_ERR("Incorrect sched_lvl:%s(%d) > %s(%d)\n",
+		       "cfg->n_sch_lvl", cfg->n_sch_lvl,
+		       "DP_MAX_SCH_LVL", DP_MAX_SCH_LVL);
+		return DP_FAILURE;
+	}
 
-		sch_id = node->id.sch_id;
-		if (!(priv->qos_sch_stat[sch_id].flag & PP_NODE_RESERVE)) {
-			/* Free to global resource */
-			pp_qos_sched_remove(qos_dev, sch_id);
-			priv->qos_sch_stat[sch_id].flag = PP_NODE_FREE;
-			priv->qos_sch_stat[sch_id].resv_idx = -1;
-			return DP_SUCCESS;
+	info->inst = cfg->inst;
+	info->dp_port = cfg->dp_port;
+	info->node_id.q_id = cfg->q_id;
+	info->cqm_deq_port.cqm_deq_port = cfg->cqm_deq_port;
+	info->p_node_id.cqm_deq_port = cfg->cqm_deq_port;
+	/*derive the types: without a sched level the queue sits on the port*/
+	info->node_type = cfg->n_sch_lvl ? DP_NODE_SCH : DP_NODE_QUEUE;
+	info->p_node_type = cfg->n_sch_lvl ? DP_NODE_SCH : DP_NODE_PORT;
+
+	if (info->p_node_type == DP_NODE_PORT) {
+		/*unlink sched to port*/
+		if (info->node_type == DP_NODE_SCH) {
+			info->node_id.sch_id = cfg->sch[cfg->n_sch_lvl - 1].id;
+			dp_node_unlink_31(info, flag);
+			dp_link_unset(info, flag);
+		}
+		/*unlink Queue to Port*/
+		if (info->node_type == DP_NODE_QUEUE) {
+			info->node_id.q_id = cfg->q_id;
+			dp_node_unlink_31(info, flag);
+			dp_link_unset(info, flag);
 		}
-		/* Return to reserved resource */
-		resv_idx = priv->qos_sch_stat[sch_id].resv_idx;
-		resv_sched[resv_idx].flag = PP_NODE_FREE;
-		priv->qos_sch_stat[sch_id].flag = PP_NODE_FREE;
-		priv->qos_sch_stat[sch_id].resv_idx = -1;
+	} else if (info->p_node_type == DP_NODE_SCH) {
+		/* sched-to-sched and queue-to-sched unlink: not supported */
+		return DP_FAILURE;
+	}
+	return DP_FAILURE;
+}
+
+int dp_shaper_conf_set_31(struct dp_shaper_conf *cfg, int flag)
+{
+	switch (cfg->cmd) {
+	case DP_SHAPER_CMD_ADD:
+	case DP_SHAPER_CMD_REMOVE:
+	case DP_SHAPER_CMD_DISABLE:
+		/* placeholder: HW shaper programming is not implemented yet
+		 * (e.g. set_lookup_qid_via_index for queue shapers)
+		 */
+		if (cfg->type == DP_NODE_QUEUE || cfg->type == DP_NODE_SCH)
+			return DP_SUCCESS;
+		break;
+	default:
+		break;
+	}
+	return DP_FAILURE;
+}
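+/* Sketch of the eventual shaper flow (assumption; not implemented above):
+ * resolve the logical queue/scheduler id to its PPv4 node via
+ * qos_queue_stat / qos_sch_stat, then program or clear the bandwidth
+ * limit through the PPv4 QoS driver (qos_queue_set / qos_sched_set).
+ */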
+
+int dp_shaper_conf_get_31(struct dp_shaper_conf *cfg, int flag)
+{
+	struct hal_priv *priv = HAL(cfg->inst);
+
+	if (cfg->type == DP_NODE_QUEUE) {
+		if (priv->qos_queue_stat[cfg->id.q_id].flag ==
+		    PP_NODE_ACTIVE) {
+			cfg->id.q_id =
+				priv->qos_queue_stat[cfg->id.q_id].node_id;
+			return DP_SUCCESS;
+		}
+	} else if (cfg->type == DP_NODE_SCH) {
+		/* scheduler state lives in qos_sch_stat, not qos_queue_stat */
+		if (priv->qos_sch_stat[cfg->id.sch_id].flag == PP_NODE_ACTIVE)
+			return DP_SUCCESS;
+	}
 	return DP_FAILURE;
 }
 
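+/* The four stubs below are placeholders: queue-map and counter-mode
+ * configuration is not implemented yet, so they report DP_FAILURE.
+ */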
+int dp_queue_map_get_31(struct queue_map_get *cfg, int flag)
+{
+	return DP_FAILURE;
+}
+
+int dp_queue_map_set_31(struct queue_map_set *cfg, int flag)
+{
+	return DP_FAILURE;
+}
+
+int dp_counter_mode_set_31(struct dp_counter_conf *cfg, int flag)
+{
+	return DP_FAILURE;
+}
+
+int dp_counter_mode_get_31(struct dp_counter_conf *cfg, int flag)
+{
+	return DP_FAILURE;
+}
+
 #ifdef PP_QOS_LINK_EXAMPLE
 /* Example: queue -> QOS/CQM dequeue port
  * inst: dp instance
@@ -246,7 +2276,8 @@ int dp_node_free_31(struct dp_node_alloc *node, int flag)
  * t_conf: for pon, it is used to get cqm dequeue port via t-cont index
  *         for others, its value should be 0
 * q_node: queue node id
-*/
+ */
+
 int ppv4_queue_port_example(int inst, int dp_port, int t_cont, int q_node)
 {
 	struct pp_qos_port_conf  port_cfg;
@@ -254,45 +2285,46 @@ int ppv4_queue_port_example(int inst, int dp_port, int t_cont, int q_node)
 	int qos_port_node;
 	int rc;
 	int cqm_deq_port;
-	struct pp_qos_dev * qos_dev = NULL;
 	struct pmac_port_info *port_info;
+	struct hal_priv *priv = HAL(inst);
 
 	port_info = &dp_port_info[inst][dp_port];
 	cqm_deq_port = port_info->deq_port_base + t_cont;
 
-	/*Allocate qos dequeue port's node id via cqm_deq_port */
-	rc = pp_qos_port_allocate(qos_dev, cqm_deq_port, &qos_port_node);
+	/*Allocate qos dequeue port's node id via cqm_deq_port*/
+	rc = qos_port_allocate(priv->qdev, cqm_deq_port, &qos_port_node);
 	if (rc) {
-		pr_err("failed to pp_qos_port_allocate\n");
+		PR_ERR("failed to qos_port_allocate\n");
 		return -1;
 	}
 
-	/*Configure QOS dequeue port */
-	pp_qos_port_conf_set_default(&port_cfg);
+	/*Configure QOS dequeue port*/
+	qos_port_conf_set_default(&port_cfg);
 	port_cfg.ring_address = (void *)(port_info->tx_ring_addr +
 		port_info->tx_ring_offset * t_cont);
 	port_cfg.ring_size = port_info->tx_ring_size;
 	if (port_info->tx_pkt_credit) {
 		port_cfg.packet_credit_enable = 1;
-		port_cfg.packet_credit = port_info->tx_pkt_credit;
+		port_cfg.credit = port_info->tx_pkt_credit;
 	}
-	if (port_info->tx_pkt_credit) {
+#ifdef TX_BYTE_CREDIT
+	if (port_info->tx_b_credit) {
 		port_cfg.byte_credit_enable = 1;
-		port_cfg.byte_credit = port_info->tx_pkt_credit;
+		port_cfg.byte_credit = port_info->tx_b_credit;
 	}
+#endif
 #ifdef EXT_BW
 	port_cfg.parent.arbitration = ARBITRATION_WRR;
 	port_cfg.common.bandwidth_limit = 1000;
 #endif
-	rc = pp_qos_port_set(qos_dev, qos_port_node, &port_cfg);
+	rc = qos_port_set(priv->qdev, qos_port_node, &port_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_port_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_port_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
 
-	/*Attach queue to QoS port */
-	pp_qos_queue_conf_set_default(&queue_cfg);
+	/*Attach queue to QoS port*/
+	qos_queue_conf_set_default(&queue_cfg);
 	queue_cfg.queue_child_prop.parent = qos_port_node;
 #ifdef EXT_BW
 	queue_cfg.max_burst  = 64;
@@ -300,16 +2332,15 @@ int ppv4_queue_port_example(int inst, int dp_port, int t_cont, int q_node)
 	queue_cfg.queue_wred_min_guaranteed = 1;
 	queue_cfg.queue_wred_max_allowed = 10;
 #endif
-	rc = pp_qos_queue_set(qos_dev, q_node, &queue_cfg);
+	rc = qos_queue_set(priv->qdev, q_node, &queue_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_queue_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_queue_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
 	return 0;
 }
 
-
 /* Example: queue -> scheduler1- > scheduler 2 -> QOS/CQM dequeue port
  * inst: dp instance
  * dp_port: dp port id
@@ -318,7 +2349,7 @@ int ppv4_queue_port_example(int inst, int dp_port, int t_cont, int q_node)
 * q_node: queue node id
 * sch_node1: scheduler node connected to the queue
 * sch_node2: scheduler node connected to sch_node1
-*/
+ */
 int ppv4_queue_scheduler(int inst, int dp_port, int t_cont, int q_node,
 			 int sch_node1, int sch_node2)
 {
@@ -328,27 +2359,26 @@ int ppv4_queue_scheduler(int inst, int dp_port, int t_cont, int q_node,
 	int qos_port_node;
 	int rc;
 	int cqm_deq_port;
-	struct pp_qos_dev * qos_dev = NULL;
 	struct pmac_port_info *port_info;
+	struct hal_priv *priv = HAL(inst);
 
 	port_info = &dp_port_info[inst][dp_port];
 	cqm_deq_port = port_info->deq_port_base + t_cont;
 
-	/*Allocate qos dequeue port's node id via cqm_deq_port */
-	rc = pp_qos_port_allocate(qos_dev, cqm_deq_port, &qos_port_node);
+	/*Allocate qos dequeue port's node id via cqm_deq_port*/
+	rc = qos_port_allocate(priv->qdev, cqm_deq_port, &qos_port_node);
 	if (rc) {
-		pr_err("failed to pp_qos_port_allocate\n");
+		PR_ERR("failed to qos_port_allocate\n");
 		return -1;
 	}
 
-	/*Configure QOS dequeue port */
-	pp_qos_port_conf_set_default(&port_cfg);
+	/*Configure QOS dequeue port*/
+	qos_port_conf_set_default(&port_cfg);
 	port_cfg.ring_address = (void *)(port_info->tx_ring_addr +
 		port_info->tx_ring_offset * t_cont);
 	port_cfg.ring_size = port_info->tx_ring_size;
 	if (port_info->tx_pkt_credit) {
 		port_cfg.packet_credit_enable = 1;
-		port_cfg.packet_credit = port_info->tx_pkt_credit;
+		port_cfg.credit = port_info->tx_pkt_credit;
 	}
 	if (port_info->tx_pkt_credit) {
 		port_cfg.byte_credit_enable = 1;
@@ -358,15 +2388,15 @@ int ppv4_queue_scheduler(int inst, int dp_port, int t_cont, int q_node,
 	port_cfg.parent.arbitration = ARBITRATION_WRR;
 	port_cfg.common.bandwidth_limit = 1000;
 #endif
-	rc = pp_qos_port_set(qos_dev, qos_port_node, &port_cfg);
+	rc = qos_port_set(priv->qdev, qos_port_node, &port_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_port_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_port_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
 
-	/*Attach queue to sch_node1 */
-	pp_qos_queue_conf_set_default(&queue_cfg);
+	/*Attach queue to sch_node1*/
+	qos_queue_conf_set_default(&queue_cfg);
 	queue_cfg.queue_child_prop.parent = sch_node1;
 #ifdef EXT_BW
 	queue_cfg.max_burst  = 64;
@@ -374,34 +2404,33 @@ int ppv4_queue_scheduler(int inst, int dp_port, int t_cont, int q_node,
 	queue_cfg.queue_wred_min_guaranteed = 1;
 	queue_cfg.queue_wred_max_allowed = 10;
 #endif
-	rc = pp_qos_queue_set(qos_dev, q_node, &queue_cfg);
+	rc = qos_queue_set(priv->qdev, q_node, &queue_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_queue_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_queue_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
 
-	/*Attach sch_node1 to sch_node2 */
-	pp_qos_sched_conf_set_default(&sched_cfg);
+	/*Attach sch_node1 to sch_node2*/
+	qos_sched_conf_set_default(&sched_cfg);
 	sched_cfg.sched_child_prop.parent = sch_node2;
-	rc = pp_qos_sched_set(qos_dev, sch_node1, &sched_cfg);
+	rc = qos_sched_set(priv->qdev, sch_node1, &sched_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_sched_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_sched_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
 
-	/*Attach sch_node2 to qos/cqm dequeue port */
-	pp_qos_sched_conf_set_default(&sched_cfg);
+	/*Attach sch_node2 to qos/cqm dequeue port*/
+	qos_sched_conf_set_default(&sched_cfg);
 	sched_cfg.sched_child_prop.parent = qos_port_node;
-	rc = pp_qos_sched_set(qos_dev, sch_node2, &sched_cfg);
+	rc = qos_sched_set(priv->qdev, sch_node2, &sched_cfg);
 	if (rc) {
-		pr_err("failed to pp_qos_sched_set\n");
-		pp_qos_port_remove(qos_dev, qos_port_node);
+		PR_ERR("failed to qos_sched_set\n");
+		qos_port_remove(priv->qdev, qos_port_node);
 		return -1;
 	}
-	
+
 	return 0;
 }
 #endif /* PP_QOS_LINK_EXAMPLE*/
-
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_proc.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_proc.c
index 7ae5cdc3b..8899a5d7e 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_proc.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_proc.c
@@ -33,7 +33,6 @@
 #define PROC_PMAC  "pmac"
 #define PROC_EP "ep" /*EP/port ID info */
 #define DP_PROC_CBMLOOKUP "lookup"
-#define DP_TX_PKT "tx"
 #define DP_MIB_Q "qos_mib"
 
 struct list_head fdb_tbl_list;
@@ -148,10 +147,11 @@ ssize_t proc_parser_write(struct file *file, const char *buf,
 	char *param_list[20];
 	s8 cpu = 0, mpe1 = 0, mpe2 = 0, mpe3 = 0, flag = 0;
 	int pce_rule_id;
-	GSW_PCE_rule_t pce;
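+	/* pce is static (and memset on entry): GSW_PCE_rule_t is too large
+	 * to sit on the kernel stack
+	 */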
+	static GSW_PCE_rule_t pce;
 	int inst = 0;
 	struct core_ops *gsw_handle;
 
+	memset(&pce, 0, sizeof(pce));
 	gsw_handle = dp_port_prop[inst].ops[GSWIP_R];
 	len = (sizeof(str) > count) ? count : sizeof(str) - 1;
 	len -= copy_from_user(str, buf, len);
@@ -919,152 +919,6 @@ help:
 	return count;
 }
 
-/*flag: for dp_xmit
- *pool: 1-CPU
- */
-static int dp_send_packet(u8 *pdata, int len, char *devname, u32 flag,
-			  u32 pool, u32 discard)
-{
-	struct sk_buff *skb;
-	dp_subif_t subif = { 0 };
-	#define PAD  32
-
-	if (pool == 1) {
-		len += PAD;
-		if (len < ETH_ZLEN)
-			len = ETH_ZLEN;
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (skb)
-			skb_reserve(skb, PAD);
-		PR_INFO_ONCE("Allocate CPU buffer\n");
-	} else {
-		skb = cbm_alloc_skb(len + 8, GFP_ATOMIC);
-		PR_INFO_ONCE("Allocate bm buffer\n");
-	}
-
-	if (unlikely(!skb)) {
-		PR_ERR("allocate cbm buffer fail\n");
-		return -1;
-	}
-
-	skb->DW0 = 0;
-	skb->DW1 = 0;
-	skb->DW2 = 0;
-	skb->DW3 = 0;
-	if (discard) {
-		((struct dma_tx_desc_3 *)&skb->DW3)->field.dic = 1;
-		PR_INFO_ONCE("discard bit set in DW3\n");
-	}
-	memcpy(skb->data, pdata, len);
-	skb->len = 0;
-	skb_put(skb, len);
-	skb->dev = dev_get_by_name(&init_net, devname);
-
-	if (dp_get_netif_subifid(skb->dev, skb, NULL, skb->data, &subif, 0)) {
-		PR_ERR("dp_get_netif_subifid failed for %s\n",
-		       skb->dev->name);
-		dev_kfree_skb_any(skb);
-		return -1;
-	}
-
-	((struct dma_tx_desc_1 *)&skb->DW1)->field.ep = subif.port_id;
-	((struct dma_tx_desc_0 *)&skb->DW0)->field.dest_sub_if_id =
-		subif.subif;
-
-	if (dp_xmit(skb->dev, &subif, skb, skb->len, flag))
-		return -1;
-
-	return 0;
-}
-
-static u8 ipv4_plain_udp[] = {
-	0x00, 0x00, 0x01, 0x00, 0x00, 0x01, /*mac */
-	0x00, 0x10, 0x94, 0x00, 0x00, 0x02,
-	0x08, 0x00,	/*type */
-	0x45, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x11, /*ip hdr*/
-	0x3A, 0x56, 0xC0, 0x55, 0x01, 0x02, 0xC0, 0x00, 0x00, 0x01,
-	0x04, 0x00, 0x00, 0x00, 0x00, 0x2A, 0x7A, 0x41, 0x00, 0x00, /*udp hdr*/
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00
-};
-
-static u8 ipv4_plain_tcp[1514] = {
-	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, /*mac */
-	0x00, 0x10, 0x94, 0x00, 0x00, 0x02,
-	0x08, 0x00,	/*type */
-	0x45, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x06, /*ip hdr*/
-	0x3A, 0x61, 0xC0, 0x55, 0x01, 0x02, 0xC0, 0x00, 0x00, 0x01,
-	0x04, 0x00, 0x04, 0x00, 0x00, 0x01, 0xE2, 0x40, 0x00, 0x03, /*tcp hdr*/
-	0x94, 0x47, 0x50, 0x10, 0x10, 0x00, 0x9F, 0xD9, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*data */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00
-};
-
-ssize_t proc_tx_pkt(struct file *file, const char *buf,
-		    size_t count, loff_t *ppos)
-{
-	int len = 0;
-	char data[100];
-	char *param_list[10];
-	int num, pkt_num, size, times, pool = 0, disc = 0;
-	char *p;
-
-	len = (count >= sizeof(data)) ? (sizeof(data) - 1) : count;
-	DP_DEBUG(DP_DBG_FLAG_DBG, "len=%d\n", len);
-
-	if (len <= 0) {
-		PR_ERR("Wrong len value (%d)\n", len);
-		return count;
-	}
-	if (copy_from_user(data, buf, len)) {
-		PR_ERR("copy_from_user fail");
-		return count;
-	}
-	data[len - 1] = 0; /* Make string */
-	num = dp_split_buffer(data, param_list, ARRAY_SIZE(param_list));
-	if (num <= 1)
-		goto help;
-	pkt_num = dp_atoi(param_list[1]);
-	if (pkt_num <= 0)
-		pkt_num = 1;
-	if (dp_strcmpi("tcp", param_list[2]) == 0) {
-		p = ipv4_plain_tcp;
-		size = sizeof(ipv4_plain_tcp);
-		PR_INFO("send tcp packet\n");
-	} else { /*udp*/
-		p = ipv4_plain_udp;
-		size = sizeof(ipv4_plain_udp);
-		PR_INFO("send udp packet\n");
-	}
-	if (dp_strcmpi("bm", param_list[3]) == 0) {
-		PR_INFO("BM\n");
-		pool = 0;
-	} else {
-		PR_INFO("CPU buffer\n");
-		pool = 1;
-	}
-	if (dp_strcmpi("disc", param_list[4]) == 0) {
-		PR_INFO("disc\n");
-		disc = 1;
-	}
-	times = 0;
-	while (pkt_num--) {
-		if (dp_send_packet(p, size, param_list[0], 0, pool, disc))
-			break;
-		times++;
-	}
-	PR_INFO("sent %d packet already\n", times);
-	return count;
-help:   /*                        [0]         [1]         [2]     [3] [4]*/
-	PR_INFO("tx packet: echo <dev name> <packet num> <udp/tcp> %s %s\n",
-		"<pool type:cpu/bm>",
-		"<disc>");
-	return count;
-}
-
 /*in bytes*/
 int get_q_qocc(int inst, int qid, u32 *c)
 {
@@ -1190,12 +1044,15 @@ ssize_t proc_qos_mib(struct file *file, const char *buf,
 				PR_INFO("P[%03d]: 0x%08x 0x%08x\n",
 					i, gree_b, yellow_b);
 		}
-	}
+	} else {
+		goto help;
+	}
 
 	return count;
 help:   /*                        [0]    [1]*/
-	PR_INFO("queue mib: echo <q> <start qid> <end qid>\n");
-	PR_INFO("port  mib: echo <p> <start port_id> <end port_id>\n");
+	PR_INFO("queue mib: echo <q> <start qid> <end qid> > %s\n",
+		"/proc/dp/" DP_MIB_Q);
+	PR_INFO("port  mib: echo <p> <start port_id> <end port_id> > %s\n",
+		"/proc/dp/" DP_MIB_Q);
 	return count;
 }
 
@@ -2548,7 +2405,6 @@ static struct dp_proc_entry dp_proc_entries[] = {
 #endif
 	{DP_PROC_CBMLOOKUP, NULL, lookup_dump31, lookup_start31,
 		proc_get_qid_via_index31},
-	{DP_TX_PKT, NULL, NULL, NULL, proc_tx_pkt},
 	{DP_MIB_Q, NULL, NULL, NULL, proc_qos_mib},
 
 	/*the last place holder */
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.c b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.c
index c6b88da84..84491f5a0 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.c
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.c
@@ -206,8 +206,7 @@ int dp_swdev_bridge_port_cfg_reset(struct br_info *br_item,
 						   gsw_handle, &brportcfg);
 				if (ret != GSW_statusOk) {
 					PR_ERR
-					("failed getting br port cfg:%s\r\n",
-					  __func__);
+					("failed getting br port cfg\r\n");
 					return -1;
 				}
 				UNSET_BP_MAP(brportcfg.nBridgePortMap, bport);
@@ -222,8 +221,7 @@ int dp_swdev_bridge_port_cfg_reset(struct br_info *br_item,
 						   .BridgePort_ConfigSet,
 						   gsw_handle, &brportcfg);
 				if (ret != GSW_statusOk) {
-					PR_ERR("Fail alloc/cfg br port %s\n",
-					       __func__);
+					PR_ERR("Fail alloc/cfg br port\n");
 					return -1;
 				}
 			}
@@ -286,3 +284,79 @@ int dp_swdev_free_brcfg(int inst, u16 fid)
 	DP_DEBUG(DP_DBG_FLAG_SWDEV, "FID(%d) freed for inst:%d\n", fid, inst);
 	return 0;
 }
+
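+/* Collect the VLAN tags configured on the logic devices of a sub-interface
+ * (single- and double-tagged) and program them into the GSWIP extended
+ * VLAN block. Best effort: switchdev cannot roll back here, so GSWIP
+ * errors are only logged.
+ */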
+int dp_gswip_ext_vlan(int inst, int vap, int ep)
+{
+	struct core_ops *gsw_handle;
+	struct ext_vlan_info *vlan;
+	struct vlan_prop vlan_prop = {0};
+	struct pmac_port_info *port;
+	struct logic_dev *tmp = NULL;
+	int flag = 0, ret, i = 0;
+	int v1 = 0, v2 = 0;
+
+	gsw_handle = dp_port_prop[inst].ops[0];
+	port = &dp_port_info[inst][ep];
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan) {
+		PR_ERR("failed to alloc ext_vlan of %d bytes\n", sizeof(*vlan));
+		return 0; //Need to return -1 or ??
+	}
+	/* count the logic devs first: each one can add an entry to either
+	 * vlan list, so size both lists for the worst case
+	 */
+	list_for_each_entry(tmp, &dp_port_info[inst][ep].
+			    subif_info[vap].logic_dev, list)
+		i += 1;
+	vlan->vlan2_list = kcalloc(i ? i : 1, sizeof(*vlan->vlan2_list),
+				   GFP_KERNEL);
+	vlan->vlan1_list = kcalloc(i ? i : 1, sizeof(*vlan->vlan1_list),
+				   GFP_KERNEL);
+	if (!vlan->vlan1_list || !vlan->vlan2_list)
+		goto free_exit;
+	i = 0;
+	list_for_each_entry(tmp, &dp_port_info[inst][ep].
+			    subif_info[vap].logic_dev, list) {
+		DP_DEBUG(DP_DBG_FLAG_SWDEV, "tmp dev name:%s\n",
+			 tmp->dev ? tmp->dev->name : "NULL");
+		get_vlan_via_dev(tmp->dev, &vlan_prop);
+		if (vlan_prop.num == 2) {
+			DP_DEBUG(DP_DBG_FLAG_SWDEV,
+				 "VLAN Inner proto=%x, vid=%d\n",
+				 vlan_prop.in_proto, vlan_prop.in_vid);
+			DP_DEBUG(DP_DBG_FLAG_SWDEV,
+				 "VLAN out proto=%x, vid=%d\n",
+				 vlan_prop.out_proto, vlan_prop.out_vid);
+			vlan->vlan2_list[v2].outer_vlan.vid = vlan_prop.out_vid;
+			vlan->vlan2_list[v2].outer_vlan.tpid =
+							vlan_prop.out_proto;
+			vlan->vlan2_list[v2].ether_type = 0;
+			vlan->vlan2_list[v2].inner_vlan.vid = vlan_prop.in_vid;
+			vlan->vlan2_list[v2].inner_vlan.tpid =
+							vlan_prop.in_proto;
+			vlan->vlan2_list[v2].bp = tmp->bp;
+			v2 += 1;
+		} else if (vlan_prop.num == 1) {
+			DP_DEBUG(DP_DBG_FLAG_SWDEV,
+				 "outer VLAN proto=%x, vid=%d\n",
+				 vlan_prop.out_proto, vlan_prop.out_vid);
+			vlan->vlan1_list[v1].outer_vlan.vid = vlan_prop.out_vid;
+			vlan->vlan1_list[v1].outer_vlan.tpid =
+							vlan_prop.out_proto;
+			vlan->vlan1_list[v1].bp = tmp->bp;
+			v1 += 1;
+		}
+		i += 1;
+	}
+	DP_DEBUG(DP_DBG_FLAG_SWDEV, "vlan1=%d vlan2=%d total vlan int=%d\n",
+		 v1, v2, i);
+	vlan->n_vlan1 = v1;
+	vlan->n_vlan2 = v2;
+	vlan->bp = port->subif_info[vap].bp;
+	vlan->logic_port = port->port_id;
+	vlan->subif_grp = port->subif_info[vap].subif;/*subif value*/
+
+	if (port->subif_info[vap].swdev_priv)
+		vlan->priv = port->subif_info[vap].swdev_priv;
+	else
+		vlan->priv = NULL;
+	ret = set_gswip_ext_vlan(gsw_handle, vlan, flag);
+	if (ret == 0)
+		port->subif_info[vap].swdev_priv = vlan->priv;
+	else
+		PR_ERR("set gswip ext vlan return error\n");
+	kfree(vlan->vlan1_list);
+	kfree(vlan->vlan2_list);
+	kfree(vlan);
+
+	return 0; /* GSWIP may return -EIO, but swdev cannot fail here */
+}
diff --git a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.h b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.h
index 9035270d7..7072b241b 100644
--- a/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.h
+++ b/drivers/net/ethernet/lantiq/datapath/gswip31/datapath_switchdev.h
@@ -8,4 +8,5 @@ int dp_swdev_bridge_port_cfg_reset(struct br_info *br_item,
 				   int inst, int bport);
 int dp_swdev_bridge_cfg_set(int inst, u16 fid);
 int dp_swdev_free_brcfg(int inst, u16 fid);
+int dp_gswip_ext_vlan(int inst, int vap, int ep);
 #endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/Kconfig b/drivers/net/ethernet/lantiq/ppv4/Kconfig
index e0877d495..ad99d326d 100644
--- a/drivers/net/ethernet/lantiq/ppv4/Kconfig
+++ b/drivers/net/ethernet/lantiq/ppv4/Kconfig
@@ -10,3 +10,36 @@ config LTQ_PPV4
 	Turn on this option to enable BM and QOS driver which is a special hardware
 	present in the FALCONMX series of SoCs to manage the network buffers
 	in HW.
+
+# fallback if full driver is not included
+config LTQ_PPV4_QOS_SLIM
+	bool
+	depends on LTQ_PPV4 && !LTQ_PPV4_QOS
+	default y
+
+config LTQ_PPV4_QOS
+	tristate "QoS driver"
+	depends on LTQ_PPV4
+	---help---
+	  This is the driver for the PPv4 QoS hardware in Falcon Mountain.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called pp_qos_drv.
+
+config LTQ_PPV4_QOS_TEST
+	bool "Enable test code for QoS driver"
+	depends on LTQ_PPV4_QOS
+
+config LTQ_PPV4_BM_SLIM
+	bool
+	depends on LTQ_PPV4 && !LTQ_PPV4_BM
+	default y
+
+config LTQ_PPV4_BM
+	tristate "BM driver"
+	depends on LTQ_PPV4
+	---help---
+	  This is the driver for the PPv4 BM hardware in Falcon Mountain.
+	  To compile this driver as a module, choose M here. The module
+	  will be called pp_bm_drv.
diff --git a/drivers/net/ethernet/lantiq/ppv4/Makefile b/drivers/net/ethernet/lantiq/ppv4/Makefile
index c76330cdb..54a712a5f 100644
--- a/drivers/net/ethernet/lantiq/ppv4/Makefile
+++ b/drivers/net/ethernet/lantiq/ppv4/Makefile
@@ -1,6 +1,13 @@
 #
-# Makefile for BM driver.
+# Makefile for BM and QoS drivers.
 #
 
-obj-$(CONFIG_LTQ_PPV4) +=  bm_drv_slim.o qos_drv_slim.o
+# initial small drivers
+#obj-$(CONFIG_LTQ_PPV4) += bm_drv_slim.o
+obj-$(CONFIG_LTQ_PPV4_BM_SLIM) += bm_drv_slim.o
+obj-$(CONFIG_LTQ_PPV4_QOS_SLIM) += qos_drv_slim.o
 
+# full drivers
+obj-$(CONFIG_LTQ_PPV4_QOS) += qos/
+#obj-$(CONFIG_LTQ_PPV4) += bm/
+obj-$(CONFIG_LTQ_PPV4_BM) += bm/
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/Makefile b/drivers/net/ethernet/lantiq/ppv4/bm/Makefile
new file mode 100644
index 000000000..87cd75f19
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for PPv4 BM driver.
+#
+
+obj-$(CONFIG_LTQ_PPV4_BM) += ppv4_bm.o
+ppv4_bm-y	:= pp_bm_drv.o pp_bm_debugfs.o
+#ccflags-y	+= -Iinclude/net -DNO_FW -DPP_QOS_DISABLE_CMDQ
+ccflags-y	+= -Iinclude/net -DQOS_CPU_UC_SAME_ENDIANESS
+
+#ccflags-$(CONFIG_LTQ_PPV4_QOS_TEST) += -DPP_QOS_TEST
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
new file mode 100644
index 000000000..0f4b7570d
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
@@ -0,0 +1,316 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "pp_bm_drv_internal.h"
+
+#define PP_BM_DEBUGFS_DIR "ppv4_bm"
+
+//!< debugfs dir
+static struct dentry *bm_dir;
+
+static int bm_commands_read(void *data, u64 *val)
+{
+	pr_info("commands:\n");
+
+	pr_info("1. Init buffer manager\n");
+	pr_info("2. Allocate default buffer\n");
+	pr_info("3. De-Allocate default buffer\n");
+	pr_info("4. Print DB counters\n");
+	pr_info("5. Print HW counters\n");
+
+	*val = 0;
+	return 0;
+}
+
+void dump_db(struct bmgr_driver_private *pdata)
+{
+	u8	idx;
+	u8	idx2;
+	u32 num = 0;
+	u32 pool_id = 0;
+	u32 max_allowed = 0;
+	struct bmgr_policy_db_entry *policy;
+	struct bmgr_pool_db_entry *pool;
+	struct bmgr_group_db_entry	*group;
+	struct bmgr_pool_in_policy_info *pool_in_policy;
+
+	pr_info("=================================\n");
+	pr_info("====== BUFFER MANAGER DUMP ======\n");
+	pr_info("=================================\n");
+
+	// Dump all DB general counters
+	pr_info("======= GENERAL COUNTERS ========\n");
+	pr_info("Number of active pools:    %d\n",
+		pdata->driver_db.num_pools);
+	pr_info("Number of active groups:   %d\n",
+		pdata->driver_db.num_groups);
+	pr_info("Number of active policies: %d\n",
+		pdata->driver_db.num_policies);
+
+	// Dump all pools
+	pr_info("============= POOLS =============\n");
+	for (idx = 0; idx < PP_BMGR_MAX_POOLS; idx++) {
+		pool = &pdata->driver_db.pools[idx];
+
+		if (!pool->is_busy)
+			continue;
+
+		pr_info("Active pool %d:\n", idx);
+		pr_info("\t\tNum allocated buffers: %d\n",
+			pool->num_allocated_buffers);
+		pr_info("\t\tNum deallocated buffers: %d\n",
+			pool->num_deallocated_buffers);
+		pr_info("\t\tInternal table address: %x\n",
+			(phys_addr_t)pool->internal_pointers_tables);
+		pr_info("\t\tNum buffers: %d\n",
+			pool->pool_params.num_buffers);
+		pr_info("\t\tSize of buffer: %d\n",
+			pool->pool_params.size_of_buffer);
+		pr_info("\t\tGroup id: %d\n",
+			pool->pool_params.group_id);
+		pr_info("\t\tBase address low: 0x%x\n",
+			pool->pool_params.base_addr_low);
+		pr_info("\t\tBase address high: 0x%x\n",
+			pool->pool_params.base_addr_high);
+		pr_info("\t\tFlags: %d\n",
+			pool->pool_params.flags);
+	}
+
+	// Dump all groups
+	pr_info("============= GROUPS ============\n");
+	for (idx = 0; idx < PP_BMGR_MAX_GROUPS; idx++) {
+		group = &pdata->driver_db.groups[idx];
+
+		if (group->num_pools_in_group) {
+			pr_info("Active group %d:\n", idx);
+			pr_info("\t\tAvailable buffers: %d\n",
+				group->available_buffers);
+			pr_info("\t\tReserved buffers: %d\n",
+				group->reserved_buffers);
+			pr_info("\t\tPools in group:");
+			num = group->num_pools_in_group;
+			for (idx2 = 0; idx2 < num; idx2++) {
+				if (group->pools[idx2])
+					pr_info(" %d", idx2);
+			}
+			pr_info("\n");
+		}
+	}
+
+	// Dump all policies
+	pr_info("============= POLICIES ==========\n");
+	for (idx = 0; idx < PP_BMGR_MAX_POLICIES; idx++) {
+		policy = &pdata->driver_db.policies[idx];
+
+		if (!policy->is_busy)
+			continue;
+
+		pr_info("Active policy %d:\n", idx);
+		pr_info("\t\tNum allocated buffers: %d\n",
+			policy->num_allocated_buffers);
+		pr_info("\t\tNum deallocated buffers: %d\n",
+			policy->num_deallocated_buffers);
+		pr_info("\t\tMax allowed: %d\n",
+			policy->policy_params.max_allowed);
+		pr_info("\t\tMin guaranteed: %d\n",
+			policy->policy_params.min_guaranteed);
+		pr_info("\t\tGroup id: %d\n",
+			policy->policy_params.group_id);
+		pr_info("\t\tFlags: %d\n",
+			policy->policy_params.flags);
+		pr_info("\t\tPools in policy:");
+		num = policy->policy_params.num_pools_in_policy;
+		for (idx2 = 0; idx2 < num; idx2++) {
+			pool_in_policy =
+				&policy->policy_params.pools_in_policy[idx2];
+			pool_id = pool_in_policy->pool_id;
+			max_allowed = pool_in_policy->max_allowed;
+
+			pr_info("\t\t\t%d. id %d, max allowed %d\n",
+				idx2, pool_id, max_allowed);
+		}
+	}
+
+	pr_info("=================================\n");
+	pr_info("========== END OF DUMP ==========\n");
+	pr_info("=================================\n");
+}
+
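+/* last popped buffer, kept so that command 3 can push it back */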
+static struct bmgr_buff_info buff_info;
+
+static int bm_commands_write(void *data, u64 val)
+{
+	struct platform_device *pdev;
+	struct bmgr_driver_private *pdata;
+
+	pdev = data;
+	pdata = platform_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "command: %llu\n", val);
+
+	switch (val) {
+	case 1:
+		test_init_bmgr(&pdev->dev);
+		break;
+	case 2:
+		memset(&buff_info, 0x0, sizeof(buff_info));
+
+		bmgr_pop_buffer(&buff_info);
+
+		pr_info("pop buffer address 0x%x (high 0x%x) from policy %d pool %d\n",
+			buff_info.addr_low[0], buff_info.addr_high[0],
+			buff_info.policy_id, buff_info.pool_id[0]);
+		break;
+	case 3:
+		bmgr_push_buffer(&buff_info);
+
+		pr_info("push buffer address 0x%x (high 0x%x) from policy %d pool %d\n",
+			buff_info.addr_low[0], buff_info.addr_high[0],
+			buff_info.policy_id, buff_info.pool_id[0]);
+		break;
+	case 4:
+		dump_db(pdata);
+		break;
+	case 5:
+		print_hw_stats(0, 0, 0);
+		break;
+	default:
+		pr_info("unknown command\n");
+		break;
+	}
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(bm_commands_fops,
+			 bm_commands_read,
+			 bm_commands_write,
+			 "%llu\n");
+
+int bm_dbg_dev_init(struct platform_device *pdev)
+{
+	struct bmgr_driver_private *pdata;
+	struct dentry *dent;
+	int err;
+
+	if (!pdev) {
+		dev_err(&pdev->dev, "Invalid platform device\n");
+		return -ENODEV;
+	}
+
+	pdata = platform_get_drvdata(pdev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Invalid platform device data\n");
+		return -ENODEV;
+	}
+
+	pdata->debugfs_info.dir = bm_dir;
+
+	dent = debugfs_create_u32("reg_bar_offset", 0644,
+				  pdata->debugfs_info.dir,
+				  &pdata->debugfs_info.virt2phyoff);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = dent ? (int)PTR_ERR(dent) : -ENOMEM;
+		dev_err(&pdev->dev,
+			"debugfs_create_u32 failed creating reg_bar_offset with %d\n",
+			err);
+		return err;
+	}
+
+	dent = debugfs_create_u32("ddr_bar_offset", 0644,
+				  pdata->debugfs_info.dir,
+				  &pdata->debugfs_info.ddrvirt2phyoff);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = dent ? (int)PTR_ERR(dent) : -ENOMEM;
+		dev_err(&pdev->dev,
+			"debugfs_create_u32 failed creating ddr_bar_offset with %d\n",
+			err);
+		return err;
+	}
+
+	dent = debugfs_create_u32("fpga_bar_offset", 0644,
+				  pdata->debugfs_info.dir,
+				  &pdata->debugfs_info.fpga_offset);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int)PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_u32 failed creating fpga_offset with %d\n",
+			err);
+		return err;
+	}
+
+	dent = debugfs_create_file("commands",
+				   0644,
+				   pdata->debugfs_info.dir,
+				   pdev,
+				   &bm_commands_fops);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = dent ? (int)PTR_ERR(dent) : -ENOMEM;
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating commands with %d\n",
+			err);
+		return err;
+	}
+
+	return 0;
+}
+
+void bm_dbg_dev_clean(struct platform_device *pdev)
+{
+//	struct bmgr_driver_private *pdata;
+
+//	if (pdev) {
+//		pdata = platform_get_drvdata(pdev);
+//		if (pdata)
+//			debugfs_remove_recursive(pdata->dbg_info.dir);
+//	}
+}
+
+int bm_dbg_module_init(void)
+{
+	int rc;
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(PP_BM_DEBUGFS_DIR, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = dir ? (int)PTR_ERR(dir) : -ENOMEM;
+		pr_err("debugfs_create_dir failed with %d\n", rc);
+		return rc;
+	}
+
+	bm_dir = dir;
+
+	return 0;
+}
+
+void bm_dbg_module_clean(void)
+{
+	debugfs_remove_recursive(bm_dir); // NULL check occurs internally
+}
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
new file mode 100644
index 000000000..090cee508
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
@@ -0,0 +1,2086 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+
+//#define _FPGA_
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/dma-mapping.h>
+
+#include "pp_bm_drv.h"
+#include "pp_bm_drv_internal.h"
+#include "pp_bm_regs.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_AUTHOR("obakshe");
+MODULE_DESCRIPTION("Intel(R) PPv4 buffer_manager Driver");
+MODULE_VERSION(BMGR_DRIVER_VERSION);
+
+////////////////////
+//// PROTOTYPES ////
+////////////////////
+
+// Static function only. External function are declared in the h file
+static int buffer_manager_probe(struct platform_device *pdev);
+static int buffer_manager_remove(struct platform_device *pdev);
+
+//////////////////////////
+//// Global variables ////
+//////////////////////////
+
+#define RC_SUCCESS	(0)
+
+static const struct of_device_id bm_match[] = {
+	{ .compatible = "intel,falconmx-bm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bm_match);
+
+//!< Platform driver
+static struct platform_driver	g_bm_platform_driver = {
+	.driver = { .name = "buffer_manager", .of_match_table = bm_match, },
+	.probe = buffer_manager_probe,
+	.remove = buffer_manager_remove,
+};
+
+//!< global pointer for private database
+#ifdef _FPGA_
+	static struct bmgr_driver_private	real_driver_private;
+	static struct bmgr_driver_private	*this = &real_driver_private;
+#else
+	static struct bmgr_driver_private	*this;
+#endif
+
+void __iomem	*bm_config_addr_base;
+void __iomem	*bm_policy_mngr_addr_base;
+
+#define BM_BASE		(bm_config_addr_base)		// (0x18B00000)
+#define BM_RAM_BASE	(bm_policy_mngr_addr_base)	// (0x18B10000)
+
+#define IO_VIRT(phy_addr)	((phy_addr) + this->debugfs_info.virt2phyoff)
+#define DDR_VIRT(phy_addr)	((phy_addr) + this->debugfs_info.ddrvirt2phyoff)
+
+//#define RD_DDR32(addr)	(*(volatile u32 *)(DDR_VIRT(addr)))
+#define WR_DDR32(addr, var)	((*(volatile u32 *)(DDR_VIRT(addr))) = var)
+
+#define RD_REG_32(addr)		__raw_readl((volatile u32 *)(addr))
+#define WR_REG_32(addr, val)	__raw_writel((val), (volatile u32 *)(addr))
+//#define WR_REG_32(addr, var)	((*(volatile u32 *)(IO_VIRT(addr))) = var)
+//#define RD_REG_32(addr)	(*(volatile u32 *)(IO_VIRT(addr)))
+
+//////////////
+//// APIs ////
+//////////////
+
+/**************************************************************************
+ *! \fn	bmgr_db_lock
+ **************************************************************************
+ *
+ *  \brief	This function locks the DB in a recursion-save manner
+ *
+ *  \return	void
+ *
+ **************************************************************************/
+static inline void bmgr_db_lock(void)
+{
+	spin_lock_bh(&this->driver_db.db_lock);
+}
+
+/**************************************************************************
+ *! \fn	bmgr_db_unlock
+ **************************************************************************
+ *
+ *  \brief	This function unlocks the DB in a recursion-save manner
+ *
+ *  \return	void
+ *
+ **************************************************************************/
+static inline void bmgr_db_unlock(void)
+{
+	spin_unlock_bh(&this->driver_db.db_lock);
+}
+
+/**************************************************************************
+ *! \fn	buffer_manager_db_init
+ **************************************************************************
+ *
+ *  \brief	Initialize the DB
+ *
+ *  \param	priv:	buffer_manager private data pointer
+ *
+ *  \return	0 on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 buffer_manager_db_init(struct bmgr_driver_private *priv)
+{
+	struct bmgr_driver_db	*db = &priv->driver_db;
+
+	if (!db) {
+		pr_err("buffer_manager_db_init(): db was not initialized successfuly!\n");
+		return -EINVAL;
+	}
+
+	memset(db, 0, sizeof(struct bmgr_driver_db));
+
+	spin_lock_init(&db->db_lock);
+
+	pr_info("buffer_manager_db_init(): db was initialized successfuly\n");
+
+	return 0;
+}
+
+/**************************************************************************
+ *! \fn	buffer_manager_probe
+ **************************************************************************
+ *
+ *  \brief	probe platform device hook
+ *
+ *  \param	pdev:	platform device pointer
+ *
+ *  \return	0 on success, other error code on failure
+ *
+ **************************************************************************/
+static int buffer_manager_probe(struct platform_device *pdev)
+{
+	int							ret;
+	struct bmgr_driver_private	*priv;
+	struct resource				*res[2];
+	int							i;
+
+#ifdef _FPGA_
+	priv = this;
+#else
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+#endif
+
+	platform_set_drvdata(pdev, priv);
+	priv->pdev = pdev;
+	priv->enabled = true;
+	this = priv;
+
+	ret = buffer_manager_db_init(priv);
+	if (ret)
+		return ret;	/* priv is devm-managed; no kfree needed */
+
+	/* load the memory ranges */
+	for (i = 0; i < 2; i++) {
+		res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res[i]) {
+			pr_err("failed to get resources %d\n", i);
+			return -ENOENT;
+		}
+	}
+
+	bm_config_addr_base = devm_ioremap_resource(&pdev->dev, res[0]);
+	bm_policy_mngr_addr_base = devm_ioremap_resource(&pdev->dev, res[1]);
+
+	if (IS_ERR(bm_config_addr_base) || IS_ERR(bm_policy_mngr_addr_base)) {
+		pr_err("failed to request and remap io ranges\n");
+		return -ENOMEM;
+	}
+
+	bm_dbg_dev_init(pdev);
+
+	return 0;
+}
+
+/**************************************************************************
+ *! \fn	buffer_manager_remove
+ **************************************************************************
+ *
+ *  \brief	remove platform device hook
+ *
+ *  \param	pdev:	platform device pointer
+ *
+ *  \return	0 on success, other error code on failure
+ *
+ **************************************************************************/
+static int buffer_manager_remove(struct platform_device *pdev)
+{
+	struct bmgr_driver_private	*priv = platform_get_drvdata(pdev);
+
+	priv->enabled = 0;
+
+	/* priv is devm-managed; do not kfree it here */
+	platform_set_drvdata(pdev, NULL);
+	this = NULL;
+
+	bm_dbg_dev_clean(pdev);
+
+	pr_info("buffer_manager_remove(): remove done\n");
+
+	return 0;
+}
+
+/**************************************************************************
+ *! \fn	buffer_manager_driver_init
+ **************************************************************************
+ *
+ *  \brief	Init platform device driver
+ *
+ *  \return	0 on success, other error code on failure
+ *
+ **************************************************************************/
+static int __init buffer_manager_driver_init(void)
+{
+	int ret;
+
+	bm_dbg_module_init();
+
+	ret = platform_driver_register(&g_bm_platform_driver);
+	if (ret < 0) {
+		pr_err("platform_driver_register failed (%d)\n", ret);
+		return ret;
+	}
+
+	pr_info("buffer manager driver init done\n");
+
+	return 0;
+}
+
+/**************************************************************************
+ *! \fn	buffer_manager_driver_exit
+ **************************************************************************
+ *
+ *  \brief	Exit platform device driver
+ *
+ *  \return	0 on success, other error code on failure
+ *
+ **************************************************************************/
+static void __exit buffer_manager_driver_exit(void)
+{
+	platform_driver_unregister(&g_bm_platform_driver);
+
+	bm_dbg_module_clean();
+
+	pr_info("buffer manager driver exit done\n");
+}
+
+/*************************************************/
+/**		Module Declarations		**/
+/*************************************************/
+//module_init(buffer_manager_driver_init);
+arch_initcall(buffer_manager_driver_init);
+module_exit(buffer_manager_driver_exit);
+
+void copy_dma(u32 src, u32 dst, u32 flags)
+{
+	u32	BASE_PXP_QOS_IP_ADDRESS = 0; //0x18800000;
+	u32 MCDMA0_BASE_ADDR = BASE_PXP_QOS_IP_ADDRESS + 0x50000;
+	u32 MCDMA_SRC_OFFSET = 0;
+	u32 MCDMA_DST_OFFSET = 0x4;
+	u32 MCDMA_CONTROL_OFFSET = 0x8;
+	u32 mcdma_channel = 3;
+	u32 active_bit = (1 << 30);
+	u32 pxp_offset = 0;
+	struct timespec start_ts;
+	struct timespec end_ts;
+	u32 DMA0_SRC_ADDR = MCDMA0_BASE_ADDR +
+		MCDMA_SRC_OFFSET + 0x40 * mcdma_channel;
+	u32 DMA0_DST_ADDR = MCDMA0_BASE_ADDR +
+		MCDMA_DST_OFFSET + 0x40 * mcdma_channel;
+	u32 DMA0_CTRL_ADDR = MCDMA0_BASE_ADDR +
+		MCDMA_CONTROL_OFFSET + 0x40 * mcdma_channel;
+
+	WR_REG_32(DMA0_SRC_ADDR, src - pxp_offset); //source address
+	WR_REG_32(DMA0_DST_ADDR, dst - pxp_offset); //destination address
+	pr_info("Copying from ADDR 0x%x to ADDR 0x%x\n",
+		src - pxp_offset, dst - pxp_offset);
+	// 8 bytes burst, total size is 8 byte which is 64 bits
+	WR_REG_32(DMA0_CTRL_ADDR, flags);
+	pr_info("SLEEP\n");
+	msleep(100);
+
+	getnstimeofday(&start_ts);
+
+	// wait for Active bit 30 will be 0
+	while ((RD_REG_32(DMA0_CTRL_ADDR) & active_bit) != 0) {
+		getnstimeofday(&end_ts);
+		if ((timespec_sub(end_ts, start_ts).tv_sec) > 5) {
+			pr_err("!!!!!!! DMA TIMEOUT !!!!!!!\n");
+			return;
+		}
+	}
+
+	msleep(100);
+	pr_info("Copying DONE (control 0x%x)\n", RD_REG_32(DMA0_CTRL_ADDR));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_wait_for_init_completion
+ **************************************************************************
+ *
+ *  \brief	This function reads the init bit and waits for completion
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_wait_for_init_completion(void)
+{
+	u32	st;
+	struct timespec start_ts;
+	struct timespec end_ts;
+
+	pr_info("Waiting for operation complete....");
+
+	getnstimeofday(&start_ts);
+
+	do {
+		getnstimeofday(&end_ts);
+		if ((timespec_sub(end_ts, start_ts).tv_sec) > 5) {
+			pr_err("!!!!!!! WAIT COMPLETETION TIMEOUT !!!!!!!\n");
+			return -EINVAL;
+		}
+		st = (RD_REG_32(BMGR_STATUS_REG_ADDR(BM_BASE)) & (1));
+	} while (st);
+
+	pr_info("Done\n");
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_is_pool_params_valid
+ **************************************************************************
+ *
+ *  \brief	Check whether pool parameters are valid
+ *
+ *  \param	pool_params:	Pool param from user
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_is_pool_params_valid(
+		const struct bmgr_pool_params * const pool_params)
+{
+	// Validity check
+	if (!pool_params) {
+		pr_err("bmgr_is_pool_params_valid: pool_params is NULL\n");
+		return -EINVAL;
+	}
+
+	if (pool_params->size_of_buffer < BMGR_MIN_POOL_BUFFER_SIZE) {
+		pr_err("bmgr_is_pool_params_valid: size_of_buffer %d should be smaller than %d\n",
+		       pool_params->size_of_buffer,
+		       BMGR_MIN_POOL_BUFFER_SIZE);
+		return -EINVAL;
+	}
+
+	if (pool_params->size_of_buffer % BMGR_MIN_POOL_BUFFER_SIZE) {
+		pr_err("bmgr_is_pool_params_valid: size_of_buffer %d must be aligned to %d bytes\n",
+		       pool_params->size_of_buffer,
+		       BMGR_MIN_POOL_BUFFER_SIZE);
+		return -EINVAL;
+	}
+
+	if (pool_params->base_addr_low & 0x3F) {
+		pr_err("bmgr_is_pool_params_valid: base_addr_low %d must be aligned to %d bytes\n",
+		       pool_params->base_addr_low,
+		       BMGR_MIN_POOL_BUFFER_SIZE);
+		return -EINVAL;
+	}
+
+	// Num_buffers can be up to 2^24
+	if (pool_params->num_buffers >= 0x1000000) {
+		pr_err("Number of buffers can be up to 0x1000000\n");
+		return -EINVAL;
+	}
+
+	if (pool_params->group_id >= PP_BMGR_MAX_GROUPS) {
+		pr_err("bmgr_is_pool_params_valid: group_id %d must be smaller than %d\n",
+		       pool_params->group_id, PP_BMGR_MAX_GROUPS);
+		return -EINVAL;
+	}
+
+	return RC_SUCCESS;
+}
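+
+/* Example (sketch): num_buffers = 1024, size_of_buffer = 2048, group_id = 0
+ * passes the checks above, provided 2048 is a multiple of
+ * BMGR_MIN_POOL_BUFFER_SIZE and base_addr_low is 64-byte aligned.
+ */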
+
+/**************************************************************************
+ *! \fn	bmgr_is_policy_params_valid
+ **************************************************************************
+ *
+ *  \brief	Check whether policy parameters are valid
+ *
+ *  \param	policy_params:	Policy param from user
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_is_policy_params_valid(
+		const struct bmgr_policy_params * const policy_params)
+{
+	if (!policy_params) {
+		pr_err("bmgr_is_policy_params_valid: policy_params is NULL\n");
+		return -EINVAL;
+	}
+
+	if (policy_params->group_id >= PP_BMGR_MAX_GROUPS) {
+		pr_err("group_id %d >= %d\n",
+		       policy_params->group_id, PP_BMGR_MAX_GROUPS);
+		return -EINVAL;
+	}
+
+	if (policy_params->num_pools_in_policy > PP_BMGR_MAX_POOLS_IN_POLICY) {
+		pr_err("num_pools_in_policy %d should be up to %d\n",
+		       policy_params->num_pools_in_policy,
+		       PP_BMGR_MAX_POOLS_IN_POLICY);
+		return -EINVAL;
+	}
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_buffer_manager_control
+ **************************************************************************
+ *
+ *  \brief	Sets the control register
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_buffer_manager_control(void)
+{
+	// Buffer manager client enable
+	WR_REG_32(BMGR_CTRL_REG_ADDR(BM_BASE), 0x1);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_enable_min_guarantee_per_pool
+ **************************************************************************
+ *
+ *  \brief	Enables/Disables minimum guarantee per pool
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	enable:		True to enable, false to disable
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_enable_min_guarantee_per_pool(u8 pool_id, u8 enable)
+{
+	u32	value;
+	u32	mask;
+
+	// Validity check
+	if (pool_id >= PP_BMGR_MAX_POOLS) {
+		pr_err("pool_id %d out of range %d\n",
+		       pool_id, PP_BMGR_MAX_POOLS);
+		return -EINVAL;
+	}
+
+	// Read value
+	value = RD_REG_32(BMGR_POOL_MIN_GRNT_MASK_REG_ADDR(BM_BASE));
+	mask = BIT(pool_id);
+
+	if (enable) {
+		value |= mask;
+	} else {
+		mask = ~mask;
+		value &= mask;
+	}
+
+	WR_REG_32(BMGR_POOL_MIN_GRNT_MASK_REG_ADDR(BM_BASE), value);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_pool_enable
+ **************************************************************************
+ *
+ *  \brief	Enable pool
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	enable:		True to enable, false to disable
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_pool_enable(u8 pool_id, bool enable)
+{
+	u32	value;
+	u32	mask;
+
+	// Validity check
+	if (pool_id >= PP_BMGR_MAX_POOLS) {
+		pr_err("pool_id %d out of range %d\n",
+		       pool_id, PP_BMGR_MAX_POOLS);
+		return -EINVAL;
+	}
+
+	// Read value
+	value = RD_REG_32(BMGR_POOL_ENABLE_REG_ADDR(BM_BASE));
+	mask = (BIT(pool_id) << 16) | BIT(pool_id);
+
+	if (enable) {
+		// Pool Enable
+		value |= mask;
+	} else {
+		// Pool disable
+		mask = ~mask;
+		value &= mask;
+	}
+
+	WR_REG_32(BMGR_POOL_ENABLE_REG_ADDR(BM_BASE), value);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_pool_reset_fifo
+ **************************************************************************
+ *
+ *  \brief	Resets the pool fifo
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_pool_reset_fifo(u8 pool_id)
+{
+	u32	reset_reg;
+
+	// Validity check
+	if (pool_id >= PP_BMGR_MAX_POOLS) {
+		pr_err("pool_id %d out of range %d\n",
+		       pool_id, PP_BMGR_MAX_POOLS);
+		return -EINVAL;
+	}
+
+	reset_reg = RD_REG_32(BMGR_POOL_FIFO_RESET_REG_ADDR(BM_BASE));
+
+	// Set the reset bit to 0
+	reset_reg &= ~BIT(pool_id);
+
+	// Pool FIFO Reset
+	WR_REG_32(BMGR_POOL_FIFO_RESET_REG_ADDR(BM_BASE), reset_reg);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_configure_ocp_master
+ **************************************************************************
+ *
+ *  \brief	Configures the burst size and number of bursts
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_configure_ocp_master(void)
+{
+	// OCP Master burst size
+	// 64B burst for all pools
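+	// encoding per pool: 0 = 64B, 1 = 128B, 2 = 256B, 3 = 512B
+	// (same mapping as in bmgr_set_pool_size)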
+	WR_REG_32(BMGR_OCPM_BURST_SIZE_REG_ADDR(BM_BASE), 0);
+
+	// OCP Master number of bursts
+	// 1 burst for all pools
+	WR_REG_32(BMGR_OCPM_NUM_OF_BURSTS_REG_ADDR(BM_BASE), 0);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_ocp_burst_size
+ **************************************************************************
+ *
+ *  \brief	Gets burst size
+ *
+ *  \return	OCP master burst-size register value
+ *
+ **************************************************************************/
+static u32 bmgr_get_ocp_burst_size(void)
+{
+	return RD_REG_32(BMGR_OCPM_BURST_SIZE_REG_ADDR(BM_BASE));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pool_size
+ **************************************************************************
+ *
+ *  \brief	Sets pool size
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	num_buffers:	Number of buffers in pool
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pool_size(u8 pool_id, u32 num_buffers)
+{
+	u32	burst_size = 0;
+	u32	reg = 0;
+	u32	mask = 3 << (2 * pool_id);
+
+	if (num_buffers > 128) {
+		burst_size = 3; // 512B
+	} else if (num_buffers > 64) {
+		burst_size = 2; // 256B
+	} else if (num_buffers > 32) {
+		burst_size = 1; // 128B
+	} else if (num_buffers > 16) {
+		burst_size = 0; // 64B
+	} else {
+		pr_err("bmgr_set_pool_size(): minimum valid num_buffers (%d) is 16\n",
+		       num_buffers);
+		return -EINVAL;
+	}
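+	/* e.g. num_buffers = 128 selects burst_size = 2 (256B) and must then
+	 * be a multiple of 1 << (4 + 2) = 64, which holds; num_buffers = 100
+	 * picks the same burst size but fails the multiple-of-64 check below
+	 */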
+
+	// num_buffers x pointer size must be a multiple
+	// of the burst size in bytes
+	if ((num_buffers % (1 << (4 + burst_size))) != 0) {
+		pr_err("bmgr_set_pool_size(): num_buffers %d must be a multiple of %d\n",
+		       num_buffers, 1 << (4 + burst_size));
+		return -EINVAL;
+	}
+
+	reg = RD_REG_32(BMGR_OCPM_BURST_SIZE_REG_ADDR(BM_BASE));
+	burst_size <<= (2 * pool_id);
+
+	reg &= ~(mask);
+	reg |= burst_size;
+
+	// OCP Master burst size
+	WR_REG_32(BMGR_OCPM_BURST_SIZE_REG_ADDR(BM_BASE), reg);
+
+	// Sets number of buffers in pools
+	WR_REG_32(BMGR_POOL_SIZE_REG_ADDR(BM_BASE, pool_id), num_buffers);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_size
+ **************************************************************************
+ *
+ *  \brief	Returns the number of buffers in pool
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Number of buffers in pool
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_size(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_SIZE_REG_ADDR(BM_BASE, pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_group_available_buffers
+ **************************************************************************
+ *
+ *  \brief  Sets the available buffers in group.
+ *          This is used for better HW performance
+ *
+ *  \param	group_id:		Group ID
+ *  \param	available_buffers:	Available buffers in group
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_group_available_buffers(u8 group_id,
+					    u32 available_buffers)
+{
+	WR_REG_32(BMGR_GROUP_AVAILABLE_BUFF_REG_ADDR(BM_BASE, group_id),
+		  available_buffers);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_group_available_buffers
+ **************************************************************************
+ *
+ *  \brief	Returns the available buffers in group
+ *
+ *  \param	group_id:	Group ID
+ *
+ *  \return	Available buffers in group
+ *
+ **************************************************************************/
+static u32 bmgr_get_group_available_buffers(u8 group_id)
+{
+	return RD_REG_32(BMGR_GROUP_AVAILABLE_BUFF_REG_ADDR(BM_BASE,
+			 group_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_group_reserved_buffers
+ **************************************************************************
+ *
+ *  \brief	Sets the number of reserved buffers in group
+ *
+ *  \param	group_id:		Group ID
+ *  \param	reserved_buffers:	reserved buffers in group
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_group_reserved_buffers(u8 group_id, u32 reserved_buffers)
+{
+	WR_REG_32(BMGR_GROUP_RESERVED_BUFF_REG_ADDR(BM_BASE, group_id),
+		  reserved_buffers);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_group_reserved_buffers
+ **************************************************************************
+ *
+ *  \brief	Returns the number of reserved buffers in group
+ *
+ *  \param	group_id:	Group ID
+ *
+ *  \return	Number of reserved buffers in group
+ *
+ **************************************************************************/
+static u32 bmgr_get_group_reserved_buffers(u8 group_id)
+{
+	return RD_REG_32(BMGR_GROUP_RESERVED_BUFF_REG_ADDR(BM_BASE,
+			 group_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pcu_fifo_base_address
+ **************************************************************************
+ *
+ *  \brief	Sets the pcu fifo base address (In SRAM)
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	base_address:	PCU Fifo base address
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pcu_fifo_base_address(u8 pool_id, u32 base_address)
+{
+	WR_REG_32(BMGR_POOL_PCU_FIFO_BASE_ADDR_REG_ADDR(BM_BASE, pool_id),
+		  base_address);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pcu_fifo_base_address
+ **************************************************************************
+ *
+ *  \brief	Returns the pcu fifo base address (In SRAM)
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	PCU Fifo base address
+ *
+ **************************************************************************/
+static u32 bmgr_get_pcu_fifo_base_address(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PCU_FIFO_BASE_ADDR_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pcu_fifo_size
+ **************************************************************************
+ *
+ *  \brief	Sets the PCU fifo size
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	fifo_size:	PCU Fifo size
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pcu_fifo_size(u8 pool_id, u32 fifo_size)
+{
+	WR_REG_32(BMGR_POOL_PCU_FIFO_SIZE_REG_ADDR(BM_BASE, pool_id),
+		  fifo_size);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pcu_fifo_size
+ **************************************************************************
+ *
+ *  \brief	Returns the PCU fifo size
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	PCU Fifo size
+ *
+ **************************************************************************/
+static u32 bmgr_get_pcu_fifo_size(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PCU_FIFO_SIZE_REG_ADDR(BM_BASE, pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pcu_fifo_occupancy
+ **************************************************************************
+ *
+ *  \brief	Sets the PCU fifo occupancy
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	fifo_occupancy:	Occupancy of the pool
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pcu_fifo_occupancy(u8 pool_id, u32 fifo_occupancy)
+{
+	WR_REG_32(BMGR_POOL_PCU_FIFO_OCCUPANCY_REG_ADDR(BM_BASE, pool_id),
+		  fifo_occupancy);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pcu_fifo_occupancy
+ **************************************************************************
+ *
+ *  \brief	Returns the PCU fifo occupancy
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Occupancy of the pool
+ *
+ **************************************************************************/
+static u32 bmgr_get_pcu_fifo_occupancy(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PCU_FIFO_OCCUPANCY_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pcu_fifo_prog_empty
+ **************************************************************************
+ *
+ *  \brief	Sets the PCU fifo empty threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	threshold:	PCU fifo empty threshold
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pcu_fifo_prog_empty(u8 pool_id, u32 threshold)
+{
+	WR_REG_32(BMGR_POOL_PCU_FIFO_PROG_EMPTY_REG_ADDR(BM_BASE, pool_id),
+		  threshold);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pcu_fifo_prog_empty
+ **************************************************************************
+ *
+ *  \brief	Returns the PCU fifo empty threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	PCU fifo empty threshold
+ *
+ **************************************************************************/
+static u32 bmgr_get_pcu_fifo_prog_empty(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PCU_FIFO_PROG_EMPTY_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pcu_fifo_prog_full
+ **************************************************************************
+ *
+ *  \brief	Sets the PCU fifo full threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	threshold:	PCU fifo full threshold
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pcu_fifo_prog_full(u8 pool_id, u32 threshold)
+{
+	WR_REG_32(BMGR_POOL_PCU_FIFO_PROG_FULL_REG_ADDR(BM_BASE, pool_id),
+		  threshold);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pcu_fifo_prog_full
+ **************************************************************************
+ *
+ *  \brief	Returns the PCU fifo prog full threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	PCU fifo full threshold
+ *
+ **************************************************************************/
+static u32 bmgr_get_pcu_fifo_prog_full(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PCU_FIFO_PROG_FULL_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_ext_fifo_base_addr_low
+ **************************************************************************
+ *
+ *  \brief	Sets the external fifo base low address
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	low_address:	External fifo base low address
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_ext_fifo_base_addr_low(u8 pool_id, u32 low_address)
+{
+	WR_REG_32(BMGR_POOL_EXT_FIFO_BASE_ADDR_LOW_REG_ADDR(BM_BASE, pool_id),
+		  low_address);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_ext_fifo_base_addr_low
+ **************************************************************************
+ *
+ *  \brief	Returns the external fifo base low address
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	External fifo base low address
+ *
+ **************************************************************************/
+static u32 bmgr_get_ext_fifo_base_addr_low(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_EXT_FIFO_BASE_ADDR_LOW_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_ext_fifo_base_addr_high
+ **************************************************************************
+ *
+ *  \brief	Sets the external fifo base high address
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	high_address:	External fifo base high address
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_ext_fifo_base_addr_high(u8 pool_id, u32 high_address)
+{
+	WR_REG_32(BMGR_POOL_EXT_FIFO_BASE_ADDR_HIGH_REG_ADDR(BM_BASE, pool_id),
+		  high_address);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_ext_fifo_base_addr_high
+ **************************************************************************
+ *
+ *  \brief	Returns the external fifo base high address
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	External fifo base high address
+ *
+ **************************************************************************/
+static u32 bmgr_get_ext_fifo_base_addr_high(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_EXT_FIFO_BASE_ADDR_HIGH_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_ext_fifo_occupancy
+ **************************************************************************
+ *
+ *  \brief	Sets external fifo occupancy
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	occupancy:	External fifo occupancy
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_ext_fifo_occupancy(u8 pool_id, u32 occupancy)
+{
+	WR_REG_32(BMGR_POOL_EXT_FIFO_OCC_REG_ADDR(BM_BASE, pool_id),
+		  occupancy);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_ext_fifo_occupancy
+ **************************************************************************
+ *
+ *  \brief	Returns the external fifo occupancy
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	External fifo occupancy
+ *
+ **************************************************************************/
+static u32 bmgr_get_ext_fifo_occupancy(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_EXT_FIFO_OCC_REG_ADDR(BM_BASE, pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_allocated_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the number of allocated buffers in pool
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Number of allocated buffers
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_allocated_counter(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_ALLOCATED_COUNTER_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_pop_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the number of pop operations on pool
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Number of pop operations
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_pop_counter(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_POP_COUNTER_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_push_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the number of push operations on pool
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Number of push operations
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_push_counter(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_PUSH_COUNTER_REG_ADDR(BM_BASE, pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_burst_write_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the burst write counter
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Burst write counter
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_burst_write_counter(u8 pool_id)
+{
+	return RD_REG_32(BMGR_DDR_BURST_WRITE_COUNTER_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_burst_read_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the burst read counter
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Burst read counter
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_burst_read_counter(u8 pool_id)
+{
+	return RD_REG_32(BMGR_DDR_BURST_READ_COUNTER_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_pool_watermark_low_threshold
+ **************************************************************************
+ *
+ *  \brief	Sets the watermark low threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *  \param	threshold:	low threshold
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_pool_watermark_low_threshold(u8 pool_id, u32 threshold)
+{
+	WR_REG_32(BMGR_POOL_WATERMARK_LOW_THRESHOLD_REG_ADDR(BM_BASE, pool_id),
+		  threshold);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_pool_watermark_low_threshold
+ **************************************************************************
+ *
+ *  \brief	Returns the watermark low threshold
+ *
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Low threshold
+ *
+ **************************************************************************/
+static u32 bmgr_get_pool_watermark_low_threshold(u8 pool_id)
+{
+	return RD_REG_32(BMGR_POOL_WATERMARK_LOW_THRESHOLD_REG_ADDR(BM_BASE,
+			 pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_null_counter
+ **************************************************************************
+ *
+ *  \brief	Returns the policy null counter
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	Policy null counter
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_null_counter(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_NULL_COUNTER_REG_ADDR(BM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_policy_max_allowed_per_policy
+ **************************************************************************
+ *
+ *  \brief	Sets policy max allowed buffers per policy
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	max_allowed:	Max allowed per this policy
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_policy_max_allowed_per_policy(u8 policy_id,
+						  u32 max_allowed)
+{
+	WR_REG_32(BMGR_POLICY_MAX_ALLOWED_ADDR(BM_RAM_BASE, policy_id),
+		  max_allowed);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_max_allowed_per_policy
+ **************************************************************************
+ *
+ *  \brief	Returns the policy max allowed buffers per policy
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	Max allowed per this policy
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_max_allowed_per_policy(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_MAX_ALLOWED_ADDR(BM_RAM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_policy_min_guaranteed_per_policy
+ **************************************************************************
+ *
+ *  \brief	Sets minimum guaranteed per policy
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	min_guaranteed:	Minimum guaranteed per policy
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_policy_min_guaranteed_per_policy(u8 policy_id,
+						     u32 min_guaranteed)
+{
+	WR_REG_32(BMGR_POLICY_MIN_GUARANTEED_ADDR(BM_RAM_BASE, policy_id),
+		  min_guaranteed);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_min_guaranteed_per_policy
+ **************************************************************************
+ *
+ *  \brief	Returns minimum guaranteed per policy
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	Minimum guaranteed per policy
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_min_guaranteed_per_policy(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_MIN_GUARANTEED_ADDR(BM_RAM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_policy_group_association
+ **************************************************************************
+ *
+ *  \brief	Maps group to policy
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	group_id:	Group ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_policy_group_association(u8 policy_id, u8 group_id)
+{
+	WR_REG_32(BMGR_POLICY_GROUP_ASSOCIATED_ADDR(BM_RAM_BASE, policy_id),
+		  (u32)group_id);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_group_association
+ **************************************************************************
+ *
+ *  \brief	Returns the group mapped to this policy
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	The mapped group ID
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_group_association(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_GROUP_ASSOCIATED_ADDR(BM_RAM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_policy_pool_mapping
+ **************************************************************************
+ *
+ *  \brief	Maps pool to policy (With priority)
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	pool_id:	Pool ID
+ *  \param	priority:	Priority on a 0 (highest) to 3 (lowest) scale
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_policy_pool_mapping(u8 policy_id, u8 pool_id, u8 priority)
+{
+	u32	value;
+	u32	mask;
+
+	if (priority >= bmgr_policy_pool_priority_max) {
+		pr_err("priority %d exceeds the maximum allowed %d\n",
+		       priority, bmgr_policy_pool_priority_max - 1);
+		return -EINVAL;
+	}
+
+	value = RD_REG_32(BMGR_POLICY_POOLS_MAPPING_ADDR(BM_RAM_BASE,
+							 policy_id));
+
+	// Clear relevant byte
+	mask = 0xFF << (8 * priority);
+	value &= ~mask;
+
+	// Prepare the value to be written
+	value |= (pool_id << (8 * priority));
+
+	WR_REG_32(BMGR_POLICY_POOLS_MAPPING_ADDR(BM_RAM_BASE, policy_id),
+		  value);
+
+	return RC_SUCCESS;
+}
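+
+/*
+ * Layout sketch (an assumption inferred from the byte masking above, not
+ * a documented register description): the pools mapping register holds one
+ * pool ID per priority byte. For example, after
+ *	bmgr_set_policy_pool_mapping(0, 2, bmgr_policy_pool_priority_high);
+ *	bmgr_set_policy_pool_mapping(0, 3, bmgr_policy_pool_priority_mid_high);
+ * the register reads 0x0302: pool 2 in bits [7:0] (priority 0) and
+ * pool 3 in bits [15:8] (priority 1).
+ */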
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_pools_mapping
+ **************************************************************************
+ *
+ *  \brief	Returns the pools mapped to this policy
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	The pools mapping register value (one pool ID per priority byte)
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_pools_mapping(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_POOLS_MAPPING_ADDR(BM_RAM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_set_policy_max_allowed_per_pool
+ **************************************************************************
+ *
+ *  \brief	Sets policy max allowed per pool
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	pool_id:	Pool ID
+ *  \param	max_allowed:	Max allowed for this pool
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+static s32 bmgr_set_policy_max_allowed_per_pool(u8 policy_id,
+						u8 pool_id,
+						u32 max_allowed)
+{
+	WR_REG_32(BMGR_POLICY_MAX_ALLOWED_PER_POOL_ADDR(BM_RAM_BASE,
+							policy_id,
+							pool_id),
+		  max_allowed);
+
+	return RC_SUCCESS;
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_max_allowed_per_pool
+ **************************************************************************
+ *
+ *  \brief	Returns the max allowed per pool
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Max allowed for this pool
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_max_allowed_per_pool(u8 policy_id, u8 pool_id)
+{
+	return RD_REG_32(BMGR_POLICY_MAX_ALLOWED_PER_POOL_ADDR(BM_RAM_BASE,
+							       policy_id,
+							       pool_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_number_of_allocated_buffers
+ **************************************************************************
+ *
+ *  \brief	Returns the number of allocated buffer in this policy
+ *
+ *  \param	policy_id:	Policy ID
+ *
+ *  \return	Number of allocated buffer in this policy
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_number_of_allocated_buffers(u8 policy_id)
+{
+	return RD_REG_32(BMGR_POLICY_ALLOC_BUFF_COUNTER_ADDR(BM_RAM_BASE,
+			 policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_get_policy_number_of_allocated_buffers_per_pool
+ **************************************************************************
+ *
+ *  \brief	Returns the number of allocated buffers per pool in this policy
+ *
+ *  \param	policy_id:	Policy ID
+ *  \param	pool_id:	Pool ID
+ *
+ *  \return	Number of allocated buffers per pool in this policy
+ *
+ **************************************************************************/
+static u32 bmgr_get_policy_number_of_allocated_buffers_per_pool(u8 policy_id,
+								u8 pool_id)
+{
+	return RD_REG_32(BMGR_POLICY_ALLOC_BUFF_PER_POOL_COUNTER_ADDR(
+			 BM_RAM_BASE, pool_id, policy_id));
+}
+
+/**************************************************************************
+ *! \fn	bmgr_pop_buffer
+ **************************************************************************
+ *
+ *  \brief	Pops a buffer from the buffer manager
+ *
+ *  \param	buff_info:	Buffer information
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_pop_buffer(struct bmgr_buff_info * const buff_info)
+{
+	u32	address = BMGR_DATAPATH_BASE;
+	//u32	dst_addr = 0x600000;
+	u32	index = 0;
+	u32	low = 0;
+	u32	high = 0;
+	u32	ptr2pop[2 * PP_BMGR_MAX_BURST_IN_POP]; // 64 bit per allocation
+
+	if (!buff_info) {
+		pr_err("bmgr_pop_buffer: buff_info is NULL\n");
+		return -EINVAL;
+	}
+
+	if (buff_info->num_allocs > PP_BMGR_MAX_BURST_IN_POP) {
+		pr_err("bmgr_pop_buffer: num_allocs %d is limited with %d\n",
+		       buff_info->num_allocs, PP_BMGR_MAX_BURST_IN_POP);
+		return -EINVAL;
+	}
+
+	// Write the Single/Burst bit
+	if (buff_info->num_allocs > 1)
+		address |= BIT(16);
+
+	// Write the Policy
+	address |= (buff_info->policy_id << 8);
+
+	copy_dma(address, (u32)&ptr2pop[0]/*dst_addr*/,
+		 (0x80100000 | (8 * buff_info->num_allocs))/*0x80100008*/);
+
+	for (index = 0; index < buff_info->num_allocs; index++) {
+		low = ptr2pop[2 * index]; // RD_DDR32(dst_addr + 8 * index);
+		high = ptr2pop[2 * index + 1]; // RD_DDR32(dst_addr + 4 + 8 * index);
+
+		pr_info("POP ======> 0x%x ; 0x%x\n", low, high);
+
+		buff_info->addr_low[index] = low;
+		buff_info->addr_high[index] = high & 0xF;
+		buff_info->pool_id[index] = (high & 0xFF000000) >> 24;
+
+		pr_info("bmgr_pop_buffer: ---> pop buffer from address %p (pool %d, addr low %p, addr high %p)\n",
+			(void *)address, buff_info->pool_id[index],
+			(void *)buff_info->addr_low[index],
+			(void *)buff_info->addr_high[index]);
+
+		if (buff_info->pool_id[index] >= PP_BMGR_MAX_POOLS) {
+			// Account only for the buffers popped so far
+			this->driver_db.policies[buff_info->policy_id].
+				num_allocated_buffers += index;
+			pr_info("Can't pop from policy %d\n",
+				buff_info->policy_id);
+			return -ENOSPC;
+		}
+
+		this->driver_db.pools[buff_info->pool_id[index]].
+			num_allocated_buffers++;
+	}
+
+	this->driver_db.policies[buff_info->policy_id].
+		num_allocated_buffers += buff_info->num_allocs;
+
+	return RC_SUCCESS;
+}
+EXPORT_SYMBOL(bmgr_pop_buffer);
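+
+/*
+ * Usage sketch (illustrative only; assumes a pool and policy were already
+ * configured via bmgr_pool_configure()/bmgr_policy_configure()):
+ *
+ *	struct bmgr_buff_info info = { 0 };
+ *
+ *	info.policy_id = 0;	// policy to allocate from
+ *	info.num_allocs = 2;	// burst of two buffers
+ *	if (bmgr_pop_buffer(&info) == RC_SUCCESS) {
+ *		// info.addr_low[i], info.addr_high[i] and info.pool_id[i]
+ *		// now describe buffer i, for i in [0, num_allocs)
+ *	}
+ */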
+
+/**************************************************************************
+ *! \fn	bmgr_push_buffer
+ **************************************************************************
+ *
+ *  \brief	Pushes a buffer back to the buffer manager
+ *
+ *  \param	buff_info:	Buffer information
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_push_buffer(struct bmgr_buff_info * const buff_info)
+{
+	u32	address = BMGR_DATAPATH_BASE;
+	//u32	src_addr = 0x600000;
+	u32	value = 0;
+	u32	index = 0;
+	u32	ptr2push[2 * PP_BMGR_MAX_BURST_IN_POP]; // 64 bit per allocation
+
+	if (!buff_info) {
+		pr_err("bmgr_push_buffer: buff_info is NULL\n");
+		return -EINVAL;
+	}
+
+	if (buff_info->num_allocs > PP_BMGR_MAX_BURST_IN_POP) {
+		pr_err("bmgr_push_buffer: num_allocs %d is limited with %d\n",
+		       buff_info->num_allocs, PP_BMGR_MAX_BURST_IN_POP);
+		return -EINVAL;
+	}
+
+	// Write the Single/Burst bit
+	if (buff_info->num_allocs > 1)
+		address |= BIT(16);
+
+	// Write the Policy
+	address |= (buff_info->policy_id << 8);
+
+	// Write the buffer descriptors into the staging array
+	for (index = 0; index < buff_info->num_allocs; index++) {
+		ptr2push[2 * index] = buff_info->addr_low[index];
+		//WR_DDR32(src_addr + 8 * index, buff_info->addr_low[index]);
+		value = (buff_info->addr_high[index] & 0xF) |
+			 ((buff_info->pool_id[index] & 0xFF) << 24);
+		ptr2push[2 * index + 1] = value;
+		//WR_DDR32(src_addr + 4 + 8 * index, value);
+
+		pr_info("bmgr_push_buffer: <--- push buffer to address %p (pool %d, addr low %p, addr high %p, value %d)\n",
+			(void *)address, buff_info->pool_id[index],
+			(void *)buff_info->addr_low[index],
+			(void *)buff_info->addr_high[index], value);
+
+		this->driver_db.pools[buff_info->pool_id[index]].
+			num_deallocated_buffers++;
+	}
+
+	copy_dma((u32)&ptr2push[0]/*src_addr*/, address,
+		 (0x80100000 | (8 * buff_info->num_allocs))/*0x80100008*/);
+
+	this->driver_db.policies[buff_info->policy_id].
+		num_deallocated_buffers += buff_info->num_allocs;
+
+	return RC_SUCCESS;
+}
+EXPORT_SYMBOL(bmgr_push_buffer);
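+
+/*
+ * Usage sketch (illustrative only): a buffer popped with bmgr_pop_buffer()
+ * is returned through the same descriptor fields:
+ *
+ *	info.num_allocs = 1;
+ *	// addr_low[0], addr_high[0] and pool_id[0] still hold the values
+ *	// filled in by the pop; the push returns the buffer to that pool
+ *	bmgr_push_buffer(&info);
+ */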
+
+/**************************************************************************
+ *! \fn	bmgr_driver_init
+ **************************************************************************
+ *
+ *  \brief  Initializes the buffer manager driver.
+ *          Must be the first driver function to be called
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_driver_init(void)
+{
+	u8 index;
+
+	pr_info("Buffer Manager driver is initializing....");
+
+	// @lock
+	bmgr_db_lock();
+
+	bmgr_set_buffer_manager_control();
+	bmgr_configure_ocp_master();
+
+	// Reset group reserved buffers
+	for (index = 0; index < PP_BMGR_MAX_GROUPS; index++)
+		bmgr_set_group_reserved_buffers(index, 0);
+
+	// @unlock
+	bmgr_db_unlock();
+
+	pr_info("Done\n");
+
+	return RC_SUCCESS;
+}
+EXPORT_SYMBOL(bmgr_driver_init);
+
+/**************************************************************************
+ *! \fn	bmgr_pool_configure
+ **************************************************************************
+ *
+ *  \brief	Configure a Buffer Manager pool
+ *
+ *  \param	pool_params:	Pool param from user
+ *  \param	pool_id[OUT]:	Pool ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
+			u8 * const pool_id)
+{
+	s32		status = RC_SUCCESS;
+	void	*pointers_table = NULL;
+	u32		index = 0;
+	u32		*temp_pointers_table_ptr = NULL;
+	u64		user_array_ptr;
+	u32		phy_ll_base;
+#ifdef _FPGA_
+	//u32	fpga_ddr_address = 0x500000;
+#endif
+
+	pr_info("Configuring buffer manager pool...");
+
+	// @lock
+	bmgr_db_lock();
+
+	// Validity check
+	status = bmgr_is_pool_params_valid(pool_params);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	*pool_id = 0;
+
+	// Find next available slot in pools db
+	for (index = 0; index < PP_BMGR_MAX_POOLS; index++) {
+		if (this->driver_db.pools[index].is_busy == 0) {
+			*pool_id = index;
+			break;
+		}
+	}
+
+	// If no free slot was found (could also be checked using num_pools)
+	if (index == PP_BMGR_MAX_POOLS) {
+		pr_err("bmgr_pool_configure: pools DB is full!");
+		status = -EIO;
+
+		goto unlock;
+	}
+
+	// Allocate pool_param->pool_num_of_buff * POINTER_SIZE bytes array
+#ifdef _FPGA_
+	pointers_table = (void *)(pool_params->base_addr_low +
+		pool_params->num_buffers * pool_params->size_of_buffer +
+		0x1000);//(void *)fpga_ddr_address;
+#else
+	pointers_table = devm_kzalloc(&this->pdev->dev,
+				      sizeof(u32) * pool_params->num_buffers,
+				      GFP_KERNEL);
+#endif
+	if (!pointers_table) {
+		pr_err("bmgr_pool_configure: Failed to allocate pointers_table, num_buffers %d",
+		       pool_params->num_buffers);
+		status = -ENOMEM;
+		goto unlock;
+	}
+
+#ifdef _FPGA_
+	temp_pointers_table_ptr = (u32 *)((u32)pointers_table +
+			this->debugfs_info.ddrvirt2phyoff);
+#else
+	temp_pointers_table_ptr = (u32 *)pointers_table;
+#endif
+	user_array_ptr = (pool_params->base_addr_low) |
+			((u64)pool_params->base_addr_high << 32);
+
+	for (index = 0; index < pool_params->num_buffers; index++) {
+		u32 temp = user_array_ptr >> 6;
+		// for debugging ...
+		if (index == 0 || index == pool_params->num_buffers - 1)
+			pr_info("bmgr_pool_configure: index %d) writing 0x%x to 0x%x\n",
+				index, temp, (u32)temp_pointers_table_ptr);
+
+		*temp_pointers_table_ptr = temp;
+		temp_pointers_table_ptr++;
+		user_array_ptr += pool_params->size_of_buffer;
+	}
+
+	phy_ll_base = dma_map_single(&this->pdev->dev,
+				     (void *)pointers_table,
+				     (pool_params->num_buffers * 4),
+				     DMA_TO_DEVICE);
+
+	status = bmgr_set_pool_size(*pool_id, pool_params->num_buffers);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_ext_fifo_base_addr_low(*pool_id, phy_ll_base);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_ext_fifo_base_addr_high(*pool_id, 0);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_ext_fifo_occupancy(*pool_id,
+					     pool_params->num_buffers);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	// Verify group is not full
+	index = this->driver_db.groups[pool_params->group_id].
+				num_pools_in_group;
+	if (index >= PP_BMGR_MAX_POOLS_IN_GROUP) {
+		pr_err("bmgr_pool_configure: Group %d is full. num_pools_in_group %d",
+		       pool_params->group_id, index);
+		status = -EIO;
+
+		goto free_memory;
+	}
+
+	// The group was unused
+	if (index == 0)
+		this->driver_db.num_groups++;
+
+	this->driver_db.groups[pool_params->group_id].pools[index] = *pool_id;
+	this->driver_db.groups[pool_params->group_id].available_buffers +=
+		pool_params->num_buffers;
+	this->driver_db.groups[pool_params->group_id].num_pools_in_group++;
+	// Group's reserved buffers will be updated when configuring the policy
+
+	status = bmgr_set_group_available_buffers(pool_params->group_id,
+						  this->driver_db.groups[
+				pool_params->group_id].available_buffers);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pcu_fifo_base_address(*pool_id,
+						BMGR_START_PCU_FIFO_SRAM_ADDR +
+						(*pool_id *
+					BMGR_DEFAULT_PCU_FIFO_SIZE));
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pcu_fifo_size(*pool_id, BMGR_DEFAULT_PCU_FIFO_SIZE);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pcu_fifo_occupancy(*pool_id, 0);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pcu_fifo_prog_empty(*pool_id,
+			BMGR_DEFAULT_PCU_FIFO_LOW_THRESHOLD);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pcu_fifo_prog_full(*pool_id,
+			BMGR_DEFAULT_PCU_FIFO_HIGH_THRESHOLD);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_set_pool_watermark_low_threshold(*pool_id,
+			BMGR_DEFAULT_WATERMARK_LOW_THRESHOLD);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	if (pool_params->flags & POOL_ENABLE_FOR_MIN_GRNT_POLICY_CALC) {
+		status = bmgr_enable_min_guarantee_per_pool(*pool_id, 1);
+		if (status != RC_SUCCESS)
+			goto free_memory;
+	}
+
+	status = bmgr_pool_reset_fifo(*pool_id);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	status = bmgr_pool_enable(*pool_id, 1);
+	if (status != RC_SUCCESS)
+		goto free_memory;
+
+	// Update pool's DB
+	memcpy(&this->driver_db.pools[*pool_id].pool_params,
+	       pool_params, sizeof(struct bmgr_pool_params));
+	this->driver_db.pools[*pool_id].is_busy = 1;
+	this->driver_db.pools[*pool_id].internal_pointers_tables =
+			pointers_table;
+	this->driver_db.num_pools++;
+
+	bmgr_wait_for_init_completion();
+
+	// @unlock
+	bmgr_db_unlock();
+
+	pr_info("Done");
+
+	// OK
+	return status;
+
+free_memory:
+	// free pointers_table
+	#ifndef _FPGA_
+		devm_kfree(&this->pdev->dev, pointers_table);
+	#endif
+unlock:
+	// @unlock
+	bmgr_db_unlock();
+
+	return status;
+}
+EXPORT_SYMBOL(bmgr_pool_configure);
+
+/**************************************************************************
+ *! \fn	bmgr_policy_configure
+ **************************************************************************
+ *
+ *  \brief	Configure a Buffer Manager policy
+ *
+ *  \param	policy_params:	Policy param from user
+ *  \param	policy_id[OUT]:	Policy ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_policy_configure(const struct bmgr_policy_params * const policy_params,
+			  u8 * const policy_id)
+{
+	s32	status = RC_SUCCESS;
+	u8	index = 0;
+
+	pr_info("Configuring buffer manager policy...");
+
+	// @lock
+	bmgr_db_lock();
+
+	// Validity check
+	status = bmgr_is_policy_params_valid(policy_params);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	*policy_id = 0;
+
+	// Find next available slot in policy db
+	for (index = 0; index < PP_BMGR_MAX_POLICIES; index++) {
+		if (this->driver_db.policies[index].is_busy == 0) {
+			*policy_id = index;
+			break;
+		}
+	}
+
+	// If no free slot was found (could also be checked using num_policies)
+	if (index == PP_BMGR_MAX_POLICIES) {
+		pr_err("No free policy!");
+		status = -EIO;
+
+		goto unlock;
+	}
+
+	status = bmgr_set_policy_group_association(*policy_id,
+						   policy_params->group_id);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	status = bmgr_set_policy_max_allowed_per_policy(*policy_id,
+						policy_params->max_allowed);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	status = bmgr_set_policy_min_guaranteed_per_policy(*policy_id,
+						policy_params->min_guaranteed);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	// Set the group's reserved buffers
+	this->driver_db.groups[policy_params->group_id].reserved_buffers =
+		bmgr_get_group_reserved_buffers(policy_params->group_id);
+	this->driver_db.groups[policy_params->group_id].
+		reserved_buffers += policy_params->min_guaranteed;
+
+	status = bmgr_set_group_reserved_buffers(policy_params->group_id,
+						 this->driver_db.groups[
+				policy_params->group_id].reserved_buffers);
+	if (status != RC_SUCCESS)
+		goto unlock;
+
+	for (index = 0; index < policy_params->num_pools_in_policy; index++) {
+		status = bmgr_set_policy_max_allowed_per_pool(*policy_id,
+			policy_params->pools_in_policy[index].pool_id,
+			policy_params->pools_in_policy[index].max_allowed);
+		if (status != RC_SUCCESS)
+			goto unlock;
+
+		status = bmgr_set_policy_pool_mapping(*policy_id,
+			policy_params->pools_in_policy[index].pool_id, index);
+		if (status != RC_SUCCESS)
+			goto unlock;
+	}
+
+	// Update Policy DB
+	this->driver_db.num_policies++;
+	this->driver_db.policies[*policy_id].is_busy = 1;
+	memcpy(&this->driver_db.policies[*policy_id].policy_params,
+	       policy_params, sizeof(struct bmgr_policy_params));
+
+	bmgr_wait_for_init_completion();
+
+	// @unlock
+	bmgr_db_unlock();
+
+	pr_info("Done");
+
+	// OK
+	return status;
+
+unlock:
+	// @unlock
+	bmgr_db_unlock();
+
+	// Failure
+	return status;
+}
+EXPORT_SYMBOL(bmgr_policy_configure);
+
+void test_init_bmgr(struct device *dev)
+{
+	struct bmgr_pool_params	pool_params;
+	struct bmgr_policy_params	policy_params;
+	u8					pool_id;
+	u8					policy_id;
+	u8					num_buffers = 10;
+	u8					size_of_buffer = 64;
+	void				*ptr = NULL;
+
+	if (bmgr_driver_init() != RC_SUCCESS) {
+		pr_err("commands_store(): bmgr_driver_init failed\n");
+		return;
+	}
+
+	#ifdef _FPGA_
+		pool_params.base_addr_low = 0x400000;
+	#else
+		ptr = devm_kzalloc(dev,
+				   size_of_buffer * num_buffers,
+				   GFP_KERNEL);
+		if (!ptr)
+			return;
+
+		pool_params.base_addr_low = (u32)ptr;
+	#endif
+
+	pool_params.base_addr_high = 0;
+	pool_params.size_of_buffer = size_of_buffer;
+	pool_params.num_buffers = num_buffers;
+	pool_params.group_id = 0;
+	pool_params.flags = 0;
+	if (bmgr_pool_configure(&pool_params, &pool_id) != RC_SUCCESS) {
+		pr_err("commands_store(): bmgr_pool_configure failed\n");
+		return;
+	}
+
+	pr_info("test_init_bmgr(): configured pool_id %d with %d buffers (of %d), group id 0, address 0x%x\n",
+		pool_id, num_buffers, size_of_buffer, (u32)ptr);
+
+	policy_params.flags = 0;
+	policy_params.group_id = 0;
+	policy_params.max_allowed = num_buffers / 2;
+	policy_params.min_guaranteed = num_buffers / 5;
+	policy_params.num_pools_in_policy = 1;
+	policy_params.pools_in_policy[0].max_allowed = num_buffers;
+	policy_params.pools_in_policy[0].pool_id = 0;
+	if (bmgr_policy_configure(&policy_params, &policy_id) != RC_SUCCESS) {
+		pr_err("test_init_bmgr(): bmgr_policy_configure failed\n");
+		return;
+	}
+
+	pr_info("test_init_bmgr(): configured policy_id %d with max allowed %d, min guaranteed %d, pool 0 (max allowed %d) group 0\n",
+		policy_id, num_buffers / 2, num_buffers / 5, num_buffers);
+}
+
+void print_hw_stats(u8 policy_id, u8 group_id, u8 pool_id)
+{
+	u32	counter;
+
+	counter = bmgr_get_pool_pop_counter(pool_id);
+	pr_info("Pop counter (pool %d) = %d\n", pool_id, counter);
+
+	counter = bmgr_get_pool_push_counter(pool_id);
+	pr_info("Push counter (pool %d) = %d\n", pool_id, counter);
+
+	counter = bmgr_get_pool_allocated_counter(pool_id);
+	pr_info("Pool %d allocated counter = %d\n", pool_id, counter);
+
+	counter = bmgr_get_pool_size(pool_id);
+	pr_info("Pool %d size = %d\n", pool_id, counter);
+
+	counter = bmgr_get_policy_number_of_allocated_buffers(policy_id);
+	pr_info("Policy %d num allocated = %d\n", policy_id, counter);
+
+	counter = bmgr_get_policy_number_of_allocated_buffers_per_pool(
+				policy_id, pool_id);
+	pr_info("Policy %d num allocated per pool %d = %d\n",
+		policy_id, pool_id, counter);
+
+	counter = bmgr_get_policy_group_association(policy_id);
+	pr_info("Policy %d group association = %d\n", policy_id, counter);
+
+	counter = bmgr_get_policy_max_allowed_per_policy(policy_id);
+	pr_info("Policy %d max allowed = %d\n", policy_id, counter);
+
+	counter = bmgr_get_policy_max_allowed_per_pool(policy_id, pool_id);
+	pr_info("Policy %d max allowed per pool %d = %d\n",
+		policy_id, pool_id, counter);
+
+	counter = bmgr_get_policy_min_guaranteed_per_policy(policy_id);
+	pr_info("Policy %d min guaranteed = %d\n", policy_id, counter);
+
+	counter = bmgr_get_policy_null_counter(policy_id);
+	pr_info("Policy %d null counter = %d\n", policy_id, counter);
+
+	counter = bmgr_get_group_available_buffers(group_id);
+	pr_info("Group %d available buffers = %d\n", group_id, counter);
+
+	counter = bmgr_get_group_reserved_buffers(group_id);
+	pr_info("Group %d reserved buffers = %d\n", group_id, counter);
+
+	{
+		// Just to ignore compilation warnings
+		bmgr_get_ocp_burst_size();
+		bmgr_get_pcu_fifo_base_address(0);
+		bmgr_get_pcu_fifo_size(0);
+		bmgr_get_pcu_fifo_occupancy(0);
+		bmgr_get_pcu_fifo_prog_empty(0);
+		bmgr_get_pcu_fifo_prog_full(0);
+		bmgr_get_ext_fifo_base_addr_low(0);
+		bmgr_get_ext_fifo_base_addr_high(0);
+		bmgr_get_ext_fifo_occupancy(0);
+		bmgr_get_pool_burst_write_counter(0);
+		bmgr_get_pool_burst_read_counter(0);
+		bmgr_get_pool_watermark_low_threshold(0);
+		bmgr_get_policy_pools_mapping(0);
+	}
+}
+
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.h b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.h
new file mode 100644
index 000000000..b2dc147ca
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.h
@@ -0,0 +1,265 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+
+#ifndef _BMGR_H_
+#define _BMGR_H_
+
+/**********************************
+ **********************************
+ ************ DEFINES *************
+ **********************************
+ **********************************/
+
+#define FALCON_SOC
+
+#ifdef FALCON_SOC
+	/*! \def PP_BMGR_MAX_POOLS
+	 *       Max supported pools
+	 */
+	#define PP_BMGR_MAX_POOLS		(4)
+
+	/*! \def PP_BMGR_MAX_POOLS_IN_GROUP
+	 *       Max pools in group
+	 */
+	#define PP_BMGR_MAX_POOLS_IN_GROUP	(4)
+
+	/*! \def PP_BMGR_MAX_GROUPS
+	 *       Max supported groups
+	 */
+	#define PP_BMGR_MAX_GROUPS		(2)
+
+	/*! \def PP_BMGR_MAX_POLICIES
+	 *       Max supported policies
+	 */
+	#define PP_BMGR_MAX_POLICIES		(32)
+#else /* PPv4 */
+	/*! \def PP_BMGR_MAX_POOLS
+	 *       Max supported pools
+	 */
+	#define PP_BMGR_MAX_POOLS		(16)
+
+	/*! \def PP_BMGR_MAX_POOLS_IN_GROUP
+	 *       Max pools in group
+	 */
+	#define PP_BMGR_MAX_POOLS_IN_GROUP	(4)
+
+	/*! \def PP_BMGR_MAX_GROUPS
+	 *       Max supported groups
+	 */
+	#define PP_BMGR_MAX_GROUPS		(16)
+
+	/*! \def PP_BMGR_MAX_POLICIES
+	 *       Max supported policies
+	 */
+	#define PP_BMGR_MAX_POLICIES		(256)
+#endif
+
+/*! \def POOL_ENABLE_FOR_MIN_GRNT_POLICY_CALC
+ *       bmgr pool flags (Used in bmgr_pool_params.flags)
+ *       When set, the pool takes part in the
+ *       policy minimum guaranteed calculation
+ */
+#define POOL_ENABLE_FOR_MIN_GRNT_POLICY_CALC	BIT(0)
+
+/**************************************************************************
+ *! \struct	bmgr_pool_params
+ **************************************************************************
+ *
+ * \brief This structure is the input parameter of the bmgr_pool_configure API
+ *
+ **************************************************************************/
+struct bmgr_pool_params {
+	//!< Pool flags
+	u16	flags;
+	//!< Group index to which the pool belongs
+	u8	group_id;
+	//!< Amount of buffers in pool
+	u32	num_buffers;
+	//!< Buffer size for this pool (in bytes). Minimum is 64 bytes
+	u32	size_of_buffer;
+	//!< Base address of the pool (low)
+	u32	base_addr_low;
+	//!< Base address of the pool (high)
+	u32	base_addr_high;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_group_params
+ **************************************************************************
+ *
+ * \brief This structure is used for the buffer manager database
+ *
+ **************************************************************************/
+struct bmgr_group_params {
+	//!< available buffers in group
+	u32	available_buffers;
+	//!< reserved buffers in this group
+	u32	reserved_buffers;
+	//!< Pools in this group
+	u8	pools[PP_BMGR_MAX_POOLS];
+};
+
+/**************************************************************************
+ *! \struct	bmgr_pool_in_policy_info
+ **************************************************************************
+ *
+ * \brief	This structure is used in the policy_params struct and holds
+ *			the information about the pools in policy
+ *
+ **************************************************************************/
+struct bmgr_pool_in_policy_info {
+	//!< Pool id
+	u8	pool_id;
+	//!< Max allowed per pool per policy
+	u32	max_allowed;
+};
+
+/*! \def POLICY_FLAG_RESERVED1
+ *       bmgr policy flags (Used in bmgr_policy_param.policy_flags)
+ */
+#define POLICY_FLAG_RESERVED1	BIT(0)
+
+/**************************************************************************
+ *! \struct	bmgr_policy_params
+ **************************************************************************
+ *
+ * \brief This structure is the input parameter of the bmgr_policy_configure API
+ *
+ **************************************************************************/
+struct bmgr_policy_params {
+	//!< Policy flags
+	u16		flags;
+	//!< Group index
+	u8		group_id;
+	//!< Policy maximum allowed
+	u32		max_allowed;
+	//!< Policy minimum guaranteed
+	u32		min_guaranteed;
+	//!< Pools information. Sorted from high priority (index 0) to lowest
+	struct bmgr_pool_in_policy_info
+		pools_in_policy[4/*PP_BMGR_MAX_POOLS_IN_POLICY*/];
+	//!< Number of pools in pools_in_policy
+	u8		num_pools_in_policy;
+};
+
+#define PP_BMGR_MAX_BURST_IN_POP	(32)
+/**************************************************************************
+ *! \struct	bmgr_buff_info
+ **************************************************************************
+ *
+ * \brief This structure is used to allocate and deallocate buffers
+ *
+ **************************************************************************/
+struct bmgr_buff_info {
+	//!< [Out] buffer pointer low
+	u32	addr_low[PP_BMGR_MAX_BURST_IN_POP];
+	//!< [Out] buffer pointer high
+	u32	addr_high[PP_BMGR_MAX_BURST_IN_POP];
+	//!< policy to allocate from
+	u8	policy_id;
+	//!< pool each buffer belongs to (set by pop, read by push)
+	u8	pool_id[PP_BMGR_MAX_BURST_IN_POP];
+	//!< number of pointers to allocate (up to 32 pointers)
+	u8	num_allocs;
+};
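+
+/*
+ * Note (convention inferred from the pop/push flow): entry i of addr_low[],
+ * addr_high[] and pool_id[] describes the i-th buffer of the burst,
+ * for 0 <= i < num_allocs.
+ */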
+
+/**********************************
+ **********************************
+ *********** PROTOTYPES ***********
+ **********************************
+ **********************************/
+
+/**************************************************************************
+ *! \fn	bmgr_driver_init
+ **************************************************************************
+ *
+ *  \brief	Initializes the buffer manager driver.
+ *			Must be the first driver function to be called
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_driver_init(void);
+
+/**************************************************************************
+ *! \fn	bmgr_pool_configure
+ **************************************************************************
+ *
+ *  \brief	Configure a Buffer Manager pool
+ *
+ *  \param	pool_params:	Pool param from user
+ *  \param	pool_id[OUT]:	Pool ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
+			u8 * const pool_id);
+
+/**************************************************************************
+ *! \fn	bmgr_policy_configure
+ **************************************************************************
+ *
+ *  \brief	Configure a Buffer Manager policy
+ *
+ *  \param	policy_params:	Policy param from user
+ *  \param	policy_id[OUT]:	Policy ID
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_policy_configure(
+		const struct bmgr_policy_params * const policy_params,
+		u8 * const policy_id);
+
+/**************************************************************************
+ *! \fn	bmgr_pop_buffer
+ **************************************************************************
+ *
+ *  \brief	Pops a buffer from the buffer manager
+ *
+ *  \param	buff_info:	Buffer information
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_pop_buffer(struct bmgr_buff_info * const buff_info);
+
+/**************************************************************************
+ *! \fn	bmgr_push_buffer
+ **************************************************************************
+ *
+ *  \brief	Pushes a buffer back to the buffer manager
+ *
+ *  \param	buff_info:	Buffer information
+ *
+ *  \return	RC_SUCCESS on success, other error code on failure
+ *
+ **************************************************************************/
+s32 bmgr_push_buffer(struct bmgr_buff_info * const buff_info);
+
+#endif /* _BMGR_H_ */
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
new file mode 100644
index 000000000..45c0cbffc
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
@@ -0,0 +1,212 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+
+#ifndef _BMGR_DRV_INTERNAL_H_
+#define _BMGR_DRV_INTERNAL_H_
+
+#include <linux/debugfs.h>
+#include "pp_bm_drv.h"
+
+/*! \def BMGR_DRIVER_VERSION */
+#define BMGR_DRIVER_VERSION			"1.0.0"
+
+/*! \def BMGR_MIN_POOL_BUFFER_SIZE
+ *       Minimum buffer size in configured pool
+ */
+#define BMGR_MIN_POOL_BUFFER_SIZE		(64)
+
+/*! \def BMGR_START_PCU_FIFO_SRAM_ADDR
+ *       Start PCU FIFO address in SRAM. Used in configure pool
+ */
+#define BMGR_START_PCU_FIFO_SRAM_ADDR		(0)
+
+/*! \def BMGR_DEFAULT_PCU_FIFO_SIZE
+ *       PCU fifo size
+ */
+#define BMGR_DEFAULT_PCU_FIFO_SIZE		(0x80)
+
+/*! \def BMGR_DEFAULT_PCU_FIFO_LOW_THRESHOLD
+ *       PCU fifo low threshold
+ */
+#define BMGR_DEFAULT_PCU_FIFO_LOW_THRESHOLD	(1)
+
+/*! \def BMGR_DEFAULT_PCU_FIFO_HIGH_THRESHOLD
+ *       PCU fifo high threshold
+ */
+#define BMGR_DEFAULT_PCU_FIFO_HIGH_THRESHOLD	(0x70)
+
+/*! \def BMGR_DEFAULT_WATERMARK_LOW_THRESHOLD
+ *       Watermark low threshold
+ */
+#define BMGR_DEFAULT_WATERMARK_LOW_THRESHOLD	(0x200)
+
+/**********************************
+ **********************************
+ *********** STRUCTURE ************
+ **********************************
+ **********************************/
+
+/**************************************************************************
+ *! \enum	bmgr_policy_pools_priority_e
+ **************************************************************************
+ *
+ * \brief enum to describe the pool's priority
+ *
+ **************************************************************************/
+enum bmgr_policy_pools_priority_e {
+	bmgr_policy_pool_priority_high,		//!< High priority
+	bmgr_policy_pool_priority_mid_high,	//!< Mid-High priority
+	bmgr_policy_pool_priority_mid_low,	//!< Mid-Low priority
+	bmgr_policy_pool_priority_low,		//!< Low priority
+	bmgr_policy_pool_priority_max		//!< Last priority
+};
+
+/**************************************************************************
+ *! \struct	bmgr_pool_db_entry
+ **************************************************************************
+ *
+ * \brief This structure holds the pool database
+ *
+ **************************************************************************/
+struct bmgr_pool_db_entry {
+	//!< Pool params
+	struct bmgr_pool_params	pool_params;
+	//!< Number of allocated buffers
+	u32						num_allocated_buffers;
+	//!< Number of deallocated buffers
+	u32						num_deallocated_buffers;
+	//!< Is entry in use
+	u8						is_busy;
+	//!< Pointers table to be used in HW
+	void					*internal_pointers_tables;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_group_db_entry
+ **************************************************************************
+ *
+ * \brief This structure holds the group database
+ *
+ **************************************************************************/
+struct bmgr_group_db_entry {
+	//!< available buffers in group
+	u32	available_buffers;
+	//!< reserved buffers in this group
+	u32	reserved_buffers;
+	//!< Pools in group (if set, pool is part of this group)
+	u8	pools[PP_BMGR_MAX_POOLS_IN_GROUP];
+	//!< Number of pools in group
+	u8	num_pools_in_group;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_policy_db_entry
+ **************************************************************************
+ *
+ * \brief This structure holds the policy database
+ *
+ **************************************************************************/
+struct bmgr_policy_db_entry {
+	//!< Policy params
+	struct bmgr_policy_params	policy_params;
+	//!< Number of allocated buffers
+	u32		num_allocated_buffers;
+	//!< Number of deallocated buffers
+	u32		num_deallocated_buffers;
+	//!< Is entry in use
+	u8		is_busy;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_driver_db
+ **************************************************************************
+ *
+ * \brief This structure holds the buffer manager database
+ *
+ **************************************************************************/
+struct bmgr_driver_db {
+	//!< Pools information
+	struct bmgr_pool_db_entry	pools[PP_BMGR_MAX_POOLS];
+	//!< Groups information
+	struct bmgr_group_db_entry	groups[PP_BMGR_MAX_GROUPS];
+	//!< Policies information
+	struct bmgr_policy_db_entry	policies[PP_BMGR_MAX_POLICIES];
+
+	// general counters
+	//!< Number of active pools
+	u32				num_pools;
+	//!< Number of active groups
+	u32				num_groups;
+	//!< Number of active policies
+	u32				num_policies;
+
+	//!< spinlock
+	spinlock_t		db_lock;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_debugfs_info
+ **************************************************************************
+ *
+ * \brief This structure holds the debugfs information
+ *
+ **************************************************************************/
+struct bmgr_debugfs_info {
+	//!< Debugfs dir
+	struct dentry	*dir;
+	u32				virt2phyoff;
+	u32				ddrvirt2phyoff;
+	u32				fpga_offset;
+};
+
+/**************************************************************************
+ *! \struct	bmgr_driver_private
+ **************************************************************************
+ *
+ * \brief This struct defines the driver's private data
+ *
+ **************************************************************************/
+struct bmgr_driver_private {
+	//!< Platform device pointer
+	struct platform_device		*pdev;
+	//!< Is driver enabled
+	int							enabled;
+	//!< Platform device DB
+	struct bmgr_driver_db		driver_db;
+	//!< Debugfs info
+	struct bmgr_debugfs_info	debugfs_info;
+};
+
+void test_init_bmgr(struct device *dev);
+void print_hw_stats(u8 policy_id, u8 group_id, u8 pool_id);
+
+int bm_dbg_dev_init(struct platform_device *pdev);
+void bm_dbg_dev_clean(struct platform_device *pdev);
+int bm_dbg_module_init(void);
+void bm_dbg_module_clean(void);
+
+#endif /* _BMGR_DRV_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/Makefile b/drivers/net/ethernet/lantiq/ppv4/qos/Makefile
new file mode 100644
index 000000000..4241e0562
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PPv4 QoS driver.
+#
+
+obj-$(CONFIG_LTQ_PPV4_QOS) += pp_qos_drv.o
+pp_qos_drv-y 	:= pp_qos_linux.o pp_qos_main.o pp_qos_utils.o pp_qos_fw.o pp_qos_tests.o pp_qos_debugfs.o
+ccflags-y	+= -Iinclude/net -DQOS_CPU_UC_SAME_ENDIANESS
+
+ccflags-$(CONFIG_LTQ_PPV4_QOS_TEST) += -DPP_QOS_TEST
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_common.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_common.h
new file mode 100644
index 000000000..4fa22b718
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_common.h
@@ -0,0 +1,180 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef PP_QOS_COMMON_H
+#define PP_QOS_COMMON_H
+
+#ifdef __KERNEL__
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+#else
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "list.h"
+#endif
+
+/* Logs */
+#ifndef __KERNEL__
+#define BIT(x) ((uint32_t) (1U << (x)))
+#define QOS_LOG_CRIT(format, arg...)	printf(format, ##arg)
+#define QOS_LOG_ERR(format, arg...)	printf(format, ##arg)
+#define QOS_LOG_INFO(format, arg...)	printf(format, ##arg)
+#define QOS_LOG_DEBUG(format, arg...)	printf(format, ##arg)
+#else
+extern struct device *cur_dev;
+#define QOS_LOG_CRIT(format, arg...) \
+do { \
+	if (cur_dev) \
+		dev_crit(cur_dev, format, ##arg); \
+	else \
+		pr_crit(format, ##arg); \
+} while (0)
+#define QOS_LOG_ERR(format, arg...) \
+do { \
+	if (cur_dev) \
+		dev_err(cur_dev, format, ##arg); \
+	else \
+		pr_err(format, ##arg); \
+} while (0)
+#define QOS_LOG_INFO(format, arg...) \
+do { \
+	if (cur_dev) \
+		dev_info(cur_dev, format, ##arg); \
+	else \
+		pr_info(format, ##arg); \
+} while (0)
+#define QOS_LOG_DEBUG(format, arg...) \
+do { \
+	if (cur_dev) \
+		dev_dbg(cur_dev, format, ##arg); \
+	else \
+		pr_debug(format, ##arg); \
+} while (0)
+
+#endif
+
+#ifndef __KERNEL__
+#define max(a, b)		\
+	({ typeof(a) _a = (a);	\
+	typeof(b) _b = (b);	\
+	_a > _b ? _a : _b; })
+
+#define min(a, b)		\
+	({ typeof(a) _a = (a);	\
+	 typeof(b) _b = (b);	\
+	 _a < _b ? _a : _b; })
+#endif
+
+/* Memory allocation */
+#ifdef __KERNEL__
+#define QOS_MALLOC(size)	vmalloc(size)
+#define QOS_FREE(p)		vfree(p)
+#else
+#define QOS_MALLOC(size)	malloc(size)
+#define QOS_FREE(p)		free(p)
+#endif
+
+/* Locking */
+#ifdef __KERNEL__
+#define LOCK spinlock_t
+#define QOS_LOCK_INIT(qdev)	spin_lock_init(&qdev->lock)
+#define QOS_LOCK(qdev)		spin_lock(&qdev->lock)
+#define QOS_UNLOCK(qdev)	spin_unlock(&qdev->lock)
+#define QOS_SPIN_IS_LOCKED(qdev) spin_is_locked(&qdev->lock)
+#else
+#define LOCK int
+#define QOS_LOCK_INIT(qdev)	(qdev->lock = 0)
+#define QOS_LOCK(qdev)		do {QOS_ASSERT(qdev->lock == 0,\
+				"Lock already taken\n");\
+				qdev->lock++; } while (0)
+#define QOS_UNLOCK(qdev)	do {QOS_ASSERT(qdev->lock == 1,\
+				"Lock not taken\n");\
+				qdev->lock--; } while (0)
+#define QOS_SPIN_IS_LOCKED(qdev) (qdev->lock)
+#endif
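+
+/*
+ * A minimal usage sketch (illustrative only; assumes a qdev that has
+ * the 'lock' member these macros expect): the same call site compiles
+ * against both the kernel spinlock and the user-space assert counter.
+ *
+ *	QOS_LOCK_INIT(qdev);
+ *	QOS_LOCK(qdev);
+ *	// ... critical section touching qdev state ...
+ *	QOS_UNLOCK(qdev);
+ */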
+
+/* MMIO */
+#ifdef __KERNEL__
+#else
+#define __iomem
+#define iowrite32(val, addr) (*((uint32_t *)(addr)) = (val))
+#endif
+
+/* TODO: not clean.
+ * Platforms other than the FPGA currently used for testing may use a
+ * different mapping.
+ */
+#ifdef CONFIG_OF
+#define TO_QOS_ADDR(x) (x)
+#define FW_OK_OFFSET (0x25e10000U)
+#else
+#define FW_OK_OFFSET (0x6ffff8U)
+#define PCI_DDR_BAR_START 0xd2000000
+#define TO_QOS_ADDR(x) ((x) - PCI_DDR_BAR_START)
+#endif
+
+/* Sleep */
+#ifdef __KERNEL__
+#define qos_sleep(t) msleep((t))
+#else
+#define qos_sleep(t) usleep((t) * 1000)
+#endif
+
+/* Endianness */
+#ifndef __KERNEL__
+#define cpu_to_le32(x) (x)
+#define le32_to_cpu(x) (x)
+#endif
+
+#ifdef QOS_CPU_UC_SAME_ENDIANESS
+#define qos_u32_to_uc(val)	(val)
+#define qos_u32_from_uc(val)  (val)
+#else
+#define qos_u32_to_uc(val)	(cpu_to_le32((val)))
+#define qos_u32_from_uc(val)	(le32_to_cpu((val)))
+#endif
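+
+/*
+ * Conversion sketch (illustrative): values crossing to the uC go
+ * through qos_u32_to_uc() and values read back through
+ * qos_u32_from_uc(), so callers are correct whether or not the CPU
+ * and the uC share endianness.
+ *
+ *	uint32_t wire = qos_u32_to_uc(0x12345678U);	// CPU -> uC
+ *	uint32_t host = qos_u32_from_uc(wire);		// uC -> CPU
+ *	// host == 0x12345678U in both build variants
+ */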
+
+/* Creating enums and enums strings */
+#ifndef __KERNEL__
+#define __stringify(x)		#x
+#endif
+
+#define GEN_ENUM(enumerator)    enumerator,
+#define GEN_STR(str)		__stringify(str),
+
+#endif /* PP_QOS_COMMON_H */
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_debugfs.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_debugfs.c
new file mode 100644
index 000000000..1e93e73ae
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_debugfs.c
@@ -0,0 +1,660 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "pp_qos_common.h"
+#include "pp_qos_utils.h"
+#include "pp_qos_fw.h"
+#include "pp_qos_kernel.h"
+
+/* TODO: is this the common way to keep a driver's non-device-specific data? */
+static struct {
+	struct dentry *dir;
+} dbg_data = {NULL, };
+
+#define PP_QOS_DEBUGFS_DIR "ppv4_qos"
+
+static int pp_qos_dbg_node_show(struct seq_file *s, void *unused)
+{
+	struct platform_device *pdev;
+	struct pp_qos_dev *qdev;
+	struct pp_qos_drv_data *pdata;
+	const char *typename;
+	unsigned int phy;
+	unsigned int id;
+	unsigned int i;
+	static const char *const types[] = {"Port", "Sched", "Queue",
+		"Unknown"};
+	static const char *const yesno[] = {"No", "Yes"};
+	int rc;
+	struct pp_qos_node_info info;
+
+	pdev = s->private;
+	if (pdev) {
+		dev_info(&pdev->dev, "node_show called\n");
+		pdata = platform_get_drvdata(pdev);
+		qdev = pdata->qdev;
+		if (!qdev->initialized) {
+			seq_puts(s, "Device is not initialized !!!!\n");
+			return 0;
+		}
+
+		id = pdata->dbg.node;
+		phy = get_phy_from_id(qdev->mapping, id);
+		if (!QOS_PHY_VALID(phy)) {
+			seq_printf(s, "Invalid id %u\n", id);
+			return 0;
+		}
+		rc = pp_qos_get_node_info(qdev, id, &info);
+		if (rc) {
+			seq_printf(s, "Could not get info for node %u!!!!\n",
+					id);
+			return 0;
+		}
+
+		if (info.type >=  PPV4_QOS_NODE_TYPE_PORT &&
+				info.type <= PPV4_QOS_NODE_TYPE_QUEUE)
+			typename = types[info.type];
+		else
+			typename = types[3];
+
+		seq_printf(s, "%u(%u) - %s: suspended(%s) internal node(%s)\n",
+				id, phy,
+				typename,
+				yesno[!!info.is_suspended],
+				yesno[!!info.is_internal]);
+
+		if (info.preds[0].phy != PPV4_QOS_INVALID) {
+			seq_printf(s, "%u(%u)", id, phy);
+			for (i = 0; i < 6; ++i) {
+				if (info.preds[i].phy == PPV4_QOS_INVALID)
+					break;
+				seq_printf(s, " ==> %u(%u)",
+						info.preds[i].id,
+						info.preds[i].phy);
+			}
+			seq_puts(s, "\n");
+		}
+
+		if (info.children[0].phy != PPV4_QOS_INVALID) {
+			for (i = 0; i < 8; ++i) {
+				if (info.children[i].phy == PPV4_QOS_INVALID)
+					break;
+				seq_printf(s, "%u(%u) ",
+						info.children[i].id,
+						info.children[i].phy);
+			}
+			seq_puts(s, "\n");
+		}
+
+		if (info.type == PPV4_QOS_NODE_TYPE_QUEUE)  {
+			seq_printf(s, "Physical queue: %u\n",
+					info.queue_physical_id);
+			seq_printf(s, "Port: %u\n", info.port);
+		}
+
+		seq_printf(s, "Bandwidth: %u Kbps\n", info.bw_limit);
+	}
+	return 0;
+}
+
+static int pp_qos_dbg_node_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pp_qos_dbg_node_show, inode->i_private);
+}
+
+static const struct file_operations debug_node_fops = {
+	.open = pp_qos_dbg_node_open,
+	.read = seq_read,
+	.release = single_release,
+};
+
+static int pp_qos_dbg_stat_show(struct seq_file *s, void *unused)
+{
+	struct platform_device *pdev;
+	struct pp_qos_dev *qdev;
+	struct pp_qos_drv_data *pdata;
+	struct pp_qos_queue_stat qstat;
+	struct pp_qos_port_stat pstat;
+	struct qos_node *node;
+	unsigned int phy;
+	unsigned int id;
+
+	pdev = s->private;
+	if (pdev) {
+		dev_info(&pdev->dev, "stat_show called\n");
+		pdata = platform_get_drvdata(pdev);
+		qdev = pdata->qdev;
+		if (!qdev->initialized) {
+			seq_puts(s, "Device is not initialized !!!!\n");
+			return 0;
+		}
+
+		id = pdata->dbg.node;
+		phy = get_phy_from_id(qdev->mapping, id);
+		if (!QOS_PHY_VALID(phy)) {
+			seq_printf(s, "Invalid id %u\n", id);
+			return 0;
+		}
+
+		node = get_node_from_phy(qdev->nodes, phy);
+		if (node_used(node)) {
+			seq_printf(s, "%u(%u) - ", id, phy);
+			if (node_queue(node)) {
+				seq_puts(s, "Queue\n");
+				memset(&qstat, 0, sizeof(qstat));
+				if (pp_qos_queue_stat_get(qdev, id, &qstat)
+						== 0) {
+					seq_printf(s, "queue_packets_occupancy:%u\n",
+						qstat.queue_packets_occupancy);
+					seq_printf(s, "queue_bytes_occupancy:%u\n",
+						qstat.queue_bytes_occupancy);
+					seq_printf(s, "total_packets_accepted:%u\n",
+						qstat.total_packets_accepted);
+					seq_printf(s, "total_packets_dropped:%u\n",
+						qstat.total_packets_dropped);
+					seq_printf(
+						s,
+						"total_packets_red_dropped:%u\n",
+						qstat.total_packets_red_dropped
+						);
+					seq_printf(s, "total_bytes_accepted:%llu\n",
+						qstat.total_bytes_accepted);
+					seq_printf(s, "total_bytes_dropped:%llu\n",
+						qstat.total_bytes_dropped);
+				} else {
+					seq_puts(s, "Could not obtained statistics\n");
+				}
+			} else if (node_port(node)) {
+				seq_puts(s, "Port\n");
+				memset(&pstat, 0, sizeof(pstat));
+				if (pp_qos_port_stat_get(qdev, id, &pstat)
+						== 0) {
+					seq_printf(
+						s,
+						"total_green_bytes in port's queues:%u\n",
+						pstat.total_green_bytes);
+					seq_printf(
+						s,
+						"total_yellow_bytes in port's queues:%u\n",
+						pstat.total_yellow_bytes);
+				} else {
+					seq_puts(s, "Could not obtained statistics\n");
+				}
+			} else {
+					seq_puts(s, "Node is not a queue or port, no statistics\n");
+			}
+		} else {
+			seq_printf(s, "Node %u is unused\n", id);
+		}
+	}
+	return 0;
+}
+
+static int pp_qos_dbg_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pp_qos_dbg_stat_show, inode->i_private);
+}
+
+static const struct file_operations debug_stat_fops = {
+	.open = pp_qos_dbg_stat_open,
+	.read = seq_read,
+	.release = single_release,
+};
+
+static int pp_qos_dbg_gen_show(struct seq_file *s, void *unused)
+{
+	unsigned int i;
+	struct platform_device *pdev;
+	struct pp_qos_drv_data *pdata;
+	const struct qos_node *node;
+	struct pp_qos_dev *qdev;
+	unsigned int ports;
+	unsigned int queues;
+	unsigned int scheds;
+	unsigned int internals;
+	unsigned int used;
+	unsigned int major;
+	unsigned int minor;
+	unsigned int build;
+
+	pdev = s->private;
+	if (pdev) {
+		pdata = platform_get_drvdata(pdev);
+		qdev = pdata->qdev;
+		if (qdev->initialized) {
+			ports = 0;
+			scheds = 0;
+			queues = 0;
+			internals = 0;
+			used = 0;
+			node = get_node_from_phy(qdev->nodes, 0);
+			for (i = 0; i < NUM_OF_NODES; ++i) {
+				if (node_used(node)) {
+					++used;
+					if (node_port(node))
+						++ports;
+					else if (node_queue(node))
+						++queues;
+					else if (node_internal(node))
+						++internals;
+					else if (node_sched(node))
+						++scheds;
+				}
+				++node;
+			}
+
+			seq_printf(s, "Driver version: %s\n", PPV4_QOS_DRV_VER);
+			if (pp_qos_get_fw_version(
+						qdev,
+						&major,
+						&minor,
+						&build) == 0)
+				seq_printf(s, "FW version:\tmajor(%u) minor(%u) build(%u)\n",
+						major, minor, build);
+			else
+				seq_puts(s, "Could not obtain FW version\n");
+
+			seq_printf(s, "Used nodes:\t%u\nPorts:\t\t%u\nScheds:\t\t%u\nQueues:\t\t%u\nInternals:\t%u\n",
+					used, ports, scheds, queues, internals);
+		} else {
+			seq_puts(s, "Device is not initialized !!!!\n");
+		}
+
+	} else {
+		pr_err("Error, platform device was not found\n");
+	}
+
+	return 0;
+}
+
+static int pp_qos_dbg_gen_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pp_qos_dbg_gen_show, inode->i_private);
+}
+
+static const struct file_operations debug_gen_fops = {
+	.open = pp_qos_dbg_gen_open,
+	.read = seq_read,
+	.release = single_release,
+};
+
+
+static int dbg_cmd_open(struct inode *inode, struct file *filep)
+{
+	filep->private_data = inode->i_private;
+	return 0;
+}
+
+#define MAX_CMD_LEN 0x200
+static ssize_t dbg_cmd_write(struct file *fp, const char __user *user_buffer,
+			     size_t cnt, loff_t *pos)
+{
+	int rc;
+	int i;
+	uint8_t cmd[MAX_CMD_LEN];
+	struct platform_device *pdev;
+	struct pp_qos_drv_data *pdata;
+	struct pp_qos_dev *qdev;
+	uint32_t *dst;
+	uint32_t *src;
+
+	pdev = (struct platform_device *)(fp->private_data);
+	pdata = platform_get_drvdata(pdev);
+	qdev = pdata->qdev;
+
+	pr_info("qos drv address is %p\n", qdev);
+
+	if (cnt > MAX_CMD_LEN) {
+		dev_err(&pdev->dev, "Illegal length %zu\n", cnt);
+		return -EINVAL;
+	}
+
+	rc =  simple_write_to_buffer(cmd, MAX_CMD_LEN, pos, user_buffer, cnt);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Write failed with %d\n", rc);
+		return rc;
+	}
+
+	dst = (uint32_t *)(qdev->fwcom.cmdbuf);
+	src = (uint32_t *)cmd;
+	dev_info(&pdev->dev, "Writing %d bytes into\n", rc);
+	for (i = 0; i < rc / 4; ++i) {
+		*dst++ = qos_u32_to_uc(*src++);
+		dev_info(&pdev->dev, "Wrote 0x%08X\n", dst[-1]);
+	}
+
+	signal_uc(qdev);
+
+	return rc;
+}
+
+static const struct file_operations debug_cmd_fops = {
+	.open = dbg_cmd_open,
+	.write = dbg_cmd_write,
+};
+
+static void swap_msg(char *msg)
+{
+	unsigned int i;
+	uint32_t *cur;
+
+	cur = (uint32_t *)msg;
+
+	for (i = 0; i < 32; ++i)
+		cur[i] = le32_to_cpu(cur[i]);
+}
+
+static void print_fw_log(struct platform_device *pdev)
+{
+	char		msg[128];
+	unsigned int	num;
+	unsigned int    i;
+	uint32_t	*addr;
+	uint32_t	read;
+	char		*cur;
+	struct device	*dev;
+	struct pp_qos_drv_data *pdata;
+
+	pdata = platform_get_drvdata(pdev);
+	addr = (uint32_t *)(pdata->dbg.fw_logger_addr);
+	num = qos_u32_from_uc(*addr);
+	read = qos_u32_from_uc(addr[1]);
+	dev = &pdev->dev;
+	cur = (char *)(pdata->dbg.fw_logger_addr + 8);
+
+	dev_info(dev, "addr is 0x%08X num of messages is %u, read index is %u",
+		 (unsigned int)(uintptr_t)cur,
+		 num,
+		 read);
+
+	for (i = read; i < num; ++i) {
+		memcpy((char *)msg, (char *)(cur + 128 * i), 128);
+		swap_msg(msg);
+		msg[127] = '\0';
+		dev_info(dev, "[ARC]: %s\n", msg);
+	}
+
+	addr[1] = qos_u32_to_uc(num);
+}
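+
+/*
+ * Layout of the logger buffer as read above (offsets in bytes,
+ * inferred from the code):
+ *
+ *	+0	uint32 number of messages written by the firmware
+ *	+4	uint32 read index (updated by the host after printing)
+ *	+8	message 0: 128 bytes of little-endian uint32 words
+ *	+136	message 1, and so on
+ */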
+
+static int ctrl_set(void *data, u64 val)
+{
+	struct platform_device *pdev;
+	struct pp_qos_drv_data *pdata;
+
+	pdev = data;
+	dev_info(&pdev->dev, "ctrl got %llu", val);
+	pdata = platform_get_drvdata(pdev);
+
+	switch (val) {
+#ifdef PP_QOS_TEST
+	case 0:
+		QOS_LOG_INFO("running basic tests\n");
+		basic_tests();
+		break;
+	case 1:
+		QOS_LOG_INFO("running advance tests\n");
+		advance_tests();
+		break;
+	case 2:
+		QOS_LOG_INFO("running all tests\n");
+		tests();
+		break;
+	case 7:
+		QOS_LOG_INFO("running falcon test\n");
+		falcon_test();
+		break;
+	case 8:
+		QOS_LOG_INFO("running simple test\n");
+		stat_test();
+		break;
+	case 9:
+		QOS_LOG_INFO("running load fw test\n");
+		load_fw_test();
+		break;
+	case 10:
+		QOS_LOG_INFO("running stat test\n");
+		stat_test();
+		break;
+	case 11:
+		QOS_LOG_INFO("running info test\n");
+		info_test();
+		break;
+
+	case 14:
+		QOS_LOG_INFO("printing logger\n");
+		print_fw_log(pdev);
+		break;
+
+#endif
+
+	default:
+		QOS_LOG_INFO("unknown test\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int phy2id_get(void *data, u64 *val)
+{
+	uint16_t id;
+	struct platform_device *pdev;
+	struct pp_qos_drv_data *pdata;
+	struct pp_qos_dev *qdev;
+
+	pdev = data;
+	pdata = platform_get_drvdata(pdev);
+	qdev = pdata->qdev;
+	if (!qdev->initialized) {
+		dev_err(&pdev->dev, "Device is not initialized\n");
+		id =  QOS_INVALID_ID;
+		goto out;
+	}
+
+	id = get_id_from_phy(qdev->mapping, pdata->dbg.node);
+out:
+	*val = id;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_ctrl_fops, NULL, ctrl_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(dbg_phy2id_fops, phy2id_get, NULL, "%llu\n");
+
+#define MAX_DIR_NAME 11
+int qos_dbg_dev_init(struct platform_device *pdev)
+{
+	struct pp_qos_drv_data *pdata;
+	char   dirname[MAX_DIR_NAME];
+	struct dentry *dent;
+	int err;
+
+	if (!pdev) {
+		pr_err("Invalid platform device\n");
+		return -ENODEV;
+	}
+
+	pdata = platform_get_drvdata(pdev);
+
+	snprintf(dirname, MAX_DIR_NAME, "qos%d", pdata->id);
+	dent = debugfs_create_dir(dirname, dbg_data.dir);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev, "debugfs_create_dir failed with %d\n", err);
+		return err;
+	}
+
+	pdata->dbg.dir = dent;
+	dent = debugfs_create_file(
+			"nodeinfo",
+			0400,
+			pdata->dbg.dir,
+			pdev,
+			&debug_node_fops
+			);
+
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating nodeinfo with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_file(
+			"stat",
+			0400,
+			pdata->dbg.dir,
+			pdev,
+			&debug_stat_fops
+			);
+
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating stat with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_u16("node",
+			0600,
+			pdata->dbg.dir,
+			&pdata->dbg.node);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_u16 failed creating nodeinfo with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_file(
+			"ctrl",
+			0200,
+			pdata->dbg.dir,
+			pdev,
+			&dbg_ctrl_fops
+			);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating ctrl with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_file(
+			"phy2id",
+			0400,
+			pdata->dbg.dir,
+			pdev,
+			&dbg_phy2id_fops
+			);
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating phy2id with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_file(
+			"geninfo",
+			0400,
+			pdata->dbg.dir,
+			pdev,
+			&debug_gen_fops
+			);
+
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating geninfo with %d\n",
+			err);
+		goto fail;
+	}
+
+	dent = debugfs_create_file(
+			"cmd",
+			0200,
+			pdata->dbg.dir,
+			pdev,
+			&debug_cmd_fops
+			);
+
+	if (IS_ERR_OR_NULL(dent)) {
+		err = (int) PTR_ERR(dent);
+		dev_err(&pdev->dev,
+			"debugfs_create_file failed creating cmds with %d\n",
+			err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	debugfs_remove_recursive(pdata->dbg.dir);
+	return err;
+}
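+
+/*
+ * Example debugfs session using the files created above (hypothetical
+ * paths, assuming debugfs is mounted at /sys/kernel/debug and this is
+ * qos instance 0):
+ *
+ *	echo 5 > /sys/kernel/debug/ppv4_qos/qos0/node
+ *	cat /sys/kernel/debug/ppv4_qos/qos0/nodeinfo
+ *	cat /sys/kernel/debug/ppv4_qos/qos0/stat
+ *	cat /sys/kernel/debug/ppv4_qos/qos0/geninfo
+ */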
+
+void qos_dbg_dev_clean(struct platform_device *pdev)
+{
+	struct pp_qos_drv_data *pdata;
+
+	if (pdev) {
+		pdata = platform_get_drvdata(pdev);
+		if (pdata)
+			debugfs_remove_recursive(pdata->dbg.dir);
+	}
+}
+
+int qos_dbg_module_init(void)
+{
+	int rc;
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(PP_QOS_DEBUGFS_DIR, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = (int) PTR_ERR(dir);
+		pr_err("debugfs_create_dir failed with %d\n", rc);
+		return rc;
+	}
+	dbg_data.dir = dir;
+	return 0;
+}
+
+void qos_dbg_module_clean(void)
+{
+	debugfs_remove_recursive(dbg_data.dir);
+}
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.c
new file mode 100644
index 000000000..4c4b849b3
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.c
@@ -0,0 +1,2373 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#include "pp_qos_common.h"
+#include "pp_qos_utils.h"
+#include "pp_qos_uc_wrapper.h"
+#include "pp_qos_fw.h"
+
+
+#ifdef __LP64__
+#define GET_ADDRESS_HIGH(addr) ((((uintptr_t)(addr)) >> 32) & 0xFFFFFFFF)
+#else
+#define GET_ADDRESS_HIGH(addr) (0)
+#endif
+
+#ifndef PP_QOS_DISABLE_CMDQ
+
+#define CMD_FLAGS_WRAP_SUSPEND_RESUME 1
+#define CMD_FLAGS_WRAP_PARENT_SUSPEND_RESUME 2
+#define CMD_FLAGS_POST_PROCESS	4
+
+
+#define FW_CMDS(OP) \
+	OP(CMD_TYPE_INIT_LOGGER)		\
+	OP(CMD_TYPE_INIT_QOS)			\
+	OP(CMD_TYPE_MOVE)			\
+	OP(CMD_TYPE_ADD_PORT)			\
+	OP(CMD_TYPE_SET_PORT)			\
+	OP(CMD_TYPE_REMOVE_PORT)		\
+	OP(CMD_TYPE_ADD_SCHED)			\
+	OP(CMD_TYPE_SET_SCHED)			\
+	OP(CMD_TYPE_REMOVE_SCHED)		\
+	OP(CMD_TYPE_ADD_QUEUE)			\
+	OP(CMD_TYPE_SET_QUEUE)			\
+	OP(CMD_TYPE_REMOVE_QUEUE)		\
+	OP(CMD_TYPE_UPDATE_PREDECESSORS)	\
+	OP(CMD_TYPE_PARENT_CHANGE)		\
+	OP(CMD_TYPE_REMOVE_NODE)		\
+	OP(CMD_TYPE_GET_QUEUE_STATS)		\
+	OP(CMD_TYPE_GET_PORT_STATS)		\
+	OP(CMD_TYPE_ADD_SHARED_GROUP)		\
+	OP(CMD_TYPE_PUSH_DESC)			\
+	OP(CMD_TYPE_GET_NODE_INFO)		\
+	OP(CMD_TYPE_REMOVE_SHARED_GROUP)	\
+	OP(CMD_TYPE_SET_SHARED_GROUP)
+
+enum cmd_type {
+	FW_CMDS(GEN_ENUM)
+};
+
+static const char *const cmd_str[] = {
+	FW_CMDS(GEN_STR)
+};
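+
+/*
+ * The FW_CMDS X-macro keeps the enum and the string table in sync;
+ * adding one OP() line above extends both. Expanded, this yields:
+ *
+ *	enum cmd_type { CMD_TYPE_INIT_LOGGER, CMD_TYPE_INIT_QOS, ... };
+ *	static const char *const cmd_str[] = {
+ *		"CMD_TYPE_INIT_LOGGER", "CMD_TYPE_INIT_QOS", ... };
+ *
+ * so cmd_str[cmd->type] always names the command in logs.
+ */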
+
+struct ppv4_qos_fw_hdr {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t build;
+};
+
+struct ppv4_qos_fw_sec {
+	uint32_t dst;
+	uint32_t size;
+};
+
+#define FW_OK_SIGN (0xCAFECAFEU)
+#define FW_DDR_LOWEST (0x400000U)
+
+static void copy_section(void *_dst, const void *_src, unsigned int size)
+{
+	unsigned int i;
+	const uint32_t *src;
+	uint32_t *dst;
+
+	src = (uint32_t *)_src;
+	dst = (uint32_t *)_dst;
+
+	for (i = size; i > 0; i -= 4)
+		*dst++ = cpu_to_le32(*src++);
+}
+
+/*
+ * This function loads the firmware.
+ * The firmware image starts with a header holding the major, minor
+ * and build numbers.
+ * The header is followed by sections. Each section is composed of a
+ * header, which holds the memory destination to which the section's
+ * data should be copied and the size of the section in bytes,
+ * followed by the section's data - a stream of uint32 words.
+ * The memory destination in the section header designates an offset
+ * relative to either the ddr (a.k.a. external) space or the qos
+ * space. Offsets at or above FW_DDR_LOWEST refer to the ddr space.
+ *
+ * The firmware is little endian.
+ *
+ * When the firmware runs it writes 0xCAFECAFE to offset FW_OK_OFFSET
+ * of the ddr space.
+ */
+int do_load_firmware(
+		struct pp_qos_dev *qdev,
+		const struct ppv4_qos_fw *fw,
+		void *ddr_base,
+		void *qos_base,
+		void *data)
+{
+	size_t size;
+	struct ppv4_qos_fw_hdr *hdr;
+	const uint8_t *cur;
+	const uint8_t *last;
+	void *dst;
+	struct ppv4_qos_fw_sec *sec;
+	uint32_t val;
+
+	size = fw->size;
+	hdr = (struct ppv4_qos_fw_hdr *)(fw->data);
+	hdr->major = le32_to_cpu(hdr->major);
+	hdr->minor = le32_to_cpu(hdr->minor);
+	hdr->build = le32_to_cpu(hdr->build);
+	QOS_LOG_INFO("Firmware size(%zu) major(%u) minor(%u) build(%u)\n",
+			size,
+			hdr->major,
+			hdr->minor,
+			hdr->build);
+
+	if (hdr->major != UC_VERSION_MAJOR || hdr->minor != UC_VERSION_MINOR) {
+		QOS_LOG_ERR("mismatch major %u of minor %u\n",
+				UC_VERSION_MAJOR, UC_VERSION_MINOR);
+		return -EINVAL;
+	}
+
+	qdev->fwver.major = hdr->major;
+	qdev->fwver.minor = hdr->minor;
+	qdev->fwver.build = hdr->build;
+
+	last = fw->data + size - 1;
+	cur = (uint8_t *)(hdr + 1);
+	while (cur < last) {
+		sec = (struct ppv4_qos_fw_sec *)cur;
+		sec->dst = le32_to_cpu(sec->dst);
+		sec->size = le32_to_cpu(sec->size);
+		cur = (uint8_t *)(sec + 1);
+
+		if (sec->dst >= FW_DDR_LOWEST)
+			dst = ddr_base + sec->dst;
+		else
+			dst = qos_base + sec->dst;
+
+		QOS_LOG_DEBUG("Copying %u bytes (0x%08X) <-- (0x%08X)\n",
+				sec->size,
+				(unsigned int)(uintptr_t)(dst),
+				(unsigned int)(uintptr_t)(cur));
+
+		copy_section(dst, cur, sec->size);
+		cur += sec->size;
+	}
+
+	wake_uc(data);
+	QOS_LOG_DEBUG("waked fw\n");
+	qos_sleep(10);
+	val = *((uint32_t *)(ddr_base + FW_OK_OFFSET));
+	if (val != FW_OK_SIGN) {
+		QOS_LOG_ERR("FW OK value is 0x%08X, instead got 0x%08X\n",
+				FW_OK_SIGN, val);
+		return -ENODEV;
+	}
+	QOS_LOG_INFO("FW is running :)\n");
+	*((uint32_t *)(ddr_base + FW_OK_OFFSET)) = 0;
+	return 0;
+}
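+
+/*
+ * A sketch of the image layout parsed above (offsets in bytes, all
+ * words little endian):
+ *
+ *	+0	uint32 major
+ *	+4	uint32 minor
+ *	+8	uint32 build
+ *	+12	section 0 header: uint32 dst, uint32 size
+ *	+20	section 0 data: 'size' bytes of uint32 words
+ *	...	further sections until the end of the image
+ */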
+
+/******************************************************************************/
+/*                         Driver commands structures	                      */
+/******************************************************************************/
+struct cmd {
+	unsigned int id;
+	unsigned int fw_id;
+	unsigned int flags;
+	enum cmd_type  type;
+	size_t len;
+	uint32_t *pos;
+};
+
+struct cmd_init_logger {
+	struct cmd base;
+	unsigned int	addr;
+	int mode;
+	int level;
+	unsigned int num_of_msgs;
+};
+
+struct cmd_init_qos {
+	struct cmd base;
+	unsigned int qm_ddr_start;
+	unsigned int qm_num_pages;
+	unsigned int wred_total_avail_resources;
+	unsigned int wred_prioritize_pop;
+	unsigned int wred_avg_q_size_p;
+	unsigned int wred_max_q_size;
+	unsigned int num_of_ports;
+};
+
+struct cmd_move {
+	struct cmd base;
+	int      node_type;
+	uint16_t src;
+	uint16_t dst;
+	unsigned int rlm;
+	uint16_t dst_port;
+	uint16_t preds[6];
+};
+
+struct cmd_remove_node {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int data; /* rlm for a queue, otherwise irrelevant */
+};
+
+struct cmd_update_preds {
+	struct cmd base;
+	int node_type;
+	uint16_t preds[6];
+	unsigned int phy;
+};
+
+struct port_properties {
+	struct pp_qos_common_node_properties common;
+	struct pp_qos_parent_node_properties parent;
+	void *ring_addr;
+	size_t ring_size;
+	uint8_t  packet_credit_enable;
+	unsigned int credit;
+};
+
+struct cmd_add_port {
+	struct cmd base;
+	unsigned int phy;
+	struct port_properties prop;
+};
+
+struct cmd_set_port {
+	struct cmd base;
+	unsigned int phy;
+	struct port_properties prop;
+	uint32_t modified;
+};
+
+struct sched_properties {
+	struct pp_qos_common_node_properties common;
+	struct pp_qos_parent_node_properties parent;
+	struct pp_qos_child_node_properties  child;
+};
+
+struct cmd_add_sched {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int parent;
+	uint16_t preds[6];
+	struct sched_properties prop;
+};
+
+struct cmd_set_sched {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int parent;
+	struct sched_properties prop;
+	uint32_t modified;
+};
+
+struct queue_properties {
+	struct pp_qos_common_node_properties common;
+	struct pp_qos_child_node_properties  child;
+	uint8_t  blocked;
+	uint8_t  wred_enable;
+	uint8_t  fixed_drop_prob_enable;
+	unsigned int max_burst;
+	unsigned int queue_wred_min_avg_green;
+	unsigned int queue_wred_max_avg_green;
+	unsigned int queue_wred_slope_green;
+	unsigned int queue_wred_min_avg_yellow;
+	unsigned int queue_wred_max_avg_yellow;
+	unsigned int queue_wred_slope_yellow;
+	unsigned int queue_wred_min_guaranteed;
+	unsigned int queue_wred_max_allowed;
+	unsigned int queue_wred_fixed_drop_prob_green;
+	unsigned int queue_wred_fixed_drop_prob_yellow;
+	unsigned int      rlm;
+};
+
+struct cmd_add_queue {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int parent;
+	unsigned int port;
+	uint16_t preds[6];
+	struct queue_properties prop;
+};
+
+struct cmd_set_queue {
+	struct cmd base;
+	unsigned int phy;
+	struct queue_properties prop;
+	uint32_t modified;
+};
+
+struct cmd_parent_change {
+	struct cmd base;
+	unsigned int phy;
+	int type;
+	int arbitration;
+	int first;
+	unsigned int num;
+};
+
+struct cmd_get_queue_stats {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int rlm;
+	unsigned int addr;
+	struct pp_qos_queue_stat *stat;
+};
+
+struct cmd_get_port_stats {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int addr;
+	struct pp_qos_port_stat *stat;
+};
+
+struct cmd_push_desc {
+	struct cmd base;
+	unsigned int queue;
+	unsigned int size;
+	unsigned int color;
+	unsigned int addr;
+};
+
+struct cmd_get_node_info {
+	struct cmd base;
+	unsigned int phy;
+	unsigned int addr;
+	struct pp_qos_node_info *info;
+};
+
+struct stub_cmd {
+	struct cmd cmd;
+	uint8_t data;
+};
+
+struct cmd_set_shared_group {
+	struct cmd base;
+	unsigned int id;
+	unsigned int limit;
+};
+
+struct cmd_remove_shared_group {
+	struct cmd base;
+	unsigned int id;
+};
+
+union driver_cmd {
+	struct cmd	 cmd;
+	struct stub_cmd  stub;
+	struct cmd_init_logger init_logger;
+	struct cmd_init_qos  init_qos;
+	struct cmd_move  move;
+	struct cmd_update_preds update_preds;
+	struct cmd_remove_node remove_node;
+	struct cmd_add_port add_port;
+	struct cmd_set_port set_port;
+	struct cmd_add_sched add_sched;
+	struct cmd_set_sched set_sched;
+	struct cmd_add_queue add_queue;
+	struct cmd_set_queue set_queue;
+	struct cmd_parent_change parent_change;
+	struct cmd_get_queue_stats queue_stats;
+	struct cmd_get_port_stats port_stats;
+	struct cmd_set_shared_group set_shared_group;
+	struct cmd_remove_shared_group remove_shared_group;
+	struct cmd_push_desc	pushd;
+	struct cmd_get_node_info node_info;
+};
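+
+/*
+ * Every command embeds 'struct cmd' as its first member, so a record
+ * in the command queue can be decoded generically from its header
+ * (sketch only; the reader side is not part of this file):
+ *
+ *	union driver_cmd dcmd;
+ *	// read dcmd.cmd first, then dcmd.cmd.len more bytes follow
+ *	switch (dcmd.cmd.type) {
+ *	case CMD_TYPE_ADD_PORT:
+ *		// handle dcmd.add_port
+ *		break;
+ *	}
+ */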
+
+/******************************************************************************/
+/*                         Driver functions                                   */
+/******************************************************************************/
+
+/*
+ * The following functions create commands in driver format to be
+ * stored in the driver's queues before being sent to the firmware
+ */
+
+/*
+ * Extract ancestors of node from driver's DB
+ */
+static void fill_preds(
+		const struct pp_nodes *nodes,
+		unsigned int phy,
+		uint16_t *preds,
+		size_t size)
+{
+	unsigned int i;
+	const struct qos_node *node;
+
+	i = 0;
+	memset(preds, 0x0, size * sizeof(uint16_t));
+	node = get_const_node_from_phy(nodes, phy);
+	while (node_child(node) && (i < size)) {
+		preds[i] = node->child_prop.parent_phy;
+		node = get_const_node_from_phy(nodes,
+				node->child_prop.parent_phy);
+		i++;
+	}
+}
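+
+/*
+ * For example (hypothetical phys): for a queue whose parent chain is
+ * queue 130 -> sched 40 -> port 3, fill_preds() yields
+ * preds = {40, 3, 0, 0, 0, 0} - preds[0] is the direct parent, each
+ * following entry the next ancestor, zero-filled past the port.
+ */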
+
+static void cmd_init(
+		const struct pp_qos_dev *qdev,
+		struct cmd *cmd,
+		enum cmd_type type,
+		size_t len,
+		unsigned int flags)
+{
+	cmd->flags = flags;
+	cmd->type = type;
+	cmd->len = len;
+	cmd->id = qdev->drvcmds.cmd_id;
+	cmd->fw_id = qdev->drvcmds.cmd_fw_id;
+}
+
+/* TODO - make this less hard-coded */
+void create_init_logger_cmd(struct pp_qos_dev *qdev)
+{
+	struct cmd_init_logger cmd;
+
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_INIT_LOGGER, sizeof(cmd), 0);
+	cmd.addr = qdev->hwconf.fw_logger_start;
+	cmd.mode = UC_LOGGER_MODE_WRITE_HOST_MEM;
+	cmd.level = UC_LOGGER_LEVEL_DEBUG;
+	cmd.num_of_msgs = PPV4_QOS_LOGGER_BUF_SIZE / PPV4_QOS_LOGGER_MSG_SIZE;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_INIT_LOGGER\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id);
+	cmd_queue_put(qdev->drvcmds.cmdq, (uint8_t *)&cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_init_qos_cmd(struct pp_qos_dev *qdev)
+{
+	struct cmd_init_qos cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_INIT_QOS, sizeof(cmd), 0);
+	cmd.qm_ddr_start = qdev->hwconf.qm_ddr_start;
+	cmd.qm_num_pages = qdev->hwconf.qm_num_pages;
+	cmd.wred_total_avail_resources =
+		qdev->hwconf.wred_total_avail_resources;
+	cmd.wred_prioritize_pop = qdev->hwconf.wred_prioritize_pop;
+	cmd.wred_avg_q_size_p = qdev->hwconf.wred_const_p;
+	cmd.wred_max_q_size = qdev->hwconf.wred_max_q_size;
+	cmd.num_of_ports = qdev->max_port + 1;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_INIT_QOS\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id);
+	cmd_queue_put(qdev->drvcmds.cmdq, (uint8_t *)&cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_move_cmd(
+		struct pp_qos_dev *qdev,
+		uint16_t dst,
+		uint16_t src,
+		uint16_t dst_port)
+{
+	struct cmd_move cmd;
+	const struct qos_node *node;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	node = get_const_node_from_phy(qdev->nodes, dst);
+
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_MOVE, sizeof(cmd), 0);
+	cmd.src = src;
+	cmd.dst = dst;
+	cmd.dst_port = dst_port;
+	cmd.node_type = node->type;
+	if (node->type == TYPE_QUEUE)
+		cmd.rlm = node->data.queue.rlm;
+	else
+		cmd.rlm = -1;
+
+	fill_preds(qdev->nodes, dst, cmd.preds, 6);
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_MOVE %u-->%u type:%d, rlm:%d, port:%u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			src,
+			dst,
+			node->type,
+			cmd.rlm,
+			dst_port);
+	cmd_queue_put(qdev->drvcmds.cmdq, (uint8_t *)&cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_remove_node_cmd(
+		struct pp_qos_dev *qdev,
+		enum node_type ntype,
+		unsigned int phy,
+		unsigned int data)
+{
+	struct cmd_remove_node cmd;
+	enum cmd_type ctype;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	switch (ntype) {
+	case TYPE_PORT:
+		ctype = CMD_TYPE_REMOVE_PORT;
+		break;
+	case TYPE_SCHED:
+		ctype = CMD_TYPE_REMOVE_SCHED;
+		break;
+	case TYPE_QUEUE:
+		ctype = CMD_TYPE_REMOVE_QUEUE;
+		break;
+	case TYPE_UNKNOWN:
+		QOS_ASSERT(0, "Unexpected unknow type\n");
+		ctype = CMD_TYPE_REMOVE_NODE;
+		break;
+	default:
+		QOS_LOG_INFO("illegal node type %d\n", ntype);
+		return;
+	}
+
+	cmd_init(qdev, &(cmd.base), ctype, sizeof(cmd), 0);
+	cmd.phy = phy;
+	cmd.data = data;
+
+	QOS_LOG_INFO("cmd %u:%u %s %u rlm %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			cmd_str[ctype], phy, data);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_update_preds_cmd(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	const struct qos_node *node;
+	struct cmd_update_preds cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_UPDATE_PREDECESSORS,
+			sizeof(cmd),
+			0);
+	cmd.phy = phy;
+	fill_preds(qdev->nodes, phy, cmd.preds, 6);
+	node = get_const_node_from_phy(qdev->nodes, phy);
+	cmd.node_type = node->type;
+
+	QOS_LOG_INFO(
+			"cmd %u:%u CMD_TYPE_UPDATE_PREDECESSORS %u:%u-->%u-->%u-->%u-->%u-->%u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy,
+			cmd.preds[0], cmd.preds[1], cmd.preds[2],
+			cmd.preds[3], cmd.preds[4], cmd.preds[5]);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+static void set_cmd_port_properties(
+		struct port_properties *prop,
+		const struct pp_qos_port_conf *conf)
+{
+	prop->common = conf->common_prop;
+	prop->parent = conf->port_parent_prop;
+	prop->packet_credit_enable = !!conf->packet_credit_enable;
+	prop->ring_addr = conf->ring_address;
+	prop->ring_size = conf->ring_size;
+	prop->credit = conf->credit;
+}
+
+static void create_add_port_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_port_conf *conf,
+		unsigned int phy)
+{
+	struct cmd_add_port cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_ADD_PORT, sizeof(cmd), 0);
+	cmd.phy = phy;
+	set_cmd_port_properties(&cmd.prop, conf);
+
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_ADD_PORT %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_set_port_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_port_conf *conf,
+		unsigned int phy,
+		uint32_t modified)
+{
+	struct cmd_set_port cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_NODE_TYPE)) {
+		create_add_port_cmd(qdev, conf, phy);
+	} else {
+		memset(&cmd, 0, sizeof(cmd));
+		cmd_init(qdev, &(cmd.base), CMD_TYPE_SET_PORT, sizeof(cmd), 0);
+		cmd.phy = phy;
+		set_cmd_port_properties(&cmd.prop, conf);
+		cmd.modified = modified;
+		QOS_LOG_INFO("cmd %u:%u CMD_TYPE_SET_PORT %u\n",
+				qdev->drvcmds.cmd_id,
+				qdev->drvcmds.cmd_fw_id,
+				phy);
+		cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+		qdev->drvcmds.cmd_fw_id++;
+	}
+}
+
+static void set_cmd_sched_properties(
+		struct sched_properties *prop,
+		const struct pp_qos_sched_conf *conf)
+{
+	prop->common = conf->common_prop;
+	prop->parent = conf->sched_parent_prop;
+	prop->child = conf->sched_child_prop;
+}
+
+static void create_add_sched_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_sched_conf *conf,
+		unsigned int phy,
+		unsigned int parent)
+{
+	struct cmd_add_sched cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_ADD_SCHED, sizeof(cmd), 0);
+	cmd.phy = phy;
+	cmd.parent = parent;
+	fill_preds(qdev->nodes, phy, cmd.preds, 6);
+	set_cmd_sched_properties(&cmd.prop, conf);
+
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_ADD_SCHED %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id, phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_set_sched_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_sched_conf *conf,
+		unsigned int phy,
+		unsigned int parent,
+		uint32_t modified)
+{
+	struct cmd_set_sched cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_NODE_TYPE)) {
+		create_add_sched_cmd(qdev, conf, phy, parent);
+	} else {
+		memset(&cmd, 0, sizeof(cmd));
+		cmd_init(qdev, &(cmd.base), CMD_TYPE_SET_SCHED, sizeof(cmd), 0);
+		cmd.phy = phy;
+		set_cmd_sched_properties(&cmd.prop, conf);
+		cmd.modified = modified;
+		QOS_LOG_INFO("cmd %u:%u CMD_TYPE_SET_SCHED %u\n",
+				qdev->drvcmds.cmd_id,
+				qdev->drvcmds.cmd_fw_id, phy);
+		cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+		qdev->drvcmds.cmd_fw_id++;
+	}
+}
+
+static void set_cmd_queue_properties(
+		struct queue_properties *prop,
+		const struct pp_qos_queue_conf *conf,
+		unsigned int rlm)
+{
+	prop->common = conf->common_prop;
+	prop->child = conf->queue_child_prop;
+	prop->blocked = !!conf->blocked;
+	prop->wred_enable = !!conf->wred_enable;
+	prop->fixed_drop_prob_enable = !!conf->wred_fixed_drop_prob_enable;
+	prop->max_burst =  conf->max_burst;
+	prop->queue_wred_min_avg_green = conf->queue_wred_min_avg_green;
+	prop->queue_wred_max_avg_green = conf->queue_wred_max_avg_green;
+	prop->queue_wred_slope_green = conf->queue_wred_slope_green;
+	prop->queue_wred_min_avg_yellow = conf->queue_wred_min_avg_yellow;
+	prop->queue_wred_max_avg_yellow = conf->queue_wred_max_avg_yellow;
+	prop->queue_wred_slope_yellow = conf->queue_wred_slope_yellow;
+	prop->queue_wred_min_guaranteed = conf->queue_wred_min_guaranteed;
+	prop->queue_wred_max_allowed = conf->queue_wred_max_allowed;
+	prop->queue_wred_fixed_drop_prob_green =
+		conf->queue_wred_fixed_drop_prob_green;
+	prop->queue_wred_fixed_drop_prob_yellow =
+		conf->queue_wred_fixed_drop_prob_yellow;
+	prop->rlm = rlm;
+}
+
+static void create_add_queue_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_queue_conf *conf,
+		unsigned int phy,
+		unsigned int parent,
+		unsigned int rlm)
+{
+	struct cmd_add_queue cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_ADD_QUEUE,
+			sizeof(cmd),
+			CMD_FLAGS_WRAP_PARENT_SUSPEND_RESUME);
+	cmd.phy = phy;
+	cmd.parent = parent;
+
+	cmd.port = get_port(qdev->nodes, phy);
+
+	fill_preds(qdev->nodes, phy, cmd.preds, 6);
+	set_cmd_queue_properties(&cmd.prop, conf, rlm);
+
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_ADD_QUEUE %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_set_queue_cmd(
+		struct pp_qos_dev *qdev,
+		const struct pp_qos_queue_conf *conf,
+		unsigned int phy,
+		unsigned int parent,
+		unsigned int rlm,
+		uint32_t modified)
+{
+	struct cmd_set_queue cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_NODE_TYPE)) {
+		create_add_queue_cmd(qdev, conf, phy, parent, rlm);
+	} else {
+		memset(&cmd, 0, sizeof(cmd));
+		cmd_init(
+				qdev,
+				&(cmd.base),
+				CMD_TYPE_SET_QUEUE,
+				sizeof(cmd),
+				CMD_FLAGS_WRAP_SUSPEND_RESUME |
+				CMD_FLAGS_WRAP_PARENT_SUSPEND_RESUME);
+		cmd.phy = phy;
+		set_cmd_queue_properties(&cmd.prop, conf, rlm);
+		cmd.modified = modified;
+		QOS_LOG_INFO("cmd %u:%u CMD_TYPE_SET_QUEUE %u\n",
+				qdev->drvcmds.cmd_id,
+				qdev->drvcmds.cmd_fw_id, phy);
+		cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+		qdev->drvcmds.cmd_fw_id++;
+	}
+}
+
+void create_parent_change_cmd(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	struct cmd_parent_change cmd;
+	const struct qos_node *node;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	node = get_const_node_from_phy(qdev->nodes, phy);
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_PARENT_CHANGE, sizeof(cmd), 0);
+	cmd.phy = phy;
+	cmd.type = node->type;
+	cmd.arbitration = node->parent_prop.arbitration;
+	cmd.first = node->parent_prop.first_child_phy;
+	cmd.num = node->parent_prop.num_of_children;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_PARENT_CHANGE %u first:%u num:%d\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy,
+			cmd.first,
+			cmd.num);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_get_port_stats_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int addr,
+		struct pp_qos_port_stat *pstat)
+{
+	struct cmd_get_port_stats cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_GET_PORT_STATS,
+			sizeof(cmd),
+			CMD_FLAGS_POST_PROCESS);
+	cmd.phy = phy;
+	cmd.addr =  addr;
+	cmd.stat = pstat;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_GET_PORT_STATS %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_get_queue_stats_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int rlm,
+		unsigned int addr,
+		struct pp_qos_queue_stat *qstat)
+{
+	struct cmd_get_queue_stats cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_GET_QUEUE_STATS,
+			sizeof(cmd),
+			CMD_FLAGS_POST_PROCESS);
+	cmd.phy = phy;
+	cmd.rlm = rlm;
+	cmd.addr =  addr;
+	cmd.stat = qstat;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_GET_QUEUE_STATS %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_get_node_info_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int addr,
+		struct pp_qos_node_info *info)
+{
+	struct cmd_get_node_info cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_GET_NODE_INFO,
+			sizeof(cmd),
+			CMD_FLAGS_POST_PROCESS);
+	cmd.phy = phy;
+	cmd.addr =  addr;
+	cmd.info = info;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_GET_NODE_INFO %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			phy);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+static void _create_set_shared_group_cmd(struct pp_qos_dev *qdev,
+		enum cmd_type ctype,
+		unsigned int id,
+		unsigned int limit)
+{
+	struct cmd_set_shared_group cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	cmd_init(qdev, &(cmd.base), ctype, sizeof(cmd), 0);
+	cmd.id = id;
+	cmd.limit = limit;
+	QOS_LOG("cmd %u:%u %s id %u limit %u\n",
+		qdev->drvcmds.cmd_id,
+		qdev->drvcmds.cmd_fw_id,
+		cmd_str[ctype],
+		id, limit);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_add_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id,
+		unsigned int limit)
+{
+	_create_set_shared_group_cmd(qdev, CMD_TYPE_ADD_SHARED_GROUP,
+			id, limit);
+}
+
+void create_set_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id,
+		unsigned int limit)
+{
+	_create_set_shared_group_cmd(qdev, CMD_TYPE_SET_SHARED_GROUP,
+			id, limit);
+}
+
+void create_remove_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id)
+{
+	struct cmd_remove_shared_group cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	cmd_init(
+			qdev,
+			&(cmd.base),
+			CMD_TYPE_REMOVE_SHARED_GROUP,
+			sizeof(cmd), 0);
+	cmd.id = id;
+	QOS_LOG_INFO("cmd %u:%u CMD_TYPE_REMOVE_SHARED_GROUP id %u\n",
+			qdev->drvcmds.cmd_id,
+			qdev->drvcmds.cmd_fw_id,
+			id);
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+
+void create_push_desc_cmd(struct pp_qos_dev *qdev, unsigned int queue,
+		unsigned int size, unsigned int color, unsigned int addr)
+{
+	struct cmd_push_desc cmd;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	cmd_init(qdev, &(cmd.base), CMD_TYPE_PUSH_DESC, sizeof(cmd), 0);
+	cmd.queue = queue;
+	cmd.size = size;
+	cmd.color = color;
+	cmd.addr = addr;
+	cmd_queue_put(qdev->drvcmds.cmdq, &cmd, sizeof(cmd));
+	qdev->drvcmds.cmd_fw_id++;
+}
+/******************************************************************************/
+/*                                 FW CMDS                                    */
+/******************************************************************************/
+
+struct fw_set_common {
+	uint32_t valid;
+	int suspend;
+	unsigned int bw_limit;
+	unsigned int shared_bw_group;
+};
+
+struct fw_set_parent {
+	uint32_t valid;
+	int best_effort_enable;
+	uint16_t first;
+	uint16_t last;
+	uint16_t first_wrr;
+};
+
+struct fw_set_child {
+	uint32_t valid;
+	uint32_t bw_share;
+	uint16_t preds[6];
+};
+
+struct fw_set_port {
+	uint32_t valid;
+	void	 *ring_addr;
+	size_t	 ring_size;
+};
+
+struct fw_set_sched {
+	uint32_t valid;
+};
+
+struct fw_set_queue {
+	uint32_t valid;
+	unsigned int rlm;
+	int active;
+	uint32_t disable;
+	unsigned int queue_wred_min_avg_green;
+	unsigned int queue_wred_max_avg_green;
+	unsigned int queue_wred_slope_green;
+	unsigned int queue_wred_min_avg_yellow;
+	unsigned int queue_wred_max_avg_yellow;
+	unsigned int queue_wred_slope_yellow;
+	unsigned int queue_wred_min_guaranteed;
+	unsigned int queue_wred_max_allowed;
+	unsigned int queue_wred_fixed_drop_prob_green;
+	unsigned int queue_wred_fixed_drop_prob_yellow;
+};
+
+struct fw_internal {
+	struct fw_set_common common;
+	struct fw_set_parent parent;
+	struct fw_set_child  child;
+	union {
+		struct fw_set_port port;
+		struct fw_set_sched sched;
+		struct fw_set_queue queue;
+	} type_data;
+	unsigned int	pushed;
+};
+
+/******************************************************************************/
+/*                         FW write functions                                 */
+/******************************************************************************/
+
+int init_fwdata_internals(struct pp_qos_dev *qdev)
+{
+	qdev->fwbuf = QOS_MALLOC(sizeof(struct fw_internal));
+	if (qdev->fwbuf) {
+		memset(qdev->fwbuf, 0, sizeof(struct fw_internal));
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+void clean_fwdata_internals(struct pp_qos_dev *qdev)
+{
+	if (qdev->fwbuf)
+		QOS_FREE(qdev->fwbuf);
+}
+
+/*
+ * Following functions translate driver commands to firmware
+ * commands
+ */
+static uint32_t *fw_write_init_logger_cmd(
+		uint32_t *buf,
+		const struct cmd_init_logger *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_INIT_UC_LOGGER);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(4);
+	*buf++ = qos_u32_to_uc((uintptr_t)cmd->addr & 0xFFFFFFFF);
+	*buf++ = qos_u32_to_uc(cmd->mode);
+	*buf++ = qos_u32_to_uc(cmd->level);
+	*buf++ = qos_u32_to_uc(cmd->num_of_msgs);
+	return buf;
+}
+
+static uint32_t *fw_write_init_qos_cmd(
+		uint32_t *buf,
+		const struct cmd_init_qos *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_INIT_QOS);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(7);
+	*buf++ = qos_u32_to_uc(cmd->qm_ddr_start & 0xFFFFFFFF);
+	*buf++ = qos_u32_to_uc(cmd->qm_num_pages);
+	*buf++ = qos_u32_to_uc(cmd->wred_total_avail_resources);
+	*buf++ = qos_u32_to_uc(cmd->wred_prioritize_pop);
+	*buf++ = qos_u32_to_uc(cmd->wred_avg_q_size_p);
+	*buf++ = qos_u32_to_uc(cmd->wred_max_q_size);
+	*buf++ = qos_u32_to_uc(cmd->num_of_ports);
+	return buf;
+}
+
+static uint32_t *fw_write_add_port_cmd(
+		uint32_t *buf,
+		const struct cmd_add_port *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_ADD_PORT);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(13);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(!!cmd->prop.common.suspended);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.bandwidth_limit);
+	*buf++ = qos_u32_to_uc(!!cmd->prop.parent.best_effort_enable);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.shared_bandwidth_group);
+	*buf++ = qos_u32_to_uc(cmd->prop.packet_credit_enable);
+	*buf++ = qos_u32_to_uc(cmd->prop.ring_size);
+	*buf++ = qos_u32_to_uc(GET_ADDRESS_HIGH(cmd->prop.ring_addr));
+	*buf++ = qos_u32_to_uc(((uintptr_t)cmd->prop.ring_addr) & 0xFFFFFFFF);
+	*buf++ = qos_u32_to_uc(cmd->prop.credit);
+	return buf;
+}
+
+static uint32_t *fw_write_set_port_cmd(
+		uint32_t *buf,
+		unsigned int phy,
+		uint32_t flags,
+		const struct fw_set_common *common,
+		const struct fw_set_parent *parent,
+		const struct fw_set_port *port)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_SET_PORT);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(13);
+	*buf++ = qos_u32_to_uc(phy);
+	*buf++ = qos_u32_to_uc(common->valid | parent->valid | port->valid);
+	*buf++ = qos_u32_to_uc(common->suspend);
+	*buf++ = qos_u32_to_uc(parent->first);
+	*buf++ = qos_u32_to_uc(parent->last);
+	*buf++ = qos_u32_to_uc(common->bw_limit);
+	*buf++ = qos_u32_to_uc(parent->best_effort_enable);
+	*buf++ = qos_u32_to_uc(parent->first_wrr);
+	*buf++ = qos_u32_to_uc(common->shared_bw_group);
+	*buf++ = qos_u32_to_uc(port->valid);
+	*buf++ = qos_u32_to_uc(port->ring_size);
+	*buf++ = qos_u32_to_uc(GET_ADDRESS_HIGH(port->ring_addr));
+	*buf++ = qos_u32_to_uc(((uintptr_t)port->ring_addr) & 0xFFFFFFFF);
+	return buf;
+}
+
+static uint32_t *fw_write_add_sched_cmd(
+		uint32_t *buf,
+		const struct cmd_add_sched *cmd,
+		uint32_t flags)
+{
+	unsigned int i;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_ADD_SCHEDULER);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(14);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(!!cmd->prop.common.suspended);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.bandwidth_limit);
+	*buf++ = qos_u32_to_uc(!!cmd->prop.parent.best_effort_enable);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.shared_bandwidth_group);
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(cmd->preds[i]);
+
+	return buf;
+}
+
+static uint32_t *fw_write_set_sched_cmd(
+		uint32_t *buf,
+		unsigned int phy,
+		uint32_t flags,
+		const struct fw_set_common *common,
+		const struct fw_set_parent *parent,
+		const struct fw_set_child *child,
+		const struct fw_set_sched *sched)
+{
+	unsigned int i;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_SET_SCHEDULER);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(15);
+	*buf++ = qos_u32_to_uc(phy);
+	*buf++ = qos_u32_to_uc(common->valid | parent->valid |
+			child->valid | sched->valid);
+	*buf++ = qos_u32_to_uc(common->suspend);
+	*buf++ = qos_u32_to_uc(parent->first);
+	*buf++ = qos_u32_to_uc(parent->last);
+	*buf++ = qos_u32_to_uc(common->bw_limit);
+	*buf++ = qos_u32_to_uc(parent->best_effort_enable);
+	*buf++ = qos_u32_to_uc(parent->first_wrr);
+	*buf++ = qos_u32_to_uc(common->shared_bw_group);
+
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(child->preds[i]);
+
+	return buf;
+}
+
+static uint32_t *fw_write_add_queue_cmd(
+		uint32_t *buf,
+		const struct cmd_add_queue *cmd,
+		uint32_t flags)
+{
+	unsigned int i;
+	uint32_t disable;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_ADD_QUEUE);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(24);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(cmd->port);
+	*buf++ = qos_u32_to_uc(cmd->prop.rlm);
+	*buf++ = qos_u32_to_uc(!!cmd->prop.common.suspended);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.bandwidth_limit);
+	*buf++ = qos_u32_to_uc(cmd->prop.common.shared_bandwidth_group);
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(cmd->preds[i]);
+	*buf++ = qos_u32_to_uc(!cmd->prop.blocked);
+
+	disable = 0;
+	if (!cmd->prop.wred_enable)
+		QOS_BITS_SET(disable, 1);
+	if (cmd->prop.fixed_drop_prob_enable)
+		QOS_BITS_SET(disable, 8);
+	*buf++ = qos_u32_to_uc(disable);
+
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_fixed_drop_prob_green);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_fixed_drop_prob_yellow);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_min_avg_yellow);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_max_avg_yellow);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_slope_yellow);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_min_avg_green);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_max_avg_green);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_slope_green);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_min_guaranteed);
+	*buf++ = qos_u32_to_uc(cmd->prop.queue_wred_max_allowed);
+	return buf;
+}
+
+static uint32_t *fw_write_set_queue_cmd(
+		uint32_t *buf,
+		unsigned int phy,
+		uint32_t flags,
+		const struct fw_set_common *common,
+		const struct fw_set_child *child,
+		const struct fw_set_queue *queue)
+{
+	unsigned int i;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_SET_QUEUE);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(25);
+	*buf++ = qos_u32_to_uc(phy);
+	*buf++ = qos_u32_to_uc(queue->rlm);
+	*buf++ = qos_u32_to_uc(common->valid | child->valid);
+	*buf++ = qos_u32_to_uc(common->suspend);
+	*buf++ = qos_u32_to_uc(common->bw_limit);
+	*buf++ = qos_u32_to_uc(common->shared_bw_group);
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(child->preds[i]);
+	*buf++ = qos_u32_to_uc(queue->valid);
+	*buf++ = qos_u32_to_uc(queue->active);
+	*buf++ = qos_u32_to_uc(queue->disable);
+
+	*buf++ = qos_u32_to_uc(queue->queue_wred_fixed_drop_prob_green);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_fixed_drop_prob_yellow);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_min_avg_yellow);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_max_avg_yellow);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_slope_yellow);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_min_avg_green);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_max_avg_green);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_slope_green);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_min_guaranteed);
+	*buf++ = qos_u32_to_uc(queue->queue_wred_max_allowed);
+
+	return buf;
+}
+
+static uint32_t *fw_write_move_sched_cmd(
+		uint32_t *buf,
+		const struct cmd_move *cmd,
+		uint32_t flags)
+{
+	unsigned int i;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_MOVE_SCHEDULER);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(8);
+	*buf++ = qos_u32_to_uc(cmd->src);
+	*buf++ = qos_u32_to_uc(cmd->dst);
+
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(cmd->preds[i]);
+
+	return buf;
+}
+
+static uint32_t *fw_write_move_queue_cmd(
+		uint32_t *buf,
+		const struct cmd_move *cmd,
+		uint32_t flags)
+{
+	unsigned int i;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_MOVE_QUEUE);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(10);
+	*buf++ = qos_u32_to_uc(cmd->src);
+	*buf++ = qos_u32_to_uc(cmd->dst);
+	*buf++ = qos_u32_to_uc(cmd->dst_port);
+	*buf++ = qos_u32_to_uc(cmd->rlm);
+
+	for (i = 0; i < 6; ++i)
+		*buf++ = qos_u32_to_uc(cmd->preds[i]);
+
+	return buf;
+}
+
+static uint32_t *fw_write_remove_queue_cmd(
+		uint32_t *buf,
+		const struct cmd_remove_node *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_REMOVE_QUEUE);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(2);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(cmd->data);
+	return buf;
+}
+
+static uint32_t *fw_write_remove_sched_cmd(
+		uint32_t *buf,
+		const struct cmd_remove_node *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_REMOVE_SCHEDULER);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(1);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	return buf;
+}
+
+static uint32_t *fw_write_remove_port_cmd(
+		uint32_t *buf,
+		const struct cmd_remove_node *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_REMOVE_PORT);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(1);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	return buf;
+}
+
+static uint32_t *fw_write_get_queue_stats(
+		uint32_t *buf,
+		const struct cmd_get_queue_stats *cmd,
+		uint32_t flags)
+{
+	uint32_t reset;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_GET_QUEUE_STATS);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(4);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(cmd->rlm);
+	*buf++ = qos_u32_to_uc((uintptr_t)cmd->addr & 0xFFFFFFFF);
+	if (cmd->stat->reset)
+		reset = QUEUE_STATS_CLEAR_Q_AVG_SIZE_BYTES |
+			QUEUE_STATS_CLEAR_DROP_P_YELLOW |
+			QUEUE_STATS_CLEAR_DROP_P_GREEN |
+			QUEUE_STATS_CLEAR_TOTAL_BYTES_ADDED |
+			QUEUE_STATS_CLEAR_TOTAL_ACCEPTS |
+			QUEUE_STATS_CLEAR_TOTAL_DROPS |
+			QUEUE_STATS_CLEAR_TOTAL_DROPPED_BYTES |
+			QUEUE_STATS_CLEAR_TOTAL_RED_DROPS;
+	else
+		reset = QUEUE_STATS_CLEAR_NONE;
+	*buf++ = qos_u32_to_uc(reset);
+
+	return buf;
+}
+
+static uint32_t *fw_write_get_port_stats(
+		uint32_t *buf,
+		const struct cmd_get_port_stats *cmd,
+		uint32_t flags)
+{
+	uint32_t reset;
+
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_GET_PORT_STATS);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(3);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc((uintptr_t)cmd->addr & 0xFFFFFFFF);
+	if (cmd->stat->reset)
+		reset = PORT_STATS_CLEAR_ALL;
+	else
+		reset = PORT_STATS_CLEAR_NONE;
+	*buf++ = qos_u32_to_uc(reset);
+	return buf;
+}
+
+static uint32_t *fw_write_set_shared_group(
+		uint32_t *buf,
+		enum cmd_type ctype,
+		const struct cmd_set_shared_group *cmd,
+		uint32_t flags)
+{
+	uint32_t uc_cmd;
+
+	if (ctype == CMD_TYPE_ADD_SHARED_GROUP)
+		uc_cmd = UC_QOS_COMMAND_ADD_SHARED_BW_LIMIT_GROUP;
+	else
+		uc_cmd = UC_QOS_COMMAND_SET_SHARED_BW_LIMIT_GROUP;
+
+	*buf++ = qos_u32_to_uc(uc_cmd);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(2);
+	*buf++ = qos_u32_to_uc(cmd->id);
+	*buf++ = qos_u32_to_uc(cmd->limit);
+	return buf;
+}
+
+static uint32_t *fw_write_remove_shared_group(uint32_t *buf,
+		const struct cmd_remove_shared_group *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_REMOVE_SHARED_BW_LIMIT_GROUP);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(1);
+	*buf++ = qos_u32_to_uc(cmd->id);
+	return buf;
+}
+
+static uint32_t *fw_write_push_desc(
+		uint32_t *buf,
+		const struct cmd_push_desc *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_DEBUG_PUSH_DESC);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(6);
+	*buf++ = qos_u32_to_uc(cmd->queue);
+	*buf++ = qos_u32_to_uc(cmd->size);
+	*buf++ = qos_u32_to_uc(cmd->color);
+	*buf++ = qos_u32_to_uc(cmd->addr);
+	*buf++ = qos_u32_to_uc(0);
+	*buf++ = qos_u32_to_uc(0);
+	return buf;
+}
+
+static uint32_t *fw_write_get_node_info(
+		uint32_t *buf,
+		const struct cmd_get_node_info *cmd,
+		uint32_t flags)
+{
+	*buf++ = qos_u32_to_uc(UC_QOS_COMMAND_GET_NODE_INFO);
+	*buf++ = qos_u32_to_uc(flags);
+	*buf++ = qos_u32_to_uc(2);
+	*buf++ = qos_u32_to_uc(cmd->phy);
+	*buf++ = qos_u32_to_uc(cmd->addr);
+	return buf;
+}
+/******************************************************************************/
+/*                                FW wrappers                                 */
+/******************************************************************************/
+
+static void set_common(
+		const struct pp_qos_common_node_properties *conf,
+		struct fw_set_common *common,
+		uint32_t modified)
+{
+	uint32_t valid;
+
+	valid = 0;
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_SUSPENDED)) {
+		QOS_BITS_SET(valid, TSCD_NODE_CONF_SUSPEND_RESUME);
+		common->suspend = conf->suspended;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_BANDWIDTH_LIMIT)) {
+		QOS_BITS_SET(valid, TSCD_NODE_CONF_BW_LIMIT);
+		common->bw_limit = conf->bandwidth_limit;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_SHARED_GROUP_LIMIT)) {
+		QOS_BITS_SET(valid, TSCD_NODE_CONF_BW_LIMIT);
+		common->shared_bw_group = conf->shared_bandwidth_group;
+	}
+	common->valid = valid;
+}
+
+static void set_parent(
+		const struct pp_qos_parent_node_properties *conf,
+		struct fw_set_parent *parent,
+		uint32_t modified)
+{
+	uint32_t valid;
+
+	valid = 0;
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_BEST_EFFORT)) {
+		QOS_BITS_SET(valid, TSCD_NODE_CONF_BEST_EFFORT_ENABLE);
+		parent->best_effort_enable = conf->best_effort_enable;
+	}
+	parent->valid = valid;
+}
+
+static void set_child(
+		const struct pp_qos_child_node_properties *conf,
+		struct fw_set_child *child,
+		uint32_t modified)
+{
+	uint32_t valid;
+
+	valid = 0;
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_VIRT_BW_SHARE)) {
+		QOS_BITS_SET(valid, TSCD_NODE_CONF_SHARED_BWL_GROUP);
+		child->bw_share = conf->bandwidth_share;
+	}
+	child->valid = valid;
+}
+
+static void set_port_specific(
+		const struct port_properties *conf,
+		struct fw_set_port *port,
+		uint32_t modified)
+{
+	uint32_t valid;
+
+	valid = 0;
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_RING_ADDRESS)) {
+		QOS_BITS_SET(valid,
+				PORT_CONF_RING_ADDRESS_HIGH |
+				PORT_CONF_RING_ADDRESS_LOW);
+		port->ring_addr = conf->ring_addr;
+	}
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_RING_SIZE)) {
+		QOS_BITS_SET(valid, PORT_CONF_RING_SIZE);
+		port->ring_size = conf->ring_size;
+	}
+	port->valid = valid;
+}
+
+static uint32_t *set_port_cmd_wrapper(
+		struct fw_internal *fwdata,
+		uint32_t *buf,
+		const struct cmd_set_port *cmd,
+		uint32_t flags)
+{
+	uint32_t modified;
+
+	modified = cmd->modified;
+	set_common(&cmd->prop.common, &fwdata->common, modified);
+	set_parent(&cmd->prop.parent, &fwdata->parent, modified);
+	set_port_specific(&cmd->prop, &fwdata->type_data.port, modified);
+	fwdata->type_data.port.valid = 0;
+
+	return fw_write_set_port_cmd(
+			buf,
+			cmd->phy,
+			flags,
+			&fwdata->common,
+			&fwdata->parent,
+			&fwdata->type_data.port);
+}
+
+static uint32_t *set_sched_cmd_wrapper(
+		struct fw_internal *fwdata,
+		uint32_t *buf,
+		const struct cmd_set_sched *cmd,
+		uint32_t flags)
+{
+	uint32_t modified;
+
+	modified = cmd->modified;
+	set_common(&cmd->prop.common, &fwdata->common, modified);
+	set_parent(&cmd->prop.parent, &fwdata->parent, modified);
+	set_child(&cmd->prop.child, &fwdata->child, modified);
+	fwdata->type_data.sched.valid = 0;
+	return fw_write_set_sched_cmd(
+			buf,
+			cmd->phy,
+			flags,
+			&fwdata->common,
+			&fwdata->parent,
+			&fwdata->child,
+			&fwdata->type_data.sched);
+}
+
+/*
+ * Topological changes such as a first/last child change or a change
+ * of predecessors are not manifested through this path. They are
+ * manifested through the CMD_TYPE_PARENT_CHANGE and
+ * CMD_TYPE_UPDATE_PREDECESSORS driver commands, which call
+ * fw_write_set_node_cmd.
+ * So the only thing that needs fw_write_set_node_cmd on this path is
+ * a suspend/resume modification.
+ */
+static uint32_t *set_queue_cmd_wrapper(
+		struct fw_internal *fwdata,
+		uint32_t *buf,
+		const struct cmd_set_queue *cmd,
+		uint32_t flags)
+{
+	uint32_t modified;
+	uint32_t valid;
+	uint32_t disable;
+	struct fw_set_queue *queue;
+
+	queue = &fwdata->type_data.queue;
+	modified = cmd->modified;
+	set_common(&cmd->prop.common, &fwdata->common, modified);
+	set_child(&cmd->prop.child, &fwdata->child, modified);
+
+	valid = 0;
+	queue->rlm = cmd->prop.rlm;
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_BLOCKED)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_ACTIVE_Q);
+		queue->active = !cmd->prop.blocked;
+	}
+
+	disable = 0;
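+	/*
+	 * "disable" is a bitmask handed to the WRED block; judging from
+	 * the flag handling just below, bit 0 disables WRED for this
+	 * queue and bit 3 selects fixed drop probability mode. This
+	 * reading is inferred from the driver code, not from a firmware
+	 * spec.
+	 */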
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_ENABLE)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_DISABLE);
+		if (!cmd->prop.wred_enable)
+			QOS_BITS_SET(disable, BIT(0));
+	}
+	if (QOS_BITS_IS_SET(
+				modified,
+				QOS_MODIFIED_WRED_FIXED_DROP_PROB_ENABLE)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_DISABLE);
+		if (cmd->prop.fixed_drop_prob_enable)
+			QOS_BITS_SET(disable, BIT(3));
+	}
+	queue->disable = disable;
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_FIXED_GREEN_PROB)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_FIXED_GREEN_DROP_P);
+		queue->queue_wred_fixed_drop_prob_green =
+			cmd->prop.queue_wred_fixed_drop_prob_green;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_FIXED_YELLOW_PROB)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_FIXED_YELLOW_DROP_P);
+		queue->queue_wred_fixed_drop_prob_yellow =
+			cmd->prop.queue_wred_fixed_drop_prob_yellow;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MIN_YELLOW)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MIN_AVG_YELLOW);
+		queue->queue_wred_min_avg_yellow =
+			cmd->prop.queue_wred_min_avg_yellow;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MAX_YELLOW)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MAX_AVG_YELLOW);
+		queue->queue_wred_max_avg_yellow =
+			cmd->prop.queue_wred_max_avg_yellow;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_SLOPE_YELLOW)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_SLOPE_YELLOW);
+		queue->queue_wred_slope_yellow =
+			cmd->prop.queue_wred_slope_yellow;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MIN_GREEN)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MIN_AVG_GREEN);
+		queue->queue_wred_min_avg_green =
+			cmd->prop.queue_wred_min_avg_green;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MAX_GREEN)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MAX_AVG_GREEN);
+		queue->queue_wred_max_avg_green =
+			cmd->prop.queue_wred_max_avg_green;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_SLOPE_GREEN)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_SLOPE_GREEN);
+		queue->queue_wred_slope_green =
+			cmd->prop.queue_wred_slope_green;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MIN_GUARANTEED)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MIN_GUARANTEED);
+		queue->queue_wred_min_guaranteed =
+			cmd->prop.queue_wred_min_guaranteed;
+	}
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_WRED_MAX_ALLOWED)) {
+		QOS_BITS_SET(valid, WRED_QUEUE_CONF_MAX_ALLOWED);
+		queue->queue_wred_max_allowed =
+			cmd->prop.queue_wred_max_allowed;
+	}
+
+	queue->valid = valid;
+	return fw_write_set_queue_cmd(
+			buf,
+			cmd->phy,
+			flags,
+			&fwdata->common,
+			&fwdata->child,
+			queue);
+}
+
+static uint32_t *parent_change_cmd_wrapper(
+		struct fw_internal *fwdata,
+		uint32_t *buf,
+		const struct cmd_parent_change *cmd,
+		uint32_t flags)
+{
+	fwdata->parent.valid =
+		TSCD_NODE_CONF_FIRST_CHILD |
+		TSCD_NODE_CONF_LAST_CHILD |
+		TSCD_NODE_CONF_FIRST_WRR_NODE;
+	fwdata->parent.first = cmd->first;
+	fwdata->parent.last = cmd->first + cmd->num - 1;
+	if (cmd->arbitration == PP_QOS_ARBITRATION_WSP)
+		fwdata->parent.first_wrr = 0;
+	else
+		fwdata->parent.first_wrr = cmd->first;
+
+	fwdata->common.valid = 0;
+	if (cmd->type == TYPE_PORT) {
+		fwdata->type_data.port.valid  = 0;
+		return fw_write_set_port_cmd(
+				buf,
+				cmd->phy,
+				flags,
+				&fwdata->common,
+				&fwdata->parent,
+				&fwdata->type_data.port);
+	} else {
+		fwdata->child.valid = 0;
+		fwdata->type_data.sched.valid = 0;
+		return fw_write_set_sched_cmd(
+				buf,
+				cmd->phy,
+				flags,
+				&fwdata->common,
+				&fwdata->parent,
+				&fwdata->child,
+				&fwdata->type_data.sched);
+	}
+}
+
+static uint32_t *update_preds_cmd_wrapper(
+		struct fw_internal *fwdata,
+		uint32_t *buf,
+		const struct cmd_update_preds *cmd,
+		uint32_t flags)
+{
+	unsigned int i;
+
+	fwdata->common.valid = 0;
+	fwdata->child.valid = TSCD_NODE_CONF_PREDECESSOR_0 |
+		TSCD_NODE_CONF_PREDECESSOR_1 |
+		TSCD_NODE_CONF_PREDECESSOR_2 |
+		TSCD_NODE_CONF_PREDECESSOR_3 |
+		TSCD_NODE_CONF_PREDECESSOR_4 |
+		TSCD_NODE_CONF_PREDECESSOR_5;
+	for (i = 0; i < 6; ++i)
+		fwdata->child.preds[i] = cmd->preds[i];
+
+	if (cmd->node_type == TYPE_SCHED) {
+		fwdata->type_data.sched.valid  = 0;
+		fwdata->parent.valid = 0;
+		return fw_write_set_sched_cmd(
+				buf,
+				cmd->phy,
+				flags,
+				&fwdata->common,
+				&fwdata->parent,
+				&fwdata->child,
+				&fwdata->type_data.sched);
+	} else {
+		fwdata->type_data.queue.valid = 0;
+		return fw_write_set_queue_cmd(
+				buf,
+				cmd->phy,
+				flags,
+				&fwdata->common,
+				&fwdata->child,
+				&fwdata->type_data.queue);
+	}
+}
+
+/*
+ * Signal the firmware to read and execute commands from the cmd
+ * buffer.
+ * This is done using the mailbox bundle; refer to the driver's design
+ * document for further information.
+ * Since this is the only signal sent from the driver to the firmware,
+ * the value of DRV_SIGNAL is insignificant.
+ */
+#define DRV_SIGNAL (2U)
+void signal_uc(struct pp_qos_dev *qdev)
+{
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+#ifndef NO_FW
+	iowrite32(DRV_SIGNAL, qdev->fwcom.mbx_to_uc + 4);
+	QOS_LOG("signal uc was called\n");
+#endif
+}
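+
+/*
+ * A sketch of the doorbell convention as read from the code above:
+ * the driver writes DRV_SIGNAL to the second 32-bit register of the
+ * to-uc mailbox bundle (mbx_to_uc + 4); since the value itself is
+ * insignificant, any write to that register appears to act as the
+ * doorbell. The rest of the 24-byte bundle is not touched on this
+ * path.
+ */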
+
+/******************************************************************************/
+/*                                Engine                                      */
+/******************************************************************************/
+
+/*
+ * Translation to user format and sanity checks on node info
+ * obtained from firmware
+ */
+static void post_process_get_node_info(
+		struct pp_qos_dev *qdev,
+		struct cmd_get_node_info *cmd)
+{
+	struct pp_qos_node_info *info;
+	struct hw_node_info_s *fw_info;
+	struct qos_node *node;
+	struct qos_node *child;
+	uint16_t preds[6];
+	uint32_t *fw_preds;
+	unsigned int first;
+	unsigned int phy;
+	unsigned int id;
+	unsigned int i;
+	unsigned int num;
+	unsigned int port;
+	int reached_port;
+
+	port = PPV4_QOS_INVALID;
+	node = get_node_from_phy(qdev->nodes, cmd->phy);
+	QOS_ASSERT(node_used(node), "Node %u is not used\n", cmd->phy);
+	fw_info = (struct hw_node_info_s *)(qdev->stat);
+	info = cmd->info;
+	memset(info, 0xFF, sizeof(*info));
+
+	QOS_ASSERT(node->type != TYPE_UNKNOWN,
+			"Node %u has unknown type\n",
+			cmd->phy);
+	info->type = node->type - 1;
+	info->is_internal = node_internal(node);
+	QOS_ASSERT(node_suspended(node) == fw_info->is_suspended,
+			"Node %u suspend status according to driver is %d and according to HW is %d\n",
+			cmd->phy, node_suspended(node), fw_info->is_suspended);
+	info->is_suspended = fw_info->is_suspended;
+
+	if (node_parent(node)) {
+		first = fw_info->first_child;
+		if (first == 0) {
+			first = QOS_INVALID_PHY;
+			num = 0;
+		} else {
+			num = fw_info->last_child - first + 1;
+			child = get_node_from_phy(qdev->nodes, first);
+		}
+		QOS_ASSERT(node->parent_prop.first_child_phy == first,
+				"Driver has %u as the phy of node's %u first child, while HW has %u\n",
+				node->parent_prop.first_child_phy,
+				cmd->phy,
+				first);
+
+		QOS_ASSERT(node->parent_prop.num_of_children == num,
+				"Driver has %u as the number of children of node %u, while HW has %u\n",
+				node->parent_prop.num_of_children,
+				cmd->phy,
+				num);
+
+		for (i = 0; i < num; ++i) {
+			phy = i + first;
+			id = get_id_from_phy(qdev->mapping, phy);
+			QOS_ASSERT(QOS_ID_VALID(id),
+					"Child of %u with phy %u has no valid id\n",
+					cmd->phy, phy);
+			QOS_ASSERT(node_used(child),
+					"Child node with phy %u and id %u is not used\n",
+					phy,
+					id);
+			info->children[i].phy = phy;
+			info->children[i].id = id;
+			++child;
+		}
+	}
+
+	if (!node_port(node)) {
+		fill_preds(qdev->nodes, cmd->phy, preds, 6);
+		fw_preds = &(fw_info->predecessor0);
+		reached_port = 0;
+		for (i = 0; i < 6; ++i) {
+			QOS_ASSERT(preds[i] == *fw_preds,
+					"Driver has %u as predecessor %u of node %u, while HW has %u\n",
+					preds[i],
+					i,
+					cmd->phy,
+					*fw_preds);
+
+			if (!reached_port) {
+				info->preds[i].phy = preds[i];
+				id = get_id_from_phy(qdev->mapping, preds[i]);
+				QOS_ASSERT(QOS_ID_VALID(id),
+						"Pred with phy %u has no valid id\n",
+						preds[i]);
+				info->preds[i].id = id;
+				if (preds[i] <= qdev->max_port) {
+					reached_port = 1;
+					port = preds[i];
+				}
+			} else {
+				info->preds[i].phy = PPV4_QOS_INVALID;
+			}
+			++fw_preds;
+		}
+	}
+
+	if (node_queue(node)) {
+		QOS_ASSERT(node->data.queue.rlm == fw_info->queue_physical_id,
+				"Node %u physical queue is %u according to driver and %u according to HW\n",
+				cmd->phy, node->data.queue.rlm,
+				fw_info->queue_physical_id);
+		QOS_ASSERT(port == fw_info->queue_port,
+				"Driver has %u as node's %u port, while HW has %u\n",
+				port,
+				cmd->phy,
+				fw_info->queue_port);
+
+		info->queue_physical_id = fw_info->queue_physical_id;
+		info->port = fw_info->queue_port;
+	}
+
+	QOS_ASSERT(fw_info->bw_limit == node->bandwidth_limit,
+			"Driver has %u as node's %u bandwidth limit, while HW has %u\n",
+			node->bandwidth_limit,
+			cmd->phy,
+			fw_info->bw_limit);
+
+	info->bw_limit = fw_info->bw_limit;
+}
+
+/*
+ * Commands that are marked with POST_PROCESS reach here for further
+ * processing before returning to the client
+ */
+static void post_process(struct pp_qos_dev *qdev, union driver_cmd *dcmd)
+{
+	enum cmd_type  type;
+	struct pp_qos_queue_stat *qstat;
+	struct pp_qos_port_stat *pstat;
+	struct queue_stats_s *fw_qstat;
+	struct port_stats_s *fw_pstat;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	type = dcmd->cmd.type;
+	switch (type) {
+	case CMD_TYPE_GET_QUEUE_STATS:
+		fw_qstat = (struct queue_stats_s *)(qdev->stat);
+		qstat = dcmd->queue_stats.stat;
+		qstat->queue_packets_occupancy = fw_qstat->queue_size_entries;
+		qstat->queue_bytes_occupancy = fw_qstat->queue_size_bytes;
+		qstat->total_packets_accepted = fw_qstat->total_accepts;
+		qstat->total_packets_dropped = fw_qstat->total_drops;
+		qstat->total_packets_red_dropped = fw_qstat->total_red_dropped;
+		qstat->total_bytes_accepted =
+			(((uint64_t)fw_qstat->total_bytes_added_high) << 32)
+			| fw_qstat->total_bytes_added_low;
+		qstat->total_bytes_dropped =
+			(((uint64_t)fw_qstat->total_dropped_bytes_high) << 32)
+			| fw_qstat->total_dropped_bytes_low;
+		QOS_ASSERT(fw_qstat->queue_size_entries ==
+				fw_qstat->qmgr_num_queue_entries,
+				"wred (%u) and qm (%u) report different number of queue entries for queue %u\n",
+				fw_qstat->queue_size_entries,
+				fw_qstat->qmgr_num_queue_entries,
+				dcmd->queue_stats.rlm);
+		break;
+
+	case CMD_TYPE_GET_PORT_STATS:
+		fw_pstat = (struct port_stats_s *)(qdev->stat);
+		pstat = dcmd->port_stats.stat;
+		pstat->total_green_bytes = fw_pstat->total_green_bytes;
+		pstat->total_yellow_bytes = fw_pstat->total_yellow_bytes;
+		break;
+
+	case CMD_TYPE_GET_NODE_INFO:
+		post_process_get_node_info(qdev, &dcmd->node_info);
+		break;
+
+	default:
+		QOS_ASSERT(0, "Unexpected cmd %d for post process\n", type);
+		return;
+
+	}
+}
+
+#define MAX_FW_CMD_SIZE 120U
+#define MS_SLEEP_BETWEEN_POLL 50U
+#define NUM_OF_POLLS	100U
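+
+/*
+ * Assuming qos_sleep() takes milliseconds, as the macro name
+ * suggests, a command is polled for completion for up to
+ * NUM_OF_POLLS * MS_SLEEP_BETWEEN_POLL = 100 * 50 ms = 5 seconds
+ * before the driver asserts.
+ */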
+
+/*
+ * Go over all commands on the pending queue until the cmd id
+ * changes or the queue is empty
+ * (refer to the driver design document to learn more about cmd id).
+ * In the current implementation the pending queue is expected to
+ * contain firmware commands for a single client command, therefore
+ * the queue should become empty before the cmd id changes.
+ *
+ * For each command, wait until the firmware signals completion
+ * before continuing to the next command.
+ * The completion status of each command is polled up to
+ * NUM_OF_POLLS times, and the function sleeps between polls.
+ * If a command has not completed after all those polls, the
+ * function asserts.
+ */
+void check_completion(struct pp_qos_dev *qdev)
+{
+	union driver_cmd dcmd;
+
+	volatile uint32_t *pos;
+	uint32_t	val;
+	size_t len;
+	unsigned int idcur;
+	unsigned int idnext;
+	int first_cmd;
+	int rc;
+	unsigned int i;
+	unsigned int popped;
+	struct fw_internal *internals;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	popped = 0;
+	idcur = 0;
+	first_cmd = 1;
+	while (cmd_queue_peek(
+			      qdev->drvcmds.pendq,
+			      &dcmd.cmd,
+			      sizeof(struct cmd)) == 0) {
+		idnext = dcmd.cmd.id;
+		if (!first_cmd && (idnext != idcur))
+			break;
+		pos = dcmd.stub.cmd.pos;
+		len = dcmd.cmd.len;
+		++pos;
+#ifndef NO_FW
+		i = 0;
+		val = qos_u32_from_uc(*pos);
+		while ((val &
+			(UC_CMD_FLAG_UC_DONE | UC_CMD_FLAG_UC_ERROR)) == 0) {
+			val = qos_u32_from_uc(*pos);
+			qos_sleep(MS_SLEEP_BETWEEN_POLL);
+			++i;
+			if (i == NUM_OF_POLLS) {
+				QOS_ASSERT(
+					0,
+					"FW is not responding, polling offset 0x%04tX for cmd type %s\n",
+					(void *)pos -
+					(void *)(qdev->fwcom.cmdbuf),
+					cmd_str[dcmd.cmd.type]);
+				return;
+			}
+		}
+		if (val & UC_CMD_FLAG_UC_ERROR) {
+			QOS_ASSERT(0,
+				   "FW signaled error, polling offset 0x%04tX, cmd type %s\n",
+				   (void *)pos - (void *)(qdev->fwcom.cmdbuf),
+				   cmd_str[dcmd.cmd.type]);
+			return;
+		}
+#endif
+		rc = cmd_queue_get(qdev->drvcmds.pendq, &dcmd.stub, len);
+		QOS_ASSERT(rc == 0,
+			   "Command queue does not contain a full command\n");
+		if (dcmd.cmd.flags & CMD_FLAGS_POST_PROCESS)
+			post_process(qdev, &dcmd);
+		++popped;
+		idcur = idnext;
+		first_cmd = 0;
+	}
+
+	internals = qdev->fwbuf;
+	QOS_ASSERT(popped == internals->pushed,
+		   "Expected to pop %u msgs from pending queue but popped %u\n",
+		   internals->pushed, popped);
+	QOS_ASSERT(cmd_queue_is_empty(qdev->drvcmds.cmdq),
+		   "Driver's cmd queue is not empty\n");
+	QOS_ASSERT(cmd_queue_is_empty(qdev->drvcmds.pendq),
+		   "Driver's pending queue is not empty\n");
+}
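+
+/*
+ * Layout assumed by the polling loop above: dcmd.stub.cmd.pos points
+ * at the first word of the serialized command (its opcode), so the
+ * word at pos + 1 is the command's flags word, where the firmware
+ * reports completion:
+ *
+ *	pos = dcmd.stub.cmd.pos + 1;
+ *	val = qos_u32_from_uc(*pos);
+ *	done = val & (UC_CMD_FLAG_UC_DONE | UC_CMD_FLAG_UC_ERROR);
+ */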
+
+#define FW_CMD_BUFFER_DCCM_START 0xF0006000
+
+/*
+ * Take all commands from the driver cmd queue, translate them to
+ * firmware format and put them on the firmware queue.
+ * When finished, signal the firmware.
+ */
+void enqueue_cmds(struct pp_qos_dev *qdev)
+{
+	size_t len;
+	int rc;
+	uint32_t *cur;
+	uint32_t *prev;
+	uint32_t *start;
+	size_t remain;
+	uint32_t flags;
+	union driver_cmd dcmd;
+	struct fw_internal *internals;
+	unsigned int pushed;
+
+	if (PP_QOS_DEVICE_IS_ASSERT(qdev))
+		return;
+
+	start = qdev->fwcom.cmdbuf;
+	remain = qdev->fwcom.cmdbuf_sz;
+
+	pushed = 0;
+	cur = start;
+	prev = start;
+	*cur++ = qos_u32_to_uc(UC_QOS_COMMAND_MULTIPLE_COMMANDS);
+	*cur++ = qos_u32_to_uc(UC_CMD_FLAG_IMMEDIATE);
+	*cur++ = qos_u32_to_uc(1);
+	*cur = qos_u32_to_uc(((uintptr_t)(cur) - (uintptr_t)start +
+				4 + FW_CMD_BUFFER_DCCM_START) & 0xFFFFFFFF);
+	++cur;
+
+	internals = qdev->fwbuf;
+	remain -= 16;
+	flags = UC_CMD_FLAG_IMMEDIATE;
+	while ((remain >= MAX_FW_CMD_SIZE) &&
+			cmd_queue_peek(
+				qdev->drvcmds.cmdq,
+				&dcmd.cmd,
+				sizeof(struct cmd)) == 0) {
+		len = dcmd.cmd.len;
+		rc = cmd_queue_get(qdev->drvcmds.cmdq, &dcmd.stub, len);
+		QOS_ASSERT(rc == 0,
+				"Command queue does not contain a full command\n");
+		prev = cur;
+		dcmd.stub.cmd.pos = cur;
+		cmd_queue_put(
+				qdev->drvcmds.pendq,
+				&dcmd.stub,
+				dcmd.stub.cmd.len);
+		switch (dcmd.cmd.type) {
+		case CMD_TYPE_INIT_LOGGER:
+			cur = fw_write_init_logger_cmd(
+					prev,
+					&dcmd.init_logger,
+					flags);
+			break;
+
+		case CMD_TYPE_INIT_QOS:
+			cur = fw_write_init_qos_cmd(
+					prev,
+					&dcmd.init_qos,
+					flags);
+			break;
+
+		case CMD_TYPE_MOVE:
+			if (dcmd.move.node_type == TYPE_QUEUE)
+				cur = fw_write_move_queue_cmd(
+						prev,
+						&dcmd.move,
+						flags);
+			else
+				cur = fw_write_move_sched_cmd(
+						prev,
+						&dcmd.move,
+						flags);
+			break;
+
+		case CMD_TYPE_PARENT_CHANGE:
+			cur = parent_change_cmd_wrapper(
+					internals,
+					prev,
+					&dcmd.parent_change,
+					flags);
+			break;
+
+		case CMD_TYPE_UPDATE_PREDECESSORS:
+			cur = update_preds_cmd_wrapper(
+					internals,
+					prev,
+					&dcmd.update_preds,
+					flags);
+			break;
+
+		case CMD_TYPE_ADD_PORT:
+			cur = fw_write_add_port_cmd(
+					prev,
+					&dcmd.add_port,
+					flags);
+			break;
+
+		case CMD_TYPE_SET_PORT:
+			cur = set_port_cmd_wrapper(
+					internals,
+					prev,
+					&dcmd.set_port,
+					flags);
+			break;
+
+		case CMD_TYPE_REMOVE_PORT:
+			cur = fw_write_remove_port_cmd(
+					prev,
+					&dcmd.remove_node,
+					flags);
+			break;
+
+		case CMD_TYPE_ADD_SCHED:
+			cur = fw_write_add_sched_cmd(
+					prev,
+					&dcmd.add_sched,
+					flags);
+			break;
+
+		case CMD_TYPE_ADD_QUEUE:
+			cur = fw_write_add_queue_cmd(
+					prev,
+					&dcmd.add_queue,
+					flags);
+			break;
+
+		case CMD_TYPE_REMOVE_QUEUE:
+			cur = fw_write_remove_queue_cmd(
+					prev,
+					&dcmd.remove_node,
+					flags);
+			break;
+
+		case CMD_TYPE_REMOVE_SCHED:
+			cur = fw_write_remove_sched_cmd(
+					prev,
+					&dcmd.remove_node,
+					flags);
+			break;
+
+		case CMD_TYPE_SET_SCHED:
+			cur = set_sched_cmd_wrapper(
+					internals,
+					prev,
+					&dcmd.set_sched,
+					flags);
+			break;
+
+		case CMD_TYPE_SET_QUEUE:
+			cur = set_queue_cmd_wrapper(
+					internals,
+					prev,
+					&dcmd.set_queue,
+					flags);
+			break;
+
+		case CMD_TYPE_REMOVE_NODE:
+			QOS_ASSERT(0,
+					"Did not expect CMD_TYPE_REMOVE_NODE\n");
+			break;
+
+		case CMD_TYPE_GET_QUEUE_STATS:
+			cur = fw_write_get_queue_stats(
+					prev, &dcmd.queue_stats, flags);
+			break;
+
+		case CMD_TYPE_GET_PORT_STATS:
+			cur = fw_write_get_port_stats(
+					prev, &dcmd.port_stats, flags);
+			break;
+
+		case CMD_TYPE_ADD_SHARED_GROUP:
+		case CMD_TYPE_SET_SHARED_GROUP:
+			cur = fw_write_set_shared_group(
+					prev,
+					dcmd.cmd.type,
+					&dcmd.set_shared_group,
+					flags);
+			break;
+
+		case CMD_TYPE_REMOVE_SHARED_GROUP:
+			cur = fw_write_remove_shared_group(
+					prev,
+					&dcmd.remove_shared_group,
+					flags);
+			break;
+		case CMD_TYPE_PUSH_DESC:
+			cur = fw_write_push_desc(
+					prev,
+					&dcmd.pushd,
+					flags);
+			break;
+
+		case CMD_TYPE_GET_NODE_INFO:
+			cur = fw_write_get_node_info(
+					prev,
+					&dcmd.node_info,
+					flags);
+			break;
+
+		default:
+			QOS_ASSERT(0, "Unexpected msg type %d\n",
+					dcmd.cmd.type);
+			return;
+		}
+		++pushed;
+		remain -= (uintptr_t)cur - (uintptr_t)prev;
+	}
+	QOS_ASSERT(remain >= MAX_FW_CMD_SIZE,
+			"Remaining size of command buffer is not enough\n");
+
+	if (prev != start) {
+		prev += 1;
+		flags = qos_u32_from_uc(*prev);
+		QOS_BITS_SET(flags, UC_CMD_FLAG_MULTIPLE_COMMAND_LAST);
+		*prev = qos_u32_to_uc(flags);
+	}
+	internals->pushed = pushed;
+
+	if (prev != start)
+		signal_uc(qdev);
+}
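+
+/*
+ * Buffer layout produced by enqueue_cmds(), reconstructed from the
+ * code above: the buffer opens with a four word
+ * UC_QOS_COMMAND_MULTIPLE_COMMANDS header - opcode, flags
+ * (UC_CMD_FLAG_IMMEDIATE), the constant 1, and the DCCM address of
+ * the first embedded command (FW_CMD_BUFFER_DCCM_START + 16). The
+ * translated commands follow back to back, and the flags word of the
+ * last one is tagged with UC_CMD_FLAG_MULTIPLE_COMMAND_LAST so the
+ * firmware knows where to stop. The 16 bytes subtracted from "remain"
+ * account for this four word header.
+ */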
+#endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.h
new file mode 100644
index 000000000..df51fdde4
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_fw.h
@@ -0,0 +1,156 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef PP_QOS_FW_H
+#define PP_QOS_FW_H
+
+#include "pp_qos_utils.h"
+#include "pp_qos_common.h"
+#include "pp_qos_uc_wrapper.h"
+
+struct ppv4_qos_fw {
+	size_t size;
+	const uint8_t *data;
+};
+
+int load_firmware(struct pp_qos_dev *qdev, const char *name);
+
+#ifndef PP_QOS_DISABLE_CMDQ
+void create_move_cmd(struct pp_qos_dev *qdev, uint16_t dst, uint16_t src,
+		     uint16_t dst_port);
+void create_set_port_cmd(struct pp_qos_dev *qdev,
+			 const struct pp_qos_port_conf *conf, unsigned int phy,
+			 uint32_t modified);
+void create_set_queue_cmd(struct pp_qos_dev *qdev,
+			  const struct pp_qos_queue_conf *conf,
+			  unsigned int phy,
+			  unsigned int parent,
+			  unsigned int rlm, uint32_t modified);
+void create_set_sched_cmd(struct pp_qos_dev *qdev,
+			  const struct pp_qos_sched_conf *conf,
+			  unsigned int phy,
+			  unsigned int parent, uint32_t modified);
+void create_remove_node_cmd(struct pp_qos_dev *qdev, enum node_type type,
+			    unsigned int phy, unsigned int data);
+void create_parent_change_cmd(struct pp_qos_dev *qdev, unsigned int phy);
+void create_update_preds_cmd(struct pp_qos_dev *qdev, unsigned int phy);
+void create_init_qos_cmd(struct pp_qos_dev *qdev);
+void enqueue_cmds(struct pp_qos_dev *qdev);
+void check_completion(struct pp_qos_dev *qdev);
+void create_get_queue_stats_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int rlm,
+		unsigned int addr,
+		struct pp_qos_queue_stat *qstat);
+void create_get_port_stats_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int addr,
+		struct pp_qos_port_stat *pstat);
+int init_fwdata_internals(struct pp_qos_dev *qdev);
+void clean_fwdata_internals(struct pp_qos_dev *qdev);
+void create_init_logger_cmd(struct pp_qos_dev *qdev);
+void create_add_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id,
+		unsigned int limit);
+void create_remove_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id);
+
+void create_set_shared_group_cmd(struct pp_qos_dev *qdev,
+		unsigned int id,
+		unsigned int limit);
+
+void create_push_desc_cmd(struct pp_qos_dev *qdev, unsigned int queue,
+		unsigned int size, unsigned int color, unsigned int addr);
+
+void create_get_node_info_cmd(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int addr,
+		struct pp_qos_node_info *info);
+
+int do_load_firmware(
+		struct pp_qos_dev *qdev,
+		const struct ppv4_qos_fw *fw,
+		void *ddr_base,
+		void *ivt_base,
+		void *data);
+
+void signal_uc(struct pp_qos_dev *qdev);
+#elif defined(PRINT_CREATE_CMD)
+#define create_move_cmd(qdev, dst, src, dst_port)\
+	QOS_LOG_DEBUG("MOVE: %u ==> %u\n", src, dst)
+#define create_set_port_cmd(qdev, conf, phy, modified)\
+	QOS_LOG_DEBUG("SET PORT: %u\n", phy)
+#define create_set_sched_cmd(qdev, conf, phy, parent, modified)\
+	QOS_LOG_DEBUG("SET SCH: %u\n", phy)
+#define create_set_queue_cmd(qdev, conf, phy, parent, rlm, modified)\
+	QOS_LOG_DEBUG("SET QUEUE: %u\n", phy)
+#define create_remove_node_cmd(qdev, type, phy, data)\
+	QOS_LOG_DEBUG("REMOVE: %u(%u)\n", phy, type)
+#define create_parent_change_cmd(qdev, phy)\
+	QOS_LOG_DEBUG("PARENT_CHANGE: %u\n", phy)
+#define create_update_preds_cmd(qdev, phy)\
+	QOS_LOG_DEBUG("UPDATE_PREDS: %u\n", phy)
+#define create_get_queue_stats_cmd(qdev, phy, rlm, addr, qstat)
+#define create_init_qos_cmd(qdev)
+#define enqueue_cmds(qdev)
+#define check_completion(qdev)
+#define init_fwdata_internals(qdev) 0
+#define clean_fwdata_internals(qdev)
+#define create_init_logger_cmd(qdev)
+#define create_add_shared_group_cmd(qdev, id, limit)
+#define create_set_shared_group_cmd(qdev, id, limit)
+#define create_remove_shared_group_cmd(qdev, id)
+#define create_get_port_stats_cmd(qdev, phy, addr, pstat)
+#define create_get_node_info_cmd(qdev, phy, addr, info)
+#define create_push_desc_cmd(qdev, queue, size, color, addr)
+#else
+#define create_move_cmd(qdev, dst, src, dst_port)
+#define create_set_port_cmd(qdev, conf, phy, modified)
+#define create_set_sched_cmd(qdev, conf, phy, parent, modified)
+#define create_set_queue_cmd(qdev, conf, phy, parent, rlm, modified)
+#define create_remove_node_cmd(qdev, type, phy, data)
+#define create_parent_change_cmd(qdev, phy)
+#define create_update_preds_cmd(qdev, phy)
+#define create_get_queue_stats_cmd(qdev, phy, rlm, addr, qstat)
+#define create_init_qos_cmd(qdev)
+#define enqueue_cmds(qdev)
+#define check_completion(qdev)
+#define init_fwdata_internals(qdev) 0
+#define clean_fwdata_internals(qdev)
+#define create_init_logger_cmd(qdev)
+#define create_add_shared_group_cmd(qdev, id, limit)
+#define create_set_shared_group_cmd(qdev, id, limit)
+#define create_remove_shared_group_cmd(qdev, id)
+#define create_get_port_stats_cmd(qdev, phy, addr, pstat)
+#define create_get_node_info_cmd(qdev, phy, addr, info)
+#define create_push_desc_cmd(qdev, queue, size, color, addr)
+#endif
+#endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_kernel.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_kernel.h
new file mode 100644
index 000000000..fe1d38b81
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_kernel.h
@@ -0,0 +1,75 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef _PP_QOS_KERNEL_H
+#define _PP_QOS_KERNEL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+int qos_dbg_dev_init(struct platform_device *pdev);
+void qos_dbg_dev_clean(struct platform_device *pdev);
+int qos_dbg_module_init(void);
+void qos_dbg_module_clean(void);
+
+struct pp_qos_dbg_data {
+	struct dentry *dir;
+	uint16_t	node;
+	void		*fw_logger_addr;
+};
+
+struct pp_qos_drv_data {
+	int	id;
+	struct pp_qos_dev *qdev;
+	struct pp_qos_dbg_data dbg;
+	void *ddr;
+	void *dccm;
+	void *ivt;
+	void __iomem *wakeuc;
+	resource_size_t ddr_phy_start;
+};
+
+#ifndef __BIG_ENDIAN
+#define ENDIANESS_MATCH(endianess) (endianess == 0)
+#else
+#define ENDIANESS_MATCH(endianess) (endianess != 0)
+#endif
+
+#ifdef CONFIG_OF
+#define PPV4_QOS_MANUAL_ADD(rc)
+#define PPV4_QOS_MANUAL_REMOVE()
+#else
+#define PPV4_QOS_MANUAL_ADD(rc)		\
+do {					\
+	if (rc == 0)			\
+		rc = add_qos_dev();	\
+} while (0)
+#define PPV4_QOS_MANUAL_REMOVE() remove_qos_dev()
+#endif
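+
+/*
+ * With CONFIG_OF the qos platform device is expected to be created
+ * from the device tree, so the manual hooks compile away; without it,
+ * PPV4_QOS_MANUAL_ADD() registers the device explicitly via
+ * add_qos_dev() once the driver has registered successfully, and
+ * PPV4_QOS_MANUAL_REMOVE() undoes this on module exit.
+ */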
+
+#endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_linux.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_linux.c
new file mode 100644
index 000000000..4d833b1a3
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_linux.c
@@ -0,0 +1,536 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include "pp_qos_kernel.h"
+#include "pp_qos_common.h"
+#include "pp_qos_utils.h"
+#include "pp_qos_fw.h"
+
+/*
+ * TODO - this obstructs multiple instances but helps debugging
+ * at this phase
+ */
+struct device *cur_dev;
+
+#define PPV4_QOS_CMD_BUF_SIZE (0x1000)
+#define PPV4_QOS_CMD_BUF_OFFSET (0x6000U)
+#define MAX_CMD_SIZE 150
+
+void stop_run(void)
+{
+	struct pp_qos_drv_data *pdata;
+
+	if (cur_dev) {
+		dev_crit(cur_dev, "!!!!! Qos driver in unstable mode !!!!!\n");
+		pdata = dev_get_drvdata(cur_dev);
+		QOS_BITS_SET(pdata->qdev->flags, PP_QOS_FLAGS_ASSERT);
+	}
+}
+
+void wake_uc(void *data)
+{
+	struct pp_qos_drv_data *pdata;
+
+	pdata = (struct pp_qos_drv_data *)data;
+	iowrite32(4, pdata->wakeuc);
+}
+
+int load_firmware(struct pp_qos_dev *qdev, const char *name)
+{
+	int err;
+	const struct firmware *firmware;
+	struct ppv4_qos_fw fw;
+	struct pp_qos_drv_data *pdata;
+	struct device *dev;
+	struct platform_device *pdev;
+
+	pdev = (struct platform_device *)(qdev->pdev);
+	dev = &pdev->dev;
+	pdata = platform_get_drvdata(pdev);
+	err = request_firmware(&firmware, name, dev);
+	if (err < 0) {
+		dev_err(dev, "Failed loading firmware ret is %d\n", err);
+		return err;
+	}
+
+	fw.size = firmware->size;
+	fw.data = firmware->data;
+	err = do_load_firmware(qdev,
+			&fw,
+			pdata->ddr - pdata->ddr_phy_start,
+			pdata->ivt - PPV4_QOS_IVT_START,
+			pdata);
+
+	release_firmware(firmware);
+	return err;
+}
+
+static const struct of_device_id pp_qos_match[] = {
+	{ .compatible = "intel,falconmx-ppv4qos" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pp_qos_match);
+
+static struct resource *get_resource(
+		struct platform_device *pdev,
+		const char *name,
+		unsigned int type,
+		size_t size)
+{
+	struct resource *r;
+	size_t sz;
+	struct device *dev;
+
+	dev = &pdev->dev;
+
+	r = platform_get_resource_byname(pdev, type, name);
+	if (!r) {
+		dev_err(dev, "Could not get %s resource\n", name);
+		return NULL;
+	}
+
+	sz = resource_size(r);
+	if (size && sz != size) {
+		dev_err(dev, "Illegal size %zu for %s resource, expected %zu\n",
+				sz, name, size);
+		return NULL;
+	}
+
+	return r;
+}
+
+static void print_resource(
+		struct device *dev,
+		const char *name,
+		struct resource *r)
+{
+	dev_info(dev, "%s memory resource: start(0x%08zX), size(%zu)\n",
+			name,
+			(size_t)(uintptr_t)r->start,
+			(size_t)(uintptr_t)resource_size(r));
+}
+
+static void *map_mem_resource(
+		struct platform_device *pdev,
+		const char *name,
+		unsigned int type,
+		unsigned int size)
+{
+	struct resource *r;
+	void *addr;
+	struct device *dev;
+	int err;
+
+	dev = &pdev->dev;
+
+	r = get_resource(
+			pdev,
+			name,
+			type,
+			size);
+	if (!r)
+		return NULL;
+
+	print_resource(dev, name, r);
+	addr = devm_memremap(dev, r->start, resource_size(r), MEMREMAP_WT);
+	if (IS_ERR_OR_NULL(addr)) {
+		err = (int) PTR_ERR(addr);
+		dev_err(dev,
+			"devm_memremap on resource %s failed with %d\n",
+			name,
+			err);
+		return NULL;
+	}
+	dev_info(dev, "%s memory mapped to %p\n", name, addr);
+
+	return addr;
+}
+
+static void __iomem *map_reg_resource(
+		struct platform_device *pdev,
+		const char *name,
+		unsigned int type,
+		unsigned int size)
+{
+	struct resource *r;
+	void __iomem *addr;
+
+	r = get_resource(
+			pdev,
+			name,
+			type,
+			size);
+
+	if (!r)
+		return NULL;
+
+	print_resource(&pdev->dev, name, r);
+
+	addr = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(addr)) {
+		dev_err(&pdev->dev, "Map of resource %s failed\n", name);
+		return NULL;
+	}
+	dev_info(&pdev->dev, "%s register mapped to %p\n", name, addr);
+
+	return addr;
+}
+
+static int pp_qos_get_resources(
+		struct platform_device *pdev,
+		struct qos_dev_init_info *info)
+{
+	int err;
+	struct pp_qos_drv_data *pdata;
+	struct device *dev;
+	void __iomem  *regaddr;
+	void *memaddr;
+
+	dev = &pdev->dev;
+	pdata = platform_get_drvdata(pdev);
+
+	/* Cmd buffer and dccm */
+	memaddr = map_mem_resource(pdev, "dccm", IORESOURCE_MEM, 0);
+	if (!memaddr)
+		return -ENODEV;
+	pdata->dccm = memaddr;
+	info->fwcom.cmdbuf = memaddr + PPV4_QOS_CMD_BUF_OFFSET;
+	info->fwcom.cmdbuf_sz = PPV4_QOS_CMD_BUF_SIZE;
+
+	memaddr = map_mem_resource(pdev, "ivt", IORESOURCE_MEM, 0);
+	if (!memaddr)
+		return -ENODEV;
+	pdata->ivt = memaddr;
+
+	/* Mbx from uc */
+	regaddr = map_reg_resource(pdev, "mbx-from-uc", IORESOURCE_MEM, 24);
+	if (!regaddr)
+		return -ENODEV;
+	info->fwcom.mbx_from_uc = regaddr;
+
+	/* Mbx to uc */
+	regaddr = map_reg_resource(pdev, "mbx-to-uc", IORESOURCE_MEM, 24);
+	if (!regaddr)
+		return -ENODEV;
+	info->fwcom.mbx_to_uc = regaddr;
+
+	regaddr = map_reg_resource(pdev, "wake-uc", IORESOURCE_MEM, 4);
+	if (!regaddr)
+		return -ENODEV;
+	pdata->wakeuc = regaddr;
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0) {
+		dev_err(dev,
+			"Could not retrieve irqline from platform err is %d\n",
+			"Could not retrieve irq line from platform, error is %d\n",
+		return err;
+	}
+	info->fwcom.irqline = err;
+	dev_info(dev, "irq is %d\n", err);
+
+	return 0;
+}
+
+static int pp_qos_config_from_of_node(
+		struct platform_device *pdev,
+		struct ppv4_qos_platform_data *pdata,
+		struct pp_qos_drv_data *pdrvdata)
+{
+	int err;
+	uint32_t val;
+	struct resource r;
+	struct device_node *node;
+	struct device *dev;
+	void *addr;
+	dma_addr_t dma;
+
+	dev = &pdev->dev;
+	node = pdev->dev.of_node;
+	dev_info(&pdev->dev, "Using device tree info to init platform data\n");
+	err = of_alias_get_id(node, "qos");
+	if (err < 0) {
+		dev_err(dev, "failed to get alias id, errno %d\n", err);
+		return err;
+	}
+	pdata->id = err;
+
+	err = of_property_read_u32(node, "intel,qos-max-port", &val);
+	if (err) {
+		dev_err(dev,
+			"Could not get max port from DT, error is %d\n",
+			err);
+		return -ENODEV;
+	}
+	pdata->max_port = val;
+
+	err = of_property_read_u32(
+			node,
+			"intel,wred-prioritize",
+			&val);
+	if (err) {
+		dev_err(dev,
+			"Could not get wred pop prioritize from DT, error is %d\n",
+			err);
+		return -ENODEV;
+	}
+	pdata->wred_prioritize_pop = val;
+
+	/* Get reserved memory region */
+	node = of_parse_phandle(node, "memory-region", 0);
+	if (!node) {
+		dev_err(dev, "No memory-region specified\n");
+		return -ENODEV;
+	}
+	err = of_address_to_resource(node, 0, &r);
+	if (err) {
+		dev_err(dev, "No memory address assigned to the region\n");
+		return err;
+	}
+
+	print_resource(dev, "ddr", &r);
+	pdrvdata->ddr_phy_start = r.start;
+	pdrvdata->ddr = devm_memremap(
+			dev,
+			r.start,
+			resource_size(&r),
+			MEMREMAP_WT);
+	if (IS_ERR_OR_NULL(pdrvdata->ddr)) {
+		err = (int) PTR_ERR(pdrvdata->ddr);
+		dev_err(dev, "devm_memremap failed mapping ddr with %d\n", err);
+		return err;
+	}
+	dev_info(dev, "DDR memory mapped to %p\n", pdrvdata->ddr);
+
+	addr = dmam_alloc_coherent(dev, PPV4_QOS_QM_BUF_SIZE, &dma, GFP_KERNEL);
+	if (addr == NULL) {
+		dev_err(dev,
+				"Could not allocate %u bytes for queue manager\n",
+				PPV4_QOS_QM_BUF_SIZE);
+		return -ENOMEM;
+	}
+	pdata->qm_num_pages =  PPV4_QOS_QM_BUF_SIZE / PPV4_QOS_PAGE_SIZE;
+	pdata->qm_ddr_start = (unsigned int)(dma);
+
+	dev_info(dev, "Dma allocated %u bytes for queue manager, bus address is 0x%08X\n",
+		 PPV4_QOS_QM_BUF_SIZE,
+		 pdata->qm_ddr_start);
+
+	addr = dmam_alloc_coherent(
+			dev,
+			PPV4_QOS_LOGGER_BUF_SIZE + PPV4_QOS_STAT_SIZE,
+			&dma,
+			GFP_KERNEL);
+	if (addr == NULL) {
+		dev_err(dev,
+				"Could not allocate %u bytes for logger and stat buffers\n",
+				PPV4_QOS_LOGGER_BUF_SIZE + PPV4_QOS_STAT_SIZE);
+		return -ENOMEM;
+	}
+	pdata->fw_logger_start = (unsigned int)(dma);
+	pdata->fw_stat = pdata->fw_logger_start + PPV4_QOS_LOGGER_BUF_SIZE;
+	pdrvdata->dbg.fw_logger_addr = addr;
+
+	dev_info(dev, "Dma allocated %u bytes for fw logger, bus address is 0x%08X, virtual addr is %p\n",
+		 PPV4_QOS_LOGGER_BUF_SIZE,
+		 pdata->fw_logger_start,
+		 pdrvdata->dbg.fw_logger_addr);
+
+	return 0;
+}
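+
+/*
+ * For illustration, a device tree node matching the properties parsed
+ * above might look as follows; the node name, labels and values are
+ * hypothetical, only the compatible string and property names are
+ * taken from the code:
+ *
+ *	qos0: ppv4qos@0 {
+ *		compatible = "intel,falconmx-ppv4qos";
+ *		intel,qos-max-port = <19>;
+ *		intel,wred-prioritize = <1>;
+ *		memory-region = <&qos_reserved>;
+ *	};
+ *
+ * together with a "qos" alias pointing at the node, since the
+ * instance id comes from of_alias_get_id(node, "qos"). The named MEM
+ * resources ("dccm", "ivt", "mbx-from-uc", "mbx-to-uc", "wake-uc")
+ * and the irq are fetched separately in pp_qos_get_resources().
+ */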
+
+static int pp_qos_config_from_platform_data(
+		struct platform_device *pdev,
+		struct ppv4_qos_platform_data *pdata,
+		struct pp_qos_drv_data *pdrvdata
+		)
+{
+	struct ppv4_qos_platform_data *psrc;
+	void *memaddr;
+
+	dev_info(&pdev->dev, "Using platform info to init platform data\n");
+	psrc = (struct ppv4_qos_platform_data *)dev_get_platdata(&pdev->dev);
+	if (!psrc) {
+		dev_err(&pdev->dev, "Device contains no platform data\n");
+		return -ENODEV;
+	}
+	pdata->id = psrc->id;
+	pdata->max_port = psrc->max_port;
+	pdata->wred_prioritize_pop = psrc->wred_prioritize_pop;
+
+	/* DDR */
+	pdrvdata->ddr_phy_start = 0;
+	memaddr = map_mem_resource(pdev, "ddr", IORESOURCE_MEM, 0);
+	if (!memaddr)
+		return -ENODEV;
+	pdrvdata->ddr = memaddr;
+
+	pdata->qm_ddr_start = psrc->qm_ddr_start;
+	pdata->qm_num_pages = psrc->qm_num_pages;
+	pdata->fw_logger_start = psrc->fw_logger_start;
+	pdata->fw_stat = pdata->fw_logger_start + PPV4_QOS_LOGGER_BUF_SIZE;
+	pdrvdata->dbg.fw_logger_addr = pdrvdata->ddr + pdata->fw_logger_start;
+	return 0;
+}
+
+static int pp_qos_probe(struct platform_device *pdev)
+{
+	const struct device_node *of_node = pdev->dev.of_node;
+	struct qos_dev_init_info init_info = {};
+	const struct of_device_id *match;
+	struct pp_qos_dev *qdev;
+	int err;
+	struct pp_qos_drv_data *pdata;
+	struct device *dev;
+
+	dev = &pdev->dev;
+	dev_info(dev, "Probing...\n");
+
+	/* there is no devm_vmalloc so using devm_kzalloc */
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pdata);
+
+	match = of_match_device(of_match_ptr(pp_qos_match), dev);
+	if (match && of_node) {
+		err = pp_qos_config_from_of_node(
+				pdev,
+				&init_info.pl_data,
+				pdata);
+		if (err)
+			return err;
+	} else if (dev_get_platdata(dev)) {
+		err = pp_qos_config_from_platform_data(
+				pdev,
+				&init_info.pl_data,
+				pdata);
+		if (err)
+			return err;
+	} else {
+		dev_err(dev, "Could not get qos configuration\n");
+		return -ENODEV;
+	}
+
+	dev_info(dev, "id(%d), max_port(%u), pop_prioritize(%u)\n",
+			init_info.pl_data.id,
+			init_info.pl_data.max_port,
+			init_info.pl_data.wred_prioritize_pop
+			);
+
+	pdata->id = init_info.pl_data.id;
+	err = pp_qos_get_resources(pdev, &init_info);
+	if (err)
+		return err;
+
+	qdev = create_qos_dev_desc(&init_info);
+	if (!qdev)
+		return -ENODEV;
+	qdev->pdev = pdev;
+	pdata->qdev = qdev;
+	qdev->stat = pdata->dbg.fw_logger_addr + PPV4_QOS_LOGGER_BUF_SIZE;
+
+	err = qos_dbg_dev_init(pdev);
+	if (err)
+		goto fail;
+
+	dev_info(dev, "Probe completed\n");
+	cur_dev = dev;
+	return 0;
+
+fail:
+	_qos_clean(qdev);
+	return err;
+}
+
+static int pp_qos_remove(struct platform_device *pdev)
+{
+	struct pp_qos_drv_data *pdata;
+
+	dev_info(&pdev->dev, "Removing...\n");
+	pdata = platform_get_drvdata(pdev);
+	remove_qos_instance(pdata->id);
+	qos_dbg_dev_clean(pdev);
+	/* TODO !!! How to cleanup? */
+	return 0;
+}
+
+static struct platform_driver pp_qos_driver = {
+	.probe = pp_qos_probe,
+	.remove = pp_qos_remove,
+	.driver = {
+		.name = "pp-qos",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		.of_match_table = pp_qos_match,
+#endif
+	},
+};
+
+static int __init pp_qos_mod_init(void)
+{
+	int rc;
+
+	qos_module_init();
+	qos_dbg_module_init();
+	rc = platform_driver_register(&pp_qos_driver);
+	PPV4_QOS_MANUAL_ADD(rc);
+	pr_info("pp_qos_driver init, status %d\n", rc);
+	return rc;
+}
+
+static void __exit pp_qos_mod_exit(void)
+{
+	PPV4_QOS_MANUAL_REMOVE();
+	platform_driver_unregister(&pp_qos_driver);
+
+	qos_dbg_module_clean();
+	pr_info("pp_qos_driver exit\n");
+}
+
+/* device_initcall(pp_qos_mod_init); */
+/* TODO ask Thomas */
+arch_initcall(pp_qos_mod_init);
+module_exit(pp_qos_mod_exit);
+
+MODULE_VERSION(PPV4_QOS_DRV_VER);
+MODULE_AUTHOR("Intel CHD");
+MODULE_DESCRIPTION("PPv4 QoS driver");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(FIRMWARE_FILE);
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_main.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_main.c
new file mode 100644
index 000000000..23383b59d
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_main.c
@@ -0,0 +1,2191 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#include "pp_qos_common.h"
+#include "pp_qos_utils.h"
+#include "pp_qos_fw.h"
+#include "pp_qos_drv.h"
+
+#define PP_QOS_ENTER_FUNC()		QOS_LOG_DEBUG("%s <--\n", __func__)
+#define PP_QOS_EXIT_FUNC()		QOS_LOG_DEBUG("%s -->\n", __func__)
+
+STATIC_UNLESS_TEST void update_cmd_id(struct driver_cmds *drvcmds)
+{
+	drvcmds->cmd_id++;
+	drvcmds->cmd_fw_id = 0;
+}
+
+STATIC_UNLESS_TEST void transmit_cmds(struct pp_qos_dev *qdev)
+{
+	enqueue_cmds(qdev);
+	check_completion(qdev);
+}
+
+static struct pp_qos_dev *qos_devs[MAX_QOS_INSTANCES];
+
+/******************************************************************************/
+/*                                      PORTS                                 */
+/******************************************************************************/
+static int set_port_specific_prop(struct pp_qos_dev *qdev,
+				  struct qos_node *node,
+				  const struct pp_qos_port_conf *conf,
+				  uint32_t *modified)
+{
+	if (node->data.port.ring_address != conf->ring_address) {
+		node->data.port.ring_address = conf->ring_address;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_RING_ADDRESS);
+	}
+
+	if (node->data.port.ring_size != conf->ring_size) {
+		node->data.port.ring_size = conf->ring_size;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_RING_SIZE);
+	}
+
+	if (!!(conf->packet_credit_enable) !=
+			!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_PORT_PACKET_CREDIT_ENABLE)) {
+		QOS_BITS_TOGGLE(node->flags,
+				QOS_NODE_FLAGS_PORT_PACKET_CREDIT_ENABLE);
+		QOS_BITS_SET(*modified, QOS_MODIFIED_BEST_EFFORT);
+	}
+
+	if (node->data.port.credit != conf->credit) {
+		node->data.port.credit = conf->credit;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_CREDIT);
+	}
+
+	return 0;
+}
+
+STATIC_UNLESS_TEST
+int port_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		const struct qos_node *orig_node)
+{
+	if (!node_port(node)) {
+		QOS_LOG_ERR("Node is not a port\n");
+		return 0;
+	}
+
+	if (node->data.port.ring_address == NULL ||
+			node->data.port.ring_size == 0) {
+		QOS_LOG_ERR("Invalid ring address %p or ring size %zu\n",
+				node->data.port.ring_address,
+				node->data.port.ring_size);
+		return 0;
+	}
+
+	return node_cfg_valid(qdev, node, orig_node, QOS_INVALID_PHY);
+}
+
+static
+struct qos_node *get_conform_node(const struct pp_qos_dev *qdev,
+		unsigned int id,
+		int (*conform)(const struct qos_node *node))
+{
+	struct qos_node *node;
+	unsigned int phy;
+
+	phy = get_phy_from_id(qdev->mapping, id);
+	if (!QOS_PHY_VALID(phy)) {
+		QOS_LOG_ERR("Illegal id %u\n", id);
+		return NULL;
+	}
+	node = get_node_from_phy(qdev->nodes, phy);
+	if (conform && !conform(node)) {
+		QOS_LOG_ERR("Illegal id %u\n", id);
+		return NULL;
+	}
+
+	return node;
+}
+
+/******************************************************************************/
+/*                                 API                                        */
+/******************************************************************************/
+static int _pp_qos_port_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_port_conf *conf);
+static int
+_pp_qos_port_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_port_conf *conf);
+
+void pp_qos_port_conf_set_default(struct pp_qos_port_conf *conf)
+{
+	memset(conf, 0, sizeof(struct pp_qos_port_conf));
+	conf->common_prop.bandwidth_limit = QOS_NO_BANDWIDTH_LIMIT;
+	conf->common_prop.shared_bandwidth_group =
+		QOS_NO_SHARED_BANDWIDTH_GROUP;
+	conf->port_parent_prop.arbitration = PP_QOS_ARBITRATION_WSP;
+	conf->packet_credit_enable = 1;
+}
+
+int pp_qos_port_allocate(
+		struct pp_qos_dev *qdev,
+		unsigned int physical_id,
+		unsigned int *_id)
+{
+	unsigned int phy;
+	int rc;
+	unsigned int id;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (physical_id == ALLOC_PORT_ID) {
+		phy = pp_pool_get(qdev->portsphys);
+		if (!QOS_PHY_VALID(phy)) {
+			rc = -EBUSY;
+			goto out;
+		}
+	} else if (!QOS_PHY_VALID(physical_id) ||
+			!qdev->reserved_ports[physical_id]) {
+		QOS_LOG(
+				"requested physical id %u is out of range or not reserved\n",
+				physical_id);
+		rc = -EINVAL;
+		goto out;
+	} else if (node_used(get_const_node_from_phy(qdev->nodes,
+					physical_id))) {
+		QOS_LOG("port %u already used\n", physical_id);
+		rc = -EBUSY;
+		goto out;
+	} else {
+		phy = physical_id;
+	}
+
+	node_init(qdev, get_node_from_phy(qdev->nodes, phy), 1, 1, 0);
+	nodes_modify_used_status(qdev, phy, 1, 1);
+
+	id = pp_pool_get(qdev->ids);
+	QOS_ASSERT(QOS_ID_VALID(id), "got illegal id %u\n", id);
+	map_id_phy(qdev->mapping, id, phy);
+	*_id = id;
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_suspend(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_port_conf conf;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_port_conf_get(qdev, id, &conf);
+	if (rc)
+		goto out;
+	conf.common_prop.suspended = 1;
+	rc =  _pp_qos_port_set(qdev, id, &conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_resume(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_port_conf conf;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_port_conf_get(qdev, id, &conf);
+	if (rc)
+		goto out;
+	conf.common_prop.suspended = 0;
+	rc =  _pp_qos_port_set(qdev, id, &conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_block(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	const struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = tree_modify_blocked_status(
+			qdev,
+			get_phy_from_id(qdev->mapping, id), 1);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_unblock(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	const struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = tree_modify_blocked_status(
+			qdev,
+			get_phy_from_id(qdev->mapping, id), 0);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_flush(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	const struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = tree_flush(qdev, get_phy_from_id(qdev->mapping, id));
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_port_remove(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	const struct qos_node *node;
+	unsigned int phy;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	phy = get_phy_from_node(qdev->nodes, node);
+	rc = tree_remove(qdev, phy);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_port_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_port_conf *conf)
+{
+	const struct qos_node *node;
+
+	if (conf == NULL)
+		return -EINVAL;
+
+	node = get_conform_node(qdev, id, node_port);
+	if (!node)
+		return -EINVAL;
+
+	conf->ring_address = node->data.port.ring_address;
+	conf->ring_size = node->data.port.ring_size;
+	conf->packet_credit_enable = !!QOS_BITS_IS_SET(node->flags,
+			QOS_NODE_FLAGS_PORT_PACKET_CREDIT_ENABLE);
+	conf->credit = node->data.port.credit;
+	return  get_node_prop(
+			qdev,
+			node,
+			&conf->common_prop,
+			&conf->port_parent_prop,
+			NULL);
+}
+
+int pp_qos_port_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_port_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_port_conf_get(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
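+/**
+ * pp_qos_port_info_get() - Get information about a port
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   port's id
+ * @info: pointer to struct to be filled with port's info
+ *
+ * Return: 0 on success
+ */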
+int pp_qos_port_info_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_port_info *info)
+{
+	int rc;
+	const struct qos_node *node;
+	unsigned int phy;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (info == NULL) {
+		rc = -EINVAL;
+		goto out;
+	}
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	info->physical_id = phy;
+	get_node_queues(qdev, phy, NULL, 0, &info->num_of_queues);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
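+/**
+ * pp_qos_port_get_queues() - Get ids of the queues feeding this port
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   port's id
+ * @queue_ids: client allocated array to be filled with the queues' ids
+ * @size: capacity of the queue_ids array
+ * @queues_num: holds the total number of queues, which may exceed size
+ *
+ * Return: 0 on success
+ */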
+int pp_qos_port_get_queues(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		uint16_t *queue_ids,
+		unsigned int size,
+		unsigned int *queues_num)
+{
+	int rc;
+	unsigned int phy;
+	const struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	get_node_queues(qdev, phy, queue_ids, size, queues_num);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_port_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_port_conf *conf)
+{
+	int rc;
+	uint32_t modified;
+	struct qos_node node;
+	struct qos_node *nodep;
+	uint16_t phy;
+
+	modified = 0;
+
+	nodep = get_conform_node(qdev, id, NULL);
+	if (!nodep)
+		return -EINVAL;
+	memcpy(&node, nodep, sizeof(node));
+
+	if (node.type != TYPE_PORT) {
+		node.type = TYPE_PORT;
+		QOS_BITS_SET(modified, QOS_MODIFIED_NODE_TYPE);
+	}
+
+	rc = set_node_prop(
+			qdev,
+			&node,
+			&conf->common_prop,
+			&conf->port_parent_prop,
+			NULL,
+			&modified);
+	if (rc)
+		return rc;
+
+	rc = set_port_specific_prop(qdev, &node, conf, &modified);
+	if (rc)
+		return rc;
+
+	if (!port_cfg_valid(qdev, &node, nodep))
+		return -EINVAL;
+
+	memcpy(nodep, &node, sizeof(struct qos_node));
+
+	if (modified) {
+		phy = get_phy_from_node(qdev->nodes, nodep);
+		create_set_port_cmd(qdev, conf, phy, modified);
+	}
+
+	return 0;
+}
+
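+/**
+ * pp_qos_port_set() - Set port configuration
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   port's id
+ * @conf: new configuration for the port
+ *
+ * Return: 0 on success
+ */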
+int pp_qos_port_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_port_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_port_set(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/**
+ * pp_qos_port_stat_get() - Get port's statistics
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   port's id
+ * @stat: pointer to struct to be filled with port's statistics
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_stat_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_port_stat *stat)
+{
+	int rc;
+	struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_port);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	create_get_port_stats_cmd(
+			qdev,
+			get_phy_from_node(qdev->nodes, node),
+			qdev->hwconf.fw_stat,
+			stat);
+
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/******************************************************************************/
+/*                                     QUEUES                                 */
+/******************************************************************************/
+static int _pp_qos_queue_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_queue_conf *conf);
+static int _pp_qos_queue_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_queue_conf *conf);
+
+static void node_queue_init(struct pp_qos_dev *qdev, struct qos_node *node)
+{
+	node_init(qdev, node, 1, 0, 1);
+	memset(&(node->data.queue), 0x0, sizeof(struct _queue));
+	node->type = TYPE_QUEUE;
+}
+
+static int set_queue_specific_prop(struct pp_qos_dev *qdev,
+				  struct qos_node *node,
+				  const struct pp_qos_queue_conf *conf,
+				  uint32_t *modified)
+{
+	if (node->data.queue.max_burst != conf->max_burst) {
+		node->data.queue.max_burst = conf->max_burst;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_MAX_BURST);
+	}
+
+	if (node->data.queue.green_min != conf->queue_wred_min_avg_green) {
+		node->data.queue.green_min = conf->queue_wred_min_avg_green;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MIN_GREEN);
+	}
+
+	if (node->data.queue.green_max != conf->queue_wred_max_avg_green) {
+		node->data.queue.green_max = conf->queue_wred_max_avg_green;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MAX_GREEN);
+	}
+
+	if (node->data.queue.green_slope != conf->queue_wred_slope_green) {
+		node->data.queue.green_slope = conf->queue_wred_slope_green;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_SLOPE_GREEN);
+	}
+
+	if (node->data.queue.yellow_min != conf->queue_wred_min_avg_yellow) {
+		node->data.queue.yellow_min = conf->queue_wred_min_avg_yellow;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MIN_YELLOW);
+	}
+
+	if (node->data.queue.yellow_max != conf->queue_wred_max_avg_yellow) {
+		node->data.queue.yellow_max = conf->queue_wred_max_avg_yellow;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MAX_YELLOW);
+	}
+
+	if (node->data.queue.yellow_slope != conf->queue_wred_slope_yellow) {
+		node->data.queue.yellow_slope = conf->queue_wred_slope_yellow;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_SLOPE_YELLOW);
+	}
+
+	if (node->data.queue.max_allowed != conf->queue_wred_max_allowed) {
+		node->data.queue.max_allowed = conf->queue_wred_max_allowed;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MAX_ALLOWED);
+	}
+
+	if (node->data.queue.min_guaranteed !=
+			conf->queue_wred_min_guaranteed) {
+		node->data.queue.min_guaranteed =
+			conf->queue_wred_min_guaranteed;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_MIN_GUARANTEED);
+	}
+
+	if (!!(conf->wred_enable) !=
+			!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_QUEUE_WRED_ENABLE)) {
+		QOS_BITS_TOGGLE(node->flags, QOS_NODE_FLAGS_QUEUE_WRED_ENABLE);
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_ENABLE);
+	}
+
+	if (!!(conf->wred_fixed_drop_prob_enable) !=
+			!!QOS_BITS_IS_SET(node->flags,
+			QOS_NODE_FLAGS_QUEUE_WRED_FIXED_DROP_PROB_ENABLE)) {
+		QOS_BITS_TOGGLE(node->flags,
+			QOS_NODE_FLAGS_QUEUE_WRED_FIXED_DROP_PROB_ENABLE);
+		QOS_BITS_SET(*modified,
+			QOS_MODIFIED_WRED_FIXED_DROP_PROB_ENABLE);
+	}
+
+	if (node->data.queue.fixed_green_prob !=
+			conf->queue_wred_fixed_drop_prob_green) {
+		node->data.queue.fixed_green_prob =
+			conf->queue_wred_fixed_drop_prob_green;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_FIXED_GREEN_PROB);
+	}
+
+	if (node->data.queue.fixed_yellow_prob !=
+			conf->queue_wred_fixed_drop_prob_yellow) {
+		node->data.queue.fixed_yellow_prob =
+			conf->queue_wred_fixed_drop_prob_yellow;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_WRED_FIXED_YELLOW_PROB);
+	}
+
+	if (!!(conf->blocked) !=
+			!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_QUEUE_BLOCKED)) {
+		QOS_BITS_TOGGLE(node->flags, QOS_NODE_FLAGS_QUEUE_BLOCKED);
+		QOS_BITS_SET(*modified, QOS_MODIFIED_BLOCKED);
+	}
+	return 0;
+}
+
+STATIC_UNLESS_TEST int queue_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		const struct qos_node *orig_node,
+		unsigned int prev_virt_parent_phy)
+{
+	if (!node_queue(node)) {
+		QOS_LOG_ERR("Node is not a queue\n");
+		return 0;
+	}
+
+	return node_cfg_valid(qdev, node, orig_node, prev_virt_parent_phy);
+}
+
+static int check_queue_conf_validity(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_queue_conf *conf,
+		struct qos_node *node,
+		uint32_t *modified)
+{
+	unsigned int phy;
+	int rc;
+	struct qos_node *nodep;
+
+	/*
+	 * The phy of the current first ancestor which is not an
+	 * internal scheduler, or QOS_INVALID_PHY if this is a new queue
+	 */
+	unsigned int prev_virt_parent_phy;
+
+	/* Check if node id is valid */
+	phy = get_phy_from_id(qdev->mapping, id);
+	if (!QOS_PHY_VALID(phy)) {
+		if (!QOS_PHY_UNKNOWN(phy)) {
+			QOS_LOG_ERR("Id %u is not a node\n", id);
+			return -EINVAL;
+		}
+		nodep = NULL;
+
+		/* New queue which has id, but no phy was allocated for it */
+		node_queue_init(qdev, node);
+		node->data.queue.rlm = pp_pool_get(qdev->rlms);
+		if (node->data.queue.rlm == QOS_INVALID_RLM) {
+			QOS_LOG_ERR("Can't get free rlm\n");
+			return -EBUSY;
+		}
+		QOS_BITS_SET(node->flags, QOS_NODE_FLAGS_USED);
+		QOS_BITS_SET(*modified,
+			     QOS_MODIFIED_NODE_TYPE |
+			     QOS_MODIFIED_SUSPENDED |
+			     QOS_MODIFIED_BANDWIDTH_LIMIT |
+			     QOS_MODIFIED_PRIORITY |
+			     QOS_MODIFIED_VIRT_BW_SHARE |
+			     QOS_MODIFIED_PARENT |
+			     QOS_MODIFIED_MAX_BURST |
+			     QOS_MODIFIED_BLOCKED |
+			     QOS_MODIFIED_WRED_ENABLE |
+			     QOS_MODIFIED_WRED_FIXED_DROP_PROB_ENABLE |
+			     QOS_MODIFIED_WRED_FIXED_GREEN_PROB |
+			     QOS_MODIFIED_WRED_FIXED_YELLOW_PROB |
+			     QOS_MODIFIED_WRED_MIN_GREEN |
+			     QOS_MODIFIED_WRED_MAX_GREEN |
+			     QOS_MODIFIED_WRED_SLOPE_GREEN |
+			     QOS_MODIFIED_WRED_MIN_YELLOW |
+			     QOS_MODIFIED_WRED_MAX_YELLOW |
+			     QOS_MODIFIED_WRED_SLOPE_YELLOW |
+			     QOS_MODIFIED_WRED_MAX_ALLOWED |
+			     QOS_MODIFIED_WRED_MIN_GUARANTEED |
+			     QOS_MODIFIED_RLM);
+		prev_virt_parent_phy = QOS_INVALID_PHY;
+	} else {
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		if (!node_queue(nodep)) {
+			QOS_LOG_ERR("Id %u is not a queue\n", id);
+			return -EINVAL;
+		}
+		memcpy(node, nodep, sizeof(struct qos_node));
+		prev_virt_parent_phy = get_virtual_parent_phy(
+				qdev->nodes,
+				node);
+	}
+
+	/*
+	 * Before calling set_node_prop:
+	 * If new node
+	 *	node.child.parent.phy = INVALID_PHY
+	 *	prev_virt_parent_phy = INVALID_PHY
+	 * Else
+	 *	node.child.parent.phy = actual parent
+	 *	prev_virt_parent_phy = virtual parent
+	 *	(i.e. first non internal ancestor)
+	 *
+	 * When returning:
+	 * If new node
+	 *     node.child.parent.phy == virtual parent phy
+	 * Else
+	 *     If map_id_to_phy(conf.parent) != prev_virt_parent_phy
+	 *		node.child.parent.phy = map_id_to_phy(conf.parent)
+	 *     Else
+	 *		node.child.parent.phy = actual parent (no change)
+	 *
+	 */
+	rc = set_node_prop(
+			qdev,
+			node,
+			&conf->common_prop,
+			NULL,
+			&conf->queue_child_prop,
+			modified);
+	if (rc)
+		return rc;
+
+	rc = set_queue_specific_prop(qdev, node, conf, modified);
+	if (rc)
+		return rc;
+
+	if (!queue_cfg_valid(qdev, node, nodep, prev_virt_parent_phy))
+		return -EINVAL;
+
+	return 0;
+}
+
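+/**
+ * pp_qos_queue_conf_set_default() - Init queue configuration with defaults
+ * @conf: configuration struct to initialize
+ *
+ * Defaults are no bandwidth limit and no shared bandwidth group.
+ */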
+void pp_qos_queue_conf_set_default(struct pp_qos_queue_conf *conf)
+{
+	memset(conf, 0, sizeof(struct pp_qos_queue_conf));
+	conf->common_prop.bandwidth_limit = QOS_NO_BANDWIDTH_LIMIT;
+	conf->common_prop.shared_bandwidth_group =
+		QOS_NO_SHARED_BANDWIDTH_GROUP;
+}
+
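+/**
+ * pp_qos_queue_allocate() - Allocate an id for a new queue
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   upon success holds the new queue's id
+ *
+ * The queue is bound to a physical node and rlm only on the first
+ * pp_qos_queue_set for this id.
+ *
+ * Return: 0 on success
+ */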
+int pp_qos_queue_allocate(struct pp_qos_dev *qdev, unsigned int *id)
+{
+	int rc;
+	uint16_t _id;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	_id = pp_pool_get(qdev->ids);
+	QOS_ASSERT(QOS_ID_VALID(_id), "got illegal id %u\n", _id);
+	*id = _id;
+	map_id_reserved(qdev->mapping, _id);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
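+
+/*
+ * Typical queue creation flow, as an illustrative sketch only (error
+ * handling omitted; the name of the parent id field inside
+ * queue_child_prop is assumed here, see the public header for the
+ * exact layout):
+ *
+ *	unsigned int qid;
+ *	struct pp_qos_queue_conf conf;
+ *
+ *	pp_qos_queue_allocate(qdev, &qid);
+ *	pp_qos_queue_conf_set_default(&conf);
+ *	conf.queue_child_prop.parent = port_id;
+ *	pp_qos_queue_set(qdev, qid, &conf);
+ */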
+
+static int _pp_qos_queue_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_queue_conf *conf)
+{
+	const struct qos_node *node;
+
+	if (conf == NULL)
+		return -EINVAL;
+
+	node = get_conform_node(qdev, id, node_queue);
+	if (!node)
+		return -EINVAL;
+
+	conf->max_burst = node->data.queue.max_burst;
+	conf->queue_wred_min_avg_green = node->data.queue.green_min;
+	conf->queue_wred_max_avg_green = node->data.queue.green_max;
+	conf->queue_wred_slope_green = node->data.queue.green_slope;
+	conf->queue_wred_min_avg_yellow = node->data.queue.yellow_min;
+	conf->queue_wred_max_avg_yellow = node->data.queue.yellow_max;
+	conf->queue_wred_slope_yellow = node->data.queue.yellow_slope;
+	conf->queue_wred_max_allowed = node->data.queue.max_allowed;
+	conf->queue_wred_min_guaranteed = node->data.queue.min_guaranteed;
+	conf->queue_wred_fixed_drop_prob_green =
+		node->data.queue.fixed_green_prob;
+	conf->queue_wred_fixed_drop_prob_yellow =
+		node->data.queue.fixed_yellow_prob;
+	conf->wred_enable =
+		!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_QUEUE_WRED_ENABLE);
+	conf->wred_fixed_drop_prob_enable =
+		!!QOS_BITS_IS_SET(node->flags,
+			QOS_NODE_FLAGS_QUEUE_WRED_FIXED_DROP_PROB_ENABLE);
+	conf->blocked =
+		!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_QUEUE_BLOCKED);
+	return get_node_prop(
+			qdev,
+			node,
+			&conf->common_prop,
+			NULL,
+			&conf->queue_child_prop);
+}
+
+int pp_qos_queue_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_queue_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_conf_get(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_queue_remove(struct pp_qos_dev *qdev, unsigned int id)
+{
+	struct qos_node *node;
+
+	node = get_conform_node(qdev, id, node_queue);
+	if (!node)
+		return -EINVAL;
+
+	return node_remove(qdev, node);
+}
+
+/**
+ * pp_qos_queue_remove() - Remove a queue
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Note: the client should make sure the queue is empty and that no new
+ * packets are enqueued to it, by calling pp_qos_queue_disable and
+ * pp_qos_queue_flush
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_remove(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_remove(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+
+/**
+ * pp_qos_queue_set() - Set queue configuration
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ * @conf: new configuration for the queue
+ *
+ * Return: 0 on success
+ */
+static int _pp_qos_queue_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_queue_conf *conf)
+{
+	int rc;
+	unsigned int phy;
+	unsigned int prev_phy;
+	struct qos_node *parent;
+	struct qos_node node;
+	struct qos_node *nodep;
+	uint32_t modified;
+	int parent_changed;
+	unsigned int dst_port;
+
+	modified = 0;
+	nodep = NULL;
+
+	rc = check_queue_conf_validity(qdev, id, conf, &node, &modified);
+	parent_changed = QOS_BITS_IS_SET(modified, QOS_MODIFIED_PARENT);
+	if (rc)
+		goto out;
+
+	if (parent_changed) {
+		parent = get_node_from_phy(
+				qdev->nodes,
+				node.child_prop.parent_phy);
+		phy = phy_alloc_by_parent(
+				qdev,
+				parent,
+				conf->queue_child_prop.priority);
+		if (!QOS_PHY_VALID(phy)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		node.child_prop.parent_phy = nodep->child_prop.parent_phy;
+		memcpy(nodep, &node, sizeof(struct qos_node));
+
+		/* If this is not a new queue - delete previous node */
+		if (!QOS_BITS_IS_SET(modified, QOS_MODIFIED_NODE_TYPE)) {
+			prev_phy = get_phy_from_id(qdev->mapping, id);
+			dst_port = get_port(qdev->nodes, phy);
+			create_move_cmd(qdev, phy, prev_phy, dst_port);
+			map_invalidate_id(qdev->mapping, id);
+			map_id_phy(qdev->mapping, id, phy);
+			node_phy_delete(qdev, prev_phy);
+			phy = get_phy_from_id(qdev->mapping, id);
+			nodep = get_node_from_phy(qdev->nodes, phy);
+		} else {
+			map_id_phy(qdev->mapping, id, phy);
+		}
+
+		parent = get_node_from_phy(
+				qdev->nodes,
+				nodep->child_prop.parent_phy);
+		if (nodep->child_prop.virt_bw_share && node_internal(parent))
+			update_internal_bandwidth(qdev, parent);
+	} else {
+		phy = get_phy_from_id(qdev->mapping, id);
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		memcpy(nodep, &node, sizeof(struct qos_node));
+	}
+
+	if (modified) {
+		create_set_queue_cmd(
+				qdev,
+				conf,
+				phy,
+				nodep->child_prop.parent_phy,
+				nodep->data.queue.rlm,
+				modified);
+		create_update_preds_cmd(qdev, phy);
+	}
+out:
+	if (rc && parent_changed &&
+	    QOS_BITS_IS_SET(modified, QOS_MODIFIED_RLM))
+		release_rlm(qdev->rlms, node.data.queue.rlm);
+	return rc;
+}
+
+int pp_qos_queue_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_queue_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_queue_set(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_queue_suspend(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_queue_conf conf;
+
+	rc = _pp_qos_queue_conf_get(qdev, id, &conf);
+	if (rc)
+		return rc;
+	conf.common_prop.suspended = 1;
+	return _pp_qos_queue_set(qdev, id, &conf);
+}
+
+/**
+ * pp_qos_queue_suspend() - Remove queue from scheduling (doesn't get bandwidth)
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_suspend(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_suspend(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_queue_resume(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_queue_conf conf;
+
+	rc = _pp_qos_queue_conf_get(qdev, id, &conf);
+	if (rc)
+		return rc;
+	conf.common_prop.suspended = 0;
+	return _pp_qos_queue_set(qdev, id, &conf);
+}
+
+/**
+ * pp_qos_queue_resume() - Return queue to scheduling
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_resume(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_resume(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int _pp_qos_queue_block(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_queue_conf conf;
+
+	rc = _pp_qos_queue_conf_get(qdev, id, &conf);
+	if (rc)
+		return rc;
+	conf.blocked = 1;
+	return _pp_qos_queue_set(qdev, id, &conf);
+}
+
+/**
+ * pp_qos_queue_block() - Drop all new packets enqueued to this queue
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Note: already enqueued descriptors will still be transmitted
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_block(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_block(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int _pp_qos_queue_unblock(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_queue_conf conf;
+
+	rc = _pp_qos_queue_conf_get(qdev, id, &conf);
+	if (rc)
+		return rc;
+
+	conf.blocked = 0;
+	return _pp_qos_queue_set(qdev, id, &conf);
+}
+
+/**
+ * pp_qos_queue_unblock() - Unblock enqueuing of new packets
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_unblock(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_unblock(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int _pp_qos_queue_flush(struct pp_qos_dev *qdev, unsigned int id)
+{
+	struct qos_node *node;
+
+	node = get_conform_node(qdev, id, node_queue);
+	if (!node)
+		return -EINVAL;
+	return node_flush(qdev, node);
+}
+
+/**
+ * pp_qos_queue_flush() - Drop all enqueued packets
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_flush(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_queue_flush(qdev, id);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/**
+ * pp_qos_queue_info_get() - Get information about queue
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ * @info: pointer to struct to be filled with queue's info
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_info_get(struct pp_qos_dev *qdev, unsigned int id,
+			  struct pp_qos_queue_info *info)
+{
+	int rc;
+	struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (info == NULL) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_queue);
+
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	info->physical_id = node->data.queue.rlm;
+	info->port_id = get_port(
+			qdev->nodes,
+			get_phy_from_node(qdev->nodes, node));
+	rc = 0;
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/**
+ * pp_qos_queue_stat_get() - Get queue's statistics
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:	  queue's id obtained from queue_allocate
+ * @stat: pointer to struct to be filled with queue's statistics
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_stat_get(struct pp_qos_dev *qdev, unsigned int id,
+			  struct pp_qos_queue_stat *stat)
+{
+	int rc;
+	struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_queue);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	create_get_queue_stats_cmd(
+			qdev,
+			get_phy_from_node(qdev->nodes, node),
+			node->data.queue.rlm,
+			qdev->hwconf.fw_stat,
+			stat);
+
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/******************************************************************************/
+/*                                 Schedulers                                 */
+/******************************************************************************/
+static int _pp_qos_sched_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_sched_conf *conf);
+static int _pp_qos_sched_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_sched_conf *conf);
+
+static void node_sched_init(struct pp_qos_dev *qdev, struct qos_node *node)
+{
+	node_init(qdev, node, 1, 1, 1);
+	node->type = TYPE_SCHED;
+}
+
+STATIC_UNLESS_TEST int sched_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		const struct qos_node *orig_node,
+		unsigned int prev_virt_parent_phy)
+{
+	if (!node_sched(node)) {
+		QOS_LOG_ERR("Node is not a sched\n");
+		return 0;
+	}
+
+	return node_cfg_valid(qdev, node, orig_node, prev_virt_parent_phy);
+}
+
+static int check_sched_conf_validity(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_sched_conf *conf,
+		struct qos_node *node,
+		uint32_t *modified)
+{
+	unsigned int phy;
+	int rc;
+	struct qos_node *nodep;
+	unsigned int prev_virt_parent_phy;
+
+	/* Check if node id is valid */
+	phy = get_phy_from_id(qdev->mapping, id);
+	if (!QOS_PHY_VALID(phy)) {
+		if (!QOS_PHY_UNKNOWN(phy)) {
+			QOS_LOG_ERR("Id %u is not a node\n", id);
+			return -EINVAL;
+		}
+
+		/* New sched which has id, but no phy was allocated for it */
+		node_sched_init(qdev, node);
+		QOS_BITS_SET(node->flags, QOS_NODE_FLAGS_USED);
+		QOS_BITS_SET(*modified,
+			     QOS_MODIFIED_NODE_TYPE |
+			     QOS_MODIFIED_SUSPENDED |
+			     QOS_MODIFIED_SHARED_GROUP_LIMIT |
+			     QOS_MODIFIED_BANDWIDTH_LIMIT |
+			     QOS_MODIFIED_PRIORITY |
+			     QOS_MODIFIED_VIRT_BW_SHARE |
+			     QOS_MODIFIED_PARENT |
+			     QOS_MODIFIED_ARBITRATION |
+			     QOS_MODIFIED_BEST_EFFORT);
+
+		nodep = NULL;
+		prev_virt_parent_phy = QOS_INVALID_PHY;
+	} else {
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		if (!node_sched(nodep)) {
+			QOS_LOG_ERR("Id %u is not a sched\n", id);
+			return -EINVAL;
+		}
+		memcpy(node, nodep, sizeof(struct qos_node));
+		prev_virt_parent_phy = get_virtual_parent_phy(
+				qdev->nodes,
+				node);
+	}
+
+	rc = set_node_prop(qdev,
+			node,
+			&conf->common_prop,
+			&conf->sched_parent_prop,
+			&conf->sched_child_prop,
+			modified);
+	if (rc)
+		return rc;
+
+	if (!sched_cfg_valid(qdev, node, nodep, prev_virt_parent_phy))
+		return -EINVAL;
+
+	return 0;
+}
+
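+/**
+ * pp_qos_sched_conf_set_default() - Init sched configuration with defaults
+ * @conf: configuration struct to initialize
+ *
+ * Defaults are no bandwidth limit, no shared bandwidth group and WRR
+ * arbitration.
+ */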
+void pp_qos_sched_conf_set_default(struct pp_qos_sched_conf *conf)
+{
+	memset(conf, 0, sizeof(struct pp_qos_sched_conf));
+	conf->common_prop.bandwidth_limit = QOS_NO_BANDWIDTH_LIMIT;
+	conf->common_prop.shared_bandwidth_group =
+		QOS_NO_SHARED_BANDWIDTH_GROUP;
+	conf->sched_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+}
+
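+/**
+ * pp_qos_sched_allocate() - Allocate an id for a new scheduler
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   upon success holds the new scheduler's id
+ *
+ * Return: 0 on success
+ */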
+int pp_qos_sched_allocate(struct pp_qos_dev *qdev, unsigned int *id)
+{
+	uint16_t _id;
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	_id = pp_pool_get(qdev->ids);
+	QOS_ASSERT(QOS_ID_VALID(_id), "got illegal id %u\n", _id);
+	*id = _id;
+	map_id_reserved(qdev->mapping, _id);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
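+/**
+ * pp_qos_sched_remove() - Remove a scheduler
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   scheduler's id obtained from sched_allocate
+ *
+ * Return: 0 on success
+ */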
+int pp_qos_sched_remove(struct pp_qos_dev *qdev, unsigned int id)
+{
+	struct qos_node *node;
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	node = get_conform_node(qdev, id, node_sched);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = tree_remove(qdev, get_phy_from_node(qdev->nodes, node));
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_sched_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_sched_conf *conf)
+{
+	int rc;
+	unsigned int phy;
+	unsigned int prev_phy;
+	struct qos_node *parent;
+	struct qos_node node;
+	struct qos_node *nodep;
+	struct qos_node *oldnode;
+	uint32_t modified;
+	unsigned int dst_port;
+
+	modified = 0;
+
+	rc = check_sched_conf_validity(qdev, id, conf, &node, &modified);
+	if (rc)
+		return rc;
+
+	if (QOS_BITS_IS_SET(modified, QOS_MODIFIED_PARENT)) {
+		parent = get_node_from_phy(
+				qdev->nodes,
+				node.child_prop.parent_phy);
+		phy = phy_alloc_by_parent(
+				qdev,
+				parent,
+				conf->sched_child_prop.priority);
+		if (!QOS_PHY_VALID(phy))
+			return -EINVAL;
+
+		/*
+		 * nodep's parent_phy holds value of actual parent, node's
+		 * parent_phy holds phy of virtual parent
+		 */
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		node.child_prop.parent_phy = nodep->child_prop.parent_phy;
+		memcpy(nodep, &node, sizeof(struct qos_node));
+
+		/* If this is not a new sched - delete previous node */
+		if (!QOS_BITS_IS_SET(modified, QOS_MODIFIED_NODE_TYPE)) {
+			prev_phy = get_phy_from_id(qdev->mapping, id);
+			dst_port = get_port(qdev->nodes, phy);
+			create_move_cmd(qdev, phy, prev_phy, dst_port);
+			node_update_children(qdev, prev_phy, phy);
+			map_invalidate_id(qdev->mapping, id);
+			map_id_phy(qdev->mapping, id, phy);
+			oldnode = get_node_from_phy(qdev->nodes, prev_phy);
+			nodep->parent_prop.first_child_phy =
+				oldnode->parent_prop.first_child_phy;
+			nodep->parent_prop.num_of_children =
+				oldnode->parent_prop.num_of_children;
+			oldnode->parent_prop.first_child_phy = QOS_INVALID_PHY;
+			oldnode->parent_prop.num_of_children = 0;
+			node_phy_delete(qdev, prev_phy);
+			phy = get_phy_from_id(qdev->mapping, id);
+			nodep = get_node_from_phy(qdev->nodes, phy);
+			tree_update_predecessors(qdev, phy);
+		} else {
+			map_id_phy(qdev->mapping, id, phy);
+		}
+
+		parent = get_node_from_phy(
+				qdev->nodes,
+				nodep->child_prop.parent_phy);
+		if (nodep->child_prop.virt_bw_share && node_internal(parent))
+			update_internal_bandwidth(qdev, parent);
+	} else {
+		phy = get_phy_from_id(qdev->mapping, id);
+		nodep = get_node_from_phy(qdev->nodes, phy);
+		memcpy(nodep, &node, sizeof(struct qos_node));
+	}
+
+	if (modified) {
+		create_set_sched_cmd(
+				qdev,
+				conf,
+				phy,
+				nodep->child_prop.parent_phy,
+				modified);
+		create_update_preds_cmd(qdev, phy);
+	}
+	return 0;
+}
+
+int pp_qos_sched_set(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		const struct pp_qos_sched_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_sched_set(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_sched_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_sched_conf *conf)
+{
+	const struct qos_node *node;
+
+	if (conf == NULL)
+		return -EINVAL;
+
+	node = get_conform_node(qdev, id, node_sched);
+	if (!node)
+		return -EINVAL;
+
+	return get_node_prop(
+			qdev,
+			node,
+			&conf->common_prop,
+			&conf->sched_parent_prop,
+			&conf->sched_child_prop);
+}
+
+int pp_qos_sched_conf_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_sched_conf *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_sched_conf_get(qdev, id, conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_sched_suspend(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_sched_conf conf;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_sched_conf_get(qdev, id, &conf);
+	if (rc)
+		goto out;
+	conf.common_prop.suspended = 1;
+	rc = _pp_qos_sched_set(qdev, id, &conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_sched_resume(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	struct pp_qos_sched_conf conf;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = _pp_qos_sched_conf_get(qdev, id, &conf);
+	if (rc)
+		goto out;
+	conf.common_prop.suspended = 0;
+	rc = _pp_qos_sched_set(qdev, id, &conf);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_sched_info_get(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_sched_info *info)
+{
+	int rc;
+	struct qos_node *node;
+	unsigned int phy;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (info == NULL) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_sched);
+
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+	phy = get_phy_from_node(qdev->nodes, node);
+	info->port_id = get_port(qdev->nodes, phy);
+	get_node_queues(qdev, phy, NULL, 0, &info->num_of_queues);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_sched_get_queues(struct pp_qos_dev *qdev, unsigned int id,
+			    uint16_t *queue_ids, unsigned int size,
+			    unsigned int *queues_num)
+{
+	int rc;
+	unsigned int phy;
+	const struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_sched);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	get_node_queues(qdev, phy, queue_ids, size, queues_num);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
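+/**
+ * pp_qos_get_fw_version() - Get the qos firmware version
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @major: holds firmware major version
+ * @minor: holds firmware minor version
+ * @build: holds firmware build number
+ *
+ * Return: 0 on success
+ */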
+int pp_qos_get_fw_version(
+		struct pp_qos_dev *qdev,
+		unsigned int *major,
+		unsigned int *minor,
+		unsigned int *build)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	*major = qdev->fwver.major;
+	*minor = qdev->fwver.minor;
+	*build = qdev->fwver.build;
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/******************************************************************************/
+/*                                 Node                                       */
+/******************************************************************************/
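+/**
+ * pp_qos_get_node_info() - Get generic information about a node
+ * @qdev: handle to qos device instance obtained previously from qos_dev_init
+ * @id:   node's id
+ * @info: pointer to struct to be filled with the node's info
+ *
+ * Return: 0 on success
+ */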
+int pp_qos_get_node_info(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		struct pp_qos_node_info *info)
+{
+	int rc;
+	struct qos_node *node;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	node = get_conform_node(qdev, id, node_used);
+	if (!node) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	create_get_node_info_cmd(
+			qdev,
+			get_phy_from_node(qdev->nodes, node),
+			qdev->hwconf.fw_stat,
+			info);
+
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+/******************************************************************************/
+/*                                 SHARED_LIMIT                               */
+/******************************************************************************/
+static int _pp_qos_shared_limit_group_get_members(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		uint16_t *members,
+		unsigned int size,
+		unsigned int *members_num);
+
+int pp_qos_shared_limit_group_add(
+		struct pp_qos_dev *qdev,
+		unsigned int limit,
+		unsigned int *id)
+{
+	unsigned int i;
+	int rc;
+	struct shared_bandwidth_group *grp;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
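+	/*
+	 * Group ids start at 1; index 0 is skipped, presumably since it
+	 * serves as the QOS_NO_SHARED_BANDWIDTH_GROUP marker.
+	 */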
+	grp = qdev->groups + 1;
+	for (i = 1; i <= QOS_MAX_SHARED_BANDWIDTH_GROUP; ++grp, ++i) {
+		if (!grp->used)
+			break;
+	}
+
+	/* No free entry found: grp points one past the last group */
+	if (i > QOS_MAX_SHARED_BANDWIDTH_GROUP) {
+		QOS_LOG_ERR("No free shared groups\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
+	grp->used = 1;
+	grp->limit = limit;
+	*id = i;
+
+	create_add_shared_group_cmd(qdev, i, limit);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_shared_limit_group_remove(struct pp_qos_dev *qdev, unsigned int id)
+{
+	int rc;
+	unsigned int num;
+	uint16_t tmp;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	if (QOS_BW_GRP_VALID(id) && (qdev->groups[id].used)) {
+		_pp_qos_shared_limit_group_get_members(
+				qdev, id, &tmp, 1, &num);
+		if (num) {
+			QOS_LOG_ERR(
+					"Shared group %u has members, can't remove\n",
+					id);
+			rc = -EBUSY;
+			goto out;
+		}
+		qdev->groups[id].used = 0;
+		create_remove_shared_group_cmd(qdev, id);
+		update_cmd_id(&qdev->drvcmds);
+		transmit_cmds(qdev);
+		rc = 0;
+	} else {
+		QOS_LOG_ERR("Shared group %u is not used\n", id);
+		rc = -EINVAL;
+	}
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+int pp_qos_shared_limit_group_modify(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		unsigned int limit)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	if (QOS_BW_GRP_VALID(id) && (qdev->groups[id].used)) {
+		if (qdev->groups[id].limit != limit) {
+			qdev->groups[id].limit = limit;
+			create_set_shared_group_cmd(qdev, id, limit);
+			update_cmd_id(&qdev->drvcmds);
+			transmit_cmds(qdev);
+		}
+		rc = 0;
+	} else {
+		QOS_LOG_ERR("Shared group %u is not used\n", id);
+		rc = -EINVAL;
+	}
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
+static int _pp_qos_shared_limit_group_get_members(
+		struct pp_qos_dev *qdev, unsigned int id,
+		uint16_t *members,
+		unsigned int size,
+		unsigned int *members_num)
+{
+	unsigned int phy;
+	unsigned int total;
+	unsigned int tmp;
+	const struct qos_node *node;
+
+	total = 0;
+	for (phy = 0; (phy <= qdev->max_port) && (size > 0); ++phy) {
+		node = get_const_node_from_phy(qdev->nodes, phy);
+		if (node_used(node)) {
+			get_bw_grp_members_under_node(
+					qdev,
+					id,
+					phy,
+					members,
+					size,
+					&tmp);
+			total += tmp;
+			members += tmp;
+			if (tmp < size)
+				size -= tmp;
+			else
+				size = 0;
+		}
+	}
+
+	*members_num = total;
+	return 0;
+}
+
+int pp_qos_shared_limit_group_get_members(
+		struct pp_qos_dev *qdev,
+		unsigned int id,
+		uint16_t *members,
+		unsigned int size,
+		unsigned int *members_num)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (!qdev->initialized) {
+		QOS_LOG_ERR("Device was not initialized\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = _pp_qos_shared_limit_group_get_members(
+			qdev, id, members, size, members_num);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+out:
+	QOS_UNLOCK(qdev);
+	return rc;
+}
+
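+/**
+ * pp_qos_dev_open() - Get handle to a qos device instance
+ * @id: qos instance id
+ *
+ * Return: the instance's handle, or NULL if id is illegal or the
+ * instance was not created
+ */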
+struct pp_qos_dev *pp_qos_dev_open(unsigned int id)
+{
+	if (id < MAX_QOS_INSTANCES)
+		return qos_devs[id];
+
+	QOS_LOG_ERR("Illegal qos instance %u\n", id);
+	return NULL;
+}
+
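+/**
+ * pp_qos_dev_init() - Initialize a qos device instance
+ * @qdev: handle obtained from pp_qos_dev_open
+ * @conf: initialization parameters
+ *
+ * Verifies that max_port is the last node of an octet and that
+ * wred_p_const does not exceed 1024, then loads the firmware and sends
+ * the init commands to it.
+ *
+ * Return: 0 on success
+ */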
+int pp_qos_dev_init(struct pp_qos_dev *qdev, struct pp_qos_init_param *conf)
+{
+	int rc;
+
+	QOS_LOCK(qdev);
+	PP_QOS_ENTER_FUNC();
+	if (qdev->initialized) {
+		QOS_LOG_ERR(
+				"Device already initialized, can't initialize again\n");
+		rc = -EINVAL;
+		goto out;
+	}
+	if ((qdev->max_port + 1) & 7) {
+		QOS_LOG_ERR("Given max port %u is not last node of an octet\n",
+				qdev->max_port);
+		rc = -EINVAL;
+		goto out;
+	}
+	qdev->portsphys = free_ports_phys_init(
+			qdev->reserved_ports,
+			qdev->max_port,
+			conf->reserved_ports,
+			MAX_PORTS);
+
+	qdev->hwconf.wred_total_avail_resources =
+		conf->wred_total_avail_resources;
+	if (conf->wred_p_const > 1024) {
+		QOS_LOG_ERR("wred_p_const must not be greater than 1024\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	qdev->hwconf.wred_const_p = conf->wred_p_const;
+	qdev->hwconf.wred_max_q_size = conf->wred_max_q_size;
+
+	rc = load_firmware(qdev, FIRMWARE_FILE);
+	if (rc)
+		goto out;
+
+	create_init_logger_cmd(qdev);
+	create_init_qos_cmd(qdev);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+
+	qdev->initialized = 1;
+	rc = 0;
+out:
+	QOS_UNLOCK(qdev);
+	QOS_LOG_DEBUG("pp_qos_dev_init completed with %d\n", rc);
+	return rc;
+}
+
+struct pp_qos_dev *create_qos_dev_desc(struct qos_dev_init_info *initinfo)
+{
+	struct pp_qos_dev *qdev;
+	int rc;
+	unsigned int id;
+
+	id = initinfo->pl_data.id;
+	if (id >= MAX_QOS_INSTANCES) {
+		QOS_LOG_ERR("Illegal qos instance %u\n", id);
+		return NULL;
+	}
+
+	if (qos_devs[id]) {
+		QOS_LOG_ERR("qos instance %u already exists\n", id);
+		return NULL;
+	}
+
+	qdev = _qos_init(initinfo->pl_data.max_port);
+	if (qdev) {
+		qdev->hwconf.wred_prioritize_pop =
+			initinfo->pl_data.wred_prioritize_pop;
+		qdev->hwconf.qm_ddr_start = initinfo->pl_data.qm_ddr_start;
+		qdev->hwconf.qm_num_pages = initinfo->pl_data.qm_num_pages;
+		qdev->hwconf.fw_logger_start =
+			initinfo->pl_data.fw_logger_start;
+		qdev->hwconf.fw_stat = initinfo->pl_data.fw_stat;
+		memcpy(&qdev->fwcom, &initinfo->fwcom, sizeof(struct fw_com));
+		rc = init_fwdata_internals(qdev);
+		if (rc)
+			goto err;
+		qos_devs[id] = qdev;
+		QOS_LOG_INFO("Initialized qos instance\nmax_port:\t\t%u\n",
+				qdev->max_port);
+		QOS_LOG_INFO("qm_ddr_start:\t\t0x%08X\n",
+				qdev->hwconf.qm_ddr_start);
+		QOS_LOG_INFO("qm_num_of_pages:\t\t%u\n",
+				qdev->hwconf.qm_num_pages);
+		QOS_LOG_INFO("fw_logger_start:\t0x%08X\n",
+				qdev->hwconf.fw_logger_start);
+		QOS_LOG_INFO("fw_stat:\t\t0x%08X\n",
+				qdev->hwconf.fw_stat);
+		QOS_LOG_INFO("cmdbuf:\t\t0x%08X\ncmdbuf size:\t\t%zu\n",
+				(unsigned int)(uintptr_t)qdev->fwcom.cmdbuf,
+				qdev->fwcom.cmdbuf_sz);
+	} else {
+		QOS_LOG_CRIT("Failed creating qos instance %u\n", id);
+	}
+
+	return qdev;
+err:
+	_qos_clean(qdev);
+	QOS_LOG_CRIT("Failed creating qos instance %u\n", id);
+	return NULL;
+}
+
+void remove_qos_instance(unsigned int id)
+{
+	if (id >= MAX_QOS_INSTANCES) {
+		QOS_LOG_ERR("Illegal qos instance %u\n", id);
+		return;
+	}
+
+	if (qos_devs[id] == NULL) {
+		QOS_LOG_ERR("qos instance %u does not exist\n", id);
+		return;
+	}
+
+	_qos_clean(qos_devs[id]);
+	qos_devs[id] = NULL;
+	QOS_LOG_INFO("removed qos instance %u\n", id);
+}
+
+void qos_module_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_QOS_INSTANCES; ++i)
+		qos_devs[i] = NULL;
+
+	QOS_LOG_INFO("qos_module_init completed\n");
+}
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_tests.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_tests.c
new file mode 100644
index 000000000..d4fe2a643
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_tests.c
@@ -0,0 +1,2574 @@
+#ifdef PP_QOS_TEST
+#include "pp_qos_fw.h"
+#include "pp_qos_utils.h"
+
+#define QOS_INVALID_PRIORITY   0xF
+
+/*
+ * Check the following for each octet:
+ * Number of nodes marked as used on each octet matches octet usage
+ * Ports' octets are not enqueued
+ * Non ports' octets are enqueued on a list which matches their usage count
+ * No unused node interleaves a sequence of used nodes
+ *
+ */
+static void check_octets(struct pp_qos_dev *qdev)
+{
+	int i;
+	int j;
+	int unused_seen;
+	uint8_t usage;
+	int last;
+	struct qos_node *node;
+
+	last = octet_of_phy(qdev->max_port);
+
+	for (i = 0; i < NUM_OF_OCTETS; ++i) {
+		node = get_node_from_phy(qdev->nodes, 8 * i);
+		usage = 0;
+		unused_seen = 0;
+		for (j = 0; j < 8; ++j) {
+			if (node_used(node)) {
+				if (i > last)
+					QOS_ASSERT(!unused_seen, "Node %u is used after previous node in same octet was unused\n", i * 8 + j);
+				usage++;
+			} else {
+				unused_seen = 1;
+			}
+			node++;
+		}
+		debug_verify_octet_usage(qdev->octets, i, usage);
+	}
+}
+
+/*
+ * Check the following for each id:
+ *   If id is mapped to a valid phy
+ *	- Node designated by this phy is marked as used
+ *	- The inverse mapping from phy maps to the same id
+ *
+ * Check the following for each phy:
+ *   If phy designates a used node
+ *     - It is mapped to valid id
+ *     - The inverse mapping from id maps to the same phy
+ *
+ */
+static void check_mapping(struct pp_qos_dev *qdev)
+{
+	uint16_t phy;
+	uint16_t id;
+	struct qos_node *node;
+	unsigned int i;
+
+	for (i = 0; i < NUM_OF_NODES; ++i) {
+		phy = get_phy_from_id(qdev->mapping, i);
+		if (QOS_PHY_VALID(phy)) {
+			node = get_node_from_phy(qdev->nodes, phy);
+			QOS_ASSERT(
+				   node_used(node),
+				   "Id %u is mapped to unused node %u\n", i, phy);
+			id = get_id_from_phy(qdev->mapping, phy);
+			QOS_ASSERT(
+				   id == i,
+				   "Id %u is mapped to phy %u, but the latter is mapped to %u\n", i, phy, id);
+		}
+
+		node = get_node_from_phy(qdev->nodes, i);
+		id = get_id_from_phy(qdev->mapping, i);
+		QOS_ASSERT(!node_used(node) || QOS_ID_VALID(id), "Node %u is used but has invalid id\n", i);
+		if (QOS_ID_VALID(id)) {
+			phy = get_phy_from_id(qdev->mapping, id);
+			QOS_ASSERT(
+				   phy == i,
+				   "Phy %u is mapped to id %u, but the latter is mapped to %u\n", i, id, phy);
+		}
+	}
+}
+
+struct pp_pool {
+	int capacity;
+	int top;
+	uint16_t invalid;
+	uint16_t data[1];
+};
+static uint8_t ids[NUM_OF_NODES];
+static uint8_t rlms[NUM_OF_QUEUES];
+static void check_resources_usage(struct pp_qos_dev *qdev)
+{
+	int i;
+	int id;
+	int rlm;
+	struct qos_node *node;
+
+	memset(ids, 0, NUM_OF_NODES);
+	memset(rlms, 0, NUM_OF_QUEUES);
+
+	node = get_node_from_phy(qdev->nodes, 0);
+	for (i = 0; i < NUM_OF_NODES; ++i) {
+		if (node_used(node)) {
+			id = get_id_from_phy(qdev->mapping, i);
+			QOS_ASSERT(ids[id] == 0, "Id %d of phy %d already found\n", id, i);
+			ids[id] = 1;
+			if (node->type == TYPE_QUEUE) {
+				rlm = node->data.queue.rlm;
+				QOS_ASSERT(rlms[rlm] == 0, "Rlm %d of phy %d already found\n", rlm, i);
+				rlms[rlm] = 1;
+			}
+		}
+
+		if (qdev->ids->top > i) {
+			id = qdev->ids->data[i];
+			QOS_ASSERT(ids[id] == 0, "Id %d on free ids pool already found\n", id);
+			ids[id] = 1;
+		}
+
+		if (qdev->rlms->top > i) {
+			rlm = qdev->rlms->data[i];
+			QOS_ASSERT(rlms[rlm] == 0, "Rlm %d on free rlms pool already found\n", rlm);
+			rlms[rlm] = 1;
+		}
+
+		++node;
+	}
+
+	for (i = 0; i < NUM_OF_NODES; ++i) {
+		QOS_ASSERT(ids[i], "Id %d was not found\n", i);
+		QOS_ASSERT(i >= NUM_OF_QUEUES || rlms[i], "Rlm %d was not found\n", i);
+	}
+}
+
+/*
+ * Check the following for each node:
+ * If node is a parent
+ *	- All the nodes it claims are its children are indeed children and
+ *	  have it as their parent
+ *	- If it is a WSP parent then its children are ordered correctly
+ *	- The sum of shares of its children does not exceed 100
+ *
+ * If node is a child
+ *	- It is indeed recognized by its claimed parent as a child
+ */
+static void check_parent_children(struct pp_qos_dev *qdev)
+{
+	int phy;
+	int i;
+	int check_priorities;
+	int priority;
+	struct qos_node *node;
+	struct qos_node *cur;
+	int first_child_phy;
+	int last_child_phy;
+	int share;
+
+	for (phy = 0; phy < NUM_OF_NODES; ++phy) {
+		node = get_node_from_phy(qdev->nodes, phy);
+		if (node_parent(node)) {
+			i = node->parent_prop.num_of_children;
+			QOS_ASSERT(i >= 0, "Illegal number of children %d for node %d\n", i, phy);
+			check_priorities = (node->parent_prop.arbitration == PP_QOS_ARBITRATION_WSP);
+			priority = -1;
+			if (i)
+				cur = get_node_from_phy(qdev->nodes, node->parent_prop.first_child_phy);
+			for (; i > 0; --i) {
+				QOS_ASSERT(node_child(cur),
+					   "Node %u claims that %u is its child but the latter is not a child (or marked unused)\n", phy, get_phy_from_node(qdev->nodes, cur));
+				QOS_ASSERT(cur->child_prop.parent_phy == phy,
+					   "Node %d has node %u as a child, but the latter has node %u as its parent\n",
+					   phy, get_phy_from_node(qdev->nodes, cur), cur->child_prop.parent_phy);
+				QOS_ASSERT(
+					   !check_priorities || (cur->child_prop.priority > priority),
+					   "Child %u of wsp parent %u has priority %u while previous child priority is %u\n",
+					   get_phy_from_node(qdev->nodes, cur), phy, cur->child_prop.priority, priority);
+
+				if (check_priorities)
+					priority = cur->child_prop.priority;
+
+				++cur;
+			}
+
+			share = get_children_bandwidth_share(qdev, node);
+			QOS_ASSERT(share <= 100, "Bandwidth share of node %d's children is %d\n", phy, share);
+			if (node_internal(node))
+				QOS_ASSERT(share == node->child_prop.virt_bw_share, "Internal scheduler: children bandwidth sum is %d while parent's share is %d\n", share, node->child_prop.virt_bw_share);
+		}
+
+		if (node_child(node)) {
+			cur = get_node_from_phy(qdev->nodes, node->child_prop.parent_phy);
+			QOS_ASSERT(node_parent(cur), "Node %u claims that %u is its parent, but the latter is not a parent (or marked unused)\n", phy, get_phy_from_node(qdev->nodes, cur));
+			first_child_phy = cur->parent_prop.first_child_phy;
+			last_child_phy = first_child_phy + cur->parent_prop.num_of_children - 1;
+			QOS_ASSERT((phy >= first_child_phy) && (phy <= last_child_phy),
+				   "Node %d has node %u as a parent, but the latter's children are %u..%u\n",
+				   phy, get_phy_from_node(qdev->nodes, cur), first_child_phy, last_child_phy);
+		}
+		++node;
+	}
+}
+
+static void check_consistency(struct pp_qos_dev *qdev)
+{
+	QOS_ASSERT(!QOS_SPIN_IS_LOCKED(qdev), "Lock is taken\n");
+	check_octets(qdev);
+	check_parent_children(qdev);
+	check_mapping(qdev);
+}
+
+static void check_consistency_with_resources(struct pp_qos_dev *qdev)
+{
+	check_consistency(qdev);
+	check_resources_usage(qdev);
+}
+
+static void create_nodes(struct pp_qos_dev *qdev, uint16_t start_phy, int count)
+{
+	int i;
+	uint16_t id;
+	struct qos_node *node;
+
+	nodes_modify_used_status(qdev, start_phy, count, 1);
+	node = get_node_from_phy(qdev->nodes, start_phy);
+	for (i = start_phy; i < start_phy + count; ++i) {
+		id = pp_pool_get(qdev->ids);
+		map_id_phy(qdev->mapping, id, i);
+		node->type = TYPE_UNKNOWN;
+		++node;
+	}
+}
+
+static void destroy_nodes(struct pp_qos_dev *qdev, uint16_t start_phy, int count)
+{
+	int i;
+	uint16_t id;
+
+	nodes_modify_used_status(qdev, start_phy, count, 0);
+
+	for (i = start_phy; i < start_phy + count; ++i) {
+		id = get_id_from_phy(qdev->mapping, i);
+		map_invalidate_id(qdev->mapping, id);
+		pp_pool_put(qdev->ids, id);
+	}
+}
+
+static void config_parent_node(struct pp_qos_dev *qdev, uint16_t phy, enum node_type type, enum pp_qos_arbitration arb, uint16_t first_child, int num)
+{
+	struct qos_node *node;
+
+	node = get_node_from_phy(qdev->nodes, phy);
+	node->type = type;
+	node->parent_prop.arbitration = arb;
+	node->parent_prop.first_child_phy = first_child;
+	node->parent_prop.num_of_children = num;
+}
+
+static void config_child_node(struct pp_qos_dev *qdev, uint16_t phy, enum node_type type, uint16_t parent_phy, int priority)
+{
+	struct qos_node *node;
+
+	node = get_node_from_phy(qdev->nodes, phy);
+	node->type = type;
+
+	if (QOS_PHY_VALID(parent_phy))
+		node->child_prop.parent_phy = parent_phy;
+
+	if (priority != QOS_INVALID_PRIORITY)
+		node->child_prop.priority = priority;
+}
+
+static void test_modify_used_status(struct pp_qos_dev *qdev)
+{
+	debug_verify_octet_usage(qdev->octets, 2, 0);
+	debug_verify_octet_usage(qdev->octets, 3, 0);
+	debug_verify_octet_usage(qdev->octets, 4, 0);
+
+	create_nodes(qdev, 18, 15);
+	debug_verify_octet_usage(qdev->octets, 2, 6);
+	debug_verify_octet_usage(qdev->octets, 3, 8);
+	debug_verify_octet_usage(qdev->octets, 4, 1);
+
+	destroy_nodes(qdev, 25, 2);
+	debug_verify_octet_usage(qdev->octets, 2, 6);
+	debug_verify_octet_usage(qdev->octets, 3, 6);
+	debug_verify_octet_usage(qdev->octets, 4, 1);
+
+	destroy_nodes(qdev, 18, 7);
+	debug_verify_octet_usage(qdev->octets, 2, 0);
+	debug_verify_octet_usage(qdev->octets, 3, 5);
+	debug_verify_octet_usage(qdev->octets, 4, 1);
+
+	destroy_nodes(qdev, 27, 6);
+	debug_verify_octet_usage(qdev->octets, 2, 0);
+	debug_verify_octet_usage(qdev->octets, 3, 0);
+	debug_verify_octet_usage(qdev->octets, 4, 0);
+	check_consistency(qdev);
+}
+
+static void test_move_nodes(struct pp_qos_dev *qdev)
+{
+	debug_verify_octet_usage(qdev->octets, 2, 0);
+
+	create_nodes(qdev, 18, 3);
+	debug_verify_octet_usage(qdev->octets, 2, 3);
+
+	octet_nodes_shift(qdev, 18, 2, -2);
+	debug_verify_octet_usage(qdev->octets, 2, 3);
+
+	octet_nodes_shift(qdev, 16, 2, 2);
+	debug_verify_octet_usage(qdev->octets, 2, 3);
+
+	octet_nodes_shift(qdev, 19, 2, 3);
+	debug_verify_octet_usage(qdev->octets, 2, 3);
+
+	create_nodes(qdev, 26, 4);
+	debug_verify_octet_usage(qdev->octets, 2, 3);
+	debug_verify_octet_usage(qdev->octets, 3, 4);
+
+	nodes_move(qdev, 30, 22, 2);
+	debug_verify_octet_usage(qdev->octets, 2, 1);
+	debug_verify_octet_usage(qdev->octets, 3, 6);
+
+	destroy_nodes(qdev, 18, 1);
+	destroy_nodes(qdev, 26, 6);
+	debug_verify_octet_usage(qdev->octets, 2, 0);
+	debug_verify_octet_usage(qdev->octets, 3, 0);
+
+	check_consistency(qdev);
+}
+
+static void test_phy_allocation_not_full_octet(struct pp_qos_dev *qdev)
+{
+	struct qos_node *base;
+	uint16_t phy;
+	uint16_t expected_phy;
+	uint16_t id;
+
+	create_nodes(qdev, 0, 2);
+	create_nodes(qdev, 16, 6);
+
+	base = get_node_from_phy(qdev->nodes, 0);
+
+	/* Port (Node 1) */
+	config_parent_node(qdev, 1, TYPE_PORT, PP_QOS_ARBITRATION_WRR, 16, 2);
+
+	config_parent_node(qdev, 16, TYPE_SCHED, PP_QOS_ARBITRATION_WSP, 18, 2);
+	config_child_node(qdev, 16, TYPE_SCHED, 1, 0);
+	config_parent_node(qdev, 17, TYPE_SCHED, PP_QOS_ARBITRATION_WRR, 20, 2);
+	config_child_node(qdev, 17, TYPE_SCHED, 1, 0);
+
+	/* 2 Queues of first sched (Nodes 18, 19) */
+	config_child_node(qdev, 18, TYPE_QUEUE, 16, 3);
+	config_child_node(qdev, 19, TYPE_QUEUE, 16, 5);
+
+	/* 2 Queues of second sched (Nodes 20, 21) */
+	config_child_node(qdev, 20, TYPE_QUEUE, 17, 3);
+	config_child_node(qdev, 21, TYPE_QUEUE, 17, 5);
+	check_consistency(qdev);
+
+	expected_phy = 18;
+	phy = phy_alloc_by_parent(qdev, base + 16, 1);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 1);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 4, -1);
+	check_consistency(qdev);
+
+	expected_phy = 19;
+	phy = phy_alloc_by_parent(qdev, base + 16, 4);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 4);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 3, -1);
+	check_consistency(qdev);
+
+	expected_phy = 20;
+	phy = phy_alloc_by_parent(qdev, base + 16, 6);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 6);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	config_parent_node(qdev, 16, TYPE_SCHED, PP_QOS_ARBITRATION_WSP, 18, 2);
+	octet_nodes_shift(qdev, expected_phy + 1, 2, -1);
+	check_consistency(qdev);
+
+	expected_phy = 22;
+	phy = phy_alloc_by_parent(qdev, base + 17, 1);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+
+	destroy_nodes(qdev, 0, 2);
+	destroy_nodes(qdev, 16, 7);
+	check_consistency(qdev);
+}
+
+static void test_phy_allocation_full_octet(struct pp_qos_dev *qdev)
+{
+	struct qos_node *base;
+	uint16_t phy;
+	uint16_t sched;
+	uint16_t sched2;
+	uint16_t expected_phy;
+	uint16_t id;
+	struct qos_node *node;
+
+	create_nodes(qdev, 0, 2);
+	create_nodes(qdev, 16, 8);
+	base = get_node_from_phy(qdev->nodes, 0);
+
+	/* Port (Node 1) and 2 scheds - 16(WSP), 17(WRR) */
+	config_parent_node(qdev, 1, TYPE_PORT, PP_QOS_ARBITRATION_WRR, 16, 2);
+	config_parent_node(qdev, 16, TYPE_SCHED, PP_QOS_ARBITRATION_WSP, 18, 3);
+	config_child_node(qdev, 16, TYPE_SCHED, 1, 0);
+	config_parent_node(qdev, 17, TYPE_SCHED, PP_QOS_ARBITRATION_WRR, 21, 3);
+	config_child_node(qdev, 17, TYPE_SCHED, 1, 0);
+
+	// 3 Queues of first sched (Nodes 18, 19, 20)
+	config_child_node(qdev, 18, TYPE_QUEUE, 16, 1);
+	config_child_node(qdev, 19, TYPE_QUEUE, 16, 3);
+	config_child_node(qdev, 20, TYPE_QUEUE, 16, 5);
+
+	// 3 Queues of second sched (Nodes 21, 22, 23)
+	config_child_node(qdev, 23, TYPE_QUEUE, 17, 0);
+	config_child_node(qdev, 21, TYPE_QUEUE, 17, 0);
+	config_child_node(qdev, 22, TYPE_QUEUE, 17, 0);
+	check_consistency(qdev);
+
+	sched = get_id_from_phy(qdev->mapping, 16);
+	sched2 = get_id_from_phy(qdev->mapping, 17);
+
+	expected_phy = 17;
+	phy = phy_alloc_by_parent(qdev, base + 16, 2);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 2);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 5, -1);
+	check_consistency(qdev);
+	octet_nodes_shift(qdev, 16, 6, 2);
+	phy = get_phy_from_id(qdev->mapping, sched);
+	nodes_move(qdev, 16, phy, 2);
+	check_consistency(qdev);
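+	/*
+	 * The shift/move pair above restores the original layout after a
+	 * probe: the probed child is destroyed, the survivors are shifted
+	 * left over the hole, then the octet is shifted right by two and
+	 * the two schedulers are moved back to phys 16/17 so the next
+	 * probe starts from the same layout.
+	 */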
+
+	expected_phy = 18;
+	phy = phy_alloc_by_parent(qdev, base + 16, 4);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 4);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 4, -1);
+	check_consistency(qdev);
+	octet_nodes_shift(qdev, 16, 6, 2);
+	phy = get_phy_from_id(qdev->mapping, sched);
+	nodes_move(qdev, 16, phy, 2);
+	check_consistency(qdev);
+
+	expected_phy = 16;
+	phy = phy_alloc_by_parent(qdev, base + 16, 0);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 0);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 6, -1);
+	check_consistency(qdev);
+	octet_nodes_shift(qdev, 16, 6, 2);
+	phy = get_phy_from_id(qdev->mapping, sched);
+	nodes_move(qdev, 16, phy, 2);
+	check_consistency(qdev);
+
+	expected_phy = 19;
+	phy = phy_alloc_by_parent(qdev, base + 16, 6);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 6);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	node = get_node_from_phy(qdev->nodes, get_phy_from_id(qdev->mapping, sched));
+	node->parent_prop.num_of_children--;
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 3, -1);
+	check_consistency(qdev);
+
+	octet_nodes_shift(qdev, 16, 6, 2);
+	phy = get_phy_from_id(qdev->mapping, sched);
+	nodes_move(qdev, 16, phy, 2);
+	check_consistency(qdev);
+
+	expected_phy = 22;
+	phy = phy_alloc_by_parent(qdev, base + 17, 0);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	check_consistency(qdev);
+	node = get_node_from_phy(qdev->nodes, get_phy_from_id(qdev->mapping, sched2));
+	node->parent_prop.num_of_children--;
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, 16, 6, 2);
+	phy = get_phy_from_id(qdev->mapping, sched);
+	nodes_move(qdev, 16, phy, 2);
+	check_consistency(qdev);
+
+	destroy_nodes(qdev, 16, 8);
+	destroy_nodes(qdev, 0, 2);
+	check_consistency(qdev);
+}
+
+static void test_phy_allocation_full_octet_2(struct pp_qos_dev *qdev)
+{
+	struct qos_node *base;
+	uint16_t phy;
+	uint16_t queue;
+	uint16_t expected_phy;
+	uint16_t id;
+
+	create_nodes(qdev, 0, 2);
+	create_nodes(qdev, 16, 8);
+	base = get_node_from_phy(qdev->nodes, 0);
+
+	// Port (Node 1) and 2 scheds - 16(WSP), 17(WRR)
+	config_parent_node(qdev, 1, TYPE_PORT, PP_QOS_ARBITRATION_WRR, 16, 2);
+	config_parent_node(qdev, 16, TYPE_SCHED, PP_QOS_ARBITRATION_WSP, 18, 5);
+	config_child_node(qdev, 16, TYPE_SCHED, 1, 0);
+	config_parent_node(qdev, 17, TYPE_SCHED, PP_QOS_ARBITRATION_WRR, 23, 1);
+	config_child_node(qdev, 17, TYPE_SCHED, 1, 0);
+
+	// 5 Queues of first sched (Nodes 18, 19, 20, 21, 22)
+	config_child_node(qdev, 18, TYPE_QUEUE, 16, 1);
+	config_child_node(qdev, 19, TYPE_QUEUE, 16, 3);
+	config_child_node(qdev, 20, TYPE_QUEUE, 16, 5);
+	config_child_node(qdev, 21, TYPE_QUEUE, 16, 6);
+	config_child_node(qdev, 22, TYPE_QUEUE, 16, 7);
+
+	// 1 Queue of second sched (Node 23)
+	config_child_node(qdev, 23, TYPE_QUEUE, 17, 0);
+	queue = get_id_from_phy(qdev->mapping, 23);
+	check_consistency(qdev);
+
+	expected_phy = 18;
+	phy = phy_alloc_by_parent(qdev, base + 16, 0);
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 0);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, expected_phy + 1, 5, -1);
+	check_consistency(qdev);
+	phy = get_phy_from_id(qdev->mapping, queue);
+	nodes_move(qdev, 23, phy, 1);
+	check_consistency(qdev);
+
+	//
+	// change config to 3 schedulers, first has 1 child, second 4 and third
+	// none
+	//
+	config_parent_node(qdev, 1, TYPE_PORT, PP_QOS_ARBITRATION_WRR, 16, 3);
+	config_parent_node(qdev, 16, TYPE_SCHED, PP_QOS_ARBITRATION_WSP, 19, 1);
+	config_parent_node(qdev, 17, TYPE_SCHED, PP_QOS_ARBITRATION_WRR, 20, 4);
+	config_child_node(qdev, 18, TYPE_SCHED, 1, 0);
+	config_child_node(qdev, 19, TYPE_QUEUE, 16, 3);
+	config_child_node(qdev, 23, TYPE_QUEUE, 17, 0);
+	config_child_node(qdev, 20, TYPE_QUEUE, 17, 0);
+	config_child_node(qdev, 21, TYPE_QUEUE, 17, 0);
+	config_child_node(qdev, 22, TYPE_QUEUE, 17, 0);
+	check_consistency(qdev);
+
+	queue = get_id_from_phy(qdev->mapping, 19);
+	phy = phy_alloc_by_parent(qdev, base + 16, 0);
+	expected_phy = get_phy_from_id(qdev->mapping, queue) - 1;
+	QOS_ASSERT(phy == expected_phy, "Expected phy for new node %d but got %d\n", expected_phy, phy);
+	config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, 0);
+	id = pp_pool_get(qdev->ids);
+	map_id_phy(qdev->mapping, id, phy);
+	destroy_nodes(qdev, phy, 1);
+	octet_nodes_shift(qdev, phy + 1, 1, -1);
+	check_consistency(qdev);
+
+	destroy_nodes(qdev, 16, 7);
+	destroy_nodes(qdev, 0, 2);
+	destroy_nodes(qdev, phy, 1);
+	check_consistency(qdev);
+}
+
+static void test_internal_scheduler(struct pp_qos_dev *qdev)
+{
+	uint16_t phy;
+	struct qos_node *base;
+	int i;
+	uint16_t id;
+
+	base = get_node_from_phy(qdev->nodes, 0);
+	create_nodes(qdev, 0, 2);
+	create_nodes(qdev, 16, 8);
+	config_parent_node(qdev, 1, TYPE_PORT, PP_QOS_ARBITRATION_WRR, 16, 8);
+	config_child_node(qdev, 16, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 17, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 18, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 19, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 20, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 21, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 22, TYPE_QUEUE, 1, 0);
+	config_child_node(qdev, 23, TYPE_QUEUE, 1, 0);
+	check_consistency(qdev);
+
+	for (i = 0; i < 200; ++i) {
+		phy = phy_alloc_by_parent(qdev, base + 1, 0);
+		id = pp_pool_get(qdev->ids);
+		map_id_phy(qdev->mapping, id, phy);
+		config_child_node(qdev, phy, TYPE_QUEUE, QOS_INVALID_PHY, QOS_INVALID_PRIORITY);
+		check_consistency(qdev);
+	}
+}
+
+static void expected_id_phy(const struct pp_qos_dev *qdev, int id, int eid, int ephy)
+{
+	int phy;
+
+	phy = get_phy_from_id(qdev->mapping, id);
+	QOS_ASSERT(id == eid, "Got id %d expected %d\n", id, eid);
+	QOS_ASSERT(phy == ephy, "Got phy %d expected %d\n", phy, ephy);
+}
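+
+/*
+ * Typical use of the helper above right after an allocation call
+ * (illustrative values; the id depends on allocation order):
+ *
+ *	pp_qos_port_allocate(qdev, 12, &id);
+ *	expected_id_phy(qdev, id, 1, 12);	// id 1 mapped to phy 12
+ */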
+
+static void test_ports(struct pp_qos_dev *qdev)
+{
+	struct pp_qos_port_conf conf;
+	unsigned int id1;
+	unsigned int id2;
+	int tmp;
+	struct qos_node *node;
+	struct pp_qos_port_info info;
+	uint16_t ids[5];
+	unsigned tmp2;
+	unsigned firstnonport;
+
+	QOS_LOG_INFO("starting ports tests\n");	
+	firstnonport = qdev->max_port + 1;
+
+	/* Allocate port with arbitrary phy */
+	tmp = pp_qos_port_allocate(qdev, ALLOC_PORT_ID, &id1);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+	expected_id_phy(qdev, id1, 0, 0);
+
+	pp_qos_port_conf_set_default(&conf);
+	conf.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	conf.ring_address = (void *)1;
+	conf.ring_size = 1;
+	QOS_ASSERT(pp_qos_port_set(qdev, id1, &conf) == 0, "Could not set port\n");
+
+	node = get_node_from_phy(qdev->nodes, get_phy_from_id(qdev->mapping, id1));
+	conf.ring_size = 3;
+	QOS_ASSERT(pp_qos_port_set(qdev, id1, &conf) == 0, "Could not set port\n");
+	QOS_ASSERT(port_cfg_valid(qdev, node, node), "Configuration not valid\n");
+
+	conf.ring_size = 0;
+	QOS_ASSERT(pp_qos_port_set(qdev, id1, &conf) == -EINVAL, "Port setting should have failed\n");
+	QOS_ASSERT(port_cfg_valid(qdev, node, node), "Configuration not valid\n");
+
+	/* Allocate port with a specific phy */
+	tmp = pp_qos_port_allocate(qdev, 10, &id2);
+	QOS_ASSERT(tmp == -EINVAL, "Port allocation should have failed\n");
+	tmp = pp_qos_port_allocate(qdev, 12, &id2);
+	QOS_ASSERT(tmp == 0, "Port allocation failed\n");
+	expected_id_phy(qdev, id2, 1, 12);
+	tmp = pp_qos_port_allocate(qdev, 12, &id2);
+	QOS_ASSERT(tmp == -EBUSY, "Port allocation should have failed\n");
+	create_nodes(qdev, firstnonport, 3);
+	config_child_node(qdev, firstnonport, TYPE_QUEUE, 12, 0);
+	config_child_node(qdev, firstnonport + 1, TYPE_QUEUE, 12, 0);
+	config_child_node(qdev, firstnonport + 2, TYPE_QUEUE, 12, 0);
+	config_parent_node(qdev, 12, TYPE_PORT, PP_QOS_ARBITRATION_WRR, firstnonport, 3);
+	pp_qos_port_info_get(qdev, id2, &info);
+	QOS_ASSERT(info.num_of_queues == 3 && info.physical_id == 12, "Number of queues %d or phy %d is not as expected\n", info.num_of_queues, info.physical_id);
+
+	pp_qos_port_get_queues(qdev, id2, ids, 2, &tmp2);
+	QOS_ASSERT((tmp2 == 3) && (ids[0] == 2) && (ids[1] == 3), "Number of queues %d or ids %d %d are not as expected\n", tmp2, ids[0], ids[1]);
+
+	pp_qos_port_remove(qdev, id1);
+	pp_qos_port_remove(qdev, id2);
+
+	check_consistency(qdev);
+	QOS_LOG_INFO("ports tests completed successfully :)\n");	
+}
+
+static void expected_queues(struct pp_qos_dev *qdev, int phy, unsigned exp)
+{
+	unsigned num;
+
+	get_node_queues(qdev, phy, NULL, 0, &num);
+	QOS_ASSERT(exp == num, "Expected %u queues on phy %d but got %u\n", exp, phy, num);
+}
+
+static void test_queues1(struct pp_qos_dev *qdev)
+{
+	int tmp;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+	struct qos_node node;
+	struct qos_node *nodep;
+	unsigned int p1;
+	unsigned int p7;
+	unsigned int q[100];
+	unsigned int j;
+	unsigned int i;
+	unsigned firstnonport;
+
+	QOS_LOG_INFO("starting queues1 tests\n");	
+	firstnonport = qdev->max_port + 1;
+
+	/* Allocate port 1 */
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	tmp = pp_qos_port_allocate(qdev, 1, &p1);
+	expected_id_phy(qdev, p1, 0, 1);
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+	check_consistency_with_resources(qdev);
+
+	/* Allocate first queue */
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p1;
+	tmp = pp_qos_queue_allocate(qdev, &q[0]);
+	pp_qos_queue_set(qdev, q[0], &confq);
+	expected_id_phy(qdev, q[0], 1, firstnonport);
+	check_consistency_with_resources(qdev);
+
+	/* Allocate 7 more queues */
+	for (i = 1; i < 8; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		confq.queue_wred_max_allowed = i;
+		pp_qos_queue_set(qdev, q[i], &confq);
+		expected_id_phy(qdev, q[i], i + 1, firstnonport + i);
+		check_consistency_with_resources(qdev);
+	}
+
+	/* Allocate 64 more queues */
+	for (i = 8; i < 72; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		confq.queue_wred_max_allowed = i;
+		pp_qos_queue_set(qdev, q[i], &confq);
+		check_consistency_with_resources(qdev);
+	}
+
+	/* change setting without parent */
+	tmp = get_phy_from_id(qdev->mapping, q[7]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	memcpy(&node, nodep, sizeof(struct qos_node));
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[7], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_wred_max_allowed = 77;
+	pp_qos_queue_set(qdev, q[7], &confq);
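+	/*
+	 * The memcmp/offsetof checks below rely on the field order inside
+	 * struct qos_node: comparing the prefix up to data.queue.max_allowed
+	 * and the suffix from data.queue.min_guaranteed onwards verifies
+	 * that max_allowed is the only field the set touched.
+	 */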
+	QOS_ASSERT(memcmp(&node, nodep, offsetof(struct qos_node, data.queue.max_allowed)) == 0, "Unexpected change\n");
+	QOS_ASSERT(nodep->data.queue.max_allowed == 77, "Set max allowed did not work\n");
+	QOS_ASSERT(memcmp(&(node.data.queue.min_guaranteed), &(nodep->data.queue.min_guaranteed), sizeof(struct qos_node) - offsetof(struct qos_node, data.queue.min_guaranteed)) == 0, "Unexpected change\n");
+	check_consistency_with_resources(qdev);
+
+	/* change setting with parent */
+	tmp = pp_qos_port_allocate(qdev, 7, &p7);
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+
+	tmp = get_phy_from_id(qdev->mapping, q[14]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	memcpy(&node, nodep, sizeof(struct qos_node));
+
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[14], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_child_prop.parent = p7;
+	confq.queue_wred_max_allowed = 777;
+	pp_qos_queue_set(qdev, q[14], &confq);
+	tmp = get_phy_from_id(qdev->mapping, q[14]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	QOS_ASSERT(nodep->data.queue.max_allowed == 777, "Set max allowed did not work\n");
+	QOS_ASSERT(nodep->child_prop.parent_phy == get_phy_from_id(qdev->mapping, p7), "Set of parent did not work, has %d instead of %d\n",
+		   nodep->child_prop.parent_phy, get_phy_from_id(qdev->mapping, p7));
+	QOS_ASSERT(memcmp(&node, nodep, offsetof(struct qos_node, child_prop.parent_phy)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.child_prop.priority), &(nodep->child_prop.priority), offsetof(struct qos_node, data.queue.max_allowed) - offsetof(struct qos_node, child_prop.priority)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.data.queue.min_guaranteed), &(nodep->data.queue.min_guaranteed), sizeof(struct qos_node) - offsetof(struct qos_node, data.queue.min_guaranteed)) == 0, "Unexpected change\n");
+
+	check_consistency_with_resources(qdev);
+
+	for (j = 0; j < 100; ++j) {
+		for (i = 20; i < 50; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 300 + i;
+			confq.queue_child_prop.parent = p7;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+
+		for (i = 20; i < 50; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 400 + i;
+			confq.queue_child_prop.parent = p1;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+	}
+	QOS_LOG_INFO("queue1 tests completed successfully :)\n");	
+}
+static void test_queues2(struct pp_qos_dev *qdev)
+{
+	int tmp;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+	struct qos_node node;
+	struct qos_node *nodep;
+	unsigned int p1;
+	unsigned int p7;
+	unsigned int p12;
+	unsigned int q[100];
+	unsigned int j;
+	unsigned int i;
+	unsigned tmp1;
+	unsigned tmp2;
+	unsigned tmp3;
+	unsigned tmp4;
+	unsigned firstnonport;
+
+	QOS_LOG_INFO("starting queues2 tests\n");	
+	firstnonport = qdev->max_port + 1;
+
+	/* Allocate port 1 */
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	tmp = pp_qos_port_allocate(qdev, 1, &p1);
+	expected_id_phy(qdev, p1, 0, 1);
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+	check_consistency_with_resources(qdev);
+
+	/* Allocate first queue */
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p1;
+	tmp = pp_qos_queue_allocate(qdev, &q[0]);
+	pp_qos_queue_set(qdev, q[0], &confq);
+	expected_id_phy(qdev, q[0], 1, firstnonport);
+	check_consistency_with_resources(qdev);
+
+	/* Allocate 7 more queues */
+	for (i = 1; i < 8; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		confq.queue_wred_max_allowed = i;
+		pp_qos_queue_set(qdev, q[i], &confq);
+		expected_id_phy(qdev, q[i], i + 1, firstnonport + i);
+		check_consistency_with_resources(qdev);
+	}
+
+	/* Allocate 92 more queues */
+	for (i = 8; i < 100; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		confq.queue_wred_max_allowed = i;
+		confq.queue_child_prop.bandwidth_share = 1;
+		pp_qos_queue_set(qdev, q[i], &confq);
+		check_consistency_with_resources(qdev);
+	}
+
+	/* change setting without parent */
+	tmp = get_phy_from_id(qdev->mapping, q[7]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	memcpy(&node, nodep, sizeof(struct qos_node));
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[7], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_wred_max_allowed = 77;
+	pp_qos_queue_set(qdev, q[7], &confq);
+	QOS_ASSERT(memcmp(&node, nodep, offsetof(struct qos_node, data.queue.max_allowed)) == 0, "Unexpected change\n");
+	QOS_ASSERT(nodep->data.queue.max_allowed == 77, "Set max allowed did not work\n");
+	QOS_ASSERT(memcmp(&(node.data.queue.min_guaranteed), &(nodep->data.queue.min_guaranteed), sizeof(struct qos_node) - offsetof(struct qos_node, data.queue.min_guaranteed)) == 0, "Unexpected change\n");
+	check_consistency_with_resources(qdev);
+
+	/* allocate port 7 and port 12 */
+	tmp = pp_qos_port_allocate(qdev, 7, &p7);
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+	tmp = pp_qos_port_allocate(qdev, 12, &p12);
+	QOS_ASSERT(pp_qos_port_set(qdev, p12, &confp) == 0, "Could not set port\n");
+
+	/* change setting with parent */
+	tmp = get_phy_from_id(qdev->mapping, q[14]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	memcpy(&node, nodep, sizeof(struct qos_node));
+
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[14], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_child_prop.parent = p7;
+	confq.queue_wred_max_allowed = 777;
+	pp_qos_queue_set(qdev, q[14], &confq);
+	tmp = get_phy_from_id(qdev->mapping, q[14]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	QOS_ASSERT(nodep->data.queue.max_allowed == 777, "Set max allowed did not work\n");
+	QOS_ASSERT(nodep->child_prop.parent_phy == get_phy_from_id(qdev->mapping, p7), "Set of parent did not work, has %d instead of %d\n",
+		   nodep->child_prop.parent_phy, get_phy_from_id(qdev->mapping, p7));
+	QOS_ASSERT(memcmp(&node, nodep, offsetof(struct qos_node, child_prop.parent_phy)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.child_prop.priority), &(nodep->child_prop.priority), offsetof(struct qos_node, data.queue.max_allowed) - offsetof(struct qos_node, child_prop.priority)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.data.queue.min_guaranteed), &(nodep->data.queue.min_guaranteed), sizeof(struct qos_node) - offsetof(struct qos_node, data.queue.min_guaranteed)) == 0, "Unexpected change\n");
+
+	check_consistency_with_resources(qdev);
+
+	tmp = get_phy_from_id(qdev->mapping, q[21]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	memcpy(&node, nodep, sizeof(struct qos_node));
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[21], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_child_prop.parent = p12;
+	confq.queue_wred_max_allowed = 2121;
+	pp_qos_queue_set(qdev, q[21], &confq);
+	tmp = get_phy_from_id(qdev->mapping, q[21]);
+	nodep = get_node_from_phy(qdev->nodes, tmp);
+	QOS_ASSERT(nodep->data.queue.max_allowed == 2121, "Set max allowed did not work\n");
+	QOS_ASSERT(nodep->child_prop.parent_phy == get_phy_from_id(qdev->mapping, p12), "Set of parent did not work, has %d instead of %d\n",
+		   nodep->child_prop.parent_phy, get_phy_from_id(qdev->mapping, p12));
+	QOS_ASSERT(memcmp(&node, nodep, offsetof(struct qos_node, child_prop.parent_phy)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.child_prop.priority), &(nodep->child_prop.priority), offsetof(struct qos_node, data.queue.max_allowed) - offsetof(struct qos_node, child_prop.priority)) == 0, "Unexpected change\n");
+	QOS_ASSERT(memcmp(&(node.data.queue.min_guaranteed), &(nodep->data.queue.min_guaranteed), sizeof(struct qos_node) - offsetof(struct qos_node, data.queue.min_guaranteed)) == 0, "Unexpected change\n");
+
+	check_consistency_with_resources(qdev);
+	expected_queues(qdev, 1, 98);
+	expected_queues(qdev, 7, 1);
+	expected_queues(qdev, 12, 1);
+	for (i = 25; i < 50; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+		confq.queue_wred_max_allowed = 300 + i;
+		confq.queue_child_prop.parent = p12;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+		check_consistency_with_resources(qdev);
+	}
+	expected_queues(qdev, 1, 73);
+	expected_queues(qdev, 7, 1);
+	expected_queues(qdev, 12, 26);
+
+	for (i = 50; i < 70; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+		confq.queue_wred_max_allowed = 400 + i;
+		confq.queue_child_prop.parent = p7;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+		check_consistency_with_resources(qdev);
+	}
+	expected_queues(qdev, 1, 53);
+	expected_queues(qdev, 7, 21);
+	expected_queues(qdev, 12, 26);
+
+	for (i = 25; i < 45; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+		confq.queue_wred_max_allowed = 500 + i;
+		confq.queue_child_prop.parent = p7;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+		check_consistency_with_resources(qdev);
+	}
+
+	expected_queues(qdev, 1, 53);
+	expected_queues(qdev, 7, 41);
+	expected_queues(qdev, 12, 6);
+	for (i = 45; i < 60; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+		confq.queue_wred_max_allowed = 600 + i;
+		confq.queue_child_prop.parent = p12;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+		check_consistency_with_resources(qdev);
+	}
+	expected_queues(qdev, 1, 53);
+	expected_queues(qdev, 7, 31);
+	expected_queues(qdev, 12, 16);
+
+	for (i = 60; i < 70; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+		confq.queue_wred_max_allowed = 700 + i;
+		confq.queue_child_prop.parent = p1;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+		check_consistency_with_resources(qdev);
+	}
+	expected_queues(qdev, 1, 63);
+	expected_queues(qdev, 7, 21);
+	expected_queues(qdev, 12, 16);
+	for (j = 0; j < 100; ++j) {
+		for (i = 50; i < 70; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 400 + i;
+			confq.queue_child_prop.parent = p7;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+
+		for (i = 25; i < 45; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 500 + i;
+			confq.queue_child_prop.parent = p7;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+
+		for (i = 45; i < 60; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 600 + i;
+			confq.queue_child_prop.parent = p12;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+
+		for (i = 60; i < 70; ++i) {
+			QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed on %d iteration\n", i);
+			confq.queue_wred_max_allowed = 700 + i;
+			confq.queue_child_prop.parent = p1;
+			QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+			check_consistency_with_resources(qdev);
+		}
+	}
+
+	get_node_queues(qdev, 1, NULL, 0, &tmp1);
+	get_node_queues(qdev, 7, NULL, 0, &tmp2);
+	get_node_queues(qdev, 12, NULL, 0, &tmp3);
+	tmp4 = tmp1 + tmp2 + tmp3;
+	QOS_ASSERT(tmp4 == 100, "Number of queues expected to be 100 but it is %u\n", tmp4);
+
+	QOS_LOG_INFO("queue2 tests completed successfully :)\n");
+}
+
+static void test_queues3(struct pp_qos_dev *qdev)
+{
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+	unsigned int p1;
+	unsigned int q[100];
+	unsigned int i;
+	int tmp;
+	unsigned int bw[] = { 13, 0, 20, 17, 0, 0, 0, 8 };
+	unsigned firstnonport;
+
+	QOS_LOG_INFO("starting queues3 tests\n");	
+	firstnonport = qdev->max_port + 1;
+
+	/* Allocate port 1 */
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	tmp = pp_qos_port_allocate(qdev, 1, &p1);
+	expected_id_phy(qdev, p1, 0, 1);
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+	check_consistency_with_resources(qdev);
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p1;
+
+	/* Allocate 7 queues */
+	for (i = 0; i < 7; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+		confq.queue_wred_max_allowed = i;
+		confq.queue_child_prop.bandwidth_share = 13;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+	check_consistency_with_resources(qdev);
+
+	/* Try to add a new queue whose share crosses 100 */
+	tmp = pp_qos_queue_allocate(qdev, &q[7]);
+	QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+	confq.queue_wred_max_allowed = 8;
+	confq.queue_child_prop.bandwidth_share = 10;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[7], &confq) != 0, "Set should have failed due to over bandwidth\n");
+	pp_pool_put(qdev->ids, q[7]);
+	check_consistency_with_resources(qdev);
+
+	/* Change setting on existing node */
+	QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[6], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+	confq.queue_child_prop.bandwidth_share = 15;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[6], &confq) == 0, "Set failed\n");
+
+	/* Add a new queue that does not cross 100 */
+	tmp = pp_qos_queue_allocate(qdev, &q[7]);
+	QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+	confq.queue_wred_max_allowed = 8;
+	confq.queue_child_prop.bandwidth_share = 6;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[7], &confq) == 0, "Set failed\n");
+	check_consistency_with_resources(qdev);
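+	/*
+	 * The share arithmetic behind the two attempts above: seven queues
+	 * at 13% put the port at 91%, so adding 10% (101%) is rejected;
+	 * after raising one queue from 13% to 15% the total is 93%, and
+	 * adding 6% (99%) fits under the 100% cap.
+	 */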
+
+	for (i = 0; i < 8; ++i) {
+		QOS_ASSERT(pp_qos_queue_conf_get(qdev, q[i], &confq) == 0, "pp_qos_queue_conf_get failed\n");
+		confq.queue_child_prop.bandwidth_share = bw[i];
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+
+	tmp = pp_qos_queue_allocate(qdev, &q[8]);
+	QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.bandwidth_share = 10;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[8], &confq) == 0, "Could not set queue\n");
+	check_consistency_with_resources(qdev);
+
+	for (i = 9; i < 64; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+		confq.queue_child_prop.bandwidth_share = 0;
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+		check_consistency_with_resources(qdev);
+	}
+
+	tmp = pp_qos_queue_allocate(qdev, &q[64]);
+	QOS_ASSERT(tmp == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.bandwidth_share = 15;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[64], &confq) == 0, "Could not set queue\n");
+
+	pp_qos_queue_remove(qdev, q[64]);
+	check_consistency_with_resources(qdev);
+
+	tree_remove(qdev, firstnonport);
+	check_consistency_with_resources(qdev);
+
+	QOS_LOG_INFO("queue3 tests completed successfully :)\n");	
+}
+
+static void reposition_internal_test(struct pp_qos_dev *qdev)
+{
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_sched_conf confs;
+
+	unsigned int p89;
+	unsigned int p97;
+	unsigned int p105;
+	unsigned int p7;
+	unsigned int q[200];
+	unsigned int s[5];
+	unsigned int i;
+	int tmp;
+
+	QOS_LOG_INFO("reposition test started\n");
+
+	qdev->reserved_ports[89] = 1;
+	qdev->reserved_ports[97] = 1;
+	qdev->reserved_ports[105] = 1;
+	tmp = pp_qos_port_allocate(qdev, 7, &p7);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	tmp = pp_qos_port_allocate(qdev, 89, &p89);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	tmp = pp_qos_port_allocate(qdev, 97, &p97);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	tmp = pp_qos_port_allocate(qdev, 105, &p105);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	confp.ring_address = (void *)950000;
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+
+	confp.ring_address = (void *)950600;
+	QOS_ASSERT(pp_qos_port_set(qdev, p89, &confp) == 0, "Could not set port\n");
+
+	confp.ring_address = (void *)950700;
+	QOS_ASSERT(pp_qos_port_set(qdev, p97, &confp) == 0, "Could not set port\n");
+
+	confp.ring_address = (void *)950800;
+	QOS_ASSERT(pp_qos_port_set(qdev, p105, &confp) == 0, "Could not set port\n");
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p89;
+	for (i = 0; i < 200; ++i) {
+		tmp = pp_qos_queue_allocate(qdev, &q[i]);
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "q set failed\n");
+	}
+
+	/* moving queues from p89 to p7 */
+	confq.queue_child_prop.parent = p7;
+	for (i = 0; i < 200; ++i)
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "q set failed\n");
+
+	check_consistency_with_resources(qdev);
+
+	for (i = 0; i < 195; ++i)
+		QOS_ASSERT(pp_qos_queue_remove(qdev, q[i]) == 0, "q remove failed\n");
+	check_consistency_with_resources(qdev);
+
+	pp_qos_sched_conf_set_default(&confs);
+	for (i = 0; i < 5; ++i) {
+		QOS_ASSERT(pp_qos_sched_allocate(qdev, &s[i]) == 0, "Could not allocate sched\n");
+		if (i == 0)
+			confs.sched_child_prop.parent = p97;
+		else
+			confs.sched_child_prop.parent = s[i - 1];
+		QOS_ASSERT(pp_qos_sched_set(qdev, s[i], &confs) == 0, "Could not set sched\n");
+	}
+
+	check_consistency_with_resources(qdev);
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[0]) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = s[4];
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[0], &confq) == 0, "Could not set queue\n");
+	check_consistency_with_resources(qdev);
+
+	confs.sched_child_prop.parent = p97;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s[3], &confs) == 0, "Could not set sched\n");
+	check_consistency_with_resources(qdev);
+
+	confs.sched_child_prop.parent = p105;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s[4], &confs) == 0, "Could not set sched\n");
+	check_consistency_with_resources(qdev);
+	QOS_LOG_INFO("reposition test completed successfully :)\n");
+}
+
+static struct pp_qos_dev *test_open_and_init_instance(unsigned int instance)
+{
+	int rc;
+	struct pp_qos_init_param initp;
+	struct pp_qos_dev *qdev;
+
+	qdev = pp_qos_dev_open(instance);
+	if (qdev) {
+		if (!qdev->initialized) {
+			memset(&initp, 0, sizeof(initp));
+			initp.wred_total_avail_resources = 0x10000;
+			initp.wred_p_const = 512;
+			initp.wred_max_q_size = 10000;
+			initp.reserved_ports[1] = 1;
+			initp.reserved_ports[7] = 1;
+			initp.reserved_ports[12] = 1;
+			rc = pp_qos_dev_init(qdev, &initp);
+			QOS_ASSERT(rc == 0, "Init of instance 0 failed\n");
+		} else {
+			QOS_LOG_INFO("Device %u already initialized\n", instance);
+		}
+	} else {
+		QOS_LOG_ERR("Could not open device %u\n", instance);
+	}
+	return qdev;
+}
+
+static void test_shared_group(struct pp_qos_dev *qdev)
+{
+	int rc;
+	unsigned int id1;
+	unsigned int id2;
+	unsigned int p;
+	unsigned int q[10];
+	unsigned int phy;
+	unsigned int gid;
+	uint16_t members[5];
+	unsigned int members_num;
+	int i;
+	struct qos_node *node;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+
+	QOS_LOG_INFO("starting shared group tests\n");	
+
+	rc = pp_qos_shared_limit_group_add(qdev, 100, &id1);
+	QOS_ASSERT(rc == 0 && id1 == 1, "Return code is %d id is %u expected 1\n", rc, id1);
+
+	rc = pp_qos_shared_limit_group_add(qdev, 200, &id2);
+	QOS_ASSERT(rc == 0 && id2 == 2, "Return code is %d id is %u expected 2\n", rc, id2);
+
+	rc = pp_qos_shared_limit_group_remove(qdev, 3);
+	QOS_ASSERT(rc == -EINVAL, "Removing not existent group should have failed\n");
+
+	rc = pp_qos_shared_limit_group_remove(qdev, id2);
+	QOS_ASSERT(rc == 0, "Removing failed with %d\n", rc);
+
+	rc = pp_qos_shared_limit_group_modify(qdev, id2, 500);
+	QOS_ASSERT(rc == -EINVAL, "Modifying not existent group should have failed\n");
+
+	rc = pp_qos_shared_limit_group_modify(qdev, id1, 100);
+	QOS_ASSERT(rc == 0, "Modifying group failed with %d\n", rc);
+
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	rc = pp_qos_port_allocate(qdev, 1, &p);
+	QOS_ASSERT(pp_qos_port_set(qdev, p, &confp) == 0, "Could not set port\n");
+	node = get_node_from_phy(qdev->nodes, 1);
+	QOS_ASSERT(node->shared_bandwidth_group == QOS_NO_SHARED_BANDWIDTH_GROUP, "shared bandwidth group should be %u but it is %u\n", QOS_NO_SHARED_BANDWIDTH_GROUP,
+			node->shared_bandwidth_group);
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p;
+
+	/* Allocate 7 queues */
+	for (i = 0; i < 7; ++i) {
+		rc = pp_qos_queue_allocate(qdev, &q[i]);
+		QOS_ASSERT(rc == 0, "Could not allocate queue\n");
+
+		if (i > 3)
+			confq.common_prop.shared_bandwidth_group = id1;
+		else
+			confq.common_prop.shared_bandwidth_group = 0;
+
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+
+	for (i = 0; i < 7; ++i) {
+		phy = get_phy_from_id(qdev->mapping, q[i]);
+		node = get_node_from_phy(qdev->nodes, phy);
+		gid = node->shared_bandwidth_group;
+		if (i > 3)
+			QOS_ASSERT(gid == id1, "shared group id is %u expected %u\n", gid, id1);
+		else
+			QOS_ASSERT(gid == QOS_NO_SHARED_BANDWIDTH_GROUP, "shared group id is %u expected %u\n", gid, QOS_NO_SHARED_BANDWIDTH_GROUP);
+	}
+
+	rc = pp_qos_queue_allocate(qdev, &q[8]);
+	QOS_ASSERT(rc == 0, "Could not allocate queue\n");
+	confq.common_prop.shared_bandwidth_group = 10;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[8], &confq) == -EINVAL, "Illegal shared group, should have failed\n");
+	pp_pool_put(qdev->ids, q[8]);
+
+	rc = pp_qos_shared_limit_group_get_members(qdev, id1, members, 5, &members_num);
+	QOS_ASSERT(rc == 0 && members_num == 3, "get_members returned %d, expected number of members to be %u but it is %u\n",
+			rc, 3, members_num);
+
+	QOS_ASSERT(members[0] == q[4] && members[1] == q[5] && members[2] == q[6], "Unexpected members %u %u %u\n", members[0], members[1], members[2]);
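+	/* The check above assumes get_members reports members in allocation order. */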
+
+	/* Block removing of queue with members */
+	rc = pp_qos_shared_limit_group_remove(qdev, id1);
+	QOS_ASSERT(rc == -EBUSY, "Removing group with members should fail\n");
+
+	confq.common_prop.shared_bandwidth_group = 0;
+
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[4], &confq) == 0, "Could not set queue\n");
+	rc = pp_qos_shared_limit_group_remove(qdev, id1);
+	QOS_ASSERT(rc == -EBUSY, "Removing group with members should fail\n");
+
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[5], &confq) == 0, "Could not set queue\n");
+	rc = pp_qos_shared_limit_group_remove(qdev, id1);
+	QOS_ASSERT(rc == -EBUSY, "Removing group with members should fail\n");
+
+	check_consistency_with_resources(qdev);
+
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[6], &confq) == 0, "Could not set queue\n");
+	rc = pp_qos_shared_limit_group_remove(qdev, id1);
+	QOS_ASSERT(rc == 0, "Removing group failed\n");
+
+	check_consistency_with_resources(qdev);
+	QOS_LOG_INFO("shared group tests completed successfully :)\n");	
+}
+
+/*
+ *       p1                       p7
+ *   s0      s1                   q3
+ * q0  s2    q1 
+ *     q2
+ */
+void test_cmd_queue(void)
+{
+	struct pp_qos_dev *qdev;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_sched_conf confs;
+	struct pp_qos_queue_conf confq;
+	unsigned int p1;
+	unsigned int p7;
+	unsigned int s0;
+	unsigned int s1;
+	unsigned int s2;
+	unsigned int q0;
+	unsigned int q1;
+	unsigned int q2;
+	unsigned int q3;
+	unsigned int q[10];
+	unsigned int i;
+	unsigned firstnonport;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	firstnonport = qdev->max_port + 1;
+	QOS_LOG_INFO("starting cmd queue tests\n");
+	// Add ports 1 and 7
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 1, &p1) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p7) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+
+	// Add schedulers
+	QOS_ASSERT(pp_qos_sched_allocate(qdev, &s0) == 0, "Could not allocate sched\n");
+	QOS_ASSERT(pp_qos_sched_allocate(qdev, &s1) == 0, "Could not allocate sched\n");
+	QOS_ASSERT(pp_qos_sched_allocate(qdev, &s2) == 0, "Could not allocate sched\n");
+	pp_qos_sched_conf_set_default(&confs);
+	confs.sched_child_prop.parent = p1;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s0, &confs) == 0, "Could not set sched\n");
+	QOS_ASSERT(pp_qos_sched_set(qdev, s1, &confs) == 0, "Could not set sched\n");
+	confs.sched_child_prop.parent = s0;
+	confs.sched_child_prop.bandwidth_share = 7;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s2, &confs) == 0, "Could not set sched\n");
+
+	// Add queues
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q0) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q1) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q2) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q3) == 0, "Could not allocate queue\n");
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = s0;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q0, &confq) == 0, "Could not set queue\n");
+	confq.queue_child_prop.parent = s1;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q1, &confq) == 0, "Could not set queue\n");
+	confq.queue_child_prop.parent = s2;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q2, &confq) == 0, "Could not set queue\n");
+	confq.queue_child_prop.parent = p7;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q3, &confq) == 0, "Could not set queue\n");
+
+	check_consistency_with_resources(qdev);
+	nodes_move(qdev, 32, firstnonport, 7);
+	check_consistency_with_resources(qdev);
+
+	/* s0 has 2 children, adding 5 more */
+	confq.queue_child_prop.parent = s0;
+	confq.queue_child_prop.bandwidth_share = 7;
+	for (i = 0; i < 5; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[i]) == 0, "Could not allocate queue\n");
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[5]) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[5], &confq) == 0, "Could not set queue\n");
+
+	/* This should call create internal scheduler */
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[6]) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[6], &confq) == 0, "Could not set queue\n");
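+	/*
+	 * An octet gives a parent at most eight consecutive child phys:
+	 * with q[5], s0 reaches eight direct children (q0, s2, q[0..4],
+	 * q[5]), so the ninth child q[6] is the one expected to force the
+	 * driver to splice in an internal scheduler, as noted above.
+	 */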
+
+	check_consistency_with_resources(qdev);
+
+	//transmit_cmds(qdev);
+
+#if 0
+	// Modify port arbitration
+	confp.common_prop.bandwidth_limit = 10;
+	QOS_ASSERT(pp_qos_port_set(qdev, p0, &confp) == 0, "Could not set port\n");
+
+
+	// Add queue under port
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q0) == 0, "Could not allocate queue\n");
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_child_prop.priority = 0;
+	confq.queue_child_prop.parent = p0;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q0, &confq) == 0, "Could not set queue\n");
+	check_consistency_with_resources(qdev);
+
+	// Modify queue
+	confq.queue_child_prop.bandwidth_share = 10;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q0, &confq) == 0, "Could not set queue\n");
+
+	for (i = 0; i < 7; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, q + i) == 0, "Could not allocate queue\n");
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, q + 7) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[7], &confq) == 0, "Could not set queue\n");
+	// Remove port 
+	QOS_ASSERT(pp_qos_port_remove(qdev, p0) == 0, "Could not remove port\n");
+	check_consistency_with_resources(qdev);
+#endif
+
+	QOS_LOG_INFO("cmd queue tests completed successfully :)\n");
+}
+
+struct pp_mapping {
+	uint16_t id2phy[NUM_OF_NODES];
+	uint16_t phy2id[NUM_OF_NODES];
+};
+
+struct octet_info {
+	struct list_head list;
+	uint8_t usage;
+};
+
+struct pp_octets {
+	int last_port_octet;
+	struct octet_info info[NUM_OF_OCTETS];
+	struct list_head lists[9];
+};
+
+struct pp_nodes {
+	struct qos_node nodes[NUM_OF_NODES];
+};
+static void reset_qos_dev(struct pp_qos_dev *dst, struct pp_qos_dev *src)
+{
+	memcpy(dst->groups, src->groups, sizeof(dst->groups));
+	*(dst->ids) = *(src->ids);
+	memcpy(dst->ids->data, src->ids->data, dst->ids->capacity * sizeof(dst->ids->data[0]));
+	*(dst->rlms) = *(src->rlms);
+	memcpy(dst->rlms->data, src->rlms->data, dst->rlms->capacity * sizeof(dst->rlms->data[0]));
+	*(dst->portsphys) = *(src->portsphys);
+	memcpy(dst->portsphys->data, src->portsphys->data, dst->portsphys->capacity * sizeof(dst->portsphys->data[0]));
+
+	memcpy(dst->nodes, src->nodes, sizeof(struct pp_nodes));
+	memcpy(dst->mapping, src->mapping, sizeof(struct pp_mapping));
+	octets_clean(dst->octets);
+	dst->octets = octets_init(octet_of_phy(dst->max_port));
+}
+
+static struct pp_qos_dev *init_clone(struct pp_qos_dev *qdev)
+{
+	struct pp_qos_dev *clone;
+
+	clone = _qos_init(qdev->max_port);
+	clone->portsphys = free_ports_phys_init(clone->reserved_ports, qdev->max_port, qdev->reserved_ports, MAX_PORTS);
+
+	return clone;
+}
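+
+/*
+ * The clone made here is a pristine snapshot used for test isolation:
+ * each test mutates qdev and reset_qos_dev() then copies the snapshot
+ * back, e.g.
+ *
+ *	clone = init_clone(qdev);
+ *	test_ports(qdev);
+ *	reset_qos_dev(qdev, clone);
+ */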
+
+void basic_tests(void)
+{
+	struct pp_qos_dev *qdev;
+	struct pp_qos_dev *clone;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	QOS_LOG_INFO("starting basic tests :)\n");
+	clone = init_clone(qdev);
+	QOS_ASSERT(clone, "Could not create clone\n");
+
+	test_modify_used_status(qdev);
+	test_phy_allocation_not_full_octet(qdev);
+	test_phy_allocation_full_octet(qdev);
+	test_phy_allocation_full_octet_2(qdev);
+	test_internal_scheduler(qdev);
+
+	reset_qos_dev(qdev, clone);
+	_qos_clean(clone);
+	QOS_LOG_INFO("basic tests completed successfully :)\n");
+}
+
+void advance_tests(void)
+{
+	struct pp_qos_dev *qdev;
+	struct pp_qos_dev *clone;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	QOS_LOG_INFO("starting advance tests :)\n");
+	if (qdev->max_port < 15) {
+		QOS_LOG_ERR("Advance test requires that max port will be at least 15 !!!!\n");
+		return;
+	}
+	clone = init_clone(qdev);
+	QOS_ASSERT(clone, "Could not create clone\n");
+
+	test_ports(qdev);
+	reset_qos_dev(qdev, clone);
+
+	test_queues1(qdev);
+	reset_qos_dev(qdev, clone);
+
+	test_queues2(qdev);
+	reset_qos_dev(qdev, clone);
+
+	test_queues3(qdev);
+	reset_qos_dev(qdev, clone);
+
+	reposition_internal_test(qdev);
+	reset_qos_dev(qdev, clone);
+
+	test_shared_group(qdev);
+	reset_qos_dev(qdev, clone);
+
+	_qos_clean(clone);
+	QOS_LOG_INFO("advance tests completed successfully :)\n");
+}
+
+void tests(void)
+{
+	/*
+	 * An old test that no longer works but is worth keeping; the dummy
+	 * call below also keeps the compiler from complaining about an
+	 * unused function.
+	 */
+	struct pp_qos_dev *qdev = NULL;
+
+	if (0)
+		test_move_nodes(qdev);
+
+	basic_tests();
+	advance_tests();
+	QOS_LOG_INFO("all tests completed successfully :)\n");
+}
+
+#define RING_ADDRESS 0x247000
+#define RING_ADDRESS_0 0x240000
+#define RING_ADDRESS_7 0x240700
+#define RING_ADDRESS_26 0x241A00
+#define RING_ADDRESS_89 0x245900
+
+#if 0
+void falcon_test(void)
+{
+	struct pp_qos_dev *qdev;
+	unsigned int p89;
+	unsigned int p26;
+	unsigned int p7;
+	unsigned int p0;
+	unsigned int q[10];
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[0] = 1;
+	qdev->reserved_ports[26] = 1;
+	qdev->reserved_ports[89] = 1;
+
+	pp_qos_port_conf_set_default(&confp); 
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_wred_max_allowed = 0x400;
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 89, &p89) == 0, "Could not allocate port\n");
+	confp.ring_address = (void *)RING_ADDRESS_89; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p89, &confp) == 0, "Could not set port\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[0]) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = p89;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[0], &confq) == 0, "Setting failed\n");
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 0, &p0) == 0, "Could not allocate port\n");
+	confp.ring_address = (void *)RING_ADDRESS_0; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p0, &confp) == 0, "Could not set port\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[1]) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = p0;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[1], &confq) == 0, "Setting failed");
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p7) == 0, "Could not allocate port\n");
+	confp.ring_address = (void *)RING_ADDRESS_7; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[2]) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = p7;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[2], &confq) == 0, "Setting failed\n");
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 26, &p26) == 0, "Could not allocate port\n");
+	confp.ring_address = (void *)RING_ADDRESS_26; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p26, &confp) == 0, "Could not set port\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[3]) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = p26;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q[3], &confq) == 0, "Setting failed");
+
+	QOS_ASSERT(pp_qos_queue_remove(qdev, q[2]) == 0, "removing failed\n");
+}
+#endif
+
+#if 0
+void falcon_test(void)
+{
+	unsigned int i;
+	void *address;
+	struct pp_qos_dev *qdev;
+	unsigned int p[3];
+	unsigned int q[10];
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	unsigned int rlms[] = {14, 74, 30, 87, 235, 42, 242, 190, 119, 103};
+	unsigned int ring_address[] = {RING_ADDRESS_0, RING_ADDRESS_7, RING_ADDRESS_26};
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[0] = 1;
+	qdev->reserved_ports[26] = 1;
+
+	/* Hacking rlms array to give rlm that the slim driver currently uses */
+	for (i = 0; i < sizeof(rlms) / sizeof(rlms[0]); ++i) {
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - i] = rlms[i];
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - rlms[i]] = i;
+	}
+	
+	/* Configure ports 0, 7, 26 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 0, &p[0]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p[1]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 26, &p[2]) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp); 
+
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	for (i = 0 ; i < 3; ++i) {
+		confp.ring_address = (void *)(ring_address[i]); 
+		if (i == 0)
+			confp.credit = 2;
+		else
+			confp.credit = 8;
+		QOS_ASSERT(pp_qos_port_set(qdev, p[i], &confp) == 0, "Could not set port\n");
+	}
+
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_wred_max_allowed = 0x400;
+	for (i = 0; i < 10; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, q + i) == 0, "Could not allocate queue\n");
+		if (i < 4)
+			confq.queue_child_prop.parent = p[0];
+		else if (i < 7)
+			confq.queue_child_prop.parent = p[1];
+		else 
+			confq.queue_child_prop.parent = p[2];
+
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+	}
+
+	QOS_LOG_INFO("Falcon test completed successfully :)\n");
+}
+#endif
+
+#if 1
+void falcon_test(void)
+{
+	unsigned int i;
+	struct pp_qos_dev *qdev;
+	unsigned int p[3];
+	unsigned int p89;
+	unsigned int q[10];
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_node_info info;
+	unsigned int rlms[] = {14, 74, 30, 87, 235, 42, 242, 190, 119, 103};
+	unsigned int ring_address[] = {RING_ADDRESS_0, RING_ADDRESS_7, RING_ADDRESS_26, RING_ADDRESS_89};
+
+	QOS_LOG_INFO("Falcon move test started\n");
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[0] = 1;
+	qdev->reserved_ports[26] = 1;
+	qdev->reserved_ports[89] = 1;
+
+	/* Hacking rlms array to give rlm that the slim driver currently uses */
+	for (i = 0; i < sizeof(rlms) / sizeof(rlms[0]); ++i) {
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - i] = rlms[i];
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - rlms[i]] = i;
+	}
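+	/*
+	 * Presumably pp_pool pops from the tail of data[], so writing the
+	 * wanted rlm values into the last slots makes the first allocations
+	 * hand out exactly rlms[0..9]; the mirrored write keeps data[] a
+	 * permutation so no rlm is duplicated or lost.
+	 */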
+
+	/* Configure ports 0, 7, 26 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 0, &p[0]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p[1]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 26, &p[2]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 89, &p89) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp);
+
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	for (i = 0; i < 3; ++i) {
+		confp.ring_address = (void *)(uintptr_t)(ring_address[i]);
+		if (i == 0)
+			confp.credit = 2;
+		else
+			confp.credit = 8;
+		QOS_ASSERT(pp_qos_port_set(qdev, p[i], &confp) == 0, "Could not set port\n");
+	}
+
+	QOS_ASSERT(pp_qos_port_set(qdev, p89, &confp) == 0, "Could not set port\n");
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_wred_max_allowed = 0x400;
+	confq.queue_child_prop.parent = p89;
+	for (i = 0; i < 10; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, q + i) == 0, "Could not allocate queue\n");
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+	}
+
+	for (i = 0; i < 10; ++i) {
+		if (i < 4)
+			confq.queue_child_prop.parent = p[0];
+		else if (i < 7)
+			confq.queue_child_prop.parent = p[1];
+		else
+			confq.queue_child_prop.parent = p[2];
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+	}
+
+	check_consistency_with_resources(qdev);
+
+	QOS_ASSERT(pp_qos_get_node_info(qdev, p89, &info) == 0, "Could not get node info\n");
+	for (i = 0; i < 3; ++i)
+		QOS_ASSERT(pp_qos_get_node_info(qdev, p[i], &info) == 0, "Could not get node info\n");
+
+	for (i = 0; i < 10; ++i)
+		QOS_ASSERT(pp_qos_get_node_info(qdev, q[i], &info) == 0, "Could not get node info\n");
+
+	QOS_LOG_INFO("Falcon move test completed successfully :)\n");
+}
+#endif
+
+#if 0
+void falcon_test(void)
+{
+	unsigned int i;
+	struct pp_qos_dev *qdev;
+	unsigned int p26;
+	unsigned int q1;
+	unsigned int q2;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	unsigned int rlms[] = {14, 74, 30, 87, 235, 42, 242, 190, 119, 103};
+	unsigned int ring_address[] = {RING_ADDRESS_0, RING_ADDRESS_26};
+
+	QOS_LOG_INFO("Falcon test started\n");
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[26] = 1;
+
+	/* Hacking rlms array to give rlm that the slim driver currently uses */
+	for (i = 0; i < sizeof(rlms) / sizeof(rlms[0]); ++i) {
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - i] = rlms[i];
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - rlms[i]] = i;
+	}
+	
+	/* Configure port 26 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 26, &p26) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp); 
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	confp.ring_address = (void *)(RING_ADDRESS_26); 
+	confp.credit = 8;
+	QOS_ASSERT(pp_qos_port_set(qdev, p26, &confp) == 0, "Could not set port\n");
+
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_child_prop.parent = p26;
+	confq.queue_wred_max_allowed = 0x400;
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q1) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q1, &confq) == 0, "Setting queue fail\n");
+	QOS_ASSERT(pp_qos_queue_remove(qdev, q1) == 0, "Remove queue failed\n");
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q2) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q2, &confq) == 0, "Setting queue fail\n");
+	QOS_LOG_INFO("Falcon test completed successfully :)\n");
+}
+#endif
+#if 0
+/* 
+ * p(0) -> 14 
+ * p(7) -> s(a) -> s(b) -> s(c) -> s(d) -> s(e) -> q(235)
+ */
+void falcon_test(void)
+{
+	unsigned int i;
+	struct pp_qos_dev *qdev;
+	unsigned int p1;
+	unsigned int p7;
+	unsigned int s[5];
+	unsigned int q;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_sched_conf confs;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	pp_qos_port_conf_set_default(&confp); 
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+
+	/* add port 0 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 1, &p1) == 0, "Could not allocate port 0\n");
+	confp.credit = 2;
+	confp.ring_address = (void *)RING_ADDRESS_0; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port 0\n");
+
+	/* add port 7 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p7) == 0, "Could not allocate port 0\n");
+	confp.credit = 8;
+	confp.ring_address = (void *)RING_ADDRESS_7; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port 7\n");
+
+	/* Allocate 5 scheduler each is a child of its predecessor */
+	pp_qos_sched_conf_set_default(&confs);
+	for (i = 0; i < 5; ++i) {
+		QOS_ASSERT(pp_qos_sched_allocate(qdev, &s[i]) == 0, "Could not allocate sched\n");
+		if (i == 0)
+			confs.sched_child_prop.parent = p7;
+		else
+			confs.sched_child_prop.parent = s[i - 1];
+		QOS_ASSERT(pp_qos_sched_set(qdev, s[i], &confs) == 0, "Could not set sched\n");
+	}
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_wred_max_allowed = 0x400;
+
+	/* Add q */
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q) == 0, "Could not allocate queue 235\n");
+	confq.queue_child_prop.parent = s[4];
+	QOS_ASSERT(pp_qos_queue_set(qdev, q, &confq) == 0, "Could not set queue 235\n");
+
+
+	check_consistency_with_resources(qdev);
+	QOS_LOG_INFO("Deep configuration completed\n");
+
+	confs.sched_child_prop.parent = p7;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s[3], &confs) == 0, "Could not set sched\n");
+	check_consistency_with_resources(qdev);
+}
+#endif
+
+#if 0
+/*
+ * p(0) -> q(14)
+ * p(7) -> all other queues
+ */
+static unsigned int q[256];
+void falcon_test(void)
+{
+	unsigned int i;
+	struct pp_qos_dev *qdev;
+	unsigned int p7;
+	unsigned int p0;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+#if 0
+	unsigned int q14;
+	unsigned int rlms[] = {14};
+#endif
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[0] = 1;
+	qdev->reserved_ports[26] = 1;
+
+#if 0
+	/* Hacking rlms array to give rlm that the slim driver currently uses */
+	for (i = 0; i < sizeof(rlms) / sizeof(rlms[0]); ++i) {
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - i] = rlms[i];
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - rlms[i]] = i;
+	}
+#endif
+
+	pp_qos_port_conf_set_default(&confp); 
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+
+	/* add port 0 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 0, &p0) == 0, "Could not allocate port 0\n");
+	confp.credit = 2;
+	confp.ring_address = (void *)RING_ADDRESS_0; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p0, &confp) == 0, "Could not set port 0\n");
+
+	/* add port 7 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p7) == 0, "Could not allocate port 7\n");
+	confp.credit = 8;
+	confp.ring_address = (void *)RING_ADDRESS_7;
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port 7\n");
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_wred_max_allowed = 0x200;
+
+#if 0
+	/* Add q14 under port 0 */
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q14) == 0, "Could not allocate queue 14\n");
+	confq.queue_child_prop.parent = p0;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q14, &confq) == 0, "Could not set queue 14\n");
+#endif
+
+	/* Add 256 queues under p7 */
+	confq.queue_child_prop.parent = p7;
+	for (i = 0; i < 256; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, &q[i]) == 0, "Could not allocate queue\n");
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Could not set queue\n");
+	}
+
+	QOS_LOG_INFO("All queues test configuration completed\n");
+}
+#endif
+
+#if 0
+void falcon_test(void)
+{
+	unsigned int i;
+	struct pp_qos_dev *qdev;
+	unsigned int p[3];
+	unsigned int q[10];
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	unsigned int rlms[] = {14, 74, 30, 87, 235, 42, 242, 190, 119, 103};
+	unsigned int ring_address[] = {RING_ADDRESS_0, RING_ADDRESS_7, RING_ADDRESS_26};
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	qdev->reserved_ports[0] = 1;
+	qdev->reserved_ports[26] = 1;
+
+	/*
+	 * Hack the rlms array to supply the rlm values that the slim
+	 * driver currently uses.
+	 */
+	for (i = 0; i < sizeof(rlms) / sizeof(rlms[0]); ++i) {
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - i] = rlms[i];
+		qdev->rlms->data[NUM_OF_QUEUES - 1 - rlms[i]] = i;
+	}
+
+	/* Configure ports 0, 7, 26 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 0, &p[0]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p[1]) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 26, &p[2]) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp);
+
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	for (i = 0 ; i < 3; ++i) {
+		confp.ring_address = (void *)(uintptr_t)(ring_address[i]);
+		if (i == 0)
+			confp.credit = 2;
+		else
+			confp.credit = 8;
+		confp.common_prop.bandwidth_limit = 0;
+		if (i == 1)
+			confp.common_prop.bandwidth_limit = 1;
+
+		QOS_ASSERT(pp_qos_port_set(qdev, p[i], &confp) == 0, "Could not set port\n");
+	}
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_wred_max_allowed = 0x400;
+	for (i = 0; i < 10; ++i) {
+		QOS_ASSERT(pp_qos_queue_allocate(qdev, q + i) == 0, "Could not allocate queue\n");
+		if (i < 4)
+			confq.queue_child_prop.parent = p[0];
+		else if (i < 7)
+			confq.queue_child_prop.parent = p[1];
+		else
+			confq.queue_child_prop.parent = p[2];
+
+		QOS_ASSERT(pp_qos_queue_set(qdev, q[i], &confq) == 0, "Setting failed on %d iteration\n", i);
+	}
+
+	QOS_LOG_INFO("Falcon test completed successfully :)\n");
+}
+#endif
+
+#if 1
+void stat_test(void)
+{
+	struct pp_qos_dev *qdev;
+	unsigned int p;
+	unsigned int q1;
+	unsigned int q2;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_queue_stat qstat;
+	struct pp_qos_port_stat pstat;
+
+	QOS_LOG_INFO("statistics test started\n");
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 1, &p) == 0, "Could not allocate port\n");
+	pp_qos_port_conf_set_default(&confp); 
+
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+	confp.ring_address = (void *)RING_ADDRESS;
+	QOS_ASSERT(pp_qos_port_set(qdev, p, &confp) == 0, "Could not set port\n");
+
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_wred_max_allowed = 1000;
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q1) == 0, "Could not allocate queue\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q2) == 0, "Could not allocate queue\n");
+	confq.queue_child_prop.parent = p;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q1, &confq) == 0, "Could not set queue\n");
+	QOS_ASSERT(pp_qos_queue_set(qdev, q2, &confq) == 0, "Could not set queue\n");
+
+	memset(&pstat, 0, sizeof(pstat));
+	QOS_ASSERT(pp_qos_port_stat_get(qdev, p, &pstat) == 0, "Could not get port statistics\n");
+
+	memset(&qstat, 0, sizeof(qstat));
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q1, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 0 &&
+			qstat.queue_packets_occupancy == 0 &&
+			qstat.queue_bytes_occupancy == 0 &&
+			qstat.total_packets_accepted == 0 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 0 &&
+			qstat.total_bytes_dropped == 0, "Some field of q1 stats is not 0\n");
+
+	memset(&qstat, 0, sizeof(qstat));
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q2, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 0 &&
+			qstat.queue_packets_occupancy == 0 &&
+			qstat.queue_bytes_occupancy == 0 &&
+			qstat.total_packets_accepted == 0 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 0 &&
+			qstat.total_bytes_dropped == 0, "Some field of q2 stats is not 0\n");
+
+	memset(&pstat, 0, sizeof(pstat));
+	QOS_ASSERT(pp_qos_port_stat_get(qdev, p, &pstat) == 0, "Could not get port statistics\n");
+	QOS_ASSERT(
+			pstat.reset == 0 &&
+			pstat.total_green_bytes == 0 &&
+			pstat.total_yellow_bytes == 0, "Some field of p stats is not 0\n");
+
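+	/*
+	 * Push two descriptors to each queue; judging by the expected
+	 * stats below, the arguments are (qdev, queue, size, color,
+	 * address) with color 1 = green and 2 = yellow, so q1 takes
+	 * 128 green + 256 yellow bytes and q2 100 green + 200 yellow.
+	 */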
+	create_push_desc_cmd(qdev, 0, 128, 1, 0x600000);
+	create_push_desc_cmd(qdev, 0, 256, 2, 0x600000);
+	create_push_desc_cmd(qdev, 1, 100, 1, 0x600000);
+	create_push_desc_cmd(qdev, 1, 200, 2, 0x600000);
+	update_cmd_id(&qdev->drvcmds);
+	transmit_cmds(qdev);
+
+	memset(&qstat, 0, sizeof(qstat));
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q1, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 0 &&
+			qstat.queue_packets_occupancy == 2 &&
+			qstat.queue_bytes_occupancy == 384 &&
+			qstat.total_packets_accepted == 2 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 384 &&
+			qstat.total_bytes_dropped == 0, "Some fields of q1 stats are not expected\n");
+
+	memset(&qstat, 0, sizeof(qstat));
+	qstat.reset = 1;
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q2, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 1 &&
+			qstat.queue_packets_occupancy == 2 &&
+			qstat.queue_bytes_occupancy == 300 &&
+			qstat.total_packets_accepted == 2 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 300 &&
+			qstat.total_bytes_dropped == 0, "Some fields of q2 are not expected\n");
+
+	memset(&pstat, 0, sizeof(pstat));
+	QOS_ASSERT(pp_qos_port_stat_get(qdev, p, &pstat) == 0, "Could not get port statistics\n");
+	QOS_ASSERT(
+			pstat.reset == 0 &&
+			pstat.total_green_bytes == 228 &&
+			pstat.total_yellow_bytes == 456, "Some fields of p stats are not as expected\n");
+
+	create_push_desc_cmd(qdev, 0, 100, 1, 0x600000);
+	create_push_desc_cmd(qdev, 1, 300, 1, 0x600000);
+
+	memset(&qstat, 0, sizeof(qstat));
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q1, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 0 &&
+			qstat.queue_packets_occupancy == 3 &&
+			qstat.queue_bytes_occupancy == 484 &&
+			qstat.total_packets_accepted == 3 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 484 &&
+			qstat.total_bytes_dropped == 0, "Some field of q1 stats are not expected\n");
+
+	QOS_ASSERT(pp_qos_queue_stat_get(qdev, q2, &qstat) == 0, "Could not get queue statistics\n");
+	QOS_ASSERT(
+			qstat.reset == 0 &&
+			qstat.queue_packets_occupancy == 3 &&
+			qstat.queue_bytes_occupancy == 600 &&
+			qstat.total_packets_accepted == 1 &&
+			qstat.total_packets_dropped == 0 &&
+			qstat.total_packets_red_dropped == 0 &&
+			qstat.total_bytes_accepted == 300 &&
+			qstat.total_bytes_dropped == 0, "Some field of q2 stats is not 0\n");
+
+	memset(&pstat, 0, sizeof(pstat));
+	pstat.reset = 1;
+	QOS_ASSERT(pp_qos_port_stat_get(qdev, p, &pstat) == 0, "Could not get port statistics\n");
+	QOS_ASSERT(
+			pstat.reset == 1 &&
+			pstat.total_green_bytes == 628 &&
+			pstat.total_yellow_bytes == 456, "Some fields of p stats are not as expected\n");
+
+	create_push_desc_cmd(qdev, 0, 100, 2, 0x600000);
+	memset(&pstat, 0, sizeof(pstat));
+	QOS_ASSERT(pp_qos_port_stat_get(qdev, p, &pstat) == 0, "Could not get port statistics\n");
+	QOS_ASSERT(
+			pstat.reset == 0 &&
+			pstat.total_green_bytes == 0 &&
+			pstat.total_yellow_bytes == 100, "Some fields of p stats are not as expected\n");
+
+	QOS_LOG_INFO("statistics test completed successfully :)\n");
+}
+#endif
+
+#if 0
+void falcon_test(void)
+{
+	struct pp_qos_dev *qdev;
+	unsigned int p;
+	unsigned int s;
+	unsigned int q;
+	struct pp_qos_queue_info qinfo;
+	struct pp_qos_sched_info sinfo;
+	struct pp_qos_port_info pinfo;
+	int rc;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	QOS_ASSERT(pp_qos_port_allocate(qdev, ALLOC_PORT_ID, &p) == 0, "Could not allocate port\n");
+	QOS_ASSERT(pp_qos_sched_allocate(qdev, &s) == 0, "Could not allocate sched\n");
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q) == 0, "Could not allocate queue 0\n");
+	rc = pp_qos_port_info_get(qdev, p, &pinfo);
+	rc = pp_qos_sched_info_get(qdev, s, &sinfo);
+	rc = pp_qos_queue_info_get(qdev, q, &qinfo);
+}
+#endif
+
+#if 0
+void falcon_test(void)
+{
+	struct pp_qos_dev *qdev;
+	unsigned int p7;
+	unsigned int q0;
+	unsigned int q1;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_queue_conf confq;
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	pp_qos_port_conf_set_default(&confp); 
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+
+	/* add port 7 */
+	QOS_ASSERT(pp_qos_port_allocate(qdev, 7, &p7) == 0, "Could not allocate port 7\n");
+	confp.credit = 8;
+	confp.ring_address = (void *)RING_ADDRESS_7; 
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port 7\n");
+	QOS_ASSERT(pp_qos_port_suspend(qdev, p7) == 0, "Could not suspend port 7\n");
+
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = p7;
+	confq.queue_wred_min_guaranteed = 0;
+	confq.queue_wred_max_allowed = 25;
+	confq.wred_enable = 1;
+	confq.wred_fixed_drop_prob_enable = 1;
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q0) == 0, "Could not allocate queue 0\n");
+	confq.queue_wred_min_avg_green = 5;
+	confq.queue_wred_max_avg_green = 15;
+	confq.queue_wred_fixed_drop_prob_green = 20;
+	confq.queue_wred_min_avg_yellow = 10;
+	confq.queue_wred_max_avg_yellow = 20;
+	confq.queue_wred_fixed_drop_prob_yellow = 80;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q0, &confq) == 0, "Could not set queue 0\n");
+
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q1) == 0, "Could not allocate queue 1\n");
+	confq.queue_wred_min_avg_green = 12;
+	confq.queue_wred_max_avg_green = 22;
+	confq.queue_wred_fixed_drop_prob_green = 0;
+	confq.queue_wred_min_avg_yellow = 15;
+	confq.queue_wred_max_avg_yellow = 25;
+	confq.queue_wred_fixed_drop_prob_yellow = 100;
+	QOS_ASSERT(pp_qos_queue_set(qdev, q1, &confq) == 0, "Could not set queue 1\n");
+
+	QOS_LOG_INFO("WRED configuration completed\n");
+}
+#endif
+
+void reposition_test(void)
+{
+	unsigned int s[5];
+	unsigned int q;
+	unsigned int p1;
+	unsigned int p7;
+	int tmp;
+	unsigned int i;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_sched_conf confs;
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_dev *qdev;
+	struct pp_qos_node_info info;
+
+	QOS_LOG_INFO("reposition test started\n");
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	tmp = pp_qos_port_allocate(qdev, 1, &p1);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	tmp = pp_qos_port_allocate(qdev, 7, &p7);
+	QOS_ASSERT(tmp == 0, "could not allocate qos port\n");
+
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_size = 1;
+
+	confp.ring_address = (void *)950000;
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+
+	confp.ring_address = (void *)950600;
+	QOS_ASSERT(pp_qos_port_set(qdev, p7, &confp) == 0, "Could not set port\n");
+
+	pp_qos_sched_conf_set_default(&confs);
+	for (i = 0; i < 5; ++i) {
+		QOS_ASSERT(pp_qos_sched_allocate(qdev, &s[i]) == 0, "Could not allocate sched\n");
+		if (i == 0)
+			confs.sched_child_prop.parent = p1;
+		else
+			confs.sched_child_prop.parent = s[i - 1];
+		QOS_ASSERT(pp_qos_sched_set(qdev, s[i], &confs) == 0, "Could not set sched\n");
+	}
+	QOS_ASSERT(pp_qos_queue_allocate(qdev, &q) == 0, "Could not allocate queue\n");
+	pp_qos_queue_conf_set_default(&confq);
+	confq.queue_child_prop.parent = s[4];
+	QOS_ASSERT(pp_qos_queue_set(qdev, q, &confq) == 0, "Could not set queue\n");
+
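+	/* Reparent s[3] directly under p1 and s[4] under p7, then check
+	 * that the tree and resource bookkeeping stay consistent.
+	 */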
+	confs.sched_child_prop.parent = p1;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s[3], &confs) == 0, "Could not set sched\n");
+
+	confs.sched_child_prop.parent = p7;
+	QOS_ASSERT(pp_qos_sched_set(qdev, s[4], &confs) == 0, "Could not set sched\n");
+	check_consistency_with_resources(qdev);
+
+	QOS_ASSERT(pp_qos_get_node_info(qdev, p1, &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(pp_qos_get_node_info(qdev, p7, &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(pp_qos_get_node_info(qdev, q, &info) == 0, "Could not get node info\n");
+	for (i = 0; i < 5; ++i)
+		QOS_ASSERT(pp_qos_get_node_info(qdev, s[i], &info) == 0, "Could not get node info\n");
+
+	QOS_LOG_INFO("reposition test completed successfully :)\n");
+}
+
+void info_test(void)
+{
+	struct pp_qos_queue_conf confq;
+	struct pp_qos_port_conf confp;
+	struct pp_qos_dev *qdev;
+	unsigned int p1;
+	unsigned int q[100];
+	unsigned int i;
+	struct pp_qos_node_info info;
+
+	QOS_LOG_INFO("info test started\n");
+
+	qdev = test_open_and_init_instance(0);
+	if (!qdev)
+		return;
+
+	/* Allocate port 1 */
+	pp_qos_port_conf_set_default(&confp);
+	confp.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	confp.ring_address = (void *)1;
+	confp.ring_size = 1;
+	pp_qos_port_allocate(qdev, 1, &p1);
+	QOS_ASSERT(pp_qos_port_set(qdev, p1, &confp) == 0, "Could not set port\n");
+	check_consistency_with_resources(qdev);
+	QOS_ASSERT(pp_qos_get_node_info(qdev, p1, &info) == 0, "Could not get node info\n");
+
+	pp_qos_queue_conf_set_default(&confq); 
+	confq.queue_child_prop.parent = p1;
+	//confq.common_prop.bandwidth_limit = 77;
+	/* Allocate 8 queues */
+	for (i = 0; i < 8; ++ i) {
+		pp_qos_queue_allocate(qdev, &q[i]);
+		confq.queue_wred_max_allowed = i;
+		pp_qos_queue_set(qdev, q[i], &confq);
+	}
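+	/*
+	 * With this layout the eight queues are expected to occupy one
+	 * octet, phys 128-135 under port 1, which is exactly what the
+	 * node info checks below assert.
+	 */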
+	QOS_ASSERT(pp_qos_get_node_info(qdev, p1, &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(info.type == PPV4_QOS_NODE_TYPE_PORT &&
+		   info.children[0].phy == 128 &&
+		   info.children[1].phy == 129 &&
+		   info.children[2].phy == 130 &&
+		   info.children[3].phy == 131 &&
+		   info.children[4].phy == 132 &&
+		   info.children[5].phy == 133 &&
+		   info.children[6].phy == 134 &&
+		   info.children[7].phy == 135 &&
+		   info.children[0].id == 1 &&
+		   info.children[1].id == 2 &&
+		   info.children[2].id == 3 &&
+		   info.children[3].id == 4 &&
+		   info.children[4].id == 5 &&
+		   info.children[5].id == 6 &&
+		   info.children[6].id == 7 &&
+		   info.children[7].id == 8 &&
+		   info.is_suspended == 0 &&
+		   info.is_internal == 0 &&
+		   info.bw_limit == 0 &&
+		   (info.preds[0].phy == info.preds[0].id &&
+		   info.preds[1].phy == info.preds[1].id &&
+		   info.preds[2].phy == info.preds[2].id &&
+		   info.preds[3].phy == info.preds[3].id &&
+		   info.preds[4].phy == info.preds[4].id &&
+		   info.preds[5].phy == info.preds[5].id &&
+		   info.port == info.queue_physical_id),
+		   "Unexpected value from node get info\n");
+
+	QOS_ASSERT(pp_qos_get_node_info(qdev, q[7], &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(info.type == PPV4_QOS_NODE_TYPE_QUEUE &&
+		   info.children[0].phy == PPV4_QOS_INVALID &&
+		   info.children[1].phy == PPV4_QOS_INVALID &&
+		   info.children[2].phy == PPV4_QOS_INVALID &&
+		   info.children[3].phy == PPV4_QOS_INVALID &&
+		   info.children[4].phy == PPV4_QOS_INVALID &&
+		   info.children[5].phy == PPV4_QOS_INVALID &&
+		   info.children[6].phy == PPV4_QOS_INVALID &&
+		   info.children[7].phy == PPV4_QOS_INVALID &&
+		   info.children[0].id == PPV4_QOS_INVALID &&
+		   info.children[1].id == PPV4_QOS_INVALID &&
+		   info.children[2].id == PPV4_QOS_INVALID &&
+		   info.children[3].id == PPV4_QOS_INVALID &&
+		   info.children[4].id == PPV4_QOS_INVALID &&
+		   info.children[5].id == PPV4_QOS_INVALID &&
+		   info.children[6].id == PPV4_QOS_INVALID &&
+		   info.children[7].id == PPV4_QOS_INVALID &&
+		   info.is_suspended == 0 &&
+		   info.is_internal == 0 &&
+		   info.bw_limit == 0 &&
+		   info.preds[0].phy == 1 &&
+		   info.preds[0].id == 0 &&
+		   info.preds[1].phy == info.preds[1].id &&
+		   info.preds[2].phy == info.preds[2].id &&
+		   info.preds[3].phy == info.preds[3].id &&
+		   info.preds[4].phy == info.preds[4].id &&
+		   info.preds[5].phy == info.preds[5].id &&
+		   info.port == 1 &&
+		   info.queue_physical_id == 7,
+		   "Unexpected value from node get info\n");
+
+	/* Allocate 92 more queues */
+	for (i = 8; i < 100; ++i) {
+		pp_qos_queue_allocate(qdev, &q[i]);
+		pp_qos_queue_set(qdev, q[i], &confq);
+	}
+	QOS_ASSERT(pp_qos_get_node_info(qdev, q[7], &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(info.type == PPV4_QOS_NODE_TYPE_QUEUE &&
+		   info.children[0].phy == PPV4_QOS_INVALID &&
+		   info.children[1].phy == PPV4_QOS_INVALID &&
+		   info.children[2].phy == PPV4_QOS_INVALID &&
+		   info.children[3].phy == PPV4_QOS_INVALID &&
+		   info.children[4].phy == PPV4_QOS_INVALID &&
+		   info.children[5].phy == PPV4_QOS_INVALID &&
+		   info.children[6].phy == PPV4_QOS_INVALID &&
+		   info.children[7].phy == PPV4_QOS_INVALID &&
+		   info.children[0].id == PPV4_QOS_INVALID &&
+		   info.children[1].id == PPV4_QOS_INVALID &&
+		   info.children[2].id == PPV4_QOS_INVALID &&
+		   info.children[3].id == PPV4_QOS_INVALID &&
+		   info.children[4].id == PPV4_QOS_INVALID &&
+		   info.children[5].id == PPV4_QOS_INVALID &&
+		   info.children[6].id == PPV4_QOS_INVALID &&
+		   info.children[7].id == PPV4_QOS_INVALID &&
+		   info.is_suspended == 0 &&
+		   info.is_internal == 0 &&
+		   info.bw_limit == 0 &&
+		   info.preds[0].phy == 135 &&
+		   info.preds[0].id == 66 &&
+		   info.preds[1].phy == 1 &&
+		   info.preds[1].id == 0 &&
+		   info.preds[2].phy == info.preds[2].id &&
+		   info.preds[3].phy == info.preds[3].id &&
+		   info.preds[4].phy == info.preds[4].id &&
+		   info.preds[5].phy == info.preds[5].id &&
+		   info.port == 1 &&
+		   info.queue_physical_id == 7,
+		   "Unexpected value from node get info\n");
+
+	QOS_ASSERT(pp_qos_get_node_info(qdev, 66, &info) == 0, "Could not get node info\n");
+	QOS_ASSERT(info.type == PPV4_QOS_NODE_TYPE_SCHED &&
+		   info.children[0].phy == 192 &&
+		   info.children[1].phy == 193 &&
+		   info.children[2].phy == 194 &&
+		   info.children[3].phy == 195 &&
+		   info.children[4].phy == 196 &&
+		   info.children[5].phy == 197 &&
+		   info.children[6].phy == 198 &&
+		   info.children[7].phy == 199 &&
+		   info.children[0].id == 8 &&
+		   info.children[1].id == 65 &&
+		   info.children[2].id == 67 &&
+		   info.children[3].id == 68 &&
+		   info.children[4].id == 69 &&
+		   info.children[5].id == 70 &&
+		   info.children[6].id == 71 &&
+		   info.children[7].id == 72 &&
+		   info.is_suspended == 0 &&
+		   info.is_internal == 1 &&
+		   info.bw_limit == 0 &&
+		   info.preds[0].phy == 1 &&
+		   info.preds[0].id == 0 &&
+		   info.preds[1].phy == info.preds[1].id &&
+		   info.preds[2].phy == info.preds[2].id &&
+		   info.preds[3].phy == info.preds[3].id &&
+		   info.preds[4].phy == info.preds[4].id &&
+		   info.preds[5].phy == info.preds[5].id &&
+		   info.port == PPV4_QOS_INVALID &&
+		   info.queue_physical_id == PPV4_QOS_INVALID,
+		   "Unexpected value from node get info\n");
+	check_consistency_with_resources(qdev);
+	QOS_LOG_INFO("info test completed successfully :)\n");
+}
+
+void load_fw_test(void)
+{
+	test_open_and_init_instance(0);
+}
+
+#endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_defs.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_defs.h
new file mode 100644
index 000000000..4445817ec
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_defs.h
@@ -0,0 +1,516 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+/*
+ * uc_host_defs.h
+ *
+ *  Created on: Dec 17, 2017
+ *      Author: obakshe
+ */
+
+#ifndef SRC_UC_HOST_DEFS_H_
+#define SRC_UC_HOST_DEFS_H_
+
+// UC version
+#define UC_VERSION_MAJOR	(1)
+#define UC_VERSION_MINOR	(0)
+#define UC_VERSION_BUILD	(0)
+
+/**************************************************************************
+ *! @enum	UC_STATUS
+ **************************************************************************
+ *
+ * @brief UC general status enum
+ *
+ **************************************************************************/
+enum uc_status {
+	//!< Status OK
+	UC_STATUS_OK,
+
+	//!< General failure
+	UC_STATUS_GENERAL_FAILURE,
+
+	//!< Invalid user input
+	UC_STATUS_INVALID_INPUT,
+};
+
+/**************************************************************************
+ *! @enum	UC_LOGGER_LEVEL
+ **************************************************************************
+ *
+ * @brief UC Logger level enum
+ *
+ **************************************************************************/
+enum uc_logger_level {
+	//!< FATAL error occurred. SW will probably fail to proceed
+	UC_LOGGER_LEVEL_FATAL,
+
+	//!< General ERROR occurred.
+	UC_LOGGER_LEVEL_ERROR,
+
+	//!< WARNING
+	UC_LOGGER_LEVEL_WARNING,
+
+	//!< Information print to the user
+	UC_LOGGER_LEVEL_INFO,
+
+	//!< Debug purposes level
+	UC_LOGGER_LEVEL_DEBUG,
+};
+
+/**************************************************************************
+ *! @enum	UC_LOGGER_MODE
+ **************************************************************************
+ *
+ * @brief UC Logger operation mode
+ *
+ **************************************************************************/
+enum uc_logger_mode {
+	//!< Logger is disabled
+	UC_LOGGER_MODE_NONE,
+
+	//!< Messages are written to the standard output
+	UC_LOGGER_MODE_STDOUT,
+
+	//!< Local file. N/A
+//	UC_LOGGER_MODE_LOCAL_FILE,
+
+	//!< Messages are written to the host allocated memory
+	UC_LOGGER_MODE_WRITE_HOST_MEM,
+};
+
+/**************************************************************************
+ *! \enum	TSCD_NODE_CONF
+ **************************************************************************
+ *
+ * \brief TSCD node configuration valid bits. Used in modify existing node
+ *
+ **************************************************************************/
+enum tscd_node_conf {
+	//!< None
+	TSCD_NODE_CONF_NONE					=	0x0000,
+
+	//!< Suspend/Resume node
+	TSCD_NODE_CONF_SUSPEND_RESUME		=	0x0001,
+
+	//!< first child (Not relevant for queue)
+	TSCD_NODE_CONF_FIRST_CHILD			=	0x0002,
+
+	//!< last child (Not relevant for queue)
+	TSCD_NODE_CONF_LAST_CHILD			=	0x0004,
+
+	//!< 0 - BW Limit disabled >0 - define BW
+	TSCD_NODE_CONF_BW_LIMIT				=	0x0008,
+
+	//!< Best Effort enable
+	TSCD_NODE_CONF_BEST_EFFORT_ENABLE	=	0x0010,
+
+	//!< First Weighted-Round-Robin node (Not relevant for queue)
+	TSCD_NODE_CONF_FIRST_WRR_NODE		=	0x0020,
+
+	//!< Update predecessor 0 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_0		=	0x0040,
+
+	//!< Update predecessor 1 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_1		=	0x0080,
+
+	//!< Update predecessor 2 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_2		=	0x0100,
+
+	//!< Update predecessor 3 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_3		=	0x0200,
+
+	//!< Update predecessor 4 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_4		=	0x0400,
+
+	//!< Update predecessor 5 (Not relevant for port)
+	TSCD_NODE_CONF_PREDECESSOR_5		=	0x0800,
+
+	//!< Shared BW limit group (0: no shared BW limit, 1-511: group ID)
+	TSCD_NODE_CONF_SHARED_BWL_GROUP		=	0x1000,
+
+	//!< All flags are set
+	TSCD_NODE_CONF_ALL					=	0xFFFF
+};
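+
+/*
+ * The flags above are valid bits and are meant to be OR-ed together,
+ * e.g. (TSCD_NODE_CONF_SUSPEND_RESUME | TSCD_NODE_CONF_BW_LIMIT)
+ * marks both fields of a modify command as carrying new values.
+ */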
+
+/**************************************************************************
+ *! \enum	WRED_QUEUE_CONF
+ **************************************************************************
+ *
+ * \brief WRED queue configuration valid bits. Used in modify existing queue
+ *
+ **************************************************************************/
+enum wred_queue_conf {
+	//!< None
+	WRED_QUEUE_CONF_NONE				=	0x0000,
+
+	//!< Q is active
+	WRED_QUEUE_CONF_ACTIVE_Q			=	0x0001,
+
+	//!< Disable flags valid
+	WRED_QUEUE_CONF_DISABLE				=	0x0002,
+
+	//!< Use fixed green drop probability
+	WRED_QUEUE_CONF_FIXED_GREEN_DROP_P	=	0x0004,
+
+	//!< Use fixed yellow drop probability
+	WRED_QUEUE_CONF_FIXED_YELLOW_DROP_P	=	0x0008,
+
+	//!< Min average yellow
+	WRED_QUEUE_CONF_MIN_AVG_YELLOW		=	0x0010,
+
+	//!< Max average yellow
+	WRED_QUEUE_CONF_MAX_AVG_YELLOW		=	0x0020,
+
+	//!< Slope yellow
+	WRED_QUEUE_CONF_SLOPE_YELLOW		=	0x0040,
+
+	//!< INTERNAL CONFIGURATION. SHOULD NOT BE SET BY HOST
+	WRED_QUEUE_CONF_SHIFT_AVG_YELLOW	=	0x0080,
+
+	//!< Min average green
+	WRED_QUEUE_CONF_MIN_AVG_GREEN		=	0x0100,
+
+	//!< Max average green
+	WRED_QUEUE_CONF_MAX_AVG_GREEN		=	0x0200,
+
+	//!< Slope green
+	WRED_QUEUE_CONF_SLOPE_GREEN			=	0x0400,
+
+	//!< INTERNAL CONFIGURATION. SHOULD NOT BE SET BY HOST
+	WRED_QUEUE_CONF_SHIFT_AVG_GREEN		=	0x0800,
+
+	//!< Min guaranteed
+	WRED_QUEUE_CONF_MIN_GUARANTEED		=	0x1000,
+
+	//!< max allowed
+	WRED_QUEUE_CONF_MAX_ALLOWED			=	0x2000,
+
+	//!< All flags are set
+	WRED_QUEUE_CONF_ALL					=	0xFFFF
+};
+
+/**************************************************************************
+ *! \enum	PORT_CONF
+ **************************************************************************
+ *
+ * \brief Port configuration valid bits. Used in modify existing port
+ *
+ **************************************************************************/
+enum port_conf {
+	//!< None
+	PORT_CONF_NONE					=	0x0000,
+
+	//!< Ring Size
+	PORT_CONF_RING_SIZE				=	0x0001,
+
+	//!< Ring address high
+	PORT_CONF_RING_ADDRESS_HIGH		=	0x0002,
+
+	//!< Ring address low
+	PORT_CONF_RING_ADDRESS_LOW		=	0x0004,
+
+	//!< All flags are set
+	PORT_CONF_ALL					=	0xFFFF
+};
+
+/**************************************************************************
+ *! \struct	port_stats_s
+ **************************************************************************
+ *
+ * \brief Port stats
+ *
+ **************************************************************************/
+struct port_stats_s {
+	u32	total_green_bytes;
+	u32	total_yellow_bytes;
+
+	// Following stats cannot be reset
+	u32	debug_back_pressure_status;
+};
+
+/**************************************************************************
+ *! \struct	hw_node_info_s
+ **************************************************************************
+ *
+ * \brief HW node info
+ *
+ **************************************************************************/
+struct hw_node_info_s {
+	u32	first_child;
+	u32	last_child;
+	u32	is_suspended;
+	u32	bw_limit;
+	u32	predecessor0;
+	u32	predecessor1;
+	u32	predecessor2;
+	u32	predecessor3;
+	u32	predecessor4;
+	u32	predecessor5;
+	u32	queue_physical_id;
+	u32	queue_port;
+};
+
+/**************************************************************************
+ *! \enum	PORT_STATS_CLEAR_FLAGS
+ **************************************************************************
+ *
+ * \brief	port stats clear flags.
+ *			Used in get port stats command to set which stats
+ *			will be reset after read
+ *
+ **************************************************************************/
+enum port_stats_clear_flags {
+	//!< None
+	PORT_STATS_CLEAR_NONE					=	0x0000,
+
+	//!< Clear port total green bytes stats
+	PORT_STATS_CLEAR_TOTAL_GREEN_BYTES		=	0x0001,
+
+	//!< Clear port total yellow bytes stats
+	PORT_STATS_CLEAR_TOTAL_YELLOW_BYTES		=	0x0002,
+
+	//!< All above stats will be cleared
+	PORT_STATS_CLEAR_ALL					=	0xFFFF,
+};
+
+/**************************************************************************
+ *! \struct	queue_stats_s
+ **************************************************************************
+ *
+ * \brief Queue stats
+ *
+ **************************************************************************/
+struct queue_stats_s {
+	u32	queue_size_bytes;
+	u32	queue_average_size_bytes;
+	u32	queue_size_entries;
+	u32	drop_p_yellow;
+	u32	drop_p_green;
+	u32	total_bytes_added_low;
+	u32	total_bytes_added_high;
+	u32	total_accepts;
+	u32	total_drops;
+	u32	total_dropped_bytes_low;
+	u32	total_dropped_bytes_high;
+	u32	total_red_dropped;
+
+	// Following stats cannot be reset
+	u32	qmgr_num_queue_entries;
+	u32	qmgr_null_pop_queue_counter;
+	u32	qmgr_empty_pop_queue_counter;
+	u32	qmgr_null_push_queue_counter;
+};
+
+/**************************************************************************
+ *! \enum	QUEUE_STATS_CLEAR_FLAGS
+ **************************************************************************
+ *
+ * \brief	queue stats clear flags.
+ *			Used in get queue stats command to set which stats
+ *			will be reset after read
+ *
+ **************************************************************************/
+enum queue_stats_clear_flags {
+	//!< None
+	QUEUE_STATS_CLEAR_NONE					=	0x0000,
+
+	//!< Clear queue size bytes stats
+	QUEUE_STATS_CLEAR_Q_SIZE_BYTES			=	0x0001,
+
+	//!< Clear queue average size bytes stats
+	QUEUE_STATS_CLEAR_Q_AVG_SIZE_BYTES		=	0x0002,
+
+	//!< Clear queue size entries stats
+	QUEUE_STATS_CLEAR_Q_SIZE_ENTRIES		=	0x0004,
+
+	//!< Clear drop probability yellow stats
+	QUEUE_STATS_CLEAR_DROP_P_YELLOW			=	0x0008,
+
+	//!< Clear drop probability green stats
+	QUEUE_STATS_CLEAR_DROP_P_GREEN			=	0x0010,
+
+	//!< Clear total bytes added stats
+	QUEUE_STATS_CLEAR_TOTAL_BYTES_ADDED		=	0x0020,
+
+	//!< Clear total accepts stats
+	QUEUE_STATS_CLEAR_TOTAL_ACCEPTS			=	0x0040,
+
+	//!< Clear total drops stats
+	QUEUE_STATS_CLEAR_TOTAL_DROPS			=	0x0080,
+
+	//!< Clear total dropped bytes stats
+	QUEUE_STATS_CLEAR_TOTAL_DROPPED_BYTES	=	0x0100,
+
+	//!< Clear total RED drops stats
+	QUEUE_STATS_CLEAR_TOTAL_RED_DROPS		=	0x0200,
+
+	//!< All above stats will be cleared
+	QUEUE_STATS_CLEAR_ALL					=	0xFFFF,
+};
+
+/**************************************************************************
+ *! \struct	system_stats_s
+ **************************************************************************
+ *
+ * \brief system stats
+ *
+ **************************************************************************/
+struct system_stats_s {
+	u32	qmgr_cache_free_pages_counter;
+	u32	qmgr_sm_current_state;
+	u32	qmgr_cmd_machine_busy;
+	u32	qmgr_cmd_machine_pop_busy;
+	u32	qmgr_null_pop_counter;
+	u32	qmgr_empty_pop_counter;
+	u32	qmgr_null_push_counter;
+	u32	qmgr_ddr_stop_push_low_threshold;
+	u32	qmgr_fifo_error_register;
+	u32	qmgr_ocp_error_register;
+	u32	qmgr_cmd_machine_sm_current_state_0;
+	u32	qmgr_cmd_machine_sm_current_state_1;
+	u32	qmgr_cmd_machine_sm_current_state_2;
+	u32	qmgr_cmd_machine_sm_current_state_3;
+	u32	qmgr_cmd_machine_sm_current_state_4;
+	u32	qmgr_cmd_machine_sm_current_state_5;
+	u32	qmgr_cmd_machine_sm_current_state_6;
+	u32	qmgr_cmd_machine_sm_current_state_7;
+	u32	qmgr_cmd_machine_sm_current_state_8;
+	u32	qmgr_cmd_machine_sm_current_state_9;
+	u32	qmgr_cmd_machine_sm_current_state_10;
+	u32	qmgr_cmd_machine_sm_current_state_11;
+	u32	qmgr_cmd_machine_sm_current_state_12;
+	u32	qmgr_cmd_machine_sm_current_state_13;
+	u32	qmgr_cmd_machine_sm_current_state_14;
+	u32	qmgr_cmd_machine_sm_current_state_15;
+	u32	qmgr_cmd_machine_queue_0;
+	u32	qmgr_cmd_machine_queue_1;
+	u32	qmgr_cmd_machine_queue_2;
+	u32	qmgr_cmd_machine_queue_3;
+	u32	qmgr_cmd_machine_queue_4;
+	u32	qmgr_cmd_machine_queue_5;
+	u32	qmgr_cmd_machine_queue_6;
+	u32	qmgr_cmd_machine_queue_7;
+	u32	qmgr_cmd_machine_queue_8;
+	u32	qmgr_cmd_machine_queue_9;
+	u32	qmgr_cmd_machine_queue_10;
+	u32	qmgr_cmd_machine_queue_11;
+	u32	qmgr_cmd_machine_queue_12;
+	u32	qmgr_cmd_machine_queue_13;
+	u32	qmgr_cmd_machine_queue_14;
+	u32	qmgr_cmd_machine_queue_15;
+};
+
+/**************************************************************************
+ *! @enum	UC_QOS_COMMAND
+ **************************************************************************
+ *
+ * @brief UC QOS command enum. Must be synced with the Host definition
+ *
+ **************************************************************************/
+enum uc_qos_command {
+	UC_QOS_COMMAND_GET_FW_VERSION,
+	UC_QOS_COMMAND_MULTIPLE_COMMANDS,
+	UC_QOS_COMMAND_INIT_UC_LOGGER,
+	UC_QOS_COMMAND_INIT_QOS,
+	UC_QOS_COMMAND_ADD_PORT,
+	UC_QOS_COMMAND_REMOVE_PORT,
+	UC_QOS_COMMAND_ADD_SCHEDULER,
+	UC_QOS_COMMAND_REMOVE_SCHEDULER,
+	UC_QOS_COMMAND_ADD_QUEUE,
+	UC_QOS_COMMAND_REMOVE_QUEUE,
+	UC_QOS_COMMAND_FLUSH_QUEUE,
+	UC_QOS_COMMAND_SET_PORT,
+	UC_QOS_COMMAND_SET_SCHEDULER,
+	UC_QOS_COMMAND_SET_QUEUE,
+	UC_QOS_COMMAND_MOVE_SCHEDULER,
+	UC_QOS_COMMAND_MOVE_QUEUE,
+	UC_QOS_COMMAND_GET_PORT_STATS,
+	UC_QOS_COMMAND_GET_QUEUE_STATS,
+	UC_QOS_COMMAND_GET_SYSTEM_STATS,
+	UC_QOS_COMMAND_ADD_SHARED_BW_LIMIT_GROUP,
+	UC_QOS_COMMAND_REMOVE_SHARED_BW_LIMIT_GROUP,
+	UC_QOS_COMMAND_SET_SHARED_BW_LIMIT_GROUP,
+	UC_QOS_COMMAND_GET_NODE_INFO,
+	UC_QOS_COMMAND_DEBUG_READ_NODE,
+	UC_QOS_COMMAND_DEBUG_PUSH_DESC,
+	UC_QOS_COMMAND_DEBUG_ADD_CREDIT_TO_PORT,
+};
+
+/**************************************************************************
+ *! @struct	uc_qos_cmd_s
+ **************************************************************************
+ *
+ * @brief UC commands.
+ * This structure defines the Host <-->UC interface
+ *
+ **************************************************************************/
+struct uc_qos_cmd_s {
+	//!< Type of command (UC_QOS_COMMAND)
+	u32			type;
+
+	//!< Commands flags
+	u32			flags;
+#define	UC_CMD_FLAG_IMMEDIATE				BIT(0)
+#define	UC_CMD_FLAG_BATCH_FIRST				BIT(1)
+#define	UC_CMD_FLAG_BATCH_LAST				BIT(2)
+#define	UC_CMD_FLAG_MULTIPLE_COMMAND_LAST	BIT(3)
+#define	UC_CMD_FLAG_UC_DONE					BIT(4)
+#define	UC_CMD_FLAG_UC_ERROR				BIT(5)
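+/*
+ * The UC_DONE and UC_ERROR flags are presumably written back by the
+ * uc to report completion status; the remaining flags are set by the
+ * host when issuing the command.
+ */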
+
+	//!< Number of 32bit parameters available for this command.
+	// must be synced between the host and uc!
+	u32			num_params;
+
+	u32			param0;
+	u32			param1;
+	u32			param2;
+	u32			param3;
+	u32			param4;
+	u32			param5;
+	u32			param6;
+	u32			param7;
+	u32			param8;
+	u32			param9;
+	u32			param10;
+	u32			param11;
+	u32			param12;
+	u32			param13;
+	u32			param14;
+	u32			param15;
+	u32			param16;
+	u32			param17;
+	u32			param18;
+	u32			param19;
+	u32			param20;
+	u32			param21;
+	u32			param22;
+	u32			param23;
+	u32			param24;
+	u32			param25;
+};
+
+#endif /* SRC_UC_HOST_DEFS_H_ */
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_wrapper.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_wrapper.h
new file mode 100644
index 000000000..24c375248
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_uc_wrapper.h
@@ -0,0 +1,34 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef PP_QOS_UC_H
+#define PP_QOS_UC_H
+
+typedef uint32_t u32;
+
+#include "pp_qos_uc_defs.h"
+
+#endif
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.c b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.c
new file mode 100644
index 000000000..e342db324
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.c
@@ -0,0 +1,2518 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#include "pp_qos_common.h"
+#include "pp_qos_fw.h"
+#include "pp_qos_utils.h"
+
+//#define VERY_VERBOSE_TESTS
+#ifdef VERY_VERBOSE_TESTS
+
+#define ISSUE_BW_CMD(phy, numerator, denominator) do {			\
+	if (denominator)						\
+		QOS_LOG("Issue bw change for phy %u: %u : %u\n",	\
+			phy, numerator, denominator);			\
+} while (0)
+
+#define ISSUE_DELETE_NODE(phy) QOS_LOG("Issue node %u delete\n", phy)
+
+#define ISSUE_FLUSH_NODE(phy) QOS_LOG("Issue node %u flush\n", phy)
+
+#else
+#define ISSUE_BW_CMD(phy, numerator, denominator)
+
+#define ISSUE_DELETE_NODE(phy)
+
+#define ISSUE_FLUSH_NODE(phy)
+
+#endif
+
+#define QOS_INVALID_PRIORITY   0xF
+
+/******************************************************************************/
+/*                                 OCTETS                                     */
+/******************************************************************************/
+struct octet_info {
+	struct list_head list;
+	uint8_t usage;
+};
+
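+/*
+ * info[] tracks how many of an octet's eight entries are in use.
+ * Non-port octets are also linked into lists[usage] (usage 0..8), so
+ * an octet with a given number of free entries can be found in O(1).
+ * Port octets are kept off the lists.
+ */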
+struct pp_octets {
+	unsigned int last_port_octet;
+	struct octet_info info[NUM_OF_OCTETS];
+	struct list_head lists[9];
+};
+
+static unsigned int octet_get_use_count(const struct pp_octets *octets,
+					unsigned int octet)
+{
+	return octets->info[octet].usage;
+}
+
+unsigned int octet_get_with_at_least_free_entries(struct pp_octets *octets,
+						  unsigned int count)
+{
+	unsigned int id;
+	struct list_head *list;
+
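+	/*
+	 * An octet with at least count free entries has usage <= 8 - count.
+	 * Scan from the fullest eligible list downwards so that partially
+	 * used octets are preferred and octets stay tightly packed.
+	 */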
+	for (list = octets->lists + 8 - count; list >= octets->lists; --list)  {
+		if (!list_empty(list)) {
+			id = list_entry(list->next, struct octet_info, list) -
+				octets->info;
+			list_del_init(list->next);
+			return id;
+		}
+	}
+
+	return QOS_INVALID_OCTET;
+}
+
+unsigned int octet_get_least_free_entries(struct pp_octets *octets)
+{
+	return octet_get_with_at_least_free_entries(octets, 1);
+}
+
+static void octet_set_use_count(struct pp_octets *octets,
+				unsigned int octet,
+				unsigned int usage)
+{
+	struct list_head *list;
+
+	QOS_ASSERT(QOS_OCTET_VALID(octet), "Illegal octet %u\n", octet);
+	QOS_ASSERT(usage <= 8, "Illegal usage %u\n", usage);
+	octets->info[octet].usage = usage;
+	if (octet > octets->last_port_octet) {
+		list = octets->lists + usage;
+		list_move_tail(&(octets->info[octet].list), list);
+	}
+}
+
+STATIC_UNLESS_TEST struct pp_octets *octets_init(unsigned int last_port_octet)
+{
+	unsigned int i;
+	struct octet_info *info;
+	struct pp_octets *octets;
+
+	octets = QOS_MALLOC(sizeof(struct pp_octets));
+	if (octets) {
+		for (i = 0; i < 9; ++i)
+			INIT_LIST_HEAD(octets->lists + i);
+
+		info = octets->info;
+		for (i = 0; i < NUM_OF_OCTETS; ++i) {
+			info->usage = 0;
+			if (i > last_port_octet)
+				list_add_tail(&info->list, octets->lists);
+			else
+				INIT_LIST_HEAD(&info->list);
+
+			++info;
+		}
+		octets->last_port_octet = last_port_octet;
+	}
+	return octets;
+}
+
+STATIC_UNLESS_TEST void octets_clean(struct pp_octets *octets)
+{
+	if (octets)
+		QOS_FREE(octets);
+}
+
+#ifdef PP_QOS_TEST
+STATIC_UNLESS_TEST void debug_verify_octet_usage(struct pp_octets *octets,
+						 unsigned int octet,
+						 unsigned int usage)
+{
+	struct list_head *list;
+	struct list_head *pos;
+	unsigned int tmp;
+
+	tmp = octet_get_use_count(octets, octet);
+	QOS_ASSERT(tmp == usage, "Octet %u count is %u and not %u\n", octet,
+		   tmp, usage);
+
+	list = &(octets->info[octet].list);
+	if (octet > octets->last_port_octet) {
+		for (pos = list->next; pos != list; pos = pos->next) {
+			if (pos >= octets->lists && pos <= octets->lists + 8) {
+				tmp = (pos - octets->lists);
+				QOS_ASSERT(tmp == usage,
+					   "Octet is on list %u but should be on %u\n",
+					   tmp, usage);
+			}
+		}
+	} else {
+		QOS_ASSERT(list_empty(list),
+			"Octet %u is on a list though its a port octet\n",
+			octet);
+	}
+}
+#endif
+
+/******************************************************************************/
+/*                        Ids, Rlms and Port phys pools                       */
+/******************************************************************************/
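+/*
+ * A pool is a LIFO stack of 16 bit values; data[] is allocated large
+ * enough for capacity entries and top indexes the next free slot.
+ * pp_pool_get() returns the pool's invalid marker when empty.
+ */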
+struct pp_pool {
+	unsigned int capacity;
+	unsigned int top;
+	uint16_t invalid;
+	uint16_t data[1];
+};
+
+static struct pp_pool *pp_pool_init(unsigned int capacity, uint16_t invalid)
+{
+	struct pp_pool *tmp;
+
+	tmp = QOS_MALLOC(sizeof(struct pp_pool) +
+			(capacity - 1) * sizeof(uint16_t));
+	if (tmp) {
+		tmp->capacity = capacity;
+		tmp->top = 0;
+		tmp->invalid = invalid;
+	}
+	return tmp;
+}
+
+uint16_t pp_pool_get(struct pp_pool *pool)
+{
+	uint16_t tmp;
+	unsigned int top;
+
+	top = pool->top;
+	if (top > 0) {
+		--top;
+		tmp = pool->data[top];
+		pool->top = top;
+		return tmp;
+	}
+	return pool->invalid;
+}
+
+int pp_pool_put(struct pp_pool *pool, uint16_t data)
+{
+	unsigned int top;
+
+	top = pool->top;
+	if (top < pool->capacity) {
+		pool->data[top] = data;
+		pool->top = top + 1;
+		return 0;
+	}
+	return -1;
+}
+
+static void pp_pool_clean(struct pp_pool *pool)
+{
+	if (pool)
+		QOS_FREE(pool);
+}
+
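+/*
+ * Ids and rlms are pushed in descending order so that pp_pool_get()
+ * hands them out starting from 0.
+ */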
+static struct pp_pool *free_id_init(void)
+{
+	unsigned int i;
+	struct pp_pool *tmp;
+
+	tmp = pp_pool_init(NUM_OF_NODES, QOS_INVALID_ID);
+	if (tmp)
+		for (i = NUM_OF_NODES; i > 0; --i)
+			pp_pool_put(tmp, i - 1);
+
+	return tmp;
+}
+
+static struct pp_pool *free_rlm_init(void)
+{
+	unsigned int i;
+	struct pp_pool *tmp;
+
+	tmp = pp_pool_init(NUM_OF_QUEUES, QOS_INVALID_ID);
+	if (tmp)
+		for (i = NUM_OF_QUEUES; i > 0; --i)
+			pp_pool_put(tmp, i - 1);
+
+	return tmp;
+}
+
+struct pp_pool *free_ports_phys_init(unsigned int *reserved,
+				     unsigned int max_port,
+				     const unsigned int *reserved_ports,
+				     unsigned int size)
+{
+	unsigned int i;
+	unsigned int res;
+	unsigned int tmp;
+	struct pp_pool *pool;
+
+	res = 0;
+	tmp = min(size, max_port + 1);
+	memcpy(reserved, reserved_ports, tmp * sizeof(unsigned int));
+
+	for (i = 0; i < tmp; ++i)
+		if (reserved[i] == 1)
+			++res;
+
+	tmp = max_port + 1 - res;
+	pool = pp_pool_init(tmp, QOS_INVALID_PHY);
+	if (pool)
+		for (i = max_port + 1; i > 0; --i)
+			if (reserved[i - 1] == 0)
+				pp_pool_put(pool, i - 1);
+	return pool;
+}
+
+/******************************************************************************/
+/*                                 Queue                                      */
+/******************************************************************************/
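+/*
+ * A FIFO of 16 bit values built from a fixed set of q_items that move
+ * between a free list and a used list, so enqueue and dequeue never
+ * allocate memory.
+ */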
+struct q_item {
+	struct list_head list;
+	uint16_t data;
+};
+
+struct pp_queue {
+	unsigned int capacity;
+	struct list_head used;
+	struct list_head free;
+	struct q_item items[1];
+};
+
+uint16_t pp_queue_dequeue(struct pp_queue *queue)
+{
+	struct q_item *item;
+	int rc;
+
+	if (!list_empty(&queue->used)) {
+		item = list_entry(queue->used.next, struct q_item, list);
+		rc = item->data;
+		list_move_tail(&item->list, &queue->free);
+	} else {
+		QOS_LOG("queue is empty\n");
+		rc = QOS_INVALID_ID;
+	}
+	return rc;
+}
+
+int pp_queue_enqueue(struct pp_queue *queue, uint16_t data)
+{
+	struct q_item *item;
+
+	if (!list_empty(&queue->free)) {
+		item = list_entry(queue->free.next, struct q_item, list);
+		item->data = data;
+		list_move_tail(&item->list, &queue->used);
+		return 0;
+	}
+	QOS_LOG("queue is full\n");
+	return -1;
+}
+
+static int queue_is_reset(struct pp_queue *queue)
+{
+	unsigned int i;
+	struct list_head *pos;
+
+	if (!list_empty(&queue->used))
+		return 0;
+
+	i = 0;
+	list_for_each(pos, &queue->free) ++i;
+
+	return (i == queue->capacity);
+}
+
+void pp_queue_reset(struct pp_queue *queue)
+{
+	list_splice_tail_init(&queue->used, &queue->free);
+	QOS_ASSERT(queue_is_reset(queue), "Queue is not reset\n");
+}
+
+static void pp_queue_clean(struct pp_queue *queue)
+{
+	if (queue)
+		QOS_FREE(queue);
+}
+
+static struct pp_queue *pp_queue_init(unsigned int capacity)
+{
+	struct pp_queue *queue;
+	unsigned int i;
+
+	queue = QOS_MALLOC(sizeof(struct pp_queue) +
+			   sizeof(struct q_item) * (capacity - 1));
+	if (queue) {
+		queue->capacity = capacity;
+		INIT_LIST_HEAD(&(queue->free));
+		INIT_LIST_HEAD(&(queue->used));
+		for (i = 0; i < queue->capacity; ++i)
+			list_add_tail(&(queue->items[i].list), &(queue->free));
+	}
+	return queue;
+}
+
+/******************************************************************************/
+/*                             Cmd Queue                                      */
+/******************************************************************************/
+#define CMD_QUEUE_SIZE 16384LU
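+/*
+ * Fixed size circular byte buffer: write and read chase each other
+ * around data[], and count disambiguates the full and empty cases in
+ * which both indices are equal.
+ */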
+struct cmd_queue {
+	size_t read;  /* next index to read */
+	size_t write; /* next free index to write */
+	size_t count; /* how many bytes are in */
+	uint8_t data[CMD_QUEUE_SIZE];
+};
+
+static struct cmd_queue *cmd_queue_init(void)
+{
+	struct cmd_queue *q;
+
+	q = QOS_MALLOC(sizeof(struct cmd_queue));
+	if (q) {
+		q->read = 0;
+		q->write = 0;
+		q->count = 0;
+	}
+	return q;
+}
+
+static void cmd_queue_clean(struct cmd_queue *q)
+{
+	if (q)
+		QOS_FREE(q);
+}
+
+int cmd_queue_put(struct cmd_queue *q, void *_cmd, size_t size)
+{
+	unsigned int toend;
+	uint8_t *cmd;
+
+	cmd = _cmd;
+
+	if (CMD_QUEUE_SIZE - q->count < size) {
+		QOS_LOG_ERR("%lu bytes free, can't accommodate %zu bytes\n",
+				CMD_QUEUE_SIZE - q->count, size);
+		return -1;
+	}
+
+	toend = CMD_QUEUE_SIZE - q->write;
+	if (toend >= size) {
+		memcpy(q->data + q->write, cmd, size);
+		q->write += size;
+	} else {
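+		/* write wraps: fill up to the buffer end, continue at data[0] */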
+		memcpy(q->data + q->write, cmd, toend);
+		memcpy(q->data, cmd + toend, size - toend);
+		q->write = size - toend;
+	}
+
+	q->count += size;
+
+	return 0;
+}
+
+static int cmd_queue_read(struct cmd_queue *q,
+		void *_cmd,
+		size_t size,
+		int remove)
+{
+	unsigned int toend;
+	uint8_t *cmd;
+
+	cmd = _cmd;
+
+	if (q->count < size) {
+		QOS_LOG_DEBUG("has only %zu bytes, can't retrieve %zu bytes\n",
+				q->count, size);
+		return -1;
+	}
+
+	toend = CMD_QUEUE_SIZE - q->read;
+	if (toend >= size) {
+		memcpy(cmd, q->data + q->read, size);
+		if (remove)
+			q->read += size;
+	} else {
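+		/* wrapped record: copy the tail of data[], then its head */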
+		memcpy(cmd, q->data + q->read, toend);
+		memcpy(cmd + toend, q->data, size - toend);
+		if (remove)
+			q->read = size - toend;
+	}
+
+	if (remove)
+		q->count -= size;
+
+	return 0;
+}
+
+int cmd_queue_get(struct cmd_queue *q, void *_cmd, size_t size)
+{
+	return cmd_queue_read(q, _cmd, size, 1);
+}
+
+int cmd_queue_peek(struct cmd_queue *q, void *_cmd, size_t size)
+{
+	return cmd_queue_read(q, _cmd, size, 0);
+}
+
+int cmd_queue_is_empty(struct cmd_queue *q)
+{
+	return (q->count == 0);
+}
+
+/******************************************************************************/
+/*                                 Nodes                                      */
+/******************************************************************************/
+struct pp_nodes {
+	struct qos_node nodes[NUM_OF_NODES];
+};
+
+static struct pp_nodes *pp_nodes_init(void)
+{
+	struct pp_nodes *nodes;
+
+	nodes = QOS_MALLOC(sizeof(struct pp_nodes));
+	if (nodes)
+		memset(nodes, 0, sizeof(struct pp_nodes));
+
+	return nodes;
+}
+
+unsigned int get_phy_from_node(const struct pp_nodes *nodes,
+		const struct qos_node *node)
+{
+	QOS_ASSERT(node >= nodes->nodes && node <= nodes->nodes + NUM_OF_NODES,
+		   "invalid node pointer\n");
+	return node - nodes->nodes;
+}
+
+struct qos_node *get_node_from_phy(struct pp_nodes *nodes, unsigned int phy)
+{
+	QOS_ASSERT(QOS_PHY_VALID(phy), "invalid phy %u\n", phy);
+	return nodes->nodes + phy;
+}
+
+const struct qos_node *get_const_node_from_phy(const struct pp_nodes *nodes,
+					       unsigned int phy)
+{
+	QOS_ASSERT(QOS_PHY_VALID(phy), "invalid phy %u\n", phy);
+	return nodes->nodes + phy;
+}
+
+static void pp_nodes_clean(struct pp_nodes *nodes)
+{
+	if (nodes)
+		QOS_FREE(nodes);
+}
+
+/******************************************************************************/
+/*                                 Mapping                                    */
+/******************************************************************************/
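+/*
+ * Bidirectional id<->phy lookup tables. Unused slots hold
+ * QOS_INVALID_PHY/QOS_INVALID_ID, and an id reserved through
+ * map_id_reserved() points to QOS_UNKNOWN_PHY until a phy is assigned.
+ */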
+struct pp_mapping {
+	uint16_t id2phy[NUM_OF_NODES];
+	uint16_t phy2id[NUM_OF_NODES];
+};
+
+unsigned int get_id_from_phy(const struct pp_mapping *map, unsigned int phy)
+{
+	QOS_ASSERT(QOS_PHY_VALID(phy), "invalid phy %u\n", phy);
+	return map->phy2id[phy];
+}
+
+unsigned int get_phy_from_id(const struct pp_mapping *map, unsigned int id)
+{
+	QOS_ASSERT(QOS_ID_VALID(id), "invalid id %u\n", id);
+	return map->id2phy[id];
+}
+
+void map_id_phy(struct pp_mapping *map, unsigned int id, unsigned int phy)
+{
+	QOS_ASSERT(QOS_ID_VALID(id), "invalid id %u\n", id);
+	QOS_ASSERT(QOS_PHY_VALID(phy), "invalid phy %u\n", phy);
+	map->id2phy[id] = phy;
+	map->phy2id[phy] = id;
+}
+
+void map_id_reserved(struct pp_mapping *map, unsigned int id)
+{
+	QOS_ASSERT(QOS_ID_VALID(id), "invalid id %u\n", id);
+	map->id2phy[id] = QOS_UNKNOWN_PHY;
+}
+
+static struct pp_mapping *pp_mapping_init(void)
+{
+	unsigned int i;
+	struct pp_mapping *map;
+
+	map = QOS_MALLOC(sizeof(struct pp_mapping));
+	if (map) {
+		for (i = 0; i < NUM_OF_NODES; ++i) {
+			map->id2phy[i] = QOS_INVALID_PHY;
+			map->phy2id[i] = QOS_INVALID_ID;
+		}
+	}
+	return map;
+}
+
+static void pp_mapping_clean(struct pp_mapping *map)
+{
+	if (map)
+		QOS_FREE(map);
+}
+
+void map_invalidate_id(struct pp_mapping *map, unsigned int id)
+{
+	unsigned int phy;
+
+	QOS_ASSERT(QOS_ID_VALID(id), "invalid id %u\n", id);
+	phy = get_phy_from_id(map, id);
+	QOS_ASSERT(QOS_PHY_VALID(phy), "invalid phy is mapped to id %u\n", id);
+
+	map->id2phy[id] = QOS_INVALID_PHY;
+	map->phy2id[phy] = QOS_INVALID_ID;
+}
+
+/******************************************************************************/
+/*                                 Moving                                     */
+/******************************************************************************/
+#define MAX_MOVING_NODES 8
+/*
+ * Return 1 if used status of all count nodes starting from first_phy equals
+ * status
+ */
+static int is_all_nodes_used_status(const struct pp_qos_dev *qdev,
+		unsigned int first_phy, unsigned int count, unsigned int status)
+{
+	const struct qos_node *cur;
+	unsigned int used;
+
+	cur = get_const_node_from_phy(qdev->nodes, first_phy);
+	for (; count; --count) {
+		used = !!QOS_BITS_IS_SET(cur->flags, QOS_NODE_FLAGS_USED);
+		if (used != status)
+			return 0;
+		cur++;
+	}
+	return 1;
+}
+
+/* is child_phy a child of parent_phy */
+static int node_child_of(struct pp_nodes *nodes,
+		unsigned int child_phy, unsigned int parent_phy)
+{
+	const struct qos_node *parent;
+
+	parent = get_const_node_from_phy(nodes, parent_phy);
+
+	return (child_phy >= parent->parent_prop.first_child_phy &&
+		child_phy <= parent->parent_prop.first_child_phy +
+		parent->parent_prop.num_of_children - 1);
+}
+
+/**
+ * node_update_children_internal() - Update children's parent
+ *
+ * @nodes:     original nodes (qdev->nodes)
+ * @tmp_nodes: a window (buffer) holding a copy of some nodes from
+ *	       the original nodes
+ * @first:     The phy that the first node of tmp_nodes
+ *             represents
+ * @count:     Number of nodes held in the tmp_nodes window
+ * @phy:       original phy of parent
+ * @new_phy:   new phy of parent
+ *
+ * Does not create update predecessor cmds.
+ * Children whose phy falls inside the tmp_nodes window are updated
+ * on tmp_nodes, the rest directly on nodes.
+ */
+static void node_update_children_internal(
+				   struct pp_nodes *nodes,
+				   struct qos_node *tmp_nodes,
+				   unsigned int first,
+				   unsigned int count,
+				   unsigned int phy,
+				   unsigned int new_phy)
+
+{
+	unsigned int last;
+	unsigned int cur;
+	struct qos_node *child;
+	struct qos_node *parent;
+
+	parent = get_node_from_phy(nodes, phy);
+	cur = parent->parent_prop.first_child_phy;
+	last = cur + parent->parent_prop.num_of_children - 1;
+	for (; cur <= last; ++cur) {
+		if (cur >= first && cur < first + count)
+			child = tmp_nodes + cur - first;
+		else
+			child = get_node_from_phy(nodes, cur);
+		child->child_prop.parent_phy = new_phy;
+	}
+}
+
+/*
+ * Update the children of a parent which is moving from phy to new_phy
+ * Children parent_phy's is updated
+ */
+void node_update_children(
+			  struct pp_qos_dev *qdev,
+			  unsigned int phy,
+			  unsigned int new_phy)
+{
+	unsigned int num;
+	struct qos_node *child;
+	struct qos_node *parent;
+
+	parent = get_node_from_phy(qdev->nodes, phy);
+	child = get_node_from_phy(
+			qdev->nodes,
+			parent->parent_prop.first_child_phy);
+	num = parent->parent_prop.num_of_children;
+	for (; num; --num)  {
+		child->child_prop.parent_phy = new_phy;
+		++child;
+	}
+}
+
+/*
+ * Update parent's first child phy and num of children, given that count nodes
+ * starting from src_phy are moving to dst_phy
+ *
+ * Note number of children is calculated as last_child - first_child + 1
+ */
+static int node_update_parent(struct pp_qos_dev *qdev,
+		struct qos_node *tmpnodes, unsigned int src_first,
+		unsigned int size, unsigned int src_phy, unsigned int dst_phy,
+		unsigned int count)
+{
+	unsigned int first;
+	unsigned int last;
+	unsigned int num;
+	unsigned int parent_phy;
+	unsigned int moving;
+	struct qos_node *parent;
+	struct qos_node *child;
+	struct pp_nodes *nodes;
+
+	nodes = qdev->nodes;
+	child = get_node_from_phy(nodes, src_phy);
+	parent_phy = child->child_prop.parent_phy;
+
+	if (parent_phy >= src_first && parent_phy < src_first + size)
+		parent = tmpnodes + parent_phy - src_first;
+	else
+		parent = get_node_from_phy(nodes, parent_phy);
+
+	QOS_ASSERT(node_parent(parent),
+			"Node %u suppose to be a parent but is not\n",
+			get_phy_from_node(nodes, parent));
+
+	QOS_ASSERT(node_child_of(nodes, src_phy, parent_phy),
+			"%u is not child of %u\n",
+			src_phy, parent_phy);
+
+	first = parent->parent_prop.first_child_phy;
+	num = parent->parent_prop.num_of_children;
+	last = first + num - 1;
+	QOS_ASSERT(same_octet(first, last),
+			"source nodes %u and %u are not in the same octet\n",
+			first, last);
+
+	/* Number of children going to move */
+	moving = min(count, num - (src_phy - first));
+	QOS_ASSERT(same_octet(dst_phy, dst_phy + moving - 1),
+			"destination nodes %u and %u are not in the same octet\n",
+			dst_phy, dst_phy + moving - 1);
+
+	if (moving == num) { /* all children are moving */
+		parent->parent_prop.first_child_phy = dst_phy;
+	} else {
+		QOS_ASSERT(same_octet(dst_phy, src_phy),
+				"src phy %u and dst phy %u are not in same octet\n",
+				src_phy, dst_phy);
+		if (first != src_phy) /* first is not moving */
+			first = min(first, dst_phy);
+		else   /* first is moving */
+			first = min(first + moving, dst_phy);
+
+		if (last != src_phy + moving - 1)  /* last is not moving */
+			last = max(last, dst_phy + moving - 1);
+		else  /* last is moving */
+			last = max(last - moving, dst_phy + moving - 1);
+
+		QOS_ASSERT(same_octet(first, last),
+				"%u and %u are in different octets\n",
+				first, last);
+		parent->parent_prop.first_child_phy = first;
+		parent->parent_prop.num_of_children = last - first + 1;
+	}
+
+	return moving;
+}
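+
+/*
+ * Worked example (hypothetical phys): a parent owns children 16..19
+ * (first = 16, num = 4, last = 19) and children 17..19 shift right by
+ * one slot (src_phy = 17, count = 3, dst_phy = 18). Then moving = 3,
+ * which differs from num; first (16) != src_phy so first stays
+ * min(16, 18) = 16, while last (19) == src_phy + moving - 1 and thus
+ * becomes max(19 - 3, 18 + 3 - 1) = 20. The recorded range [16..20]
+ * temporarily spans the vacated slot at 17, which the caller is
+ * expected to fill, e.g. with a new child
+ */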
+
+/*
+ * Given count nodes starting at src_phy [src_phy..src_phy + count - 1],
+ * that are going to move to [dst_phy..dst_phy + count - 1]:
+ *
+ * Update id<->phy mapping for each node that is moved
+ *
+ * For each node that is a parent node
+ *   Update parent_phy of each of its children
+ *
+ * For each group of children that have the same parent
+ *   Update parent's first_child_phy and num of children
+ *
+ */
+static void nodes_update_stake_holders(struct pp_qos_dev *qdev,
+		unsigned int src_phy, unsigned int dst_phy, unsigned int count)
+{
+	unsigned int i;
+	unsigned int tmp;
+	struct qos_node *next_updated_parent;
+	struct qos_node *node;
+	struct pp_mapping *map;
+	struct qos_node tmp_nodes[MAX_MOVING_NODES];
+	unsigned int tmp_map[MAX_MOVING_NODES];
+	unsigned int updated_parents[MAX_MOVING_NODES];
+	unsigned int index;
+
+	QOS_ASSERT(count < MAX_MOVING_NODES,
+			"Trying to update %u nodes but count must be less than %u\n",
+			count, MAX_MOVING_NODES);
+	map = qdev->mapping;
+	index = 0;
+
+	memcpy(&tmp_nodes, qdev->nodes->nodes + src_phy,
+			sizeof(struct qos_node) * count);
+
+	/*
+	 * Invalidate current mapping, update children and parents of
+	 * moved nodes
+	 */
+	node = get_node_from_phy(qdev->nodes, src_phy);
+	next_updated_parent = node;
+	for (i = src_phy; i < src_phy + count; ++i) {
+		tmp = get_id_from_phy(map, i);
+		tmp_map[i - src_phy] = tmp;
+		map_invalidate_id(map, tmp);
+		if (node_parent(node))
+			node_update_children_internal(
+					qdev->nodes,
+					tmp_nodes,
+					src_phy,
+					count,
+					i,
+					dst_phy + i - src_phy);
+
+		if (node_child(node) && (node >= next_updated_parent)) {
+			next_updated_parent = node +
+				node_update_parent(qdev, tmp_nodes,
+						src_phy, count,
+						i, dst_phy + i - src_phy,
+						count - (i - src_phy));
+			updated_parents[index++] = node->child_prop.parent_phy;
+		}
+
+		node++;
+	}
+
+	/* map all previous invalidated ids to their new phy */
+	for (i = dst_phy; i < dst_phy + count; ++i) {
+		tmp = tmp_map[i - dst_phy];
+		QOS_ASSERT(QOS_ID_VALID(tmp), "Invalid id %u on phy %u\n",
+				tmp, i);
+		map_id_phy(map, tmp, i);
+	}
+
+	memcpy(qdev->nodes->nodes + src_phy, tmp_nodes,
+	       sizeof(struct qos_node) * count);
+
+	for (i = 0; i < index; ++i)
+		create_parent_change_cmd(qdev, updated_parents[i]);
+}
+
+/*
+ * For small arrays this is probably quicker than
+ * the heap sort the kernel uses
+ */
+static unsigned int remove_duplicates(unsigned int *data, unsigned int size)
+{
+	unsigned int i;
+	unsigned int j;
+
+	QOS_ASSERT(size <= MAX_MOVING_NODES,
+			"This function is intended for arrays of up to %d elements\n",
+			MAX_MOVING_NODES);
+
+	for (i = 0; i < size; ++i) {
+		for (j = i + 1; j < size; ++j) {
+			if (data[i] == data[j]) {
+				--size;
+				data[j] = data[size];
+				--j;
+			}
+		}
+	}
+
+	return size;
+}
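+
+/*
+ * Example: data = {3, 7, 3, 9}, size = 4. The duplicate 3 at index 2
+ * is overwritten by the last element, giving data = {3, 7, 9} and a
+ * returned size of 3 (element order is not preserved in general)
+ */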
+
+/*
+ * Given a group A of count nodes starting at start, find a group B which
+ * satisfies:
+ *
+ * - Each element in B is a direct parent of an element from A
+ * - B has no duplicates
+ * - If (external)
+ *	- No element in B belongs to A
+ *	- Every element in A is a descendant of some element in B
+ *   Else
+ *	- Each element in B belongs to A
+ *	- The parent of each element in B does not belong to A
+ *	- Every element in A is either in B or has an ancestor in B
+ *
+ * B is stored on data; the function returns the number of elements in B
+ */
+static unsigned int parents_span(struct pp_nodes *nodes, unsigned int external,
+		unsigned int start, unsigned int count,
+		unsigned int *data, unsigned int size)
+{
+	unsigned int i;
+	unsigned int parent_phy;
+	unsigned int cur_phy;
+	const struct qos_node *cur;
+
+	QOS_ASSERT(size >= count,
+			"Array size %u is smaller than number of moved nodes %u\n",
+			size, count);
+	i = 0;
+	cur = get_const_node_from_phy(nodes, start);
+	for (cur_phy = start; cur_phy < start + count; ++cur_phy) {
+		QOS_ASSERT((cur->type == TYPE_UNKNOWN) ||
+				node_child(cur), "Node is not a child\n");
+		parent_phy = cur->child_prop.parent_phy;
+		/* parent not moving */
+		if ((parent_phy < start) ||
+				(parent_phy >= start + count)) {
+			if (external)
+				data[i] = parent_phy;
+			else
+				data[i] = cur_phy;
+			++i;
+		}
+		++cur;
+	}
+
+	return remove_duplicates(data, i);
+}
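+
+/*
+ * Example (hypothetical phys): A = {8, 9, 10}, where nodes 8 and 10 are
+ * children of node 5 and node 9 is a child of node 8. With
+ * external == 1 the function stores B = {5}, the only parent outside A;
+ * with external == 0 it stores B = {8, 10}, the members of A whose
+ * parent lies outside A
+ */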
+
+/*
+ * If the src and dst regions don't overlap then we can use
+ * either strategy, "move last src first" or "move first src first".
+ * If they overlap and the move is forward (src < dst) we use
+ * "move last src first".
+ * If they overlap and the move is backward (src > dst) we use
+ * "move first src first"
+ */
+static void create_multiple_move_cmds(struct pp_qos_dev *qdev,
+		unsigned int dst_phy,
+		unsigned int src_phy,
+		unsigned int count)
+{
+	unsigned int dst_port;
+	unsigned int i;
+	unsigned int dst;
+	unsigned int src;
+	unsigned int delta;
+
+	if (src_phy < dst_phy) {
+		src = src_phy + count - 1;
+		dst = dst_phy + count - 1;
+		delta = ~0; /* unsigned wrap-around, acts as -1 */
+	} else {
+		src = src_phy;
+		dst = dst_phy;
+		delta = 1;
+	}
+
+	for (i = 0; i < count; ++i) {
+		dst_port = get_port(qdev->nodes, dst);
+		create_move_cmd(qdev, dst, src, dst_port);
+		src += delta;
+		dst += delta;
+	}
+}
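+
+/*
+ * Example: moving phys 16..18 forward to 17..19 (overlapping) issues
+ * the move commands in the order 18->19, 17->18, 16->17, so no node is
+ * overwritten before it has been moved
+ */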
+
+/**
+ * nodes_move() - Move nodes
+ *
+ * Implement the core functionality of moving nodes to a different phy.
+ * This involves updating the stakeholder nodes, e.g. the parents and
+ * children of the moved nodes, and updating the id<->phy mapping
+ *
+ * @qdev:
+ * @dst_phy: Dest phy of first node
+ * @src_phy: Source phy of first node
+ * @count: Number of nodes to move
+ *
+ * Notes:
+ * - All src nodes must be used; all dst nodes which don't overlap src
+ *   nodes must be unused
+ * - The number of moving nodes must be smaller than MAX_MOVING_NODES
+ * - If not all children of a parent are moving then the move
+ *   must stay within octet boundaries - e.g. a shift
+ *
+ */
+STATIC_UNLESS_TEST void nodes_move(struct pp_qos_dev *qdev,
+		unsigned int dst_phy,
+		unsigned int src_phy,
+		unsigned int count)
+{
+	unsigned int i;
+#if 0
+	unsigned int suspend_size;
+	unsigned int suspend[MAX_MOVING_NODES];
+#endif
+	unsigned int ances_size;
+	unsigned int ancestors[MAX_MOVING_NODES];
+#if 0
+	const struct qos_node *node;
+#endif
+
+	QOS_ASSERT(src_phy != dst_phy,
+			"src and dst are the same %u\n",
+			src_phy);
+	QOS_ASSERT(count < MAX_MOVING_NODES,
+			"Can't move %u nodes, count must be less than %u\n",
+			count, MAX_MOVING_NODES);
+	if (count) {
+	#if 0
+		suspend_size = parents_span(qdev->nodes, 1,
+				src_phy, count, suspend, MAX_MOVING_NODES);
+		for (i = 0; i < suspend_size; ++i)
+			create_suspend_cmd(qdev, suspend[i]);
+	#endif
+
+		nodes_update_stake_holders(qdev, src_phy, dst_phy, count);
+		nodes_modify_used_status(qdev, src_phy, count, 0);
+		QOS_ASSERT(is_all_nodes_used_status(qdev, dst_phy, count, 0),
+			   "Some of the destination nodes are in use\n");
+		memmove(get_node_from_phy(qdev->nodes, dst_phy),
+				get_node_from_phy(qdev->nodes, src_phy),
+				count * sizeof(struct qos_node));
+		nodes_modify_used_status(qdev, dst_phy, count, 1);
+
+		create_multiple_move_cmds(qdev, dst_phy, src_phy, count);
+
+#if 0
+		/* FW will delete the src node on a move command */
+		/* Issue delete node cmd to each node */
+		node = get_const_node_from_phy(qdev->nodes, src_phy);
+		for (i = 0; i < count; ++i) {
+			if (!node_used(node))
+				create_remove_node_cmd(qdev,
+						TYPE_UNKNOWN,
+						i + src_phy, 0);
+			++node;
+		}
+#endif
+
+		/* Issue update-preds for each node whose ancestor moved */
+		ances_size = parents_span(qdev->nodes, 0, dst_phy, count,
+				ancestors, MAX_MOVING_NODES);
+		for (i = 0; i < ances_size; ++i)
+			tree_update_predecessors(qdev, ancestors[i]);
+	#if 0
+		for (i = 0; i < suspend_size; ++i)
+			create_resume_cmd(qdev, suspend[i]);
+	#endif
+	}
+}
+
+/******************************************************************************/
+/*                                 Generic                                    */
+/******************************************************************************/
+
+struct qos_node *octet_get_min_sibling_group(
+		const struct pp_qos_dev *qdev,
+		unsigned int octet,
+		const struct qos_node *special_parent,
+		unsigned int *num_of_children)
+{
+	unsigned int min_size;
+	unsigned int phy;
+	unsigned int last_phy;
+	unsigned int num_children;
+	const struct qos_node *cur;
+	struct qos_node *cur_parent;
+	struct qos_node *min_parent;
+
+	min_size = 9;
+	min_parent = NULL;
+
+	phy = octet * 8;
+	last_phy = phy + octet_get_use_count(qdev->octets, octet);
+	while (phy < last_phy) {
+		cur = get_const_node_from_phy(qdev->nodes, phy);
+		cur_parent = get_node_from_phy(qdev->nodes,
+				cur->child_prop.parent_phy);
+		num_children = cur_parent->parent_prop.num_of_children;
+		if ((cur_parent != special_parent) &&
+				(num_children < min_size)) {
+			min_size = num_children;
+			min_parent = cur_parent;
+		} else if ((cur_parent == special_parent) &&
+				(num_children + 1 < min_size)) {
+			min_size = num_children + 1;
+			min_parent = cur_parent;
+		}
+		phy += cur_parent->parent_prop.num_of_children;
+	}
+	*num_of_children = min_size;
+	return min_parent;
+}
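+
+/*
+ * Example: if an octet holds two sibling groups of 3 and 5 children,
+ * the parent of the 3-children group is returned with
+ * *num_of_children == 3. A group owned by special_parent is weighted
+ * one larger, so that the destination octet can accommodate the new
+ * child about to be added as well
+ */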
+
+static void update_octet_usage(struct pp_octets *octets, unsigned int phy,
+		unsigned int delta, unsigned int status)
+{
+	unsigned int octet;
+	unsigned int usage;
+
+	octet = octet_of_phy(phy);
+	usage = octet_get_use_count(octets, octet);
+	if (status == 0)
+		usage -= delta;
+	else
+		usage += delta;
+	octet_set_use_count(octets, octet, usage);
+}
+
+/*
+ * Set used status of count nodes starting from first_phy to status
+ * Verify that current used status is different from the value to be set
+ */
+void nodes_modify_used_status(struct pp_qos_dev *qdev, unsigned int first_phy,
+		unsigned int count, unsigned int status)
+{
+	struct qos_node *cur;
+	unsigned int phy;
+	unsigned int delta;
+
+	QOS_ASSERT(is_all_nodes_used_status(qdev, first_phy, count, !status),
+		   "some nodes' used status is already %u\n", status);
+	delta = 0;
+	for (phy = first_phy; phy < first_phy + count; ++phy) {
+		cur = get_node_from_phy(qdev->nodes, phy);
+		QOS_BITS_TOGGLE(cur->flags, QOS_NODE_FLAGS_USED);
+		++delta;
+		if (octet_phy_offset(phy) == 7) {
+			update_octet_usage(qdev->octets, phy, delta, status);
+			delta = 0;
+		}
+	}
+
+	if (delta)
+		update_octet_usage(qdev->octets, phy - 1, delta, status);
+}
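+
+/*
+ * Example: marking phys 14..17 as used toggles QOS_NODE_FLAGS_USED on
+ * four nodes and bumps the use count of octet 1 by 2 (phys 14, 15) and
+ * of octet 2 by 2 (phys 16, 17), with one update_octet_usage() call
+ * per octet touched
+ */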
+
+/**
+ * octet_nodes_shift() - Shift nodes within an octet
+ * @qdev:
+ * @first_phy: Phy of first node to be shifted
+ * @count: Number of nodes to shift
+ * @shift: Number of places to shift. Negative indicates shift left,
+ *	   positive shift right
+ */
+STATIC_UNLESS_TEST void octet_nodes_shift(struct pp_qos_dev *qdev,
+		unsigned int first_phy,
+		unsigned int count,
+		int shift)
+{
+	if (count != 0 && shift != 0) {
+		QOS_ASSERT(same_octet(first_phy, first_phy + count - 1) &&
+			   same_octet(first_phy, first_phy + shift) &&
+			   same_octet(first_phy, first_phy + shift + count - 1),
+			   "shift %d places of %u nodes from %u crosses octet boundaries\n",
+			   shift, count, first_phy);
+		nodes_move(qdev, first_phy + shift, first_phy, count);
+	}
+}
+
+/*
+ * Delete the node at phy: detach it from its parent, mark it as unused
+ * and close the resulting hole in the octet
+ */
+void node_phy_delete(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	unsigned int usage;
+	unsigned int remainder;
+	struct qos_node *node;
+	struct qos_node *parent;
+	unsigned int id;
+	unsigned int parent_phy;
+	unsigned int update_bw;
+
+	ISSUE_DELETE_NODE(phy);
+	update_bw = 0;
+	usage = octet_get_use_count(qdev->octets, octet_of_phy(phy));
+	node = get_node_from_phy(qdev->nodes, phy);
+
+	QOS_ASSERT(!node_parent(node) || node->parent_prop.num_of_children == 0,
+			"Trying to delete node %u which has children\n", phy);
+
+	if (node_child(node)) {
+		parent_phy = node->child_prop.parent_phy;
+		parent = get_node_from_phy(qdev->nodes, parent_phy);
+		if (node->child_prop.virt_bw_share && node_internal(parent)) {
+			update_bw = 1;
+			id = get_id_from_phy(qdev->mapping, parent_phy);
+		}
+
+		/*
+		 * If this is the last child of a parent then the shift
+		 * operation will not update the parent, since its children
+		 * are not moving - we have to update it manually
+		 */
+		if (phy == parent->parent_prop.first_child_phy +
+				parent->parent_prop.num_of_children - 1) {
+			--parent->parent_prop.num_of_children;
+			create_parent_change_cmd(qdev, parent_phy);
+		}
+	}
+	nodes_modify_used_status(qdev, phy, 1, 0);
+	remainder = usage - (octet_phy_offset(phy) + 1);
+	if (octet_of_phy(phy) > octet_of_phy(qdev->max_port))
+		octet_nodes_shift(qdev, phy + 1, remainder, -1);
+
+	if (update_bw) {
+		phy = get_phy_from_id(qdev->mapping, id);
+		parent = get_node_from_phy(qdev->nodes, phy);
+		update_internal_bandwidth(qdev, parent);
+	}
+}
+
+void release_rlm(struct pp_pool *rlms, unsigned int rlm)
+{
+	pp_pool_put(rlms, rlm);
+}
+
+static void release_node_id(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	unsigned int id;
+
+	id = get_id_from_phy(qdev->mapping, phy);
+	QOS_LOG_DEBUG("Deleting id %u phy %u\n", id, phy);
+	QOS_ASSERT(QOS_ID_VALID(id), "Invalid id for phy %u\n", phy);
+	map_invalidate_id(qdev->mapping, id);
+	pp_pool_put(qdev->ids, id);
+}
+
+int node_remove(struct pp_qos_dev *qdev, struct qos_node *node)
+{
+	unsigned int phy;
+	enum node_type type;
+	unsigned int rlm;
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	type = node->type;
+	QOS_ASSERT(type != TYPE_UNKNOWN, "Unknown node type of phy %u\n",
+			phy);
+
+	if (type == TYPE_QUEUE)
+		rlm = node->data.queue.rlm;
+	else
+		rlm = 0;
+	create_remove_node_cmd(qdev, type, phy, rlm);
+	release_node_id(qdev, phy);
+	if (node_queue(node))
+		release_rlm(qdev->rlms, node->data.queue.rlm);
+
+	node_phy_delete(qdev, get_phy_from_node(qdev->nodes, node));
+	return 0;
+}
+
+int node_flush(struct pp_qos_dev *qdev, struct qos_node *node)
+{
+	ISSUE_FLUSH_NODE(get_phy_from_node(qdev->nodes, node));
+	return 0;
+}
+
+STATIC_UNLESS_TEST unsigned int get_children_bandwidth_share(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *parent);
+void update_internal_bandwidth(const struct pp_qos_dev *qdev,
+		struct qos_node *parent)
+{
+	unsigned int share;
+	const struct qos_node *child;
+	struct qos_node *tmp;
+	unsigned int cnt;
+	unsigned int phy;
+	struct qos_node *internals[10];
+	unsigned int index;
+	unsigned int parent_share;
+
+	tmp = parent;
+	index = 0;
+	do {
+		QOS_ASSERT(node_internal(tmp), "Node is not internal\n");
+		share = get_children_bandwidth_share(qdev, tmp);
+		tmp->child_prop.virt_bw_share = share;
+		internals[index++] = tmp;
+		phy = tmp->child_prop.parent_phy;
+		tmp = get_node_from_phy(qdev->nodes, phy);
+	} while (node_internal(tmp));
+
+	--index;
+	tmp = internals[index];
+	ISSUE_BW_CMD(get_phy_from_node(qdev->nodes, tmp),
+			tmp->child_prop.virt_bw_share, 1U);
+	do {
+		QOS_ASSERT(node_internal(tmp), "Node is not internal\n");
+		parent_share = tmp->child_prop.virt_bw_share;
+
+		child = get_const_node_from_phy(qdev->nodes,
+				tmp->parent_prop.first_child_phy);
+		cnt = tmp->parent_prop.num_of_children;
+		for (; cnt; --cnt) {
+			QOS_ASSERT(node_child(child), "Node is not a child\n");
+			if (child->child_prop.virt_bw_share)
+				ISSUE_BW_CMD(
+					get_phy_from_node(
+						qdev->nodes,
+						child),
+					child->child_prop.virt_bw_share *
+					10000U,
+					parent_share * 100);
+			++child;
+		}
+
+		if (index > 0) {
+			--index;
+			tmp = internals[index];
+		} else {
+			break;
+		}
+	} while (1);
+}
+
+/**
+ * link_with_parent() - Link a new node to a parent
+ * @qdev:
+ * @phy:	The phy of the node
+ * @parent_id:  Id of the parent to link with
+ *
+ * Note: the node is marked as used
+ */
+static void link_with_parent(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int parent_id)
+{
+	struct qos_node *node;
+	struct qos_node *parent;
+	unsigned int first;
+	unsigned int last;
+	unsigned int parent_phy;
+	unsigned int num;
+
+	parent_phy = get_phy_from_id(qdev->mapping, parent_id);
+	parent = get_node_from_phy(qdev->nodes, parent_phy);
+	node = get_node_from_phy(qdev->nodes, phy);
+	node->child_prop.parent_phy = parent_phy;
+
+	num = parent->parent_prop.num_of_children;
+	first = parent->parent_prop.first_child_phy;
+	last = first + num - 1;
+
+	if (num == 0 || phy < first)
+		first = phy;
+
+	if (num == 0 || phy > last)
+		last = phy;
+
+	parent->parent_prop.num_of_children = last - first + 1;
+	parent->parent_prop.first_child_phy = first;
+	nodes_modify_used_status(qdev, phy, 1, 1);
+	create_parent_change_cmd(qdev, parent_phy);
+}
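+
+/*
+ * Example: a parent with first_child_phy == 16 and num_of_children == 2
+ * that is linked with a new child at phy 18 ends up with
+ * first_child_phy == 16 and num_of_children == 3, so the children's phy
+ * range must always stay contiguous
+ */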
+
+struct pp_qos_dev *_qos_init(unsigned int max_port)
+{
+	struct pp_qos_dev *qdev;
+	unsigned int i;
+
+	qdev = QOS_MALLOC(sizeof(struct pp_qos_dev));
+	if (qdev) {
+		memset(qdev, 0, sizeof(struct pp_qos_dev));
+		qdev->max_port = max_port;
+
+		qdev->octets = octets_init(octet_of_phy(max_port));
+		if (qdev->octets == NULL)
+			goto fail;
+
+		qdev->nodes = pp_nodes_init();
+		if (qdev->nodes == NULL)
+			goto fail;
+
+		qdev->ids = free_id_init();
+		if (qdev->ids == NULL)
+			goto fail;
+
+		qdev->rlms = free_rlm_init();
+		if (qdev->rlms == NULL)
+			goto fail;
+
+		qdev->mapping = pp_mapping_init();
+		if (qdev->mapping == NULL)
+			goto fail;
+
+		qdev->queue = pp_queue_init(1024);
+		if (qdev->queue == NULL)
+			goto fail;
+
+		qdev->drvcmds.cmdq = cmd_queue_init();
+		if (qdev->drvcmds.cmdq == NULL)
+			goto fail;
+
+		qdev->drvcmds.pendq = cmd_queue_init();
+		if (qdev->drvcmds.pendq == NULL)
+			goto fail;
+
+		for (i = 0; i <= QOS_MAX_SHARED_BANDWIDTH_GROUP; ++i)
+			qdev->groups[i].used = 0;
+		QOS_LOCK_INIT(qdev);
+	}
+	return qdev;
+fail:
+	_qos_clean(qdev);
+	return NULL;
+}
+
+void _qos_clean(struct pp_qos_dev *qdev)
+{
+	if (qdev) {
+		clean_fwdata_internals(qdev);
+		cmd_queue_clean(qdev->drvcmds.pendq);
+		cmd_queue_clean(qdev->drvcmds.cmdq);
+		pp_queue_clean(qdev->queue);
+		pp_mapping_clean(qdev->mapping);
+		pp_pool_clean(qdev->portsphys);
+		pp_pool_clean(qdev->ids);
+		pp_nodes_clean(qdev->nodes);
+		octets_clean(qdev->octets);
+		QOS_FREE(qdev);
+	}
+}
+
+/******************************************************************************/
+/*                           Tree traversal                                   */
+/******************************************************************************/
+/**
+ * post_order_travers_tree() - Traverse subtree post order - children first,
+ * followed by parents - and apply operation on each conforming node
+ *
+ * @qdev:
+ * @root:	Phy of sub tree's root node
+ * @conform:    Predicate that decides whether a visited node conforms
+ * @cdata:	Data to supply to conform
+ * @operation:  Operation to apply upon each conforming node
+ * @odata:      Data to supply to operation
+ * Return:	Sum of the values returned by the operations applied on the
+ * subtree
+ *
+ * Note - though operation is allowed to modify qdev, it must not modify
+ * the tree topology (e.g. move/shift/delete/add a node), since that
+ * would corrupt the iteration
+ */
+static int post_order_travers_tree(struct pp_qos_dev *qdev,
+				    int root,
+				    int (*conform)(
+					    const struct pp_qos_dev *qdev,
+					    const struct qos_node *node,
+					    void *cdata),
+				    void *cdata,
+				    int (*operation)(
+					    struct pp_qos_dev *qdev,
+					    const struct qos_node *node,
+					    void *odata),
+				    void *odata)
+{
+	unsigned int i;
+	unsigned int phy;
+	unsigned int total;
+	struct qos_node *node;
+
+	total = 0;
+	node = get_node_from_phy(qdev->nodes, root);
+	QOS_ASSERT(node_used(node), "Unused node\n");
+
+	if (node_parent(node)) {
+		for (i = 0; i < node->parent_prop.num_of_children; ++i) {
+			phy = node->parent_prop.first_child_phy + i;
+			total += post_order_travers_tree(qdev, phy, conform,
+					cdata, operation, odata);
+		}
+	}
+	if (conform(qdev, node, cdata))
+		total += operation(qdev, node, odata);
+
+	return total;
+}
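+
+/*
+ * Example (hypothetical phys): for a port at phy 0 with a scheduler at
+ * phy 16 feeding queues at phys 24 and 25, the visit order is
+ * 24, 25, 16, 0 - operation runs on a conforming node only after all
+ * of its descendants were handled
+ */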
+
+struct ids_container_metadata {
+	unsigned int next;
+	uint16_t *ids;
+	unsigned int size;
+};
+
+static int update_ids_container(struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	struct ids_container_metadata *ids;
+	unsigned int phy;
+	uint16_t id;
+
+	ids = (struct ids_container_metadata *)data;
+	phy = get_phy_from_node(qdev->nodes, node);
+	id = get_id_from_phy(qdev->mapping, phy);
+
+	if (ids->next < ids->size) {
+		ids->ids[ids->next] = id;
+		ids->next++;
+	}
+
+	return 1;
+}
+
+static int node_queue_wrapper(const struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	return node_used(node) && (node->type == TYPE_QUEUE);
+}
+
+void get_node_queues(struct pp_qos_dev *qdev,
+		unsigned int phy, uint16_t *queue_ids,
+		unsigned int size, unsigned int *queues_num)
+{
+	struct ids_container_metadata data = {0, queue_ids, size};
+
+	if (queue_ids == NULL)
+		data.size = 0;
+	*queues_num = post_order_travers_tree(qdev, phy,
+			node_queue_wrapper, NULL,
+			update_ids_container, &data);
+}
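+
+/*
+ * Usage sketch (illustrative): collect the ids of all queues below a
+ * node, or just count them by passing a NULL buffer:
+ *
+ *	uint16_t ids[NUM_OF_QUEUES];
+ *	unsigned int n;
+ *
+ *	get_node_queues(qdev, phy, ids, NUM_OF_QUEUES, &n);
+ *	get_node_queues(qdev, phy, NULL, 0, &n);	(count only)
+ */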
+
+static int node_in_grp(const struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	return node_used(node) &&
+		(node->shared_bandwidth_group == (uintptr_t)data);
+}
+
+void get_bw_grp_members_under_node(struct pp_qos_dev *qdev, unsigned int id,
+		unsigned int phy, uint16_t *ids,
+		unsigned int size, unsigned int *ids_num)
+{
+	struct ids_container_metadata data = {0, ids, size};
+
+	if (ids == NULL)
+		data.size = 0;
+	*ids_num = post_order_travers_tree(qdev, phy, node_in_grp,
+			(void *)(uintptr_t)id, update_ids_container, &data);
+}
+
+static int update_predecessors(struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	create_update_preds_cmd(qdev, get_phy_from_node(qdev->nodes, node));
+	return 1;
+}
+
+static int node_child_wrapper(const struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	return node_child(node);
+}
+
+void tree_update_predecessors(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	post_order_travers_tree(qdev, phy, node_child_wrapper,
+			NULL, update_predecessors, NULL);
+}
+
+#if 0
+static int node_virtual_child_of(const struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	return node_child(node) && !node_internal(node) &&
+		((uintptr_t)data == get_virtual_parent_phy(qdev->nodes, node));
+}
+
+static int add_bandwidth_share(struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		void *data)
+{
+	QOS_ASSERT(node_child(node), "Node is not a child\n");
+	return node->child_prop.virt_bw_share;
+}
+
+
+/*
+ * Return the sum of bandwidth share of all desendants
+ * whose virtual parent is parent
+ * Return 0 if parent is internal
+ */
+STATIC_UNLESS_TEST unsigned int get_virt_children_bandwidth_share(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *parent)
+{
+	int phy;
+	int total;
+	struct pp_qos_dev *_qdev;
+
+	total = 0;
+	if (node_parent(parent) && !node_internal(parent)) {
+		_qdev = (struct pp_qos_dev *)qdev;
+		phy = get_phy_from_node(qdev->nodes, parent);
+		total = post_order_travers_tree(_qdev, phy,
+				node_virtual_child_of,
+				(void *)(uintptr_t)phy,
+				add_bandwidth_share, NULL);
+	}
+	return total;
+}
+#endif
+
+/*
+ * Return the sum of bandwidth share of all direct children of parent
+ */
+STATIC_UNLESS_TEST unsigned int get_children_bandwidth_share(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *parent)
+{
+	unsigned int total;
+	unsigned int num;
+	const struct qos_node *child;
+
+	total = 0;
+	if (node_parent(parent)) {
+		num = parent->parent_prop.num_of_children;
+		if (num > 0)
+			child = get_const_node_from_phy(qdev->nodes,
+					parent->parent_prop.first_child_phy);
+		for (; num; --num) {
+			total += child->child_prop.virt_bw_share;
+			++child;
+		}
+	}
+	return total;
+}
+
+static int node_remove_wrapper(
+		struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	uint16_t phy;
+	uint16_t id;
+	int rc;
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	id = get_id_from_phy(qdev->mapping, phy);
+	rc = pp_queue_enqueue(qdev->queue, id);
+	QOS_ASSERT(rc == 0, "Could not enqueue\n");
+	return rc;
+}
+
+static int node_flush_wrapper(
+		struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	uint16_t phy;
+	uint16_t id;
+	int rc;
+
+	phy = get_phy_from_node(qdev->nodes, node);
+	id = get_id_from_phy(qdev->mapping, phy);
+	rc = _pp_qos_queue_flush(qdev, id);
+	QOS_ASSERT(rc == 0, "Could not flush queue\n");
+	return rc;
+}
+
+static int node_modify_blocked_status(
+		struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	uint16_t phy;
+	uint16_t id;
+	unsigned int status;
+	int rc;
+
+	status = (unsigned int)(uintptr_t)data;
+	phy = get_phy_from_node(qdev->nodes, node);
+	id = get_id_from_phy(qdev->mapping, phy);
+	if (status)
+		rc = _pp_qos_queue_block(qdev, id);
+	else
+		rc = _pp_qos_queue_unblock(qdev, id);
+	QOS_ASSERT(rc == 0, "Could not modify queue blocked status\n");
+	return rc;
+}
+
+static int node_used_wrapper(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node, void *data)
+{
+	return node_used(node);
+}
+
+int tree_remove(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	struct qos_node *node;
+	unsigned int id;
+	unsigned int tmp;
+	int rc;
+
+	pp_queue_reset(qdev->queue);
+	rc = post_order_travers_tree(qdev, phy, node_used_wrapper, NULL,
+			node_remove_wrapper, NULL);
+	if (rc) {
+		QOS_LOG("Error while trying to delete subtree whose root is %u\n",
+				phy);
+		return -EBUSY;
+	}
+	id = pp_queue_dequeue(qdev->queue);
+	while (QOS_ID_VALID(id)) {
+		tmp = get_phy_from_id(qdev->mapping, id);
+		node = get_node_from_phy(qdev->nodes, tmp);
+		node_remove(qdev, node);
+		id = pp_queue_dequeue(qdev->queue);
+	}
+
+	return 0;
+}
+
+int tree_flush(struct pp_qos_dev *qdev, unsigned int phy)
+{
+	int rc;
+
+	rc = post_order_travers_tree(qdev, phy, node_queue_wrapper,
+			NULL, node_flush_wrapper, NULL);
+	if (rc) {
+		QOS_LOG("Unexpected error while flush subtree root is %u\n",
+				phy);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+int tree_modify_blocked_status(
+		struct pp_qos_dev *qdev,
+		unsigned int phy,
+		unsigned int status)
+{
+	int rc;
+
+	rc = post_order_travers_tree(qdev,
+			phy, node_queue_wrapper,
+			NULL, node_modify_blocked_status,
+			(void *)(uintptr_t)status);
+	if (rc) {
+		QOS_LOG_ERR("Error when change blocked status to %u on tree with root %u\n",
+				status, phy);
+		rc =  -EBUSY;
+	}
+	return rc;
+}
+
+/******************************************************************************/
+/*                           Configuration                                    */
+/******************************************************************************/
+
+#ifdef MAY_USE_IN_THE_FUTURE
+static int shared_group_defined(
+		const struct pp_qos_dev *qdev,
+		unsigned int group_id)
+{
+	QOS_ASSERT(group_id <= QOS_MAX_SHARED_BANDWIDTH_GROUP,
+			"illegal shared group %u\n",
+		   group_id);
+	return (qdev->groups[group_id].limit != QOS_NO_BANDWIDTH_LIMIT);
+}
+#endif
+
+static int common_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node)
+{
+	unsigned int shared;
+
+	if (!(node->bandwidth_limit == QOS_NO_BANDWIDTH_LIMIT ||
+				node->bandwidth_limit <=
+				QOS_MAX_BANDWIDTH_LIMIT)) {
+		QOS_LOG("Invalid bandwidth limit %u\n", node->bandwidth_limit);
+		return 0;
+	}
+
+	shared = node->shared_bandwidth_group;
+	if (!(
+				(shared == QOS_NO_SHARED_BANDWIDTH_GROUP) ||
+				(shared <= QOS_MAX_SHARED_BANDWIDTH_GROUP &&
+				qdev->groups[shared].used)
+	     )) {
+		QOS_LOG("Invalid shared bandwidth group %u\n",
+				node->shared_bandwidth_group);
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * If this is a new node
+ *     node.child.parent.phy == virtual parent phy
+ * Else
+ *     node.child.parent.phy is either the virtual parent phy (if the
+ *     user is changing the parent) or the actual parent (if the user
+ *     is not changing the parent)
+ */
+static int child_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		const struct qos_node *orig_node,
+		unsigned int prev_virt_parent_phy)
+{
+	const struct qos_node *parent;
+	unsigned int tmp;
+	struct qos_node *child;
+	unsigned int parent_phy;
+	unsigned int i;
+	unsigned int bw;
+	unsigned int cur_virt_parent_phy;
+
+	QOS_ASSERT(node_child(node), "node is not a child\n");
+	parent_phy = node->child_prop.parent_phy;
+	if (parent_phy > NUM_OF_NODES - 1) {
+		QOS_LOG("Illegal parent %u\n", parent_phy);
+		return 0;
+	}
+
+	parent = get_const_node_from_phy(qdev->nodes, parent_phy);
+	if (!node_parent(parent)) {
+		QOS_LOG("Node's parent %u is not a parent\n", parent_phy);
+		return 0;
+	}
+
+	if (parent->parent_prop.arbitration == PP_QOS_ARBITRATION_WSP) {
+		tmp = node->child_prop.priority;
+		if (tmp > QOS_MAX_CHILD_PRIORITY) {
+			QOS_LOG("Illegal node priority %u\n", tmp);
+			return 0;
+		}
+		i = parent->parent_prop.num_of_children;
+
+		if (i == 8) {
+			QOS_LOG("WSP parent can have upto 8 children\n");
+			return 0;
+		} else if (i > 0) {
+			child = get_node_from_phy(
+					qdev->nodes,
+					parent->parent_prop.first_child_phy);
+			for (; i > 0; --i, ++child) {
+				if ((tmp == child->child_prop.priority) &&
+						(orig_node != child)) {
+					QOS_LOG_ERR("Child prio %u exists\n",
+							tmp);
+					return 0;
+				}
+			}
+		}
+	}
+
+	/* Find current virtual parent and bandwidth of its direct children */
+	if (!node_internal(parent))
+		cur_virt_parent_phy = parent_phy;
+	else
+		cur_virt_parent_phy = get_virtual_parent_phy(
+				qdev->nodes,
+				parent);
+
+	bw = get_children_bandwidth_share(qdev,
+			get_node_from_phy(qdev->nodes, cur_virt_parent_phy));
+
+	/*
+	 * If the child is not changing its virtual parent then we need
+	 * to subtract its bandwidth share, so it is not counted twice
+	 */
+	if (cur_virt_parent_phy == prev_virt_parent_phy)
+		bw -= node->child_prop.virt_bw_share;
+
+	if (bw + node->child_prop.virt_bw_share > 100) {
+		QOS_LOG_ERR("Parent already gave %u bandwidth, can't add %u\n",
+			bw, node->child_prop.virt_bw_share);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int parent_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node)
+{
+	unsigned int tmp;
+
+	QOS_ASSERT(node_parent(node), "node is not a parent\n");
+	if (node->parent_prop.num_of_children > 8) {
+		QOS_LOG("node has %u children but max allowed is 8\n",
+				node->parent_prop.num_of_children);
+		return 0;
+	}
+	tmp = node->parent_prop.first_child_phy;
+	if ((node->parent_prop.num_of_children > 0) &&
+	    (tmp <= qdev->max_port || tmp > NUM_OF_NODES - 1)) {
+		QOS_LOG("node has %u children but first child %u is illegal\n",
+			node->parent_prop.num_of_children, tmp);
+		return 0;
+	}
+	return 1;
+}
+
+int node_cfg_valid(
+		const struct pp_qos_dev *qdev,
+		const struct qos_node *node,
+		const struct qos_node *orig_node,
+		unsigned int prev_virt_parent_phy)
+{
+	if (!common_cfg_valid(qdev, node))
+		return 0;
+	if (node_parent(node) && !parent_cfg_valid(qdev, node))
+		return 0;
+	if (node_child(node) &&
+			!child_cfg_valid(qdev, node,
+				orig_node, prev_virt_parent_phy))
+		return 0;
+	return 1;
+}
+
+unsigned int get_virtual_parent_phy(
+		const struct pp_nodes *nodes,
+		const struct qos_node *child)
+{
+	const struct qos_node *tmp;
+	unsigned int phy;
+
+	tmp = child;
+	do {
+		QOS_ASSERT(node_child(tmp), "Node is not a child\n");
+		phy = tmp->child_prop.parent_phy;
+		tmp = get_const_node_from_phy(nodes, phy);
+	} while (node_internal(tmp));
+	return phy;
+}
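+
+/*
+ * Example: for a queue whose parent chain is
+ * queue -> internal sched -> internal sched -> port, the returned phy
+ * is that of the port, the first non-internal ancestor
+ */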
+
+int get_node_prop(const struct pp_qos_dev *qdev,
+		  const struct qos_node *node,
+		  struct pp_qos_common_node_properties *common,
+		  struct pp_qos_parent_node_properties *parent,
+		  struct pp_qos_child_node_properties *child)
+{
+
+	QOS_ASSERT(node_used(node), "Node is not used\n");
+
+	if (common) {
+		common->suspended =
+			!!QOS_BITS_IS_SET(node->flags,
+					QOS_NODE_FLAGS_SUSPENDED);
+		common->bandwidth_limit = node->bandwidth_limit;
+		common->shared_bandwidth_group = node->shared_bandwidth_group;
+	}
+
+	if (parent) {
+		parent->arbitration = node->parent_prop.arbitration;
+		parent->best_effort_enable =
+			!!QOS_BITS_IS_SET(node->flags,
+					QOS_NODE_FLAGS_PARENT_BEST_EFFORT_ENABLE
+					);
+	}
+
+	if (child) {
+		/*
+		 * Internal schedulers are transparent to clients. Clients
+		 * see only the virtual parent - the first parent in the
+		 * hierarchy which is not an internal scheduler
+		 */
+		child->parent = get_id_from_phy(qdev->mapping,
+				get_virtual_parent_phy(qdev->nodes, node));
+		child->priority = node->child_prop.priority;
+
+		child->bandwidth_share = node->child_prop.virt_bw_share;
+	}
+
+	return 0;
+}
+
+static int set_common(struct pp_qos_dev *qdev,
+		      struct qos_node *node,
+		      const struct pp_qos_common_node_properties *common,
+		      uint32_t *modified)
+{
+	unsigned int grp_id;
+
+	if (!!(common->suspended) !=
+			!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_SUSPENDED)) {
+		QOS_BITS_TOGGLE(node->flags, QOS_NODE_FLAGS_SUSPENDED);
+		QOS_BITS_SET(*modified, QOS_MODIFIED_SUSPENDED);
+	}
+
+	if (node->bandwidth_limit != common->bandwidth_limit) {
+		node->bandwidth_limit = common->bandwidth_limit;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_BANDWIDTH_LIMIT);
+	}
+
+	grp_id = common->shared_bandwidth_group;
+	if (grp_id != node->shared_bandwidth_group) {
+		node->shared_bandwidth_group = grp_id;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_SUSPENDED);
+	}
+	return 0;
+}
+
+static int set_parent(struct pp_qos_dev *qdev,
+		      struct qos_node *node,
+		      const struct pp_qos_parent_node_properties *parent,
+		      uint32_t *modified)
+{
+	if (node->parent_prop.arbitration != parent->arbitration) {
+		node->parent_prop.arbitration = parent->arbitration;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_ARBITRATION);
+	}
+
+	if (!!(parent->best_effort_enable) !=
+			!!QOS_BITS_IS_SET(node->flags,
+				QOS_NODE_FLAGS_PARENT_BEST_EFFORT_ENABLE)) {
+		QOS_BITS_TOGGLE(node->flags,
+				QOS_NODE_FLAGS_PARENT_BEST_EFFORT_ENABLE);
+		QOS_BITS_SET(*modified, QOS_MODIFIED_BEST_EFFORT);
+	}
+	return 0;
+}
+
+static int set_child(struct pp_qos_dev *qdev,
+		     struct qos_node *node,
+		     const struct pp_qos_child_node_properties *child,
+		     uint32_t *modified)
+{
+	unsigned int conf_parent_phy;
+	unsigned int virt_parent_phy;
+
+	/* Equals the virtual parent phy, since the client is not aware of
+	 * internal schedulers - they are transparent to it
+	 */
+	conf_parent_phy = get_phy_from_id(qdev->mapping, child->parent);
+	virt_parent_phy = node->child_prop.parent_phy;
+
+	if (QOS_PHY_VALID(virt_parent_phy))
+		virt_parent_phy = get_virtual_parent_phy(qdev->nodes, node);
+
+	if (virt_parent_phy != conf_parent_phy) {
+		node->child_prop.parent_phy = conf_parent_phy;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_PARENT);
+	}
+
+	if (node->child_prop.priority != child->priority) {
+		node->child_prop.priority = child->priority;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_PRIORITY);
+	}
+
+	if (node->child_prop.virt_bw_share != child->bandwidth_share) {
+		node->child_prop.virt_bw_share = child->bandwidth_share;
+		QOS_BITS_SET(*modified, QOS_MODIFIED_VIRT_BW_SHARE);
+	}
+
+	return 0;
+}
+
+int set_node_prop(struct pp_qos_dev *qdev,
+		  struct qos_node *node,
+		  const struct pp_qos_common_node_properties *common,
+		  const struct pp_qos_parent_node_properties *parent,
+		  const struct pp_qos_child_node_properties *child,
+		  uint32_t *modified)
+{
+	int rc;
+
+	if (common) {
+		rc = set_common(qdev, node, common, modified);
+		if (rc)
+			return rc;
+	}
+	if (parent) {
+		rc = set_parent(qdev, node, parent, modified);
+		if (rc)
+			return rc;
+	}
+	if (child)
+		return set_child(qdev, node, child, modified);
+
+	return 0;
+}
+
+static void node_parent_init(struct qos_node *node)
+{
+	node->parent_prop.first_child_phy = QOS_INVALID_PHY;
+	node->parent_prop.num_of_children = 0;
+	node->parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+}
+
+static void node_child_init(struct qos_node *node)
+{
+	node->child_prop.parent_phy = QOS_INVALID_PHY;
+	node->child_prop.priority = 0;
+	node->child_prop.virt_bw_share = 0;
+}
+
+static void node_common_init(struct qos_node *node)
+{
+	node->flags = 0;
+	node->bandwidth_limit = QOS_NO_BANDWIDTH_LIMIT;
+	node->shared_bandwidth_group = QOS_NO_SHARED_BANDWIDTH_GROUP;
+}
+
+void node_init(const struct pp_qos_dev *qdev,
+	       struct qos_node *node,
+	       unsigned int common,
+	       unsigned int parent,
+	       unsigned int child)
+{
+	memset(node, 0, sizeof(struct qos_node));
+
+	node->type = TYPE_UNKNOWN;
+	if (common)
+		node_common_init(node);
+
+	if (parent)
+		node_parent_init(node);
+
+	if (child)
+		node_child_init(node);
+}
+
+/******************************************************************************/
+/*                              ALLOC PHY                                     */
+/******************************************************************************/
+
+/**
+ * Return the phy where a child whose priority is child_priority should
+ * be placed. If the parent is WRR then priority is ignored and the new
+ * child is given a phy after the current last child
+ *
+ * Assumption: parent's children are held on a non-full octet
+ */
+static unsigned int calculate_new_child_location(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		unsigned int child_priority)
+{
+	unsigned int phy;
+	struct qos_node *cur;
+	unsigned int i;
+
+	QOS_ASSERT(node_parent(parent), "node is not a parent\n");
+	if (parent->parent_prop.arbitration == PP_QOS_ARBITRATION_WRR) {
+		phy = parent->parent_prop.first_child_phy +
+			parent->parent_prop.num_of_children;
+	} else {
+		i = parent->parent_prop.num_of_children;
+		cur = get_node_from_phy(qdev->nodes,
+				parent->parent_prop.first_child_phy);
+		while ((i > 0) && (cur->child_prop.priority < child_priority)) {
+			--i;
+			++cur;
+		}
+		QOS_ASSERT((i == 0) ||
+				(cur->child_prop.priority != child_priority),
+			   "Trying to add a child with existing priority %u to wsp parent %u\n",
+			   child_priority,
+			   get_phy_from_node(qdev->nodes, parent));
+		phy = get_phy_from_node(qdev->nodes, cur);
+	}
+	return phy;
+}
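+
+/*
+ * Example: a WSP parent with children of priorities {0, 2, 5} at phys
+ * 16..18 receives a new child of priority 3. The scan stops at the
+ * priority-5 child, so the function returns phy 18 and the caller
+ * shifts phys 18.. one place right to open the slot
+ */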
+
+/**
+ * children_on_non_full_octet() - Allocate a phy for a new child whose
+ * siblings are held on a non-full octet.
+ * @qdev:
+ * @parent:
+ * @usage:	    Number of used nodes on children's octet
+ * @child_priority: New child priority (relevant only for WSP parent)
+ * Return:	    New allocated phy
+ */
+static unsigned int children_on_non_full_octet(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		unsigned int usage,
+		unsigned int child_priority)
+{
+	unsigned int phy;
+
+	phy = calculate_new_child_location(qdev, parent, child_priority);
+	octet_nodes_shift(qdev, phy, usage - octet_phy_offset(phy), 1);
+	return phy;
+}
+
+static unsigned int children_on_non_full_octet_wrapper(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		unsigned int child_priority)
+{
+	unsigned int usage;
+	unsigned int octet;
+
+	QOS_ASSERT(node_parent(parent),
+			"Node %u is not a parent\n",
+			get_phy_from_node(qdev->nodes, parent));
+	octet = octet_of_phy(parent->parent_prop.first_child_phy);
+	usage = octet_get_use_count(qdev->octets, octet);
+	QOS_ASSERT(usage < 8, "Octet is full\n");
+	return children_on_non_full_octet(qdev, parent, usage, child_priority);
+}
+
+/**
+ * has_less_than_8_children_on_full_octet() - Allocate a phy for a new
+ * child whose siblings are held on a full octet.
+ * @qdev:
+ * @parent:
+ * @children_octet:
+ * @child_priority:
+ * Return: New allocated phy
+ */
+static unsigned int has_less_than_8_children_on_full_octet(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		uint8_t children_octet,
+		unsigned int child_priority)
+{
+	struct qos_node *min_parent;
+	unsigned int octet;
+	unsigned int phy;
+	unsigned int num_of_required_entries;
+	unsigned int num_of_nodes_to_move;
+	unsigned int dst_phy;
+	unsigned int src_phy;
+	unsigned int parent_id;
+
+	parent_id = get_id_from_phy(qdev->mapping,
+			get_phy_from_node(qdev->nodes, parent));
+	QOS_ASSERT(octet_get_use_count(qdev->octets, children_octet) == 8,
+			"Octet %d is not full\n", children_octet);
+	min_parent = octet_get_min_sibling_group(qdev, children_octet,
+			parent, &num_of_required_entries);
+	QOS_ASSERT(min_parent != NULL, "Can't find min_parent for octet %d\n",
+			children_octet);
+	octet = octet_get_with_at_least_free_entries(
+			qdev->octets,
+			num_of_required_entries);
+	if (!QOS_OCTET_VALID(octet)) {
+		QOS_LOG("could not find free octet\n");
+		return QOS_INVALID_PHY;
+	}
+
+	if (parent == min_parent)
+		num_of_nodes_to_move = num_of_required_entries - 1;
+	else
+		num_of_nodes_to_move = num_of_required_entries;
+
+	/* move children of min_parent to new octet */
+	dst_phy = 8 * octet + octet_get_use_count(qdev->octets, octet);
+	src_phy = min_parent->parent_prop.first_child_phy;
+	nodes_move(qdev, dst_phy, src_phy, num_of_nodes_to_move);
+
+	/*
+	 * shift original octet if necessary i.e. if last moved
+	 * node was not last node on octet
+	 */
+	src_phy += num_of_nodes_to_move;
+	if (octet_phy_offset(src_phy - 1) != 7)
+		octet_nodes_shift(qdev, src_phy,
+				8 - octet_phy_offset(src_phy),
+				-num_of_nodes_to_move);
+
+	parent = get_node_from_phy(qdev->nodes,
+			get_phy_from_id(qdev->mapping, parent_id));
+	phy = children_on_non_full_octet_wrapper(qdev, parent, child_priority);
+	return phy;
+}
+
+/**
+ * phy_alloc_parent_has_less_than_8_children() - Allocate a phy
+ * for a new child whose parent has less than 8 children
+ * @qdev:
+ * @parent:
+ * @child_priority:
+ * Return:
+ */
+static unsigned int phy_alloc_parent_has_less_than_8_children(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		unsigned int child_priority)
+{
+	unsigned int phy;
+	unsigned int octet;
+	unsigned int usage;
+	unsigned int parent_id;
+
+	QOS_ASSERT(node_parent(parent) &&
+			(parent->parent_prop.num_of_children < 8),
+		   "Node %u is not a parent with less than 8 children\n",
+		   get_phy_from_node(qdev->nodes, parent));
+
+	parent_id = get_id_from_phy(qdev->mapping,
+			get_phy_from_node(qdev->nodes, parent));
+	if (parent->parent_prop.num_of_children == 0) {
+		octet = octet_get_with_at_least_free_entries(qdev->octets, 1);
+		if (!QOS_OCTET_VALID(octet)) {
+			QOS_LOG("could not find free octet\n");
+			return QOS_INVALID_PHY;
+		}
+		phy = octet * 8 + octet_get_use_count(qdev->octets, octet);
+	} else {
+		octet = octet_of_phy(parent->parent_prop.first_child_phy);
+		usage = octet_get_use_count(qdev->octets, octet);
+		if (usage < 8)
+			phy = children_on_non_full_octet(
+					qdev,
+					parent,
+					usage,
+					child_priority);
+		else
+			phy = has_less_than_8_children_on_full_octet(
+					qdev,
+					parent,
+					octet,
+					child_priority);
+	}
+	if (QOS_PHY_VALID(phy))
+		link_with_parent(qdev, phy, parent_id);
+	return phy;
+}
+
+/**
+ * create_internal_scheduler_on_node() - Create internal scheduler
+ * The original node is moved to another octet (which has at least 2 free slots
+ * and can accommodate the original node and a new child) and becomes a child of
+ * the created internal scheduler
+ * @qdev:
+ * @node: The node where the internal scheduler should be placed
+ * Return:
+ */
+static unsigned int create_internal_scheduler_on_node(
+		struct pp_qos_dev *qdev,
+		struct qos_node *node)
+{
+	struct qos_node *new_node;
+	unsigned int octet;
+	unsigned int new_phy;
+	unsigned int phy;
+	unsigned int id;
+	uint32_t modified;
+	struct pp_qos_sched_conf conf;
+
+	octet = octet_get_with_at_least_free_entries(qdev->octets, 2);
+	if (!QOS_OCTET_VALID(octet)) {
+		QOS_LOG("could not find free octet\n");
+		return  QOS_INVALID_PHY;
+	}
+
+	/*
+	 * The assertions on nodes_move, especially the one stating that
+	 * if a child moves to another octet then its siblings must move
+	 * too, prevent using move in this clone scenario
+	 */
+
+	/* find a place for a new node */
+	new_phy = octet * 8 + octet_get_use_count(qdev->octets, octet);
+	QOS_ASSERT(QOS_PHY_VALID(new_phy + 1),
+			"%u is not a valid phy\n", new_phy + 1);
+	new_node = get_node_from_phy(qdev->nodes, new_phy);
+	QOS_ASSERT(!node_used(new_node), "Node is used\n");
+	nodes_modify_used_status(qdev, new_phy, 1, 1);
+
+	/* update children to point to new node */
+	phy = get_phy_from_node(qdev->nodes, node);
+	id = get_id_from_phy(qdev->mapping, phy);
+	map_id_phy(qdev->mapping, id, new_phy);
+	if (node_parent(node))
+		node_update_children(qdev,
+				get_phy_from_node(qdev->nodes, node),
+				new_phy);
+	memcpy(new_node, node, sizeof(struct qos_node));
+	new_node->child_prop.parent_phy = phy;
+	create_move_cmd(qdev, new_phy, phy, get_port(qdev->nodes, new_phy));
+
+	/* virtual bw share and parent_phy remain */
+	node->flags = 0;
+	QOS_BITS_SET(node->flags,
+			QOS_NODE_FLAGS_USED | QOS_NODE_FLAGS_INTERNAL);
+	node->type = TYPE_SCHED;
+	node->parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+	node->parent_prop.num_of_children = 1;
+	node->parent_prop.first_child_phy = new_phy;
+	id = pp_pool_get(qdev->ids);
+	QOS_ASSERT(QOS_ID_VALID(id), "Got invalid id\n");
+	map_id_phy(qdev->mapping, id, phy);
+
+	get_node_prop(qdev, node, &conf.common_prop,
+			&conf.sched_parent_prop,
+			&conf.sched_child_prop);
+	modified = 0;
+	QOS_BITS_SET(modified,
+			QOS_MODIFIED_NODE_TYPE | QOS_MODIFIED_SUSPENDED |
+			QOS_MODIFIED_BANDWIDTH_LIMIT |
+			QOS_MODIFIED_SHARED_GROUP_LIMIT |
+			QOS_MODIFIED_PRIORITY | QOS_MODIFIED_VIRT_BW_SHARE |
+			QOS_MODIFIED_PARENT | QOS_MODIFIED_ARBITRATION |
+			QOS_MODIFIED_BEST_EFFORT);
+
+	create_set_sched_cmd(qdev, &conf, phy,
+			node->child_prop.parent_phy, modified);
+
+	/* The new node will be put right after the moved node */
+	++new_phy;
+	link_with_parent(qdev, new_phy, id);
+
+	tree_update_predecessors(qdev, phy);
+
+	return new_phy;
+}
+
+/**
+ * phy_alloc_for_new_child_parent_has_8_children() - Allocate a phy for a new
+ * child whose parent has 8 children
+ * @qdev:
+ * @parent:
+ * Return:
+ */
+static unsigned int phy_alloc_for_new_child_parent_has_8_children(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent)
+{
+	unsigned int first_phy;
+	struct qos_node *child;
+	struct qos_node *last_child;
+	unsigned int phy;
+	unsigned int parent_phy;
+	unsigned int i;
+
+	phy = QOS_INVALID_PHY;
+
+	QOS_ASSERT(node_parent(parent) &&
+		   (parent->parent_prop.num_of_children == 8) &&
+		   (parent->parent_prop.arbitration == PP_QOS_ARBITRATION_WRR),
+		   "Node %u is not a WRR parent with 8 children\n",
+		   get_phy_from_node(qdev->nodes, parent));
+
+	first_phy = parent->parent_prop.first_child_phy;
+	child = get_node_from_phy(qdev->nodes, first_phy);
+	last_child = child + 7;
+	for (; child <= last_child; ++child) {
+		if (node_internal(child) &&
+				(child->parent_prop.num_of_children < 8)) {
+			return
+				phy_alloc_parent_has_less_than_8_children(
+					qdev,
+					child,
+					QOS_INVALID_PRIORITY);
+		}
+	}
+
+	child = get_node_from_phy(qdev->nodes, first_phy);
+	while (child < last_child && node_internal(child))
+		++child;
+	if (!node_internal(child))
+		return create_internal_scheduler_on_node(qdev, child);
+
+	/*
+	 * If we reach this point all children are full internal schedulers.
+	 * We do a breadth-first traversal of the tree, i.e. look for a
+	 * place for the child under the internal scheduler children of the
+	 * parent. For this we need a queue
+	 */
+	for (i = 0; i < 8; ++i) {
+		if (pp_queue_enqueue(qdev->queue, first_phy + i)) {
+			QOS_LOG("Queue is full\n");
+			return QOS_INVALID_PHY;
+		}
+	}
+
+	parent_phy = pp_queue_dequeue(qdev->queue);
+	while (QOS_PHY_VALID(parent_phy)) {
+		phy = phy_alloc_for_new_child_parent_has_8_children(
+				qdev,
+				get_node_from_phy(qdev->nodes, parent_phy));
+		if (QOS_PHY_VALID(phy))
+			return phy;
+		/* No slot under this candidate, try the next queued one */
+		parent_phy = pp_queue_dequeue(qdev->queue);
+	}
+
+	return phy;
+}
+
+unsigned int phy_alloc_by_parent(
+		struct pp_qos_dev *qdev,
+		struct qos_node *parent,
+		unsigned int child_priority)
+{
+	unsigned int phy;
+
+	if (parent->parent_prop.num_of_children < 8) {
+		phy = phy_alloc_parent_has_less_than_8_children(
+				qdev,
+				parent,
+				child_priority);
+	} else {
+		pp_queue_reset(qdev->queue);
+		phy = phy_alloc_for_new_child_parent_has_8_children(qdev,
+				parent);
+	}
+
+	return phy;
+}
+
diff --git a/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.h b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.h
new file mode 100644
index 000000000..5572940fb
--- /dev/null
+++ b/drivers/net/ethernet/lantiq/ppv4/qos/pp_qos_utils.h
@@ -0,0 +1,675 @@
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef PP_QOS_UTILS_H
+#define PP_QOS_UTILS_H
+
+#include "pp_qos_common.h"
+#include "pp_qos_drv.h"
+
+/******************************************************************************/
+/*                              CONF                                          */
+/******************************************************************************/
+#define PPV4_QOS_DRV_VER "1.0.0-fl"
+
+#define NUM_OF_NODES	2048U
+#define NUM_OF_QUEUES	256U
+#define NUM_OF_OCTETS (NUM_OF_NODES / 8)
+#define QOS_MAX_SHARED_BW_GRP QOS_MAX_SHARED_BANDWIDTH_GROUP
+
+#define MAX_QOS_INSTANCES 1U
+
+/* For calculating the number of DDR pages for the QM from resource size */
+#define PPV4_QOS_PAGE_SIZE (4096U)
+#define PPV4_QOS_QM_BUF_SIZE (PPV4_QOS_PAGE_SIZE * 100)
+
+#define PPV4_QOS_LOGGER_MSG_SIZE (128U)
+#define PPV4_QOS_LOGGER_BUF_SIZE (PPV4_QOS_LOGGER_MSG_SIZE * 1024U)
+#define PPV4_QOS_STAT_SIZE	(1024U)
+
+#define PPV4_QOS_IVT_START (0x48000U)
+
+#define FIRMWARE_FILE "ppv4-qos-fw.bin"
+/******************************************************************************/
+/*                              DEFINES                                       */
+/******************************************************************************/
+
+#define QOS_INVALID_OCTET   0xFFFFU
+#define QOS_INVALID_ID      0xFFFFU
+#define QOS_INVALID_RLM     0xFFFFU
+#define QOS_INVALID_PHY     0xFFFFU
+#define QOS_UNKNOWN_PHY     0xFFFEU
+
+#define QOS_OCTET_VALID(octet)	((octet) < NUM_OF_OCTETS)
+#define QOS_ID_VALID(id)	((id) < NUM_OF_NODES)
+#define QOS_PHY_VALID(phy)	((phy) < NUM_OF_NODES)
+#define QOS_PHY_UNKNOWN(phy)	((phy) == QOS_UNKNOWN_PHY)
+#define QOS_BW_GRP_VALID(id)	((id) && ((id) <= QOS_MAX_SHARED_BW_GRP))
+
+#define QOS_MODIFIED_NODE_TYPE			BIT(0)
+#define QOS_MODIFIED_SUSPENDED			BIT(1)
+#define QOS_MODIFIED_BANDWIDTH_LIMIT		BIT(2)
+#define QOS_MODIFIED_SHARED_GROUP_LIMIT		BIT(3)
+#define QOS_MODIFIED_ARBITRATION		BIT(5)
+#define QOS_MODIFIED_BEST_EFFORT		BIT(6)
+#define QOS_MODIFIED_PRIORITY			BIT(7)
+#define QOS_MODIFIED_VIRT_BW_SHARE		BIT(8)
+#define QOS_MODIFIED_PARENT			BIT(9)
+#define QOS_MODIFIED_RING_ADDRESS		BIT(10)
+#define QOS_MODIFIED_RING_SIZE			BIT(11)
+#define QOS_MODIFIED_CREDIT			BIT(12)
+#define QOS_MODIFIED_MAX_BURST			BIT(15)
+#define QOS_MODIFIED_BLOCKED			BIT(16)
+#define QOS_MODIFIED_WRED_ENABLE		BIT(17)
+#define QOS_MODIFIED_WRED_MIN_GREEN		BIT(18)
+#define QOS_MODIFIED_WRED_MAX_GREEN		BIT(19)
+#define QOS_MODIFIED_WRED_SLOPE_GREEN		BIT(20)
+#define QOS_MODIFIED_WRED_MIN_YELLOW		BIT(21)
+#define QOS_MODIFIED_WRED_MAX_YELLOW		BIT(22)
+#define QOS_MODIFIED_WRED_SLOPE_YELLOW		BIT(23)
+#define QOS_MODIFIED_WRED_MAX_ALLOWED		BIT(24)
+#define QOS_MODIFIED_WRED_MIN_GUARANTEED	BIT(25)
+#define QOS_MODIFIED_RLM			BIT(26)
+#define QOS_MODIFIED_WRED_FIXED_DROP_PROB_ENABLE	BIT(27)
+#define QOS_MODIFIED_WRED_FIXED_GREEN_PROB	BIT(28)
+#define QOS_MODIFIED_WRED_FIXED_YELLOW_PROB	BIT(29)
+
+
+/******************************************************************************/
+/*                              TYPES                                         */
+/******************************************************************************/
+
+#define NODE_TYPE(OP)		\
+	OP(TYPE_UNKNOWN)	\
+	OP(TYPE_PORT)		\
+	OP(TYPE_SCHED)		\
+	OP(TYPE_QUEUE)
+
+enum node_type {
+	NODE_TYPE(GEN_ENUM)
+};
+
+struct child_node_properties {
+	uint16_t	parent_phy;
+	uint8_t		priority;
+	uint8_t		virt_bw_share;
+};
+
+struct parent_node_properties {
+	enum pp_qos_arbitration		arbitration;
+	uint16_t			first_child_phy;
+	uint8_t				num_of_children;
+};
+
+struct qos_node {
+	enum node_type			type;
+	uint16_t			bandwidth_limit;
+	uint16_t			shared_bandwidth_group;
+	struct child_node_properties	child_prop;
+	struct parent_node_properties	parent_prop;
+	union _data {
+		struct {
+			void		*ring_address;
+			size_t		ring_size;
+			unsigned int	credit;
+		} port;
+		struct {
+		} sched;
+		struct _queue {
+			uint32_t	green_min;
+			uint32_t	green_max;
+			uint32_t	yellow_min;
+			uint32_t	yellow_max;
+			uint32_t	max_allowed;
+			uint32_t	min_guaranteed;
+			uint32_t	fixed_green_prob;
+			uint32_t	fixed_yellow_prob;
+			uint16_t	max_burst;
+			uint16_t	rlm;
+			uint8_t		green_slope;
+			uint8_t		yellow_slope;
+		} queue;
+	} data;
+
+#define QOS_NODE_FLAGS_USED					BIT(0)
+#define QOS_NODE_FLAGS_INTERNAL					BIT(1)
+#define QOS_NODE_FLAGS_SUSPENDED				BIT(3)
+#define QOS_NODE_FLAGS_PARENT_BEST_EFFORT_ENABLE		BIT(4)
+#define QOS_NODE_FLAGS_PORT_PACKET_CREDIT_ENABLE		BIT(7)
+#define QOS_NODE_FLAGS_PORT_BYTE_CREDIT_ENABLE			BIT(8)
+#define QOS_NODE_FLAGS_QUEUE_WRED_ENABLE			BIT(10)
+#define QOS_NODE_FLAGS_QUEUE_BLOCKED				BIT(11)
+#define QOS_NODE_FLAGS_QUEUE_WRED_FIXED_DROP_PROB_ENABLE	BIT(12)
+	uint16_t	flags;
+};
+
+
+struct shared_bandwidth_group {
+	uint8_t		used;
+	uint8_t		reserved;
+	uint16_t	limit;
+};
+
+struct driver_cmds {
+	struct cmd_queue	*cmdq;
+	struct cmd_queue	*pendq;
+	unsigned int		cmd_id;
+	unsigned int		cmd_fw_id;
+};
+
+/* Communication to/from FW */
+struct fw_com {
+	void		*cmdbuf;
+	size_t		cmdbuf_sz;
+	uint32_t	*mems;
+	size_t		mems_size;
+	void __iomem	*mbx_to_uc;
+	void __iomem	*mbx_from_uc;
+	unsigned int	irqline;
+};
+
+/* HW configuration */
+struct hw_conf {
+	unsigned int	wred_total_avail_resources;
+	unsigned int	wred_prioritize_pop;
+	unsigned int	wred_const_p;
+	unsigned int	wred_max_q_size;
+	unsigned int	qm_ddr_start;
+	unsigned int	qm_num_pages;
+	unsigned int	fw_logger_start;
+	unsigned int	fw_stat;
+};
+
+struct fw_ver {
+	unsigned int major;
+	unsigned int minor;
+	unsigned int build;
+};
+
+struct pp_qos_dev {
+	LOCK				lock;
+	int				initialized;
+	unsigned int			max_port;
+	unsigned int			reserved_ports[MAX_PORTS];
+	struct shared_bandwidth_group	groups[QOS_MAX_SHARED_BW_GRP + 1];
+	struct pp_pool			*ids;
+	struct pp_pool			*rlms;
+	struct pp_pool			*portsphys;
+	struct pp_nodes			*nodes;
+	struct pp_mapping		*mapping;
+	struct pp_octets		*octets;
+	struct pp_queue			*queue;
+	struct driver_cmds		drvcmds;
+	struct fw_com			fwcom;
+	struct fw_ver			fwver;
+	void				*fwbuf;
+	struct hw_conf			hwconf;
+	void				*pdev;
+	void				*stat;
+#define PP_QOS_FLAGS_ASSERT		0x1U
+	unsigned int			flags;
+};
+
+#define PP_QOS_DEVICE_IS_ASSERT(qdev)\
+	(QOS_BITS_IS_SET((qdev)->flags, PP_QOS_FLAGS_ASSERT))
+
+/* Info from platform statically or device tree */
+struct ppv4_qos_platform_data  {
+	int		id;
+	unsigned int	max_port;
+	unsigned int	wred_prioritize_pop;
+	unsigned int	qm_ddr_start;
+	unsigned int	qm_num_pages;
+	unsigned int	fw_logger_start;
+	unsigned int	fw_stat;
+};
+
+/* Info needed to create descriptor */
+struct qos_dev_init_info {
+	struct ppv4_qos_platform_data pl_data;
+	struct fw_com fwcom;
+};
+
+
+/******************************************************************************/
+/*                              UTILS                                         */
+/******************************************************************************/
+#define QOS_LOG(format, arg...) QOS_LOG_DEBUG(format, ##arg)
+void stop_run(void);
+#define QOS_ASSERT(condition, format, arg...)				\
+do {									\
+	if (!(condition)) {						\
+		QOS_LOG_CRIT("!!! Assertion failed !!! on %s:%d: " format,\
+		__func__, __LINE__, ##arg);				\
+		stop_run();						\
+	}								\
+} while (0)
+
+#define QOS_BITS_SET(flags, bits)	((flags) |= (bits))
+#define QOS_BITS_CLEAR(flags, bits)	((flags) &= ~(bits))
+#define QOS_BITS_TOGGLE(flags, bits)	((flags) ^= (bits))
+#define QOS_BITS_IS_SET(flags, bits)	((flags) & (bits))
+
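+/*
+ * Illustrative flag usage (a sketch, not part of the API): mark a node used
+ * with the helpers above and test for it later.
+ *
+ *	struct qos_node node = { .type = TYPE_UNKNOWN, .flags = 0 };
+ *
+ *	QOS_BITS_SET(node.flags, QOS_NODE_FLAGS_USED);
+ *	if (QOS_BITS_IS_SET(node.flags, QOS_NODE_FLAGS_USED))
+ *		QOS_BITS_CLEAR(node.flags, QOS_NODE_FLAGS_USED);
+ */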
+
+/*
+ * Phy octet mapping
+ */
+static inline int octet_of_phy(unsigned int phy)
+{
+	QOS_ASSERT(QOS_PHY_VALID(phy), "Invalid phy %u\n", phy);
+	return (phy >> 3);
+}
+
+/*
+ * Offset of phy on octet (0..7)
+ */
+static inline int octet_phy_offset(unsigned int phy)
+{
+	QOS_ASSERT(QOS_PHY_VALID(phy), "Invalid phy %u\n", phy);
+	return (phy & 0x7);
+}
+
+/*
+ * Return true iff both phys are on same octet
+ */
+static inline int same_octet(unsigned int first_phy, unsigned int second_phy)
+{
+	return octet_of_phy(first_phy) == octet_of_phy(second_phy);
+}
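+
+/*
+ * Example (illustrative): with eight phys per octet, phy 13 lives in
+ * octet_of_phy(13) == 1 at octet_phy_offset(13) == 5, so same_octet(8, 13)
+ * is true while same_octet(7, 8) is not.
+ */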
+
+/*
+ * Predicates on nodes
+ */
+static inline int node_used(const struct qos_node *node)
+{
+	return QOS_BITS_IS_SET(node->flags, QOS_NODE_FLAGS_USED);
+}
+
+static inline int node_internal(const struct qos_node *node)
+{
+	return node_used(node) && QOS_BITS_IS_SET(node->flags,
+						  QOS_NODE_FLAGS_INTERNAL);
+}
+
+static inline int node_suspended(const struct qos_node *node)
+{
+	return node_used(node) && QOS_BITS_IS_SET(node->flags,
+			QOS_NODE_FLAGS_SUSPENDED);
+}
+
+static inline int node_child(const struct qos_node *node)
+{
+	return node_used(node) && (node->type == TYPE_SCHED ||
+				   node->type == TYPE_QUEUE);
+}
+
+static inline int node_parent(const struct qos_node *node)
+{
+	return node_used(node) && (node->type == TYPE_SCHED ||
+				   node->type == TYPE_PORT);
+}
+
+static inline int node_port(const struct qos_node *node)
+{
+	return node_used(node) && (node->type == TYPE_PORT);
+}
+
+static inline int node_sched(const struct qos_node *node)
+{
+	return node_used(node) && (node->type == TYPE_SCHED);
+}
+
+static inline int node_queue(const struct qos_node *node)
+{
+	return node_used(node) && (node->type == TYPE_QUEUE);
+}
+
+/*
+ * Id and phy mapping
+ */
+unsigned int get_id_from_phy(const struct pp_mapping *map, unsigned int phy);
+unsigned int get_phy_from_id(const struct pp_mapping *map, unsigned int id);
+
+/* Map id <==> phy */
+void map_id_phy(struct pp_mapping *map, unsigned int id, unsigned int phy);
+
+/* Map id ==>  QOS_UNKNOWN_PHY */
+void map_id_reserved(struct pp_mapping *map, unsigned int id);
+
+/* Invalidate both the id and the phy that is currently mapped to it */
+void map_invalidate_id(struct pp_mapping *map, unsigned int id);
+
+/*
+ * Phy node mapping
+ */
+struct qos_node *get_node_from_phy(struct pp_nodes *nodes, unsigned int phy);
+const struct qos_node *get_const_node_from_phy(const struct pp_nodes *nodes,
+					       unsigned int phy);
+unsigned int get_phy_from_node(const struct pp_nodes *nodes,
+		      const struct qos_node *node);
+
+/*
+ * Queue of uint16_t data.
+ * Data is dequeued from queue's head and enqueued into its tail
+ */
+/* return head of queue or QOS_INVALID_ID if queue is empty */
+uint16_t pp_queue_dequeue(struct pp_queue *queue);
+
+/* return 0 on success -1 if queue is full */
+int pp_queue_enqueue(struct pp_queue *queue, uint16_t data);
+
+/* empty queue */
+void pp_queue_reset(struct pp_queue *queue);
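+
+/*
+ * Illustrative drain loop (a sketch; handle() is a hypothetical consumer):
+ *
+ *	uint16_t id;
+ *
+ *	while ((id = pp_queue_dequeue(queue)) != QOS_INVALID_ID)
+ *		handle(id);
+ */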
+
+/*
+ * Pool (stack) of uint16_t data
+ */
+uint16_t pp_pool_get(struct pp_pool *pool);
+int pp_pool_put(struct pp_pool *pool, uint16_t data);
+
+/*
+ * Cyclic buffer
+ */
+/*
+ * Copy size bytes from the buffer into cmd.
+ * Return 0 on success, -1 if the buffer holds fewer than size bytes.
+ * cmd_queue_get removes the bytes it reads from the buffer, while
+ * cmd_queue_peek leaves them in place.
+ */
+int cmd_queue_get(struct cmd_queue *q, void *cmd, size_t size);
+int cmd_queue_peek(struct cmd_queue *q, void *_cmd, size_t size);
+int cmd_queue_is_empty(struct cmd_queue *q);
+
+/*
+ * Copy size bytes into the cyclic buffer.
+ * Return 0 on success, -1 if the buffer has no room for size bytes.
+ */
+int cmd_queue_put(struct cmd_queue *q, void *cmd, size_t size);
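+
+/*
+ * Illustrative producer/consumer sketch (struct cmd is hypothetical):
+ *
+ *	struct cmd c;
+ *
+ *	if (!cmd_queue_put(q, &c, sizeof(c))) {
+ *		if (!cmd_queue_peek(q, &c, sizeof(c)))
+ *			cmd_queue_get(q, &c, sizeof(c));
+ *	}
+ */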
+
+/*
+ * Octets manipulation
+ */
+
+/**
+ * octet_get_with_at_least_free_entries() - find an octet with free entries.
+ * @octets: Octets container
+ * @count: The minimum number of free entries the octet should have
+ * Return: an octet which satisfies the following:
+ *  - It has at least count free entries
+ *  - No other octet that satisfies the previous condition has fewer free
+ *    entries than this octet
+ *
+ * If no such octet is found QOS_INVALID_OCTET is returned
+ *
+ * Note the octet is removed from the container; the user should call
+ * octet_set_use_count (or nodes_move/octet_nodes_shift, which call it)
+ * to return it
+ */
+unsigned int octet_get_with_at_least_free_entries(struct pp_octets *octets,
+						  unsigned int count);
+
+/**
+ * octet_get_least_free_entries() - Find the octet with the least free entries
+ * @octets: Octets container
+ * Return: The octet with the least free entries
+ *
+ * Note the octet is removed from the container; the user should call
+ * octet_set_use_count (or nodes_move/octet_nodes_shift, which call it)
+ * to return it
+ */
+unsigned int octet_get_least_free_entries(struct pp_octets *octets);
+
+/*
+ * Initialize qos dev, max port designates phy of highest port
+ */
+struct pp_qos_dev *_qos_init(unsigned int max_port);
+void _qos_clean(struct pp_qos_dev *qdev);
+
+/*
+ * Create qos dev with hardware parameters
+ */
+struct pp_qos_dev *create_qos_dev_desc(struct qos_dev_init_info *initinfo);
+
+/**
+ * octet_get_min_sibling_group() - Find the "min parent" - the parent with
+ * the fewest children on the octet
+ * @qdev:
+ * @octet:
+ * @spc_parent:      The number of children for this parent is increased by one
+ *                   only for the sake of finding the "min parent"
+ * @num_of_children: Holds the number of children of the "min parent"
+ * Return:	     The min parent, or NULL if the octet is empty
+ *
+ * Note if several parents hold the same number of children on the octet the
+ * first one is returned
+ */
+struct qos_node *octet_get_min_sibling_group(const struct pp_qos_dev *qdev,
+					     unsigned int octet,
+					     const struct qos_node *spc_parent,
+					     unsigned int *num_of_children);
+
+/**
+ * nodes_modify_used_status() - Set/Clear node's use status
+ * @qdev:
+ * @first_phy: First node's phy
+ * @count:     Number of nodes
+ * @status:    Desired status (0/1)
+ */
+void nodes_modify_used_status(struct pp_qos_dev *qdev, unsigned int first_phy,
+			      unsigned int count, unsigned int status);
+
+
+/*
+ * Remove node
+ */
+/* remove node and free its resources, assume node has no children */
+int node_remove(struct pp_qos_dev *qdev, struct qos_node *node);
+/*
+ * Mark the node unused and update its parent; the node's id and rlm are
+ * not freed. Assume the node has no children
+ */
+void node_phy_delete(struct pp_qos_dev *qdev, unsigned int phy);
+
+void release_rlm(struct pp_pool *rlms, unsigned int rlm);
+
+int node_flush(struct pp_qos_dev *qdev, struct qos_node *node);
+
+/**
+ * node_cfg_valid() - check if node configuration is valid
+ * The process of modifying a node involves cloning the original node to a
+ * temporary node, applying the modification requested by the user to this
+ * temporary node, and checking the validity of the result; if it is valid
+ * the temporary node is copied back to the original node.
+ *
+ * @qdev:
+ * @node: The temporary node whose configuration should be checked
+ * @orig_node: original node from which node was copied, NULL if new node
+ * @prev_virt_parent_phy: phy of previous virtual parent
+ * Return: 1 if configuration is valid, 0 if not
+ */
+int node_cfg_valid(const struct pp_qos_dev *qdev,
+		   const struct qos_node *node,
+		   const struct qos_node *orig_node,
+		   unsigned int prev_virt_parent_phy);
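+
+/*
+ * The modify flow described above, as a sketch (set_node_prop() arguments
+ * are abbreviated; its declaration follows below):
+ *
+ *	struct qos_node tmp = *orig_node;	// clone
+ *	set_node_prop(qdev, &tmp, common, parent, child, &modified);
+ *	if (node_cfg_valid(qdev, &tmp, orig_node, prev_virt_parent_phy))
+ *		*orig_node = tmp;		// commit
+ */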
+
+int get_node_prop(const struct pp_qos_dev *qdev,
+		  const struct qos_node *node,
+		  struct pp_qos_common_node_properties *common,
+		  struct pp_qos_parent_node_properties *parent,
+		  struct pp_qos_child_node_properties *child);
+
+int set_node_prop(struct pp_qos_dev *qdev,
+		  struct qos_node *node,
+		  const struct pp_qos_common_node_properties *common,
+		  const struct pp_qos_parent_node_properties *parent,
+		  const struct pp_qos_child_node_properties *child,
+		  uint32_t *modified);
+
+/*
+ * Init node with default values
+ * common, parent and child dictate which
+ * components of node should be initialized
+ */
+void node_init(const struct pp_qos_dev *qdev,
+	       struct qos_node *node,
+	       unsigned int common,
+	       unsigned int parent,
+	       unsigned int child);
+/**
+ * get_node_queues() - Return all queues on a subtree
+ * @qdev:
+ * @phy:       Phy of subtree's node
+ * @queue_ids: Array to store the queues ids - may be NULL
+ * @size:      Size of array - may be 0
+ * @queues_num: The number of queues on the subtree
+ */
+void get_node_queues(struct pp_qos_dev *qdev, unsigned int phy,
+		     uint16_t *queue_ids,
+		     unsigned int size, unsigned int *queues_num);
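+
+/*
+ * Since @queues_num may exceed @size, a caller that needs every id can query
+ * the count first (a sketch; allocation error handling omitted):
+ *
+ *	unsigned int num;
+ *	uint16_t *ids;
+ *
+ *	get_node_queues(qdev, phy, NULL, 0, &num);
+ *	ids = kcalloc(num, sizeof(*ids), GFP_KERNEL);
+ *	get_node_queues(qdev, phy, ids, num, &num);
+ */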
+
+/**
+ * phy_alloc_by_parent() - Allocate new phy for a node
+ * @qdev:
+ * @parent:          Node's parent
+ * @child_priority:  Relevant only for WSP parent
+ * Return:
+ */
+unsigned int phy_alloc_by_parent(struct pp_qos_dev *qdev,
+				 struct qos_node *parent,
+				 unsigned int child_priority);
+
+/* Return the first non-internal ancestor of a node */
+unsigned int get_virtual_parent_phy(const struct pp_nodes *nodes,
+				    const struct qos_node *child);
+
+void node_update_children(struct pp_qos_dev *qdev,
+				 unsigned int phy,
+				 unsigned int new_phy);
+
+void tree_update_predecessors(struct pp_qos_dev *qdev, unsigned int phy);
+/*
+ * Assume that sched is an internal scheduler
+ * 1. Update sched's virtual bandwidth share to be the sum of its
+ *    children's shares.
+ * 2. If sched's parent is also an internal scheduler, update its virtual
+ *    bandwidth share to the updated sum of its children's shares as well,
+ *    and keep doing that up the tree hierarchy as long as the parent is an
+ *    internal scheduler
+ */
+void update_internal_bandwidth(const struct pp_qos_dev *qdev,
+			       struct qos_node *sched);
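+
+/*
+ * The propagation described above amounts to roughly (a sketch, not the
+ * implementation):
+ *
+ *	while (node_internal(sched)) {
+ *		sched->child_prop.virt_bw_share =
+ *			get_children_bandwidth_share(qdev, sched);
+ *		sched = get_node_from_phy(qdev->nodes,
+ *					  sched->child_prop.parent_phy);
+ *	}
+ */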
+
+/* Remove all nodes (including root) of a subtree */
+int tree_remove(struct pp_qos_dev *qdev, unsigned int phy);
+
+/* Flush all nodes (including root) of a subtree */
+int tree_flush(struct pp_qos_dev *qdev, unsigned int phy);
+
+/* Modify blocked status of queues on a subtree */
+int tree_modify_blocked_status(struct pp_qos_dev *qdev,
+			       unsigned int phy,
+			       unsigned int status);
+void get_bw_grp_members_under_node(struct pp_qos_dev *qdev, unsigned int id,
+				   unsigned int phy, uint16_t *ids,
+				   unsigned int size,
+				   unsigned int *ids_num);
+
+/* Creates pool of free ports phys */
+struct pp_pool *free_ports_phys_init(unsigned int *reserved,
+				     unsigned int max_port,
+				     const unsigned int *reserved_ports,
+				     unsigned int size);
+
+/* return the port ancestor of a node */
+static inline unsigned int get_port(const struct pp_nodes *nodes,
+				    unsigned int phy)
+{
+	const struct qos_node *node;
+
+	node = get_const_node_from_phy(nodes, phy);
+	while (node_child(node))
+		node = get_const_node_from_phy(nodes,
+					       node->child_prop.parent_phy);
+
+	QOS_ASSERT(node_port(node), "Did not reach port node for phy %u\n",
+		   phy);
+	return get_phy_from_node(nodes, node);
+}
+
+void qos_module_init(void);
+void remove_qos_instance(unsigned int id);
+int _pp_qos_queue_block(struct pp_qos_dev *qdev, unsigned int id);
+int _pp_qos_queue_unblock(struct pp_qos_dev *qdev, unsigned int id);
+int _pp_qos_queue_flush(struct pp_qos_dev *qdev, unsigned int id);
+void wake_uc(void *data);
+
+#ifdef PP_QOS_TEST
+void test_cmd_queue(void);
+void basic_tests(void);
+void advance_tests(void);
+void reposition_test(void);
+void falcon_test(void);
+void simple_test(void);
+void stat_test(void);
+void info_test(void);
+void load_fw_test(void);
+void tests(void);
+void test_init_instance(struct pp_qos_dev *qdev);
+int add_qos_dev(void);
+void remove_qos_dev(void);
+int port_cfg_valid(const struct pp_qos_dev *qdev, const struct qos_node *node,
+		   const struct qos_node *orig_node);
+int queue_cfg_valid(const struct pp_qos_dev *qdev, const struct qos_node *node,
+		    const struct qos_node *orig_node,
+		    unsigned int prev_virt_parent_phy);
+int sched_cfg_valid(const struct pp_qos_dev *qdev, const struct qos_node *node,
+		    const struct qos_node *orig_node,
+		    unsigned int prev_virt_parent_phy);
+struct pp_octets *octets_init(unsigned int last_port_octet);
+void octets_clean(struct pp_octets *octets);
+void debug_verify_octet_usage(struct pp_octets *octets, unsigned int octet,
+			      unsigned int usage);
+void nodes_move(struct pp_qos_dev *qdev, unsigned int dst_phy,
+		unsigned int src_phy, unsigned int count);
+void octet_nodes_shift(struct pp_qos_dev *qdev, unsigned int first_phy,
+		       unsigned int count, int shift);
+unsigned int get_children_bandwidth_share(const struct pp_qos_dev *qdev,
+					  const struct qos_node *parent);
+unsigned int get_virt_children_bandwidth_share(const struct pp_qos_dev *qdev,
+					       const struct qos_node *parent);
+void transmit_cmds(struct pp_qos_dev *qdev);
+void update_cmd_id(struct driver_cmds *drvcmds);
+#define STATIC_UNLESS_TEST
+#else
+#define STATIC_UNLESS_TEST static
+#endif
+
+#endif
diff --git a/include/net/datapath_api.h b/include/net/datapath_api.h
index f7d58ae1a..61e6d54ae 100644
--- a/include/net/datapath_api.h
+++ b/include/net/datapath_api.h
@@ -26,6 +26,7 @@
 #include <net/switch_api/lantiq_gsw_flow.h>
 #include <net/switch_api/lantiq_gsw.h>
 #include <net/switch_api/gsw_dev.h>
+#include <net/switch_api/gsw_flow_ops.h>
 #ifdef CONFIG_LTQ_DATAPATH_CPUFREQ
 #include <linux/cpufreq.h>
 #include <cpufreq/ltq_cpufreq.h>
@@ -247,6 +248,13 @@ typedef struct dp_subif {
 
 typedef dp_subif_t PPA_SUBIF; /*!< @brief structure type dp_subif PPA_SUBIF*/
 
+struct vlan_prop {
+	u8 num;
+	u16 out_proto, out_vid;
+	u16 in_proto, in_vid;
+	struct net_device *base;
+};
+
 /*! @brief struct for dp_drv_mib */
 typedef struct dp_drv_mib {
 	u64 rx_drop_pkts;  /*!< rx drop pkts */
@@ -560,7 +568,7 @@ enum DP_SUBIF_DATA_FLAG {
 
 /*! @brief struct dp_subif_data */
 struct dp_subif_data {
-	s8 deq_port_idx;  /*!< [in]
+	s8 deq_port_idx;  /*!< [in] range: 0 ~ (its max deq_port_num - 1)
 			   *  For PON, it is tcont_idx,
 			   *  For other device, normally its value is zero
 			   */
diff --git a/include/net/datapath_api_qos.h b/include/net/datapath_api_qos.h
index a765d8c61..80312f0bc 100644
--- a/include/net/datapath_api_qos.h
+++ b/include/net/datapath_api_qos.h
@@ -326,7 +326,7 @@
 #ifndef DP_QOS_API_H
 #define DP_QOS_API_H
 
-#define DP_MAX_SCH_LVL  4 /*!< @brief max number of hierarchy QOS layers
+#define DP_MAX_SCH_LVL  3 /*!< @brief max number of hierarchy QOS layers
 			   *   supported.
 			   *  can change MACRO for more layers
 			   */
@@ -340,9 +340,10 @@
 
 /*! @brief QOS Link Node Type: Queue, scheduler and dequeue port*/
 enum dp_node_type {
-	DP_NODE_QUEUE = 0, /*!< queue node*/
+	DP_NODE_UNKNOWN = 0,  /*!< Unallocated node*/
+	DP_NODE_QUEUE,  /*!< queue node*/
 	DP_NODE_SCH,   /*!< scheduler node*/
-	DP_NODE_PORT,   /*!< port node*/
+	DP_NODE_PORT   /*!< port node*/
 };
 
 /*! @brief QOS Link Node ID:
@@ -361,9 +362,18 @@ union dp_node_id {
 
 /*! @brief node flag to enable/disable/keep current setting */
 enum dp_node_en {
-	DP_NODE_DIS = 0,  /*!< disable node */
-	DP_NODE_EN,       /*!< enable node */
-	DP_NODE_NO_CHANGE /*!< keep exiting enable status */
+	DP_NODE_DIS = BIT(0),     /*!< disable node: drop new incoming pkts;
+				   *   for ppv4, valid for queue/port only
+				   */
+	DP_NODE_EN = BIT(1),      /*!< enable node: allow enqueue;
+				   *   for ppv4, valid for queue/port only
+				   */
+	DP_NODE_SUSPEND = BIT(2), /*!< suspend node: no scheduling; not for TMU;
+				   *   for ppv4, valid for queue/sched/port
+				   */
+	DP_NODE_RESUME = BIT(3)   /*!< resume scheduling for this node; not for
+				   *   TMU; for ppv4, valid for queue/sched/port
+				   */
 };
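+
+/*
+ * Illustrative only: the values above are bit flags; a caller normally picks
+ * one when filling e.g. struct dp_queue_conf.act (a sketch assuming
+ * dp_queue_conf_get() returns 0 on success; both are declared below):
+ *
+ *	struct dp_queue_conf cfg = {0};
+ *
+ *	cfg.inst = 0;
+ *	cfg.q_id = qid;
+ *	if (!dp_queue_conf_get(&cfg, 0) && (cfg.act & DP_NODE_EN))
+ *		;	// queue currently allows enqueue
+ */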
 
 /*! @brief arbitration method used for the node in its parents scheduler/ports*/
@@ -432,7 +442,7 @@ struct dp_node_link {
 	union dp_node_id node_id; /*!< input node id.
 				   *  if id == DP_NODE_AUTO_ID, DP will
 				   *  allocate a free node from the reserved
-				   *  pool or global pool
+				   *  pool or global pool and set node id
 				   */
 
 	enum dp_arbitate arbi;  /*!< <PRE>arbitration method used in its parents
@@ -460,9 +470,19 @@ struct dp_node_link {
 					 *  for dp_node_unlink: no use</PRE>
 					 */
 	union dp_node_id p_node_id; /*!< <PRE>parent id
-				     * for dp_node_link_add: it is in  input
-				     * for dp_node_link_get: it is in  output
-				     * for dp_node_unlink: no use</PRE>
+				     * for dp_node_link_add: it is in input
+				     *   if type is scheduler
+				     *     if id == DP_NODE_AUTO_ID
+				     *        DP will allocate a free node
+				     *        from the reserved or global pool
+				     *        and set p_node_id
+				     *     else use user specified parent id
+				     *   else type is port
+				     *        User must provide valid
+				     *        cqm_deq_port value
+				     * for dp_node_link_get: it is in output
+				     * for dp_node_unlink: no use
+				     * </PRE>
 				     */
 	union dp_node_id cqm_deq_port; /*!< <PRE>input/optional:
 					*  for TMU queue link setup, it is
@@ -508,18 +528,18 @@ int dp_node_link_add(struct dp_node_link *info, int flag);
  *  @note It is used to unlink a specified node or including its full path
  *        depends on the flag set
  * <PRE>
- *  1) if it is to unlik a queue node, DP will do necessary work as below:
+ *  1) if it is to unlink a queue node, DP will do necessary work as below:
  *        re-map the lookup entry to a drop queue
 *        flushing the queue: not including those already dequeued by CQM
  *        disable the queue
- *        unlink the queue node as specified from its parent
- *  2) if it is to unlik a scheduler node, DP will do necessary work as below:
+ *        unlink the queue node as specified from its parent
+ *  2) if it is to unlink a scheduler node, DP will do necessary work as below:
  *       a) if there is still child linked to this scheduler yet
  *             i) without flag DP_NODE_SMART_UNLINK set:
  *                   return DP_FAILURE
  *             ii) with flag DP_NODE_SMART_UNLINK set:
- *                   unlink its all child first,
- *                   then unlink this node as specified.
+ *                   unlink all of its children first,
+ *                   then unlink this node as specified.
  *  Note:
 *        a) This API only unlinks the node from the QOS setup by default, but
  *        node itself is not freed.
@@ -712,7 +732,7 @@ enum dp_q_size_unit {
 struct dp_queue_conf {
 	int inst; /*!< input: dp instance. For SOC side, it is always zero */
 	int q_id; /*!< input: q_id */
-	enum dp_node_en act;  /*!< enable/disable/ queue */
+	enum dp_node_en act;  /*!< enable/disable/suspend/resume queue */
 	enum dp_q_drop_mode drop; /*!< TMU: wred/tail drop, how about PP?? */
 	enum dp_q_size_unit unit; /*!< queue size unit:packet/bytes */
 	u32 min_size[DP_MAX_COLORS]; /*!< queue minimal size, If QOCC less than
@@ -747,9 +767,9 @@ int dp_queue_conf_get(struct dp_queue_conf *cfg, int flag);
 
 /*! @brief enum dp_shaper_cmd */
 enum dp_shaper_cmd {
-	dp_shaper_cmd_ADD = 0, /*!< add shaper */
-	dp_shaper_cmd_REMOVE,  /*!< remove shaper */
-	dp_shaper_cmd_DISABLE, /*!< disable the shaper: no limit*/
+	DP_SHAPER_CMD_ADD = 0, /*!< add shaper */
+	DP_SHAPER_CMD_REMOVE,  /*!< remove shaper */
+	DP_SHAPER_CMD_DISABLE, /*!< disable the shaper: no limit*/
 };
 
 /*! @brief dp_shaper_conf */
@@ -797,7 +817,7 @@ struct dp_node_alloc {
 			       *  its reserved pool
 			       *  otherwise provided by the caller itself.</PRE>
 			       */
-	int cqm_dq_port; /*Added for qos slim driver only*/
+	//int cqm_dq_port; /*Added for qos slim driver only*/
 };
 
 /*! \ingroup APIs_dp_node_alloc
@@ -821,6 +841,7 @@ int dp_node_free(struct dp_node_alloc *node, int flag);
 /*! @brief dp_queue_res */
 struct dp_queue_res {
 	int q_id; /*!< queue id */
+	int q_node; /*!< queue logical node id. For debugging only */
 	int sch_lvl; /*!< number of scheduler layers configured for this queue*/
 	int sch_id[DP_MAX_SCH_LVL]; /*!< Scheduler information.
 				     *  @note the scheduler of sch_id[0] is the
@@ -830,7 +851,7 @@ struct dp_queue_res {
 	int leaf[DP_MAX_SCH_LVL]; /*!< leaf information.
 				   *
 				   */
-	int cqm_deq_port; /*!< cqm dequeue port */
+	int cqm_deq_port; /*!< cqm dequeue port: absolute port id */
 	int qos_deq_port; /*!< qos dequeue port: Normally user no need to know*/
 };
 
@@ -846,8 +867,8 @@ struct dp_dequeue_res {
 	int dp_port;  /*!< input: if DEV NULL, dp_port must be valid,
 		       *  otherwise no use
 		       */
-	int cqm_deq_port; /*!< <PRE>get resource as specified dequeue port
-			   *  offset
+	int cqm_deq_idx; /*!< <PRE>get resource as specified dequeue port
+			   *  offset (relative)
 			   *  If it is DEQ_PORT_OFFSET_ALL, it means
 			   *  get all resource under that dev/dep_port
 			   *  related device.
@@ -856,6 +877,16 @@ struct dp_dequeue_res {
 			   *  For pon case, cqm_deq_port is like tcont idx
 			   * </PRE>
 			   */
+	int cqm_deq_port; /*!< <PRE>absolute cqm dequeue port
+			   *  output only:
+			   *    for a given cqm_deq_idx, cqm_deq_port is the
+			   *    matched absolute cqm dequeue port
+			   *    if cqm_deq_idx == DEQ_PORT_OFFSET_ALL,
+			   *    it is the base of the cqm dequeue ports
+			   * </PRE>
+			   */
+
 	int num_deq_ports; /*!< <PRE>output: The number of dequeue port this
 			    *    dev have.
 			    *    Normally this value should be 1.
@@ -865,6 +896,7 @@ struct dp_dequeue_res {
 			    *    64.</PRE>
 			    */
 	int num_q; /*!< output: the number of queues*/
+	int q_res_size; /*!< input: number of queue entries that can be stored in q_res */
 	struct dp_queue_res *q_res;/*!< output: resource output.
 				    *  @note caller should allocate the memory.
 				    *  <PRE>Procedure:
diff --git a/include/net/datapath_inst.h b/include/net/datapath_inst.h
index 91b69d8a3..0acc3b756 100644
--- a/include/net/datapath_inst.h
+++ b/include/net/datapath_inst.h
@@ -44,6 +44,7 @@ struct dp_inst_info {
 	enum DP_HW_CAP_VER ver;  /*! HW version */
 	struct core_ops *ops[DP_MAX_GSW_HANDLE]; /*! GSWIP ops handler*/
 	int cbm_inst;  /*! CBM instance for this DP instance*/
+	int qos_inst; /*! QOS instance for this DP instance*/
 };
 
 struct inst_info {
@@ -110,6 +111,7 @@ struct inst_info {
 					   int inst, int bport);
 	int (*dp_mac_set)(int bport, int fid, int inst, u8 *addr);
 	int (*dp_mac_reset)(int bport, int fid, int inst, u8 *addr);
+	int (*dp_cfg_vlan)(int inst, int vap, int ep);
 #endif
 };
 
@@ -130,6 +132,7 @@ struct inst_property {
 	/*driver should know which HW to configure, esp for PCIe case */
 	struct core_ops *ops[DP_MAX_GSW_HANDLE];
 	int cbm_inst;
+	int qos_inst;
 	void *priv_hal; /*private data per HAL */
 };
 
diff --git a/include/net/datapath_proc_api.h b/include/net/datapath_proc_api.h
index 51a7fda3b..774ef05e7 100644
--- a/include/net/datapath_proc_api.h
+++ b/include/net/datapath_proc_api.h
@@ -17,10 +17,10 @@
 #include <linux/uaccess.h>	/*copy_from_user */
 
 #define set_ltq_dbg_flag(v, e, f) do {;\
-	if (e > 0)\
-		v |= (uint32_t)(f);\
+	if ((e) > 0)\
+		(v) |= (uint32_t)(f);\
 	else\
-		v &= (uint32_t)(~f); } \
+		(v) &= (uint32_t)(~f); } \
 	while (0)
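+
+/*
+ * e.g. set_ltq_dbg_flag(dbg_flag, enable, DP_DBG_FLAG_DBG) sets the bit in
+ * dbg_flag when enable > 0 and clears it otherwise (illustrative; the flag
+ * name is an assumption).
+ */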
 
 typedef void (*dp_proc_single_callback_t) (struct seq_file *);
@@ -46,6 +46,9 @@ struct dp_proc_entry {
 	struct file_operations ops;
 };
 
+int dp_getopt(char *cmd[], int cmd_size, int *cmd_offset,
+	      char **optarg, const char *optstring);
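+
+/*
+ * Illustrative only, assuming getopt(3)-like semantics for dp_getopt()
+ * (a sketch, not a documented contract):
+ *
+ *	char *optarg;
+ *	int offset = 0;
+ *
+ *	while (dp_getopt(cmd, cmd_num, &offset, &optarg, "q:p:") > 0)
+ *		;	// consume "-q <arg>" / "-p <arg>" style options
+ */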
+
 void dp_proc_entry_create(struct proc_dir_entry *parent_node,
 			  struct dp_proc_entry *proc_entry);
 
diff --git a/include/net/pp_bm_regs.h b/include/net/pp_bm_regs.h
index 0debb1342..83d6e94c3 100755
--- a/include/net/pp_bm_regs.h
+++ b/include/net/pp_bm_regs.h
@@ -38,48 +38,48 @@
 #define PP_BMGR_MAX_POOLS_IN_POLICY					(4)
 
 
-#define BMGR_OFFSET_OF_POLICY(policy)					((policy) * (int)0x4)
-#define BMGR_OFFSET_OF_POOL(pool)					((pool) * (int)0x4)
-#define BMGR_OFFSET_OF_GROUP(group)					((group) * (int)0x4)
-#define BMGR_OFFSET_OF_POOL_IN_POLICY(pool, policy)			((BMGR_OFFSET_OF_POLICY(policy) * PP_BMGR_MAX_POOLS_IN_POLICY) + BMGR_OFFSET_OF_POOL(pool))
+#define BMGR_OFFSET_OF_POLICY(policy)										((policy) * (int)0x4)
+#define BMGR_OFFSET_OF_POOL(pool)											((pool) * (int)0x4)
+#define BMGR_OFFSET_OF_GROUP(group)											((group) * (int)0x4)
+#define BMGR_OFFSET_OF_POOL_IN_POLICY(pool, policy)							((BMGR_OFFSET_OF_POLICY(policy) * PP_BMGR_MAX_POOLS_IN_POLICY) + BMGR_OFFSET_OF_POOL(pool))
 // Regs
-#define BMGR_CTRL_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x000)
-#define BMGR_POOL_MIN_GRNT_MASK_REG_ADDR(BMGR_REG_BASE)				(BMGR_REG_BASE + 0x004)
-#define BMGR_POOL_ENABLE_REG_ADDR(BMGR_REG_BASE)					(BMGR_REG_BASE + 0x008)
-#define BMGR_POOL_FIFO_RESET_REG_ADDR(BMGR_REG_BASE)					(BMGR_REG_BASE + 0x00C)
-#define BMGR_OCPM_BURST_SIZE_REG_ADDR(BMGR_REG_BASE)					(BMGR_REG_BASE + 0x010)
-#define BMGR_OCPM_NUM_OF_BURSTS_REG_ADDR(BMGR_REG_BASE)				(BMGR_REG_BASE + 0x014)
-#define BMGR_STATUS_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x018)
-#define BMGR_POOL_SIZE_REG_ADDR(BMGR_REG_BASE, pool)					(BMGR_REG_BASE + 0x020 + BMGR_OFFSET_OF_POOL(pool))
+#define BMGR_CTRL_REG_ADDR(BMGR_REG_BASE)									(BMGR_REG_BASE + 0x000)
+#define BMGR_POOL_MIN_GRNT_MASK_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x004)
+#define BMGR_POOL_ENABLE_REG_ADDR(BMGR_REG_BASE)							(BMGR_REG_BASE + 0x008)
+#define BMGR_POOL_FIFO_RESET_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x00C)
+#define BMGR_OCPM_BURST_SIZE_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x010)
+#define BMGR_OCPM_NUM_OF_BURSTS_REG_ADDR(BMGR_REG_BASE)						(BMGR_REG_BASE + 0x014)
+#define BMGR_STATUS_REG_ADDR(BMGR_REG_BASE)									(BMGR_REG_BASE + 0x018)
+#define BMGR_POOL_SIZE_REG_ADDR(BMGR_REG_BASE, pool)						(BMGR_REG_BASE + 0x020 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_GROUP_AVAILABLE_BUFF_REG_ADDR(BMGR_REG_BASE, group)			(BMGR_REG_BASE + 0x100 + BMGR_OFFSET_OF_GROUP(group))
-#define BMGR_GROUP_RESERVED_BUFF_REG_ADDR(BMGR_REG_BASE, group)			(BMGR_REG_BASE + 0x200 + BMGR_OFFSET_OF_GROUP(group))
+#define BMGR_GROUP_RESERVED_BUFF_REG_ADDR(BMGR_REG_BASE, group)				(BMGR_REG_BASE + 0x200 + BMGR_OFFSET_OF_GROUP(group))
 #define BMGR_POOL_PCU_FIFO_BASE_ADDR_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x400 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_PCU_FIFO_SIZE_REG_ADDR(BMGR_REG_BASE, pool)				(BMGR_REG_BASE + 0x440 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_PCU_FIFO_OCCUPANCY_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x480 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_PCU_FIFO_PROG_EMPTY_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x4C0 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_PCU_FIFO_PROG_FULL_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x500 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_EXT_FIFO_OCC_REG_ADDR(BMGR_REG_BASE, pool)				(BMGR_REG_BASE + 0x5C0 + BMGR_OFFSET_OF_POOL(pool))
-#define BMGR_POOL_EXT_FIFO_BASE_ADDR_LOW_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x540 + BMGR_OFFSET_OF_POOL(pool))
+#define BMGR_POOL_EXT_FIFO_BASE_ADDR_LOW_REG_ADDR(BMGR_REG_BASE, pool)		(BMGR_REG_BASE + 0x540 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_EXT_FIFO_BASE_ADDR_HIGH_REG_ADDR(BMGR_REG_BASE, pool)		(BMGR_REG_BASE + 0x580 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_ALLOCATED_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x600 + BMGR_OFFSET_OF_POOL(pool))
-#define BMGR_POOL_POP_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)				(BMGR_REG_BASE + 0x640 + BMGR_OFFSET_OF_POOL(pool))
+#define BMGR_POOL_POP_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)					(BMGR_REG_BASE + 0x640 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_PUSH_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)				(BMGR_REG_BASE + 0x680 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_DDR_BURST_WRITE_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x6C0 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_DDR_BURST_READ_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x700 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POOL_WATERMARK_LOW_THRESHOLD_REG_ADDR(BMGR_REG_BASE, pool)		(BMGR_REG_BASE + 0x740 + BMGR_OFFSET_OF_POOL(pool))
-#define BMGR_POOL_WATERMARK_LOW_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)			(BMGR_REG_BASE + 0x780 + BMGR_OFFSET_OF_POOL(pool))
+#define BMGR_POOL_WATERMARK_LOW_COUNTER_REG_ADDR(BMGR_REG_BASE, pool)		(BMGR_REG_BASE + 0x780 + BMGR_OFFSET_OF_POOL(pool))
 #define BMGR_POLICY_NULL_COUNTER_REG_ADDR(BMGR_REG_BASE, policy)			(BMGR_REG_BASE + 0x800 + BMGR_OFFSET_OF_POLICY(policy))
 // Ram
-#define BMGR_POLICY_MAX_ALLOWED_ADDR(BMGR_RAM_BASE, policy)				(BMGR_RAM_BASE + 0x0000 + BMGR_OFFSET_OF_POLICY(policy))
+#define BMGR_POLICY_MAX_ALLOWED_ADDR(BMGR_RAM_BASE, policy)					(BMGR_RAM_BASE + 0x0000 + BMGR_OFFSET_OF_POLICY(policy))
 #define BMGR_POLICY_MIN_GUARANTEED_ADDR(BMGR_RAM_BASE, policy)				(BMGR_RAM_BASE + 0x1000 + BMGR_OFFSET_OF_POLICY(policy))
 #define BMGR_POLICY_GROUP_ASSOCIATED_ADDR(BMGR_RAM_BASE, policy)			(BMGR_RAM_BASE + 0x2000 + BMGR_OFFSET_OF_POLICY(policy))
 #define BMGR_POLICY_POOLS_MAPPING_ADDR(BMGR_RAM_BASE, policy)				(BMGR_RAM_BASE + 0x3000 + BMGR_OFFSET_OF_POLICY(policy))
-#define BMGR_POLICY_MAX_ALLOWED_PER_POOL_ADDR(BMGR_RAM_BASE, policy, pool)		(BMGR_RAM_BASE + 0x4000 + BMGR_OFFSET_OF_POLICY(policy) + (pool * 0x1000))
+#define BMGR_POLICY_MAX_ALLOWED_PER_POOL_ADDR(BMGR_RAM_BASE, policy, pool)	(BMGR_RAM_BASE + 0x4000 + BMGR_OFFSET_OF_POLICY(policy) + (pool * 0x1000))
 #define BMGR_POLICY_ALLOC_BUFF_COUNTER_ADDR(BMGR_RAM_BASE, policy)			(BMGR_RAM_BASE + 0xA000 + BMGR_OFFSET_OF_POLICY(policy))
 #define BMGR_POLICY_ALLOC_BUFF_PER_POOL_COUNTER_ADDR(BMGR_RAM_BASE, pool, policy)	(BMGR_RAM_BASE + 0xB000 + BMGR_OFFSET_OF_POOL_IN_POLICY(pool, policy))
 
 // Data
-#define BMGR_DATAPATH_BASE						(0x3c0000)
+#define BMGR_DATAPATH_BASE						(0x18BC0000)
 
 #endif // _PP_BMGR_REGS_H_
 
diff --git a/include/net/pp_qos_drv.h b/include/net/pp_qos_drv.h
index 0dab8b509..8b2985fef 100644
--- a/include/net/pp_qos_drv.h
+++ b/include/net/pp_qos_drv.h
@@ -1,830 +1,954 @@
-/*
- * GPL LICENSE SUMMARY
- *
- *  Copyright(c) 2017 Intel Corporation.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of version 2 of the GNU General Public License as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *  The full GNU General Public License is included in this distribution
- *  in the file called LICENSE.GPL.
- *
- *  Contact Information:
- *  Intel Corporation
- *  2200 Mission College Blvd.
- *  Santa Clara, CA  97052
- */
-#ifndef _PP_QOS_API_H
-#define _PP_QOS_API_H
-
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-
-/******************************************************************************/
-/*                                 GENERAL                                    */
-/******************************************************************************/
-
-/**
- * DOC: Calling context
- *
- * The API exported by this driver, must be called only from a thread context.
- * Calling them from either interrupt/tasklet/buttom half is not allowed and
- * will result in unexpexted behaviour.
- */
-
-/**
- * DOC: Scheduling model
- *
- * Ports, schedulers and queues are modeled as a scheduling tree's nodes.
- * Each such node properties are classified to the following categories:
- * - Common properties - relevant to all nodes
- * - Parent properties - relevant only to parent nodes e.g. arbitration scheme
- *                       between children
- * - Children properties - relevant only to child nodes e.g. node's parent
- *
- * Note some nodes can be both parents and children e.g. schedulers
- */
-
-/**
- * DOC: Arbitration
- *
- * The way a parent arbitrates between its children can be either strict prioriy
- * or round robin.
- *
- * A WRR parent round robin its children - each child gets its turn - as if all
- * children have the same priority. The number of children for this parent type
- * is limited only by the number of nodes (> 1024)
- *
- * A WSP parent employs strict priorityi arbitration on its children - a child
- * will get scheduled only if there is no other sibling with higher priority
- * that can be scheduled. A WSP parent can have 8 direct children at most, each
- * child must have a different priority. To configure more that 8 children for
- * a WSP parent, or to have several children with the same priority,
- * intermediate schedulers should be use.
- *
- * Example of 5 queues q0, q1, q2, q3, q4 with priorities 0, 1, 2, 3, 3
- *
- *                     Port arbitrating WSP
- *                     /   |    |     \
- *                    q0   q1  q2  Sched arbitrating WRR
- *                                         |	      |
- *                                        q3          q4
- *
- *
- * Example of 16 queues q0 to q15, each with a different priority
- *
- *		      Port arbitrating WSP
- *		         |	       |
- *	    Sched0 arbitrating WSP   Sched1 arbitrating WSP
- *	       /               \       /                   \
- *	   q0 q1 q2 q3 q4 q5 q6 q7  q8 q9 q10 q11 q12 q13 q4 q15
- */
-
-/**
- * enum pp_qos_arbitration - parent method for arbitrating its children on tree.
- * @PP_QOS_ARBITRATION_WSP: strict priority
- * @PP_QOS_ARBITRATION_WRR: round robin
- * @PP_QOS_ARBITRATION_WFQ:
- */
-enum pp_qos_arbitration {
-	PP_QOS_ARBITRATION_WSP,
-	PP_QOS_ARBITRATION_WRR,
-	PP_QOS_ARBITRATION_WFQ,
-};
-
-
-/**
- * struct pp_qos_common_node_properties - Properties common to all nodes
- * @suspended:		    is node scheduling suspended
- * @bandwidth_limit:	    bandwidth limit in Mbps
- * @shared_bandwidth_group: shared bandwidth group
- */
-struct pp_qos_common_node_properties {
-	int	 suspended;
-
-        #define QOS_NO_BANDWIDTH_LIMIT          0
-	#define QOS_MAX_BANDWIDTH_LIMIT         0xFFFE
-	unsigned bandwidth_limit;
-
-	#define QOS_NO_SHARED_BANDWIDTH_GROUP   0xFFFF
-        #define QOS_MAX_SHARED_BANDWIDTH_GROUP  511
-	int shared_bandwidth_group;
-};
-
-
-/**
- * @struct pp_qos_parent_node_properties - Properties relevant for parent node
- * @arbitration:	how parent node arbitrates between its children
- * @best_effort_enable: whether best effort scheduling is enabled for this
- *                      node's children.
- */
-struct pp_qos_parent_node_properties {
-	enum pp_qos_arbitration arbitration;
-	int  best_effort_enable;
-};
-
-
-/**
- * @struct pp_qos_child_node_properties - Properties relevant for child node
- * @parent:		parent's id
- * @priority:		strict priority, relevant only if parent uses wsp
- *                      arbitration
- * @bandwidth_share:	bandwidth precentage from parent
- */
-struct pp_qos_child_node_properties {
-	int		parent;
-	#define QOS_MAX_CHILD_PRIORITY 7
-	unsigned	priority;
-	unsigned	bandwidth_share;
-};
-
-
-struct pp_qos_dev;
-
-/******************************************************************************/
-/*                                      PORTS                                 */
-/******************************************************************************/
-
-/* TODO */
-struct pp_qos_port_stat {
-};
-
-/**
- * @struct pp_qos_port_conf - Configuration structure for port
- * @common_prop:		common properties
- * @port_parent_prop:		properties as a parent
- * @ring_address:		address of ring.
- * @ring_size:			size of ring.
- * @packet_credit_enable:	support packet credit.
- * @byte_credit_enable:		support byte credit.
- * @packet_credit:		amount of packets credit to add to the port.
- * @byte_credit:                amount of bytes credit to add to the port.
- */
-struct pp_qos_port_conf {
-	struct pp_qos_common_node_properties common_prop;
-	struct pp_qos_parent_node_properties port_parent_prop;
-
-	void     *ring_address;
-	unsigned ring_size;
-	int      packet_credit_enable;
-	int      byte_credit_enable;
-	unsigned packet_credit;
-	unsigned byte_credit;
-};
-
-
-/**
- * @struct pp_qos_port_info -  Port's information
- * @physical_id:   node id on scheduling tree.
- * @num_of_queues: number of queues feeds this port.
- */
-struct pp_qos_port_info {
-	unsigned physical_id;
-	unsigned num_of_queues;
-};
-
-
-/**
- * pp_qos_port_conf_set_default() - Init port configuration with default values
- * @conf: pointer to client allocated struct. This struct will be filled with
- * default values.
- *
- * Defaults values:
- *    suspended - no
- *    bandwidth_limit - QOS_NO_BANDWIDTH_LIMIT
- *    shared_bandwidth_limit - QOS_NO_SHARED_BANDWIDTH_GROUP
- *    arbitration - WSP
- *    best_effort_enable - no
- *    packet_credit_enable - yes
- *    byte_credit_enable - no
- *    packet_credit - 0
- *    byte_credit - 0
- *    ring_size - 0
- *    ring_address - 0
- */
-void pp_qos_port_conf_set_default(struct pp_qos_port_conf *conf);
-
-
-#define ALLOC_PORT_ID 0xFFFF
-/**
- * pp_qos_port_allocate() - Allocate a resource for a new port
- * @qos_dev:     handle to qos device instance obtained previously
- *               from qos_dev_init
- * @physical_id: if equal ALLOC_PORT_ID then library allocates
- *               a free port id for the new port, otherwise must
- *               be one of the reserved ports ids that were
- *               configured at qos_init
- * @id:          upon success holds new port's id
- *
- * @Return:  0 on success.
- */
-int pp_qos_port_allocate(struct pp_qos_dev *qos_dev, int physical_id, int *id);
-
-
-/**
- * pp_qos_port_remove() - Remove port
- * @qos_dev: handle to qos hardware instance.
- * @id: port's id obtained from port_allocate
- *
- * @Return: 0 on success.
- */
-int pp_qos_port_remove(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_port_set() - Set port configuration
- * @qos_dev:   handle to qos device instance obtained previously from
- *         qos_dev_init
- * @id:    port's id obtained from port_allocate
- * @conf:  port configuration.
- *
- * @Return: 0 on success
- */
-int pp_qos_port_set(struct pp_qos_dev *qos_dev, int id,
-		    const struct pp_qos_port_conf *conf);
-
-
-/**
- * pp_qos_port_suspend() - Remove port desendants from scheduling
- * @qos_dev: handle to qos hardware instance.
- * @id:  port's id obtained from port_allocate
- *
- * @Return: 0 on success
- */
-int pp_qos_port_suspend(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_port_resume() - Resume port and its decendants scheduling
- * @qos_dev: handle to qos hardware instance.
- * @id:  port's id obtained from port_allocate
- *
- * @Return: 0 on success
- */
-int pp_qos_port_resume(struct pp_qos_dev *qos_dev, int id);
-
-/**
- * pp_qos_port_block() - All new packets enqueued to queues feeding this port
- *                    will be dropped
- * @qos_dev: handle to qos hardware instance.
- * @id:  port's id obtained from port_allocate
- *
- * Note	- already enportd descriptors will be transmitted
- *
- * Return: 0 on success
- */
-int pp_qos_port_block(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_port_unblock() - Unblock enqueuing of new packets to queues that feeds
- *                      this port
- * @qos_dev: handle to qos hardware instance.
- * @id:	 port's id obtained from port_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_port_unblock(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_port_flush() - Drop all enqueued packets on queues under this port
- * @qos_dev: handle to qos hardware instance.
- * @id:	 port's id obtained from port_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_port_flush(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_port_conf_get() - Get port configuration
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * id:	  port's id obtained from port_allocate
- * @conf: pointer to struct to be filled with port's configuration
- *
- * Return: 0 on success
- */
-
-int pp_qos_port_conf_get(struct pp_qos_dev *qos_dev, int id,
-			 struct pp_qos_port_conf *conf);
-
-
-/**
- * pp_qos_port_info_get() - Get extra information about port
- * id:	  port's id obtained from port_allocate
- * @info: pointer to struct to be filled with port's info
- *
- * Return: 0 on success
- */
-int pp_qos_port_info_get(struct pp_qos_dev *qos_dev, int id,
-			 struct pp_qos_port_info *info);
-
-
-/**
- * pp_qos_port_get_queues() - Get all port's queues
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	        scheduler's id obtained from sched_allocate
- * @queue_ids:  client allocated array that will hold queues ids
- * @size:       size of queue_ids
- * @queues_num: holds the number of queues
- *
- * Note queues_num can be bigger than size
- *
- * Return: 0 on success
- */
-int pp_qos_port_get_queues(struct pp_qos_dev *qos_dev, int id,
-			   uint16_t *queue_ids, unsigned size,
-			   unsigned *queues_num);
-
-
-/**
- * pp_qos_port_stat_get() - Get port's statistics
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  port's id obtained from port_allocate
- * @stat: pointer to struct to be filled with port's statistics
- *
- * Return: 0 on success
- */
-int pp_qos_port_stat_get(struct pp_qos_dev *qos_dev, int id,
-			 struct pp_qos_port_stat *stat);
-
-
-/******************************************************************************/
-/*                                    QUEUES                                  */
-/******************************************************************************/
-
-/* TODO */
-struct pp_qos_queue_stat {
-};
-
-/**
- * @struct pp_qos_queue_conf - Configuration structure for queue
- * @common_prop:		common properties
- * @queue_child_prop:           properties as a child node
- * @max_burst:		        max burst in Kbps
- * @blocked:			queue will drop new enqueued packets
- * @wred_enable:		WRED is applied on this queue, otherwise only
- *                              min and max values are used
- * @queue_wred_min_avg_green:
- * @queue_wred_max_avg_green:
- * @queue_wred_slope_green:
- * @queue_wred_min_avg_yellow:
- * @queue_wred_max_avg_yellow:
- * @queue_wred_slope_yellow:
- * @queue_wred_min_guaranteed:
- * @queue_wred_max_allowed:
- */
-struct pp_qos_queue_conf {
-	struct pp_qos_common_node_properties common_prop;
-	struct pp_qos_child_node_properties  queue_child_prop;
-	unsigned max_burst;
-	int      blocked;
-	int	 wred_enable;
-	unsigned queue_wred_min_avg_green;
-	unsigned queue_wred_max_avg_green;
-	unsigned queue_wred_slope_green;
-	unsigned queue_wred_min_avg_yellow;
-	unsigned queue_wred_max_avg_yellow;
-	unsigned queue_wred_slope_yellow;
-	unsigned queue_wred_min_guaranteed;
-	unsigned queue_wred_max_allowed;
-};
-
-
-/**
- * @struct pp_qos_queue_info - Queue information struct
- * @port_id:	 port fed by this queue
- * @physical_id: queue (ready list) number
- */
-struct pp_qos_queue_info {
-	int port_id;
-	int physical_id;
-};
-
-
-/**
- * pp_qos_queue_conf_set_default() - Init configuration with default values
- * @conf: pointer to client allocated struct. This struct will be filled with
- * default values.
- *
- * Defaults values:
- *    suspended - no
- *    max_burst - 0
- *    bandwidth_limit - 0
- *    shared_bandwidth_limit - 0
- *    parent - 0
- *    priority - 0
- *    bandwidth_share - 0
- *    wred_enable - no
- *    ALL wred params - 0
- */
-void pp_qos_queue_conf_set_default(struct pp_qos_queue_conf *conf);
-
-
-/**
- * pp_qos_queue_allocate() - Allocate aresouirce for a new queue
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   upon success hold's new queue id
- *
- * Return:  0 on success.
- */
-int pp_qos_queue_allocate(struct pp_qos_dev *qos_dev, int *id);
-
-
-/**
- * pp_qos_queue_remove() -	Remove a queue
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   queue's id obtained from queue_allocate
- *
- * Note: client should make sure that queue is empty and
- *       that new packets are not enqueued, by calling
- *       pp_qos_queue_disable and pp_qos_queue_flush
- *
- * Return: 0 on success
- */
-int pp_qos_queue_remove(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_set() - Set queue configuration
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- * @conf: new configuration for the queue
- *
- * Return: 0 on success
- */
-int pp_qos_queue_set(struct pp_qos_dev *qos_dev, int id,
-		     const struct pp_qos_queue_conf *conf);
-
-
-/**
- * pp_qos_queue_suspend() - Remove queue from scheduling (doesn't get bandwidth)
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_queue_suspend(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_resume() -	Return queue to be scheduleable
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   queue's id obtained from queue_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_queue_resume(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_block() - All new packets enqueue to this queue will be dropped
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   queue's id obtained from queue_allocate
- *
- * Note	- already enqueued descriptors will be transmitted
- *
- * Return: 0 on success
- */
-int pp_qos_queue_block(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_unblock() - Unblock enqueuing of new packets
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_queue_unblock(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_flush() - Drop all enqueued packets
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_queue_flush(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_queue_conf_get() - Get queue current configuration
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * id:	  port's id obtained from queue_allocate
- * @conf: pointer to struct to be filled with queue's configuration
- *
- * Return: 0 on success
- */
-int pp_qos_queue_conf_get(struct pp_qos_dev *qos_dev, int id,
-			  struct pp_qos_queue_conf *conf);
-
-
-/**
- * pp_qos_queue_info_get() - Get information about queue
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- * @info: pointer to struct to be filled with queue's info
- *
- * Return: 0 on success
- */
-int pp_qos_queue_info_get(struct pp_qos_dev *qos_dev, int id,
-			  struct pp_qos_queue_info *info);
-
-
-/**
- * pp_qos_queue_stat_get() - Get queue's statistics
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  queue's id obtained from queue_allocate
- * @stat: pointer to struct to be filled with queue's statistics
- *
- * Return: 0 on success
- */
-int pp_qos_queue_stat_get(struct pp_qos_dev *qos_dev, int id,
-		       struct pp_qos_queue_stat *stat);
-
-
-/******************************************************************************/
-/*                                 SCHEDULER                                  */
-/******************************************************************************/
-
-/* TODO */
-struct pp_qos_sched_stat {
-};
-
-/**
- * @struct pp_qos_sched_conf - Configuration structure for scheduler
- * @qos_dev:               handle to qos device instance obtained
- *                     previously from qos_dev_init
- * @common_prop:       common properties as a node
- * @sched_parent_prop: properties as a parent node
- * @sched_child_node:  properties as a child node
- */
-struct pp_qos_sched_conf {
-	struct pp_qos_common_node_properties common_prop;
-	struct pp_qos_parent_node_properties sched_parent_prop;
-	struct pp_qos_child_node_properties  sched_child_prop;
-};
-
-
-/**
- * @struct pp_qos_sched_info - Scheduler information struct
- * @qos_dev:                    handle to qos device instance obtained
- *                          previously from qos_dev_init
- * @port_id:		    port fed by this scheduler
- * @physical_id:	    node id on scheduling tree
- */
-struct pp_qos_sched_info {
-	int port_id;
-	int physical_id;
-};
-
-
-/**
- * pp_qos_sched_conf_set_default() - Init scheduler config with defaults
- * @conf: pointer to client allocated struct which will be filled with
- *        default values
- *
- * Defaults values:
- *    suspended - no
- *    bandwidth_limit - 0
- *    shared_bandwidth_limit - 0
- *    arbitration - WSP
- *    best_effort_enable - no
- *    parent - 0
- *    priority - 0
- *    bandwidth_share - 0
- */
-void pp_qos_sched_conf_set_default(struct pp_qos_sched_conf *conf);
-
-
-/**
- * pp_qos_sched_allocate() - Allocate a resource for a new scheduler
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   upon success - holds new scheduler's id
- *
- * Return:  0 on success.
- */
-int pp_qos_sched_allocate(struct pp_qos_dev *qos_dev, int *id);
-
-
-/**
- * pp_qos_sched_remove() -	Remove scheduler
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  scheduler's id obtained from sched_allocate
- *
- * Return:	0 on success
- */
-int pp_qos_sched_remove(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_sched_set() - Set scheduler configuration
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  scheduler's id obtained from sched_allocate
- * @conf: new configuration for the scheduler
- *
- * Note: must init conf by pp_qos_sched_conf_init before calling
- *       this function.
- *
- * Return: 0 on success
- */
-int pp_qos_sched_set(struct pp_qos_dev *qos_dev, int id,
-		     const struct pp_qos_sched_conf *conf);
-
-
-/**
- * pp_qos_sched_suspend() - Remove scheduler from scheduling
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  scheduler's id obtained from sched_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_sched_suspend(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_sched_resume() - Return scheduler and its decendants scheduleable
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:   scheduler's id obtained from sched_allocate
- *
- * Return: 0 on success
- */
-int pp_qos_sched_resume(struct pp_qos_dev *qos_dev, int id);
-
-
-/**
- * pp_qos_sched_conf_get() - Get sched current configuration
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * id:	  port's id obtained from port_allocate
- * @conf: pointer to struct to be filled with sched's configuration
- *
- * Return: 0 on success
- */
-int pp_qos_sched_conf_get(struct pp_qos_dev *qos_dev, int id,
-		       struct pp_qos_sched_conf *conf);
-
-
-/**
- * pp_qos_sched_info_get() - Get information about scheduler
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  scheduler's id obtained from sched_allocate
- * @info: pointer to struct to be filled with scheduler's info
- *
- * Return: 0 on success
- */
-int pp_qos_sched_info_get(struct pp_qos_dev *qos_dev, int id,
-			  struct pp_qos_sched_info *info);
-
-
-/**
- * pp_qos_sched_get_queues() - Get all scheduler's queues
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	        scheduler's id obtained from sched_allocate
- * @queue_ids:  client allocated array that will hold queues ids
- * @size:       size of queue_ids
- * @queues_num: holds the number of queues
- *
- * Note queues_num can be bigger than size
- *
- * Return: 0 on success
- */
-int pp_qos_sched_get_queues(struct pp_qos_dev *qos_dev, int id,
-			    uint16_t *queue_ids, unsigned size,
-			    unsigned *queues_num);
-
-
-/**
- * pp_qos_sched_stat_get() - Get scheduler's statistics
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	  scheduler's id obtained from sched_allocate
- * @info: pointer to struct to be filled with scheduler's statistics
- *
- * Return:
- */
-int pp_qos_sched_stat_get(struct pp_qos_dev *qos_dev, int id,
-			  struct pp_qos_sched_stat *stat);
-
-
-/******************************************************************************/
-/*                                 SHARED_LIMIT                               */
-/******************************************************************************/
-
-/**
- * qos_shared_limit_group_add() - Add new shared bandwidth group
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @limit: bandwidth limit in Mbps
- * @id:    upon success, new group's id
- *
- * Return: 0 on success
- */
-int qos_shared_limit_group_add(struct pp_qos_dev *qos_dev, unsigned limit,
-			       unsigned *id);
-
-
-/**
- * qos_shared_limit_group_remove() - Remove shared bandwidth group
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id: group's id previously obtained when adding group
- *
- * Return: 0 on success
- */
-int qos_shared_limit_group_remove(struct pp_qos_dev *qos_dev, unsigned id);
-
-
-/**
- * qos_shared_limit_group_modify() - Modify shared bandwidth group
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id: group's id previously obtained when adding group
- * @limit: bandwidth limit in Mbps
- *
- * Return: 0 on success
- */
-int qos_shared_limit_group_modify(struct pp_qos_dev *qos_dev,
-				  unsigned id, unsigned limit);
-
-
-/**
- * qos_shared_limit_group_get_members() - Get group's members
- * @qos_dev: handle to qos device instance obtained previously from qos_dev_init
- * @id:	         scheduler's id obtained from sched_allocate
- * @queue_ids:   client allocated array that will hold queues ids
- * @size:        size of queue_ids
- * @members_num: holds the number of queues
- *
- * Note members num can be bigger than size
- *
- * Return: 0 on success
- */
-int qos_shared_limit_group_get_nodes(struct pp_qos_dev *qos_dev, int id,
-				     uint16_t *queue_ids, unsigned size,
-				     unsigned *members_num);
-
-
-
-/******************************************************************************/
-/*                                  INIT	                              */
-/******************************************************************************/
-
-/**
- * @struct pp_qos_init_param - qos subsystem initialization struct
- * @max_port:             highest port number that should be used by this
- *                        system.
- * @qm_base_addr:         start address of DDR memory for saving packets
- *                        descriptors
- * @qm_num_of_pages:      number of  DDR memory pages
- * @wred_prioritized_pop: 1 - give strict priority to pop indications over push
- *                        requests.
- *                        0 - round robin between the two
- * @wred_p_const:         WRED algorithm p, used for calculating avg queue depth
- *	                  avg = (old_avg*(1-p)) + (curr_avg*p)
- *	                  p is taken to b wred_p_const / 1023
- *	                  wred_p_const should be in the range 0..1023
- * @reserved_ports:       These ports are reserved for client to explicity
- *                        select the physical_port ithat will be used when
- *                        adding a new port. All other non reserved ports are
- *                        managed by the system, i.e. when client adds a new
- *                        port system will select a new free port from the
- *                        unreserved ports.
- *                        To reserve a port its value on the array should be
- *                        not 0
- */
-struct pp_qos_init_param {
-	unsigned max_port;
-	void	*qm_base_addr;
-	unsigned qm_num_of_pages;
-	unsigned wred_prioritized_pop;
-	unsigned wred_p_const;
-	#define MAX_PORTS    128
-	uint8_t reserved_ports[MAX_PORTS];
-};
-
-
-/**
- * qos_dev_init() -	Initialize of a qos device
- * @conf: Initialization configuration
- *
- * Return: on success handle to qos device
- *         NULL otherwise
- */
-struct pp_qos_dev *qos_dev_init(struct pp_qos_init_param *conf);
-
-
-#endif
-
+/*
+ * GPL LICENSE SUMMARY
+ *
+ *  Copyright(c) 2017 Intel Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *  The full GNU General Public License is included in this distribution
+ *  in the file called LICENSE.GPL.
+ *
+ *  Contact Information:
+ *  Intel Corporation
+ *  2200 Mission College Blvd.
+ *  Santa Clara, CA  97052
+ */
+#ifndef _PP_QOS_API_H
+#define _PP_QOS_API_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+
+/******************************************************************************/
+/*                                 GENERAL                                    */
+/******************************************************************************/
+
+/**
+ * DOC: Calling context
+ *
+ * The API exported by this driver must be called only from a thread context.
+ * Calling it from interrupt/tasklet/bottom half context is not allowed and
+ * will result in unexpected behaviour.
+ */
+
+/**
+ * DOC: Scheduling model
+ *
+ * Ports, schedulers and queues are modeled as nodes of a scheduling tree.
+ * Each node's properties are classified into the following categories:
+ * - Common properties - relevant to all nodes
+ * - Parent properties - relevant only to parent nodes e.g. arbitration scheme
+ *                       between children
+ * - Children properties - relevant only to child nodes e.g. node's parent
+ *
+ * Note that some nodes can be both parents and children, e.g. schedulers
+ */
+
+/**
+ * DOC: Arbitration
+ *
+ * The way a parent arbitrates between its children can be either strict
+ * priority or round robin.
+ *
+ * A WRR parent round-robins between its children - each child gets its turn -
+ * as if all children had the same priority. The number of children for this
+ * parent type is limited only by the number of nodes (> 1024).
+ *
+ * A WSP parent employs strict priority arbitration on its children - a child
+ * will get scheduled only if there is no other sibling with higher priority
+ * that can be scheduled. A WSP parent can have 8 direct children at most, and
+ * each child must have a different priority. To configure more than 8 children
+ * for a WSP parent, or to have several children with the same priority,
+ * intermediate schedulers should be used.
+ *
+ * Example of 5 queues q0, q1, q2, q3, q4 with priorities 0, 1, 2, 3, 3
+ *
+ *                     Port arbitrating WSP
+ *                     /   |    |     \
+ *                    q0   q1  q2  Sched arbitrating WRR
+ *                                         |	      |
+ *                                        q3          q4
+ *
+ *
+ * Example of 16 queues q0 to q15, each with a different priority
+ *
+ *		      Port arbitrating WSP
+ *		         |	       |
+ *	    Sched0 arbitrating WSP   Sched1 arbitrating WSP
+ *	       /               \       /                   \
+ *	   q0 q1 q2 q3 q4 q5 q6 q7  q8 q9 q10 q11 q12 q13 q14 q15
+ */
+
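+/*
+ * Illustrative sketch (not taken from the driver sources) of building the
+ * first example above with the API declared in this file.  "qdev" is assumed
+ * to be a handle obtained from pp_qos_dev_open; error handling and WRED
+ * configuration are omitted:
+ *
+ *	unsigned int port, sched, q[5], i;
+ *	struct pp_qos_port_conf pconf;
+ *	struct pp_qos_sched_conf sconf;
+ *	struct pp_qos_queue_conf qconf;
+ *
+ *	pp_qos_port_allocate(qdev, ALLOC_PORT_ID, &port);
+ *	pp_qos_port_conf_set_default(&pconf);
+ *	pconf.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WSP;
+ *	pp_qos_port_set(qdev, port, &pconf);
+ *
+ *	q0, q1, q2 feed the port directly with priorities 0, 1, 2:
+ *	for (i = 0; i < 3; i++) {
+ *		pp_qos_queue_allocate(qdev, &q[i]);
+ *		pp_qos_queue_conf_set_default(&qconf);
+ *		qconf.queue_child_prop.parent = port;
+ *		qconf.queue_child_prop.priority = i;
+ *		pp_qos_queue_set(qdev, q[i], &qconf);
+ *	}
+ *
+ *	a WRR scheduler at priority 3 aggregates q3 and q4:
+ *	pp_qos_sched_allocate(qdev, &sched);
+ *	pp_qos_sched_conf_set_default(&sconf);
+ *	sconf.sched_parent_prop.arbitration = PP_QOS_ARBITRATION_WRR;
+ *	sconf.sched_child_prop.parent = port;
+ *	sconf.sched_child_prop.priority = 3;
+ *	pp_qos_sched_set(qdev, sched, &sconf);
+ *	for (i = 3; i < 5; i++) {
+ *		pp_qos_queue_allocate(qdev, &q[i]);
+ *		pp_qos_queue_conf_set_default(&qconf);
+ *		qconf.queue_child_prop.parent = sched;
+ *		pp_qos_queue_set(qdev, q[i], &qconf);
+ *	}
+ */
+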
+/**
+ * enum pp_qos_arbitration - how a parent arbitrates between its children
+ * @PP_QOS_ARBITRATION_WSP: strict priority
+ * @PP_QOS_ARBITRATION_WRR: round robin
+ * @PP_QOS_ARBITRATION_WFQ: weighted fair queueing
+ */
+enum pp_qos_arbitration {
+	PP_QOS_ARBITRATION_WSP,
+	PP_QOS_ARBITRATION_WRR,
+	PP_QOS_ARBITRATION_WFQ,
+};
+
+
+/**
+ * struct pp_qos_common_node_properties - Properties common to all nodes
+ * @suspended:		    is node scheduling suspended
+ * @bandwidth_limit:	    bandwidth limit in Mbps
+ * @shared_bandwidth_group: shared bandwidth group
+ */
+struct pp_qos_common_node_properties {
+	int	 suspended;
+
+	#define QOS_NO_BANDWIDTH_LIMIT          0
+	#define QOS_MAX_BANDWIDTH_LIMIT         0xFFFF
+	unsigned int bandwidth_limit;
+
+	#define QOS_NO_SHARED_BANDWIDTH_GROUP   0x0
+	#define QOS_MAX_SHARED_BANDWIDTH_GROUP  511
+	unsigned int shared_bandwidth_group;
+};
+
+
+/**
+ * @struct pp_qos_parent_node_properties - Properties relevant for parent node
+ * @arbitration:	how parent node arbitrates between its children
+ * @best_effort_enable: whether best effort scheduling is enabled for this
+ *                      node's children.
+ */
+struct pp_qos_parent_node_properties {
+	enum pp_qos_arbitration arbitration;
+	int  best_effort_enable;
+};
+
+
+/**
+ * @struct pp_qos_child_node_properties - Properties relevant for child node
+ * @parent:		parent's id
+ * @priority:		strict priority, relevant only if parent uses wsp
+ *                      arbitration
+ * @bandwidth_share:	bandwidth percentage from parent
+ */
+struct pp_qos_child_node_properties {
+	unsigned int	parent;
+	#define QOS_MAX_CHILD_PRIORITY 7
+	unsigned int	priority;
+	unsigned int	bandwidth_share;
+};
+
+
+struct pp_qos_dev;
+
+/******************************************************************************/
+/*                                      PORTS                                 */
+/******************************************************************************/
+
+/**
+ * @struct pp_qos_port_stat - Statistics per port
+ * @reset:              should statistics be reset after reading
+ * @total_green_bytes:  total green bytes currently in all queues
+ *                      feeding this port
+ * @total_yellow_bytes: total yellow bytes currently in all queues
+ *                      feeding this port
+ */
+struct pp_qos_port_stat {
+	int reset;
+	unsigned int total_green_bytes;
+	unsigned int total_yellow_bytes;
+};
+
+/**
+ * @struct pp_qos_port_conf - Configuration structure for port
+ * @common_prop:		common properties
+ * @port_parent_prop:		properties as a parent
+ * @ring_address:		address of ring.
+ * @ring_size:			size of ring.
+ * @packet_credit_enable:	packet credit mode (byte credit disabled).
+ * @credit:		        amount of credit to add to the port;
+ *                              when packet_credit is enabled this designates
+ *                              packet credit, otherwise byte credit
+ */
+struct pp_qos_port_conf {
+	struct pp_qos_common_node_properties common_prop;
+	struct pp_qos_parent_node_properties port_parent_prop;
+	void     *ring_address;
+	unsigned int ring_size;
+	int      packet_credit_enable;
+	unsigned int credit;
+};
+
+
+/**
+ * @struct pp_qos_port_info -  Port's information
+ * @physical_id:   node id on scheduling tree.
+ * @num_of_queues: number of queues feeding this port.
+ */
+struct pp_qos_port_info {
+	unsigned int physical_id;
+	unsigned int num_of_queues;
+};
+
+
+/**
+ * pp_qos_port_conf_set_default() - Init port configuration with default values
+ * @conf: pointer to client allocated struct. This struct will be filled with
+ * default values.
+ *
+ * Default values:
+ *    suspended - no
+ *    bandwidth_limit - QOS_NO_BANDWIDTH_LIMIT
+ *    shared_bandwidth_group - QOS_NO_SHARED_BANDWIDTH_GROUP
+ *    arbitration - WSP
+ *    best_effort_enable - no
+ *    packet_credit_enable - yes (byte credit disabled)
+ *    credit - 0
+ *    ring_size - 0
+ *    ring_address - 0
+ */
+void pp_qos_port_conf_set_default(struct pp_qos_port_conf *conf);
+
+
+#define ALLOC_PORT_ID 0xFFFF
+/**
+ * pp_qos_port_allocate() - Allocate a resource for a new port
+ * @qos_dev:     handle to qos device instance obtained previously
+ *               from pp_qos_dev_open
+ * @physical_id: if equal to ALLOC_PORT_ID, the library allocates
+ *               a free port id for the new port; otherwise it must
+ *               be one of the reserved port ids that were
+ *               configured at qos_init
+ * @id:          upon success holds new port's id
+ *
+ * Return: 0 on success.
+ */
+int pp_qos_port_allocate(struct pp_qos_dev *qos_dev, unsigned int physical_id,
+			 unsigned int *id);
+
+
+/**
+ * pp_qos_port_remove() - Remove port
+ * @qos_dev: handle to qos hardware instance.
+ * @id: port's id obtained from port_allocate
+ *
+ * Return: 0 on success.
+ */
+int pp_qos_port_remove(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_port_set() - Set port configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:    port's id obtained from port_allocate
+ * @conf:  port configuration.
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		    const struct pp_qos_port_conf *conf);
+
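+/*
+ * Typical port bring-up, as an illustrative sketch; "qdev" and
+ * "tx_ring_base" are assumptions and error handling is omitted:
+ *
+ *	struct pp_qos_port_conf conf;
+ *	unsigned int id;
+ *
+ *	pp_qos_port_allocate(qdev, ALLOC_PORT_ID, &id);
+ *	pp_qos_port_conf_set_default(&conf);
+ *	conf.ring_address = tx_ring_base;
+ *	conf.ring_size = 256;
+ *	conf.credit = 4;
+ *	pp_qos_port_set(qdev, id, &conf);
+ */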
+
+/**
+ * pp_qos_port_suspend() - Remove port's descendants from scheduling
+ * @qos_dev: handle to qos hardware instance.
+ * @id:  port's id obtained from port_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_suspend(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_port_resume() - Resume scheduling of port and its descendants
+ * @qos_dev: handle to qos hardware instance.
+ * @id:  port's id obtained from port_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_resume(struct pp_qos_dev *qos_dev, unsigned int id);
+
+/**
+ * pp_qos_port_block() - All new packets enqueued to queues feeding this port
+ *                    will be dropped
+ * @qos_dev: handle to qos hardware instance.
+ * @id:  port's id obtained from port_allocate
+ *
+ * Note	- already enqueued descriptors will be transmitted
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_block(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_port_unblock() - Unblock enqueuing of new packets to queues that
+ *                      feed this port
+ * @qos_dev: handle to qos hardware instance.
+ * @id:	 port's id obtained from port_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_unblock(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_port_flush() - Drop all enqueued packets on queues under this port
+ * @qos_dev: handle to qos hardware instance.
+ * @id:	 port's id obtained from port_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_flush(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_port_conf_get() - Get port configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  port's id obtained from port_allocate
+ * @conf: pointer to struct to be filled with port's configuration
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_conf_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			 struct pp_qos_port_conf *conf);
+
+
+/**
+ * pp_qos_port_info_get() - Get extra information about port
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  port's id obtained from port_allocate
+ * @info: pointer to struct to be filled with port's info
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_info_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			 struct pp_qos_port_info *info);
+
+
+/**
+ * pp_qos_port_get_queues() - Get all port's queues
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	        port's id obtained from port_allocate
+ * @queue_ids:  client allocated array that will hold queues ids
+ * @size:       size of queue_ids
+ * @queues_num: holds the number of queues
+ *
+ * Note queues_num can be bigger than size
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_get_queues(struct pp_qos_dev *qos_dev, unsigned int id,
+			   uint16_t *queue_ids, unsigned int size,
+			   unsigned int *queues_num);
+
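+/*
+ * Because queues_num may exceed the supplied size, a caller that wants
+ * every id can probe first and then re-query.  An illustrative sketch,
+ * assuming "qdev" and "port_id" exist and ignoring errors:
+ *
+ *	uint16_t probe, *ids;
+ *	unsigned int num;
+ *
+ *	pp_qos_port_get_queues(qdev, port_id, &probe, 1, &num);
+ *	ids = kmalloc_array(num, sizeof(*ids), GFP_KERNEL);
+ *	pp_qos_port_get_queues(qdev, port_id, ids, num, &num);
+ */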
+
+/**
+ * pp_qos_port_stat_get() - Get port's statistics
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  port's id obtained from port_allocate
+ * @stat: pointer to struct to be filled with port's statistics
+ *
+ * Return: 0 on success
+ */
+int pp_qos_port_stat_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			 struct pp_qos_port_stat *stat);
+
+
+/******************************************************************************/
+/*                                    QUEUES                                  */
+/******************************************************************************/
+
+/**
+ * @struct pp_qos_queue_stat - Statistics per queue
+ * @reset:		       Should statistics be reset after reading
+ * @queue_packets_occupancy:   Packets currently in queue
+ * @queue_bytes_occupancy:     Bytes currently in queue
+ * @total_packets_accepted:    Packets accepted by WRED
+ * @total_packets_dropped:     Packets dropped by WRED for any reason
+ * @total_packets_red_dropped: Packets dropped by WRED due to being red
+ * @total_bytes_accepted:      Bytes accepted by WRED
+ * @total_bytes_dropped:       Bytes dropped by WRED
+ */
+struct pp_qos_queue_stat {
+	int reset;
+	unsigned int	queue_packets_occupancy;
+	unsigned int	queue_bytes_occupancy;
+	unsigned int	total_packets_accepted;
+	unsigned int	total_packets_dropped;
+	unsigned int	total_packets_red_dropped;
+	uint64_t	total_bytes_accepted;
+	uint64_t	total_bytes_dropped;
+};
+
+/**
+ * @struct pp_qos_queue_conf - Configuration structure for queue
+ * @common_prop:		common properties
+ * @queue_child_prop:           properties as a child node
+ * @max_burst:		        max burst in Kbps
+ * @blocked:			queue will drop new enqueued packets
+ * @aliased:			queue will be aliased by another queue
+ * @wred_enable:		WRED is applied on this queue, otherwise only
+ *                              min and max values are used
+ * @wred_fixed_drop_prob_enable: use fixed drop probability for WRED instead of
+ *                              slope
+ * @queue_wred_min_avg_green:
+ * @queue_wred_max_avg_green:
+ * @queue_wred_slope_green:
+ * @queue_wred_fixed_drop_prob_green:
+ * @queue_wred_min_avg_yellow:
+ * @queue_wred_max_avg_yellow:
+ * @queue_wred_slope_yellow:
+ * @queue_wred_fixed_drop_prob_yellow:
+ * @queue_wred_min_guaranteed:
+ * @queue_wred_max_allowed:
+ */
+struct pp_qos_queue_conf {
+	struct pp_qos_common_node_properties common_prop;
+	struct pp_qos_child_node_properties  queue_child_prop;
+	unsigned int max_burst;
+	int      blocked;
+	int      aliased;
+	int	 wred_enable;
+	int      wred_fixed_drop_prob_enable;
+	unsigned int queue_wred_min_avg_green;
+	unsigned int queue_wred_max_avg_green;
+	unsigned int queue_wred_slope_green;
+	unsigned int queue_wred_fixed_drop_prob_green;
+	unsigned int queue_wred_min_avg_yellow;
+	unsigned int queue_wred_max_avg_yellow;
+	unsigned int queue_wred_slope_yellow;
+	unsigned int queue_wred_fixed_drop_prob_yellow;
+	unsigned int queue_wred_min_guaranteed;
+	unsigned int queue_wred_max_allowed;
+};
+
+
+/**
+ * @struct pp_qos_queue_info - Queue information struct
+ * @port_id:	 port fed by this queue
+ * @physical_id: queue (ready list) number
+ * @alias_master_id: Id of the queue which is aliased by this queue,
+ *                   PP_QOS_INVALID_ID if this queue does not alias any queue
+ * @alias_slave_id: Id of the queue aliasing this queue, PP_QOS_INVALID_ID
+ *                  if this queue is not aliased by any queue.
+ *
+ */
+#define PP_QOS_INVALID_ID 0xFFFF
+struct pp_qos_queue_info {
+	unsigned int port_id;
+	unsigned int physical_id;
+	unsigned int alias_slave_id;
+	unsigned int alias_master_id;
+};
+
+
+/**
+ * pp_qos_queue_conf_set_default() - Init configuration with default values
+ * @conf: pointer to client allocated struct. This struct will be filled with
+ * default values.
+ *
+ * Default values:
+ *    suspended - no
+ *    max_burst - 0
+ *    bandwidth_limit - 0
+ *    shared_bandwidth_group - 0
+ *    parent - 0
+ *    priority - 0
+ *    bandwidth_share - 0
+ *    wred_enable - no
+ *    all WRED params - 0
+ */
+void pp_qos_queue_conf_set_default(struct pp_qos_queue_conf *conf);
+
+
+/**
+ * pp_qos_queue_allocate() - Allocate a resource for a new queue
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   upon success holds new queue's id
+ *
+ * Return:  0 on success.
+ */
+int pp_qos_queue_allocate(struct pp_qos_dev *qos_dev, unsigned int *id);
+
+
+/**
+ * pp_qos_queue_remove() -	Remove a queue
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Note: client should make sure that the queue is empty and
+ *       that new packets are not enqueued, by calling
+ *       pp_qos_queue_block and pp_qos_queue_flush (see the sketch below)
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_remove(struct pp_qos_dev *qos_dev, unsigned int id);
+
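+/*
+ * A sketch of the removal sequence suggested in the note above
+ * (error handling omitted; "qdev" and "id" are assumed):
+ *
+ *	pp_qos_queue_block(qdev, id);
+ *	pp_qos_queue_flush(qdev, id);
+ *	pp_qos_queue_remove(qdev, id);
+ */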
+
+/**
+ * pp_qos_queue_set() - Set queue configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ * @conf: new configuration for the queue
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_queue_conf *conf);
+
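+/*
+ * Sketch of creating a WRED-enabled queue under an existing parent
+ * node; "qdev" and "parent_id" are assumed and the threshold values
+ * are arbitrary placeholders, not recommended settings:
+ *
+ *	struct pp_qos_queue_conf conf;
+ *	unsigned int id;
+ *
+ *	pp_qos_queue_allocate(qdev, &id);
+ *	pp_qos_queue_conf_set_default(&conf);
+ *	conf.queue_child_prop.parent = parent_id;
+ *	conf.wred_enable = 1;
+ *	conf.queue_wred_min_avg_green = 4000;
+ *	conf.queue_wred_max_avg_green = 16000;
+ *	conf.queue_wred_slope_green = 90;
+ *	pp_qos_queue_set(qdev, id, &conf);
+ */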
+
+/**
+ * pp_qos_queue_suspend() - Remove queue from scheduling (doesn't get bandwidth)
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_suspend(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_queue_resume() - Make queue schedulable again
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_resume(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_queue_block() - All new packets enqueued to this queue will be dropped
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   queue's id obtained from queue_allocate
+ *
+ * Note	- already enqueued descriptors will be transmitted
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_block(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_queue_unblock() - Unblock enqueuing of new packets
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_unblock(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_queue_flush() - Drop all enqueued packets
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_flush(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_queue_conf_get() - Get queue current configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ * @conf: pointer to struct to be filled with queue's configuration
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_conf_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_conf *conf);
+
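+/*
+ * Read-modify-write sketch for changing a single parameter (here the
+ * bandwidth limit, in Mbps) without disturbing the rest of the queue's
+ * configuration; "qdev" and "id" assumed, errors ignored:
+ *
+ *	struct pp_qos_queue_conf conf;
+ *
+ *	pp_qos_queue_conf_get(qdev, id, &conf);
+ *	conf.common_prop.bandwidth_limit = 100;
+ *	pp_qos_queue_set(qdev, id, &conf);
+ */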
+
+/**
+ * pp_qos_queue_info_get() - Get information about queue
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ * @info: pointer to struct to be filled with queue's info
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_info_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_info *info);
+
+
+/**
+ * pp_qos_queue_stat_get() - Get queue's statistics
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  queue's id obtained from queue_allocate
+ * @stat: pointer to struct to be filled with queue's statistics
+ *
+ * Return: 0 on success
+ */
+int pp_qos_queue_stat_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_queue_stat *stat);
+
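+/*
+ * Statistics polling sketch; setting reset asks the driver to clear the
+ * running counters after they are read ("qdev" and "id" assumed):
+ *
+ *	struct pp_qos_queue_stat stat = { .reset = 1 };
+ *
+ *	pp_qos_queue_stat_get(qdev, id, &stat);
+ *	pr_info("dropped %u, accepted %u\n",
+ *		stat.total_packets_dropped, stat.total_packets_accepted);
+ */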
+
+/******************************************************************************/
+/*                                 SCHEDULER                                  */
+/******************************************************************************/
+
+/**
+ * @struct pp_qos_sched_conf - Configuration structure for scheduler
+ * @common_prop:       common properties as a node
+ * @sched_parent_prop: properties as a parent node
+ * @sched_child_prop:  properties as a child node
+ */
+struct pp_qos_sched_conf {
+	struct pp_qos_common_node_properties common_prop;
+	struct pp_qos_parent_node_properties sched_parent_prop;
+	struct pp_qos_child_node_properties  sched_child_prop;
+};
+
+
+/**
+ * @struct pp_qos_sched_info - Scheduler information struct
+ * @port_id:		    port fed by this scheduler
+ * @num_of_queues:	    number of queues feeding this scheduler.
+ */
+struct pp_qos_sched_info {
+	unsigned int port_id;
+	unsigned int num_of_queues;
+};
+
+
+/**
+ * pp_qos_sched_conf_set_default() - Init scheduler config with defaults
+ * @conf: pointer to client allocated struct which will be filled with
+ *        default values
+ *
+ * Default values:
+ *    suspended - no
+ *    bandwidth_limit - 0
+ *    shared_bandwidth_group - 0
+ *    arbitration - WSP
+ *    best_effort_enable - no
+ *    parent - 0
+ *    priority - 0
+ *    bandwidth_share - 0
+ */
+void pp_qos_sched_conf_set_default(struct pp_qos_sched_conf *conf);
+
+
+/**
+ * pp_qos_sched_allocate() - Allocate a resource for a new scheduler
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   upon success - holds new scheduler's id
+ *
+ * Return:  0 on success.
+ */
+int pp_qos_sched_allocate(struct pp_qos_dev *qos_dev, unsigned int *id);
+
+
+/**
+ * pp_qos_sched_remove() -	Remove scheduler
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  scheduler's id obtained from sched_allocate
+ *
+ * Return:	0 on success
+ */
+int pp_qos_sched_remove(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_sched_set() - Set scheduler configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  scheduler's id obtained from sched_allocate
+ * @conf: new configuration for the scheduler
+ *
+ * Note: conf must be initialized with pp_qos_sched_conf_set_default
+ *       before calling this function.
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_set(struct pp_qos_dev *qos_dev, unsigned int id,
+		     const struct pp_qos_sched_conf *conf);
+
+
+/**
+ * pp_qos_sched_suspend() - Remove scheduler from scheduling
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  scheduler's id obtained from sched_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_suspend(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_sched_resume() - Resume scheduling of scheduler and its descendants
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   scheduler's id obtained from sched_allocate
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_resume(struct pp_qos_dev *qos_dev, unsigned int id);
+
+
+/**
+ * pp_qos_sched_conf_get() - Get sched current configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  scheduler's id obtained from sched_allocate
+ * @conf: pointer to struct to be filled with sched's configuration
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_conf_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_sched_conf *conf);
+
+
+/**
+ * pp_qos_sched_info_get() - Get information about scheduler
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	  scheduler's id obtained from sched_allocate
+ * @info: pointer to struct to be filled with scheduler's info
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_info_get(struct pp_qos_dev *qos_dev, unsigned int id,
+			  struct pp_qos_sched_info *info);
+
+
+/**
+ * pp_qos_sched_get_queues() - Get all scheduler's queues
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	        scheduler's id obtained from sched_allocate
+ * @queue_ids:  client allocated array that will hold queues ids
+ * @size:       size of queue_ids
+ * @queues_num: holds the number of queues
+ *
+ * Note queues_num can be bigger than size
+ *
+ * Return: 0 on success
+ */
+int pp_qos_sched_get_queues(struct pp_qos_dev *qos_dev, unsigned int id,
+			    uint16_t *queue_ids, unsigned int size,
+			    unsigned int *queues_num);
+
+/******************************************************************************/
+/*                                 SHARED_LIMIT                               */
+/******************************************************************************/
+
+/**
+ * pp_qos_shared_limit_group_add() - Add new shared bandwidth group
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @limit: bandwidth limit in Mbps
+ * @id:    upon success, new group's id
+ *
+ * Return: 0 on success
+ */
+int pp_qos_shared_limit_group_add(struct pp_qos_dev *qos_dev,
+				  unsigned int limit, unsigned int *id);
+
+
+/**
+ * pp_qos_shared_limit_group_remove() - Remove shared bandwidth group
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id: group's id previously obtained when adding group
+ *
+ * Return: 0 on success
+ */
+int pp_qos_shared_limit_group_remove(struct pp_qos_dev *qos_dev,
+				     unsigned int id);
+
+
+/**
+ * pp_qos_shared_limit_group_modify() - Modify shared bandwidth group
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id: group's id previously obtained when adding group
+ * @limit: bandwidth limit in Mbps
+ *
+ * Return: 0 on success
+ */
+int pp_qos_shared_limit_group_modify(struct pp_qos_dev *qos_dev,
+				     unsigned int id, unsigned int limit);
+
+
+/**
+ * pp_qos_shared_limit_group_get_members() - Get group's members
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:	         shared bandwidth group id
+ * @members:     client allocated array that will hold members' ids
+ * @size:        size of members
+ * @members_num: holds the number of members
+ *
+ * Notes:
+ * members_num can be bigger than size
+ * the function iterates over the tree, so it is rather slow
+ *
+ * Return: 0 on success
+ */
+int pp_qos_shared_limit_group_get_members(struct pp_qos_dev *qos_dev,
+					  unsigned int id, uint16_t *members,
+					  unsigned int size,
+					  unsigned int *members_num);
+
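+/*
+ * Shared bandwidth group lifecycle sketch.  A node joins a group
+ * through common_prop.shared_bandwidth_group in its configuration;
+ * "qdev" and "qid" are assumed and the Mbps limits are placeholders:
+ *
+ *	unsigned int grp;
+ *	struct pp_qos_queue_conf conf;
+ *
+ *	pp_qos_shared_limit_group_add(qdev, 500, &grp);
+ *	pp_qos_queue_conf_get(qdev, qid, &conf);
+ *	conf.common_prop.shared_bandwidth_group = grp;
+ *	pp_qos_queue_set(qdev, qid, &conf);
+ *
+ *	pp_qos_shared_limit_group_modify(qdev, grp, 800);
+ *	pp_qos_shared_limit_group_remove(qdev, grp);
+ */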
+
+
+/******************************************************************************/
+/*                                  INIT	                              */
+/******************************************************************************/
+
+/**
+ * pp_qos_dev_open() - Open a qos device instance
+ * @id: Identifies the requested qos instance (same id that
+ *      designates the instance in the device tree)
+ *
+ * Return: on success handle to qos device
+ *         NULL otherwise
+ */
+struct pp_qos_dev *pp_qos_dev_open(unsigned int id);
+
+
+/**
+ * @struct pp_qos_init_param - qos subsystem initialization struct
+ * @wred_total_avail_resources: upper bound for the sum of bytes on all queues
+ * @wred_p_const:         WRED algorithm p, used for calculating avg queue depth
+ *                        avg = (old_avg*(1-p)) + (curr_avg*p)
+ *                        p is taken to be wred_p_const / 1023
+ *                        wred_p_const should be in the range 0..1023
+ * @wred_max_q_size:      maximum size in bytes of any queue
+ * @reserved_ports:       These ports are reserved for the client to explicitly
+ *                        select the physical_port that will be used when
+ *                        adding a new port. All other non-reserved ports are
+ *                        managed by the system, i.e. when the client adds a
+ *                        new port the system will select a free port from the
+ *                        unreserved ports.
+ *                        To reserve a port, its entry in the array should be
+ *                        non-zero
+ */
+struct pp_qos_init_param {
+	unsigned int wred_total_avail_resources;
+	unsigned int wred_p_const;
+	unsigned int wred_max_q_size;
+	#define MAX_PORTS    128
+	unsigned int reserved_ports[MAX_PORTS];
+};
+
+
+/**
+ * pp_qos_dev_init() - set qos system wide configuration
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @conf: initialization configuration
+ *
+ * Note
+ * All other functions except pp_qos_dev_open will fail if called
+ * before this function
+ *
+ * Return: 0 on success
+ */
+int pp_qos_dev_init(struct pp_qos_dev *qos_dev, struct pp_qos_init_param *conf);
+
+/**
+ * pp_qos_dev_clean() - clean resources.
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ *
+ * Note:
+ *   qos_dev is invalid after this call
+ */
+void pp_qos_dev_clean(struct pp_qos_dev *qos_dev);
+
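+/*
+ * Device lifecycle sketch; the instance id and the parameter values
+ * below are assumptions, not recommended settings (wred_p_const of
+ * 512 corresponds to p of roughly 0.5 per the formula above):
+ *
+ *	struct pp_qos_init_param param = {0};
+ *	struct pp_qos_dev *qdev;
+ *
+ *	qdev = pp_qos_dev_open(0);
+ *	if (!qdev)
+ *		return -ENODEV;
+ *	param.wred_total_avail_resources = 0x100000;
+ *	param.wred_p_const = 512;
+ *	param.wred_max_q_size = 0x10000;
+ *	pp_qos_dev_init(qdev, &param);
+ *	...
+ *	pp_qos_dev_clean(qdev);
+ */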
+
+/**
+ * pp_qos_get_fw_version() - get fw version
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @major: storage for major part of version
+ * @minor: storage for minor part of version
+ * @build: storage for build part of version
+ */
+int pp_qos_get_fw_version(
+		struct pp_qos_dev *qos_dev,
+		unsigned int *major,
+		unsigned int *minor,
+		unsigned int *build);
+
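+/*
+ * Sketch ("qdev" assumed; a zero return is assumed to mean success,
+ * matching the rest of this API):
+ *
+ *	unsigned int major, minor, build;
+ *
+ *	if (!pp_qos_get_fw_version(qdev, &major, &minor, &build))
+ *		pr_info("qos fw version %u.%u.%u\n", major, minor, build);
+ */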
+
+enum pp_qos_node_type {
+	PPV4_QOS_NODE_TYPE_PORT,
+	PPV4_QOS_NODE_TYPE_SCHED,
+	PPV4_QOS_NODE_TYPE_QUEUE,
+};
+
+#define PPV4_QOS_INVALID (-1U)
+
+
+struct pp_qos_node_num {
+	/*
+	 * Physical node on scheduler's
+	 * node table
+	 */
+	unsigned int phy;
+
+	/*
+	 * Logical id seen by client
+	 */
+	unsigned int id;
+};
+
+/**
+ * struct pp_qos_node_info - node information
+ * @type:		Node's type
+ * @preds:		Ancestors of node (irrelevant to PORT)
+ * @num_of_children:    Number of direct children (irrelevant to QUEUE)
+ * @children:		Direct children of node (irrelevant to QUEUE)
+ * @is_suspended:	Is node suspended
+ * @is_internal:	Is it internal node
+ * @queue_physical_id:	Relevant only for QUEUE
+ * @bw_limit:		In Kbps
+ * @port:		The port (phy) that is fed from this queue
+ *                      (relevant only for QUEUE)
+ *
+ * Note:
+ * Since 0 is a valid value for some of the fields, PPV4_QOS_INVALID is
+ * used to mark a field's value as irrelevant
+ *
+ * All of the above values, except for type, is_internal and logical id,
+ * are taken from HW and not from the driver DB
+ */
+struct pp_qos_node_info {
+	enum pp_qos_node_type type;
+	struct pp_qos_node_num preds[6];
+	struct pp_qos_node_num children[8];
+	int is_suspended;
+	int is_internal;
+	unsigned int queue_physical_id;
+	unsigned int bw_limit;
+	unsigned int port;
+};
+
+/**
+ * pp_qos_get_node_info() - Obtain node's info
+ * @qos_dev: handle to qos device instance obtained from pp_qos_dev_open
+ * @id:   node's id
+ * @info: Storage for info
+ * Return: 0 on success
+ */
+int pp_qos_get_node_info(
+		struct pp_qos_dev *qos_dev,
+		unsigned int id,
+		struct pp_qos_node_info *info);
+
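+/*
+ * Sketch of dumping a node's direct children, skipping slots whose phy
+ * is assumed to be marked PPV4_QOS_INVALID when unused ("qdev" and
+ * "id" assumed, errors ignored):
+ *
+ *	struct pp_qos_node_info info;
+ *	int i;
+ *
+ *	pp_qos_get_node_info(qdev, id, &info);
+ *	for (i = 0; i < 8; i++) {
+ *		if (info.children[i].phy == PPV4_QOS_INVALID)
+ *			continue;
+ *		pr_info("child phy %u id %u\n",
+ *			info.children[i].phy, info.children[i].id);
+ *	}
+ */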
+
+
+#endif
diff --git a/include/net/pp_qos_drv_slim.h b/include/net/pp_qos_drv_slim.h
index 0bf722e58..9ee7de4b7 100644
--- a/include/net/pp_qos_drv_slim.h
+++ b/include/net/pp_qos_drv_slim.h
@@ -302,14 +302,4 @@ s32 qos_node_get_stats(struct qos_node_stats *param);
  **/
 s32 qos_init_subsystem(struct qos_init_param *param);
 
-
-struct pp_qos_dev {
-	int dq_port;
-};
-
-//int pp_qos_queue_remove(struct pp_qos_dev *qos_dev, int id);
-int pp_qos_port_remove(struct pp_qos_dev *qos_dev, int id);
-
-
-
 #endif /* PPQOSDRV_H_ */
-- 
GitLab