diff --git a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
index 192b159e3fcc4790a9fd700ca75a33a7b54995c6..839f464f29d4f4e9fcf157062f409507b69a7cf4 100644
--- a/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
+++ b/drivers/net/ethernet/lantiq/cqm/falconmx/cqm.c
@@ -2906,9 +2906,8 @@ static int bm_init(struct platform_device *pdev)
 		panic("pool %d allocation failed\n", pool);
 	pool++;
 
-#ifdef CONFIG_LTQ_PPV4_BM_SLIM
 	bmgr_driver_init();
-#endif
+
 	for (i = 0; i < CQM_FMX_NUM_BM_POOLS; i++) {
 		p_params.group_id = 0;
 		p_params.num_buffers = bm_pool_conf[i].buf_frm_num;
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
index 0f4b7570d6ede92ca1ce766fb75bc30d2f595683..5f052fdc4af96b4299eb0154da41b7ea6a51536f 100644
--- a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_debugfs.c
@@ -163,6 +163,7 @@ void dump_db(struct bmgr_driver_private *pdata)
 	pr_info("=================================\n");
 }
 
+static u16 policy_pop_test;
 static struct bmgr_buff_info buff_info;
 
 ssize_t bm_commmads_write(void *data, u64 val)
@@ -182,8 +183,15 @@ ssize_t bm_commmads_write(void *data, u64 val)
 	case 2:
 		memset(&buff_info, 0x0, sizeof(buff_info));
 
+		buff_info.num_allocs = 1;
+		buff_info.policy_id = policy_pop_test;
+
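+		/* pop a single buffer, cycling the test across policy IDs 0-3 */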
 		bmgr_pop_buffer(&buff_info);
 
+		policy_pop_test++;
+		if (policy_pop_test == 4)
+			policy_pop_test = 0;
+
 		pr_info("pop buffer address 0x%x (high 0x%x) from policy %d pool %d\n",
 			buff_info.addr_low[0], buff_info.addr_high[0],
 			buff_info.policy_id, buff_info.pool_id[0]);
@@ -199,7 +207,7 @@ ssize_t bm_commmads_write(void *data, u64 val)
 		dump_db(pdata);
 		break;
 	case 5:
-		print_hw_stats(0, 0, 0);
+		print_hw_stats();
 		break;
 	default:
 		pr_info("unknown command\n");
@@ -266,11 +274,10 @@ int bm_dbg_dev_init(struct platform_device *pdev)
 		return err;
 	}
 
-	dent = debugfs_create_file("commands",
-				   0644,
+	dent = debugfs_create_file("commands", 0644,
 				   pdata->debugfs_info.dir,
-				pdev,
-				&bm_commmads_fops);
+				   pdev,
+				   &bm_commmads_fops);
 	if (IS_ERR_OR_NULL(dent)) {
 		err = (int)PTR_ERR(dent);
 		dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
index 090cee508d37000617cc89df8001817ce05d9a25..54aa4426b3bd227afa53d1c9b2ed7b265ac25bac 100644
--- a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv.c
@@ -71,9 +71,15 @@ MODULE_DEVICE_TABLE(of, bm_match);
 
 //!< Platform driver
 static struct platform_driver	g_bm_platform_driver = {
-	.driver = { .name = "buffer_manager", .of_match_table = bm_match, },
 	.probe = buffer_manager_probe,
 	.remove = buffer_manager_remove,
+	.driver = {
+			.owner = THIS_MODULE,
+			.name = "buffer_manager",
+#ifdef CONFIG_OF
+			.of_match_table = bm_match,
+#endif
+		  },
 };
 
 //!< global pointer for private database
@@ -86,6 +92,7 @@ static struct platform_driver	g_bm_platform_driver = {
 
 void __iomem	*bm_config_addr_base;
 void __iomem	*bm_policy_mngr_addr_base;
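+/* MCDMA0 register window used by copy_dma() */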
+void __iomem	*uc_mcdma0_config_addr_base;
 
 #define BM_BASE		(bm_config_addr_base)		// (0x18B00000)
 #define BM_RAM_BASE	(bm_policy_mngr_addr_base)	// (0x18B10000)
@@ -96,8 +103,8 @@ void __iomem	*bm_policy_mngr_addr_base;
 //#define RD_DDR32(addr)	(*(volatile u32 *)(DDR_VIRT(addr)))
 #define WR_DDR32(addr, var)	((*(volatile u32 *)(DDR_VIRT(addr))) = var)
 
-#define RD_REG_32(addr)	__raw_readl((volatile u32 *)addr)
-#define WR_REG_32(addr, val)	__raw_writel(val, (volatile u32 *)addr)
+#define RD_REG_32(addr)	__raw_readl((volatile u32 *)(addr))
+#define WR_REG_32(addr, val)	__raw_writel(val, (volatile u32 *)(addr))
 //#define WR_REG_32(addr, var)	((*(volatile u32 *)(IO_VIRT(addr))) = var)
 //#define RD_REG_32(addr)	(*(volatile u32 *)(IO_VIRT(addr)))
 
@@ -175,10 +182,12 @@ static s32 buffer_manager_db_init(struct bmgr_driver_private *priv)
  **************************************************************************/
 static int buffer_manager_probe(struct platform_device *pdev)
 {
-	int							ret;
+	int				ret;
 	struct bmgr_driver_private	*priv;
-	struct resource				*res[2];
-	int							i;
+	struct resource			*res[3];
+	int				i;
+
+	dev_info(&pdev->dev, "BM probe...\n");
 
 #ifdef _FPGA_
 	priv = this;
@@ -200,19 +209,25 @@ static int buffer_manager_probe(struct platform_device *pdev)
 	}
 
 	/* load the memory ranges */
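+	/* 0: BM config, 1: policy manager RAM, 2: MCDMA0 registers */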
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < 3; i++) {
 		res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
 		if (!res[i]) {
-			pr_err("failed to get resources %d\n", i);
+			dev_err(&pdev->dev, "failed to get resources %d\n", i);
 			return -ENOENT;
 		}
 	}
 
 	bm_config_addr_base = devm_ioremap_resource(&pdev->dev, res[0]);
 	bm_policy_mngr_addr_base = devm_ioremap_resource(&pdev->dev, res[1]);
+	uc_mcdma0_config_addr_base = devm_ioremap_resource(&pdev->dev, res[2]);
 
-	if (!bm_config_addr_base || !bm_policy_mngr_addr_base) {
-		pr_err("failed to request and remap io ranges\n");
+	if (IS_ERR(bm_config_addr_base) ||
+	    IS_ERR(bm_policy_mngr_addr_base) ||
+	    IS_ERR(uc_mcdma0_config_addr_base)) {
+		dev_err(&pdev->dev, "failed to request and remap io ranges\n");
 		return -ENOMEM;
 	}
 
@@ -245,7 +260,7 @@ static int buffer_manager_remove(struct platform_device *pdev)
 
 	bm_dbg_dev_clean(pdev);
 
-	pr_info("buffer_manager_remove(): remove done\n");
+	dev_info(&pdev->dev, "buffer_manager_remove(): remove done\n");
 
 	return 0;
 }
@@ -303,12 +318,12 @@ module_exit(buffer_manager_driver_exit);
 
 void copy_dma(u32 src, u32 dst, u32 flags)
 {
-	u32	BASE_PXP_QOS_IP_ADDRESS = 0; //0x18800000;
-	u32 MCDMA0_BASE_ADDR = BASE_PXP_QOS_IP_ADDRESS + 0x50000;
+	// base-0x18850000
+	u32 MCDMA0_BASE_ADDR = (u32)uc_mcdma0_config_addr_base;
 	u32 MCDMA_SRC_OFFSET = 0;
 	u32 MCDMA_DST_OFFSET = 0x4;
 	u32 MCDMA_CONTROL_OFFSET = 0x8;
-	u32 mcdma_channel = 3;
+	u32 mcdma_channel = 0;
 	u32 active_bit = (1 << 30);
 	u32 pxp_offset = 0;
 	struct timespec start_ts;
@@ -469,7 +484,7 @@ static s32 bmgr_is_policy_params_valid(
 }
 
 /**************************************************************************
- *! \fn	bmgr_set_buffer_manager_control
+ *! \fn	bmgr_set_control
  **************************************************************************
  *
  *  \brief	Sets the control register
@@ -477,7 +492,7 @@ static s32 bmgr_is_policy_params_valid(
  *  \return	RC_SUCCESS on success, other error code on failure
  *
  **************************************************************************/
-static s32 bmgr_set_buffer_manager_control(void)
+static s32 bmgr_set_control(void)
 {
 	// Buffer manager client enable
 	WR_REG_32(BMGR_CTRL_REG_ADDR(BM_BASE), 0x1);
@@ -485,6 +500,11 @@ static s32 bmgr_set_buffer_manager_control(void)
 	return RC_SUCCESS;
 }
 
+static u32 bmgr_get_control(void)
+{
+	return RD_REG_32(BMGR_CTRL_REG_ADDR(BM_BASE));
+}
+
 /**************************************************************************
  *! \fn	bmgr_enable_min_guarantee_per_pool
  **************************************************************************
@@ -1403,7 +1423,7 @@ static s32 bmgr_set_policy_max_allowed_per_pool(u8 policy_id,
 	WR_REG_32(BMGR_POLICY_MAX_ALLOWED_PER_POOL_ADDR(BM_RAM_BASE,
 							policy_id,
 							pool_id),
-		  max_allowed);
+							max_allowed);
 
 	return RC_SUCCESS;
 }
@@ -1445,7 +1465,7 @@ static u32 bmgr_get_policy_number_of_allocated_buffers(u8 policy_id)
 }
 
 /**************************************************************************
- *! \fn	bmgr_get_policy_number_of_allocated_buffers_per_pool
+ *! \fn	bmgr_get_policy_num_allocated_per_pool
  **************************************************************************
  *
  *  \brief	Returns the number of allocated buffers per pool in this policy
@@ -1456,13 +1476,57 @@ static u32 bmgr_get_policy_number_of_allocated_buffers(u8 policy_id)
  *  \return	Number of allocated buffers per pool in this policy
  *
  **************************************************************************/
-static u32 bmgr_get_policy_number_of_allocated_buffers_per_pool(u8 policy_id,
-								u8 pool_id)
+static u32 bmgr_get_policy_num_allocated_per_pool(u8 policy_id,
+						  u8 pool_id)
 {
 	return RD_REG_32(BMGR_POLICY_ALLOC_BUFF_PER_POOL_COUNTER_ADDR(
 			 BM_RAM_BASE, pool_id, policy_id));
 }
 
+static s32 bmgr_test_dma(u32 num_bytes)
+{
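+	/*
+	 * DMA sanity check: write known patterns into one coherent buffer,
+	 * copy it into a second, zeroed buffer with copy_dma() and dump
+	 * both buffers before and after the transfer.
+	 */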
+	void		*addr;
+	void		*addr1;
+	dma_addr_t	dma;
+	dma_addr_t	dma1;
+
+	addr = dmam_alloc_coherent(&this->pdev->dev,
+				   2 * sizeof(u32),
+				   &dma,
+				   GFP_KERNEL | GFP_DMA);
+	if (!addr) {
+		dev_err(&this->pdev->dev, "Could not allocate using dmam_alloc_coherent\n");
+		return -ENOMEM;
+	}
+
+	addr1 = dmam_alloc_coherent(&this->pdev->dev,
+				    2 * sizeof(u32),
+				    &dma1,
+				    GFP_KERNEL | GFP_DMA);
+	if (!addr1) {
+		dev_err(&this->pdev->dev, "Could not allocate using dmam_alloc_coherent\n");
+		return -ENOMEM;
+	}
+
+	WR_REG_32(addr, 0);
+	WR_REG_32((addr + 4), 0);
+	WR_REG_32(addr1, 0xcafecafe);
+	WR_REG_32((addr1 + 4), 0x12345678);
+
+	pr_info("ADDRESSES ======> 0x%x[0x%x] ; 0x%x[0x%x] ; 0x%x[0x%x] ; 0x%x[0x%x]\n",
+		(u32)addr, (u32)dma, (u32)(addr + 4), (u32)(dma + 4),
+		(u32)(addr1), (u32)(dma1), (u32)(addr1 + 4), (u32)(dma1 + 4));
+	pr_info("TEST 1 ======> 0x%x ; 0x%x ; 0x%x ; 0x%x\n",
+		RD_REG_32(addr), RD_REG_32(addr + 4),
+		RD_REG_32(addr1), RD_REG_32(addr1 + 4));
+	copy_dma((u32)dma1, (u32)dma, (0x80100000 | (8 * num_bytes)));
+	pr_info("TEST 2 ======> 0x%x ; 0x%x ; 0x%x ; 0x%x\n",
+		RD_REG_32(addr), RD_REG_32(addr + 4),
+		RD_REG_32(addr1), RD_REG_32(addr1 + 4));
+
+	return 0;
+}
+
 /**************************************************************************
  *! \fn	bmgr_pop_buffer
  **************************************************************************
@@ -1477,11 +1541,12 @@ static u32 bmgr_get_policy_number_of_allocated_buffers_per_pool(u8 policy_id,
 s32 bmgr_pop_buffer(struct bmgr_buff_info * const buff_info)
 {
 	u32	address = BMGR_DATAPATH_BASE;
-	//u32	dst_addr = 0x600000;
 	u32	index = 0;
 	u32	low = 0;
 	u32	high = 0;
-	u32	ptr2pop[2 * PP_BMGR_MAX_BURST_IN_POP]; // 64 bit per allocation
+	void	*addr;
+	dma_addr_t	dma;
+	u16	offset = 0;
 
 	if (!buff_info) {
 		pr_err("bmgr_pop_buffer: buff_info is NULL\n");
@@ -1501,12 +1566,22 @@ s32 bmgr_pop_buffer(struct bmgr_buff_info * const buff_info)
 	// Write the Policy
 	address |= (buff_info->policy_id << 8);
 
-	copy_dma(address, (u32)&ptr2pop[0]/*dst_addr*/,
-		 (0x80100000 | (8 * buff_info->num_allocs))/*0x80100008*/);
+	addr = dmam_alloc_coherent(&this->pdev->dev,
+				   2 * sizeof(u32) * buff_info->num_allocs,
+				   &dma,
+				   GFP_KERNEL | GFP_DMA);
+	if (!addr) {
+		dev_err(&this->pdev->dev, "Could not allocate using dmam_alloc_coherent\n");
+		return -ENOMEM;
+	}
+
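+	/*
+	 * Each pop result is 64 bits; DMA num_allocs of them from the
+	 * datapath address into the device-managed coherent buffer.
+	 */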
+	copy_dma(address, (u32)dma,
+		 (0x80100000 | (8 * buff_info->num_allocs)));
 
 	for (index = 0; index < 2 * buff_info->num_allocs; index += 2) {
-		low = ptr2pop[index]; // RD_DDR32(dst_addr+8*index);
-		high = ptr2pop[index + 1]; // RD_DDR32(dst_addr+4+8*index);
+		low = RD_REG_32(addr + offset);
+		high = RD_REG_32(addr + offset + 4);
+		offset += 8;
 
 		pr_info("POP ======> 0x%x ; 0x%x\n", low, high);
 
@@ -1615,20 +1690,37 @@ EXPORT_SYMBOL(bmgr_push_buffer);
  **************************************************************************/
 s32 bmgr_driver_init(void)
 {
-	u8 index;
+	u16 index;
+	u16 idx2;
 
 	pr_info("Buffer Manager driver is initializing....");
 
 	// @lock
 	bmgr_db_lock();
 
-	bmgr_set_buffer_manager_control();
+	bmgr_set_control();
 	bmgr_configure_ocp_master();
 
 	// Reset group reserved buffers
 	for (index = 0; index < PP_BMGR_MAX_GROUPS; index++)
 		bmgr_set_group_reserved_buffers(index, 0);
 
+	// Init RAM: zero all per-policy limits, mappings and counters
+	for (index = 0; index < PP_BMGR_MAX_POLICIES; index++) {
+		bmgr_set_policy_max_allowed_per_policy(index, 0);
+		bmgr_set_policy_min_guaranteed_per_policy(index, 0);
+		bmgr_set_policy_group_association(index, 0);
+		WR_REG_32(BMGR_POLICY_POOLS_MAPPING_ADDR(BM_RAM_BASE, index),
+			  0);
+		WR_REG_32(BMGR_POLICY_ALLOC_BUFF_COUNTER_ADDR(BM_RAM_BASE,
+							      index), 0);
+		for (idx2 = 0; idx2 < PP_BMGR_MAX_POOLS; idx2++) {
+			bmgr_set_policy_max_allowed_per_pool(index, idx2, 0);
+			WR_REG_32(BMGR_POLICY_ALLOC_BUFF_PER_POOL_COUNTER_ADDR(
+					BM_RAM_BASE, idx2, index), 0);
+		}
+	}
+
 	// @unlock
 	bmgr_db_unlock();
 
@@ -1653,12 +1745,13 @@ EXPORT_SYMBOL(bmgr_driver_init);
 s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
 			u8 * const pool_id)
 {
-	s32		status = RC_SUCCESS;
+	s32	status = RC_SUCCESS;
 	void	*pointers_table = NULL;
-	u32		index = 0;
-	u32		*temp_pointers_table_ptr = NULL;
-	u64		user_array_ptr;
-	u32		phy_ll_base;
+	u32	index = 0;
+	u32	*temp_pointers_table_ptr = NULL;
+	u64	user_array_ptr;
+	u32	phy_ll_base;
+	u32	val = 0;
 #ifdef _FPGA_
 	//u32	fpga_ddr_address = 0x500000;
 #endif
@@ -1752,8 +1845,8 @@ s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
 		goto free_memory;
 
 	// Verify group is not full
-	index = this->driver_db.groups[pool_params->group_id].
-				num_pools_in_group;
+	val = pool_params->group_id;
+	index = this->driver_db.groups[val].num_pools_in_group;
 	if (index >= PP_BMGR_MAX_POOLS_IN_GROUP) {
 		pr_err("bmgr_pool_configure: Group %d is full. num_pools_in_group %d",
 		       pool_params->group_id, index);
@@ -1772,16 +1865,15 @@ s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
 	this->driver_db.groups[pool_params->group_id].num_pools_in_group++;
 	// Group's reserved buffers will be updated when configuring the policy
 
-	status = bmgr_set_group_available_buffers(pool_params->group_id,
-						  this->driver_db.groups[
-				pool_params->group_id].available_buffers);
+	val = this->driver_db.groups[pool_params->group_id].available_buffers;
+	status = bmgr_set_group_available_buffers(pool_params->group_id, val);
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
 	status = bmgr_set_pcu_fifo_base_address(*pool_id,
 						BMGR_START_PCU_FIFO_SRAM_ADDR +
 						(*pool_id *
-					BMGR_DEFAULT_PCU_FIFO_SIZE));
+						BMGR_DEFAULT_PCU_FIFO_SIZE));
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
@@ -1793,18 +1885,18 @@ s32 bmgr_pool_configure(const struct bmgr_pool_params * const pool_params,
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
-	status = bmgr_set_pcu_fifo_prog_empty(*pool_id,
-			BMGR_DEFAULT_PCU_FIFO_LOW_THRESHOLD);
+	val = BMGR_DEFAULT_PCU_FIFO_LOW_THRESHOLD;
+	status = bmgr_set_pcu_fifo_prog_empty(*pool_id, val);
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
-	status = bmgr_set_pcu_fifo_prog_full(*pool_id,
-			BMGR_DEFAULT_PCU_FIFO_HIGH_THRESHOLD);
+	val = BMGR_DEFAULT_PCU_FIFO_HIGH_THRESHOLD;
+	status = bmgr_set_pcu_fifo_prog_full(*pool_id, val);
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
-	status = bmgr_set_pool_watermark_low_threshold(*pool_id,
-			BMGR_DEFAULT_WATERMARK_LOW_THRESHOLD);
+	val = BMGR_DEFAULT_WATERMARK_LOW_THRESHOLD;
+	status = bmgr_set_pool_watermark_low_threshold(*pool_id, val);
 	if (status != RC_SUCCESS)
 		goto free_memory;
 
@@ -1869,7 +1961,10 @@ s32 bmgr_policy_configure(const struct bmgr_policy_params * const policy_params,
 			  u8 * const policy_id)
 {
 	s32	status = RC_SUCCESS;
-	u8	index = 0;
+	u16	index = 0;
+	u32	id = 0;
+	u32	min = 0;
+	u32	max = 0;
 
 	pr_info("Configuring buffer manager policy...");
 
@@ -1904,37 +1999,41 @@ s32 bmgr_policy_configure(const struct bmgr_policy_params * const policy_params,
 	if (status != RC_SUCCESS)
 		goto unlock;
 
+	max = policy_params->max_allowed;
 	status = bmgr_set_policy_max_allowed_per_policy(*policy_id,
-						policy_params->max_allowed);
+							max);
 	if (status != RC_SUCCESS)
 		goto unlock;
 
+	min = policy_params->min_guaranteed;
 	status = bmgr_set_policy_min_guaranteed_per_policy(*policy_id,
-						policy_params->min_guaranteed);
+							   min);
 	if (status != RC_SUCCESS)
 		goto unlock;
 
 	// Set the group's reserved buffers
 	this->driver_db.groups[policy_params->group_id].reserved_buffers =
 		bmgr_get_group_reserved_buffers(policy_params->group_id);
-	this->driver_db.groups[policy_params->group_id].
-		reserved_buffers += policy_params->min_guaranteed;
+	this->driver_db.groups[policy_params->group_id].reserved_buffers +=
+			policy_params->min_guaranteed;
 
-	status = bmgr_set_group_reserved_buffers(policy_params->group_id,
-						 this->driver_db.groups[
-				policy_params->group_id].reserved_buffers);
+	id = this->driver_db.groups[policy_params->group_id].reserved_buffers;
+	status = bmgr_set_group_reserved_buffers(policy_params->group_id, id);
 	if (status != RC_SUCCESS)
 		goto unlock;
 
 	for (index = 0; index < policy_params->num_pools_in_policy; index++) {
+		max = policy_params->pools_in_policy[index].max_allowed;
 		status = bmgr_set_policy_max_allowed_per_pool(*policy_id,
-			policy_params->pools_in_policy[index].pool_id,
-			policy_params->pools_in_policy[index].max_allowed);
+							      index,
+							      max);
 		if (status != RC_SUCCESS)
 			goto unlock;
 
+		id = policy_params->pools_in_policy[index].pool_id;
 		status = bmgr_set_policy_pool_mapping(*policy_id,
-			policy_params->pools_in_policy[index].pool_id, index);
+						      id,
+						      index);
 		if (status != RC_SUCCESS)
 			goto unlock;
 	}
@@ -1943,7 +2042,8 @@ s32 bmgr_policy_configure(const struct bmgr_policy_params * const policy_params,
 	this->driver_db.num_policies++;
 	this->driver_db.policies[*policy_id].is_busy = 1;
 	memcpy(&this->driver_db.policies[*policy_id].policy_params,
-	       policy_params, sizeof(struct bmgr_policy_params));
+	       policy_params,
+	       sizeof(struct bmgr_policy_params));
 
 	bmgr_wait_for_init_completion();
 
@@ -1966,12 +2066,12 @@ EXPORT_SYMBOL(bmgr_policy_configure);
 
 void test_init_bmgr(struct device *dev)
 {
-	struct bmgr_pool_params	pool_params;
+	struct bmgr_pool_params		pool_params;
 	struct bmgr_policy_params	policy_params;
-	u8					pool_id;
-	u8					policy_id;
-	u8					num_buffers = 10;
-	u8					size_of_buffer = 64;
+	u8				pool_id;
+	u8				policy_id;
+	u8				num_buffers = 10;
+	u8				size_of_buffer = 64;
 	void				*ptr = NULL;
 
 	if (bmgr_driver_init() != RC_SUCCESS) {
@@ -2020,67 +2120,223 @@ void test_init_bmgr(struct device *dev)
 		policy_id, num_buffers / 2, num_buffers / 5, num_buffers);
 }
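+/*
+ * Dump the buffer manager control/status registers and the per-pool,
+ * per-group and per-policy counters for everything currently configured.
+ */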
 
-void print_hw_stats(u8 policy_id, u8 group_id, u8 pool_id)
+void print_hw_stats(void)
 {
 	u32	counter;
+	u16	max_pools = this->driver_db.num_pools;
+	u16	max_groups = this->driver_db.num_groups;
+	u16	max_policies = this->driver_db.num_policies;
+	u16	idx;
+	u16	idx1;
 
-	counter = bmgr_get_pool_pop_counter(pool_id);
-	pr_info("Pop counter (pool %d) = %d\n", pool_id, counter);
+	counter = bmgr_get_control();
+	pr_info("Control = 0x%x\n", counter);
 
-	counter = bmgr_get_pool_push_counter(pool_id);
-	pr_info("Push counter (pool %d) = %d\n", pool_id, counter);
+	counter = RD_REG_32(BMGR_POOL_MIN_GRNT_MASK_REG_ADDR(BM_BASE));
+	pr_info("Pool Min Grant Bit Mask = 0x%x\n", counter);
 
-	counter = bmgr_get_pool_allocated_counter(pool_id);
-	pr_info("Pool %d allocated counter = %d\n", pool_id, counter);
+	counter = RD_REG_32(BMGR_POOL_ENABLE_REG_ADDR(BM_BASE));
+	pr_info("Pool Enable = 0x%x\n", counter);
 
-	counter = bmgr_get_pool_size(pool_id);
-	pr_info("Pool %d size = %d\n", pool_id, counter);
+	counter = RD_REG_32(BMGR_POOL_FIFO_RESET_REG_ADDR(BM_BASE));
+	pr_info("Pool FIFO Reset = 0x%x\n", counter);
 
-	counter = bmgr_get_policy_number_of_allocated_buffers(policy_id);
-	pr_info("Policy %d num allocated = %d\n", policy_id, counter);
+	counter = bmgr_get_ocp_burst_size();
+	pr_info("OCP Master Burst Size = 0x%x\n", counter);
 
-	counter = bmgr_get_policy_number_of_allocated_buffers_per_pool(
-				policy_id, pool_id);
-	pr_info("Policy %d num allocated per pool %d = %d\n",
-		policy_id, pool_id, counter);
+	counter = RD_REG_32(BMGR_OCPM_NUM_OF_BURSTS_REG_ADDR(BM_BASE));
+	pr_info("OCP Master Number Of Bursts = 0x%x\n", counter);
 
-	counter = bmgr_get_policy_group_association(policy_id);
-	pr_info("Policy %d group association = %d\n", policy_id, counter);
+	counter = RD_REG_32(BMGR_STATUS_REG_ADDR(BM_BASE));
+	pr_info("Status = 0x%x\n", counter);
 
-	counter = bmgr_get_policy_max_allowed_per_policy(policy_id);
-	pr_info("Policy %d max allowed = %d\n", policy_id, counter);
+	pr_info("Pool size:\n");
+	pr_info("==========\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_size(idx);
+		pr_info("Pool %d size = 0x%x\n", idx, counter);
+	}
 
-	counter = bmgr_get_policy_max_allowed_per_pool(policy_id, pool_id);
-	pr_info("Policy %d max allowed per pool %d = %d\n",
-		policy_id, pool_id, counter);
+	pr_info("Group available buffers:\n");
+	pr_info("========================\n");
+	for (idx = 0; idx < max_groups; idx++) {
+		counter = bmgr_get_group_available_buffers(idx);
+		pr_info("Group %d available buffers = 0x%x\n", idx, counter);
+	}
 
-	counter = bmgr_get_policy_min_guaranteed_per_policy(policy_id);
-	pr_info("Policy %d min guaranteed = %d\n", policy_id, counter);
+	pr_info("Group reserved buffers:\n");
+	pr_info("=======================\n");
+	for (idx = 0; idx < max_groups; idx++) {
+		counter = bmgr_get_group_reserved_buffers(idx);
+		pr_info("Group %d reserved buffers = 0x%x\n", idx, counter);
+	}
 
-	counter = bmgr_get_policy_null_counter(policy_id);
-	pr_info("Policy %d null counter = %d\n", policy_id, counter);
+	pr_info("PCU FIFO base address:\n");
+	pr_info("======================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pcu_fifo_base_address(idx);
+		pr_info("Pool %d PCU FIFO base address = 0x%x\n",
+			idx, counter);
+	}
 
-	counter = bmgr_get_group_available_buffers(group_id);
-	pr_info("Group %d available buffers = %d\n", group_id, counter);
+	pr_info("PCU FIFO size:\n");
+	pr_info("==============\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pcu_fifo_size(idx);
+		pr_info("Pool %d PCU FIFO size = 0x%x\n", idx, counter);
+	}
 
-	counter = bmgr_get_group_reserved_buffers(group_id);
-	pr_info("Group %d reserved buffers = %d\n", group_id, counter);
+	pr_info("PCU FIFO occupancy:\n");
+	pr_info("===================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pcu_fifo_occupancy(idx);
+		pr_info("Pool %d PCU FIFO occupancy = 0x%x\n", idx, counter);
+	}
 
-	{
-		// Just to ignore compilation warnings
-		bmgr_get_ocp_burst_size();
-		bmgr_get_pcu_fifo_base_address(0);
-		bmgr_get_pcu_fifo_size(0);
-		bmgr_get_pcu_fifo_occupancy(0);
-		bmgr_get_pcu_fifo_prog_empty(0);
-		bmgr_get_pcu_fifo_prog_full(0);
-		bmgr_get_ext_fifo_base_addr_low(0);
-		bmgr_get_ext_fifo_base_addr_high(0);
-		bmgr_get_ext_fifo_occupancy(0);
-		bmgr_get_pool_burst_write_counter(0);
-		bmgr_get_pool_burst_read_counter(0);
-		bmgr_get_pool_watermark_low_threshold(0);
-		bmgr_get_policy_pools_mapping(0);
+	pr_info("PCU FIFO Prog empty:\n");
+	pr_info("====================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pcu_fifo_prog_empty(idx);
+		pr_info("Pool %d PCU FIFO Prog empty = 0x%x\n", idx, counter);
+	}
+
+	pr_info("PCU FIFO Prog full:\n");
+	pr_info("===================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pcu_fifo_prog_full(idx);
+		pr_info("Pool %d PCU FIFO Prog full = 0x%x\n", idx, counter);
 	}
-}
 
+	pr_info("EXT FIFO Base Addr Low:\n");
+	pr_info("=======================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_ext_fifo_base_addr_low(idx);
+		pr_info("Pool %d EXT FIFO Base Addr Low = 0x%x\n",
+			idx, counter);
+	}
+
+	pr_info("EXT FIFO Base Addr High:\n");
+	pr_info("========================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_ext_fifo_base_addr_high(idx);
+		pr_info("Pool %d EXT FIFO Base Addr High = 0x%x\n",
+			idx, counter);
+	}
+
+	pr_info("EXT FIFO occupancy:\n");
+	pr_info("===================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_ext_fifo_occupancy(idx);
+		pr_info("Pool %d EXT FIFO occupancy = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool allocated counter:\n");
+	pr_info("=======================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_allocated_counter(idx);
+		pr_info("Pool %d allocated counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool pop counter:\n");
+	pr_info("=================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_pop_counter(idx);
+		pr_info("Pool %d pop counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool push counter:\n");
+	pr_info("==================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_push_counter(idx);
+		pr_info("Pool %d push counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool DDR Burst write counter:\n");
+	pr_info("=============================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_burst_write_counter(idx);
+		pr_info("Pool %d Burst write counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool DDR Burst read counter:\n");
+	pr_info("============================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_burst_read_counter(idx);
+		pr_info("Pool %d Burst read counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Pool watermark low threshold:\n");
+	pr_info("=============================\n");
+	for (idx = 0; idx < max_pools; idx++) {
+		counter = bmgr_get_pool_watermark_low_threshold(idx);
+		pr_info("Pool %d watermark low threshold = 0x%x\n",
+			idx, counter);
+	}
+
+	pr_info("Policy NULL counter:\n");
+	pr_info("====================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_null_counter(idx);
+		pr_info("Policy %d NULL counter = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Policy Max allowed per policy:\n");
+	pr_info("==============================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_max_allowed_per_policy(idx);
+		pr_info("Policy %d Max allowed = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Policy Min guaranteed per policy:\n");
+	pr_info("=================================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_min_guaranteed_per_policy(idx);
+		pr_info("Policy %d Min guaranteed = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Policy group association:\n");
+	pr_info("=========================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_group_association(idx);
+		pr_info("Policy %d group association = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Policy pools mapping:\n");
+	pr_info("=====================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_pools_mapping(idx);
+		pr_info("Policy %d pools mapping = 0x%x\n", idx, counter);
+	}
+
+	pr_info("Policy max allowed per pool:\n");
+	pr_info("============================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		pr_info("Policy %d:\n", idx);
+		for (idx1 = 0; idx1 < max_pools; idx1++) {
+			counter = bmgr_get_policy_max_allowed_per_pool(idx,
+								       idx1);
+			pr_info("    max allowed per pool with priority %d = 0x%x\n",
+				idx1, counter);
+		}
+	}
+
+	pr_info("Policy num allocated buffers:\n");
+	pr_info("=============================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		counter = bmgr_get_policy_number_of_allocated_buffers(idx);
+		pr_info("Policy %d num allocated buffers = 0x%x\n",
+			idx, counter);
+	}
+
+	pr_info("Policy num allocated buffers per pool:\n");
+	pr_info("======================================\n");
+	for (idx = 0; idx < max_policies; idx++) {
+		pr_info("Policy %d:\n", idx);
+		for (idx1 = 0; idx1 < max_pools; idx1++) {
+			counter = bmgr_get_policy_num_allocated_per_pool(idx,
+									 idx1);
+			pr_info("    num allocated per pool with priority %d = 0x%x\n",
+				idx1, counter);
+		}
+	}
+}
diff --git a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
index 45c0cbffcbf9f288754546778f285589409847f9..5f4c548c2a12b8de8c1889772c5ef41a38055fd2 100644
--- a/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
+++ b/drivers/net/ethernet/lantiq/ppv4/bm/pp_bm_drv_internal.h
@@ -202,7 +202,7 @@ struct bmgr_driver_private {
 };
 
 void test_init_bmgr(struct device *dev);
-void print_hw_stats(u8 policy_id, u8 group_id, u8 pool_id);
+void print_hw_stats(void);
 
 int bm_dbg_dev_init(struct platform_device *pdev);
 void bm_dbg_dev_clean(struct platform_device *pdev);