From 9f435e9db7493a22da3b2fa0101ce2bd8b162301 Mon Sep 17 00:00:00 2001 From: Oussama Ghorbel <oussama.ghorbel@iopsys.eu> Date: Thu, 6 Feb 2020 16:21:25 +0100 Subject: [PATCH] update to ugw 8.4.1.30 --- Makefile | 1 + common/directconnect_dp_api.c | 57 +- common/directconnect_dp_dcmode_wrapper.c | 106 +- common/directconnect_dp_dcmode_wrapper.h | 15 + common/directconnect_dp_debug.h | 0 common/directconnect_dp_device.c | 28 +- common/directconnect_dp_device.h | 3 +- common/directconnect_dp_platform_cfg.h | 83 + common/directconnect_dp_proc.c | 132 +- dc_mode/Makefile | 2 +- dc_mode/dc_mode0/dc_mode0_xrx500.c | 818 +++++-- dc_mode/dc_modex/Makefile | 9 + dc_mode/dc_modex/dc_modex_ext.c | 1693 ++++++++++++++ include/directconnect_dp_api.h | 1202 +++++----- include/directconnect_dp_dcmode_api.h | 6 + test/Makefile | 3 + test/README | 101 + test/test_wave600_drv.c | 2647 ++++++++++++++++++++++ test/test_wave600_proc_api.c | 354 +++ test/test_wlan_drv.h | 21 + test/test_wlan_proc_api.h | 57 + 21 files changed, 6524 insertions(+), 814 deletions(-) delete mode 100644 common/directconnect_dp_debug.h create mode 100644 common/directconnect_dp_platform_cfg.h create mode 100644 dc_mode/dc_modex/Makefile create mode 100644 dc_mode/dc_modex/dc_modex_ext.c create mode 100644 test/README create mode 100644 test/test_wave600_drv.c create mode 100644 test/test_wave600_proc_api.c create mode 100644 test/test_wlan_drv.h create mode 100644 test/test_wlan_proc_api.h diff --git a/Makefile b/Makefile index 96b86cf..aed90ac 100755 --- a/Makefile +++ b/Makefile @@ -4,4 +4,5 @@ clean: rm -f common/{*.ko,*.o,.*.o.cmd} rm -f dc_mode/dc_mode0/{*.ko,*.o,.*.o.cmd} rm -f dc_mode/dc_mode1/{*.ko,*.o,.*.o.cmd} + rm -f dc_mode/dc_modex/{*.ko,*.o,.*.o.cmd} rm -f test/{*.ko,*.o,.*.o.cmd} diff --git a/common/directconnect_dp_api.c b/common/directconnect_dp_api.c index fb0b07d..6fe02f8 100644 --- a/common/directconnect_dp_api.c +++ b/common/directconnect_dp_api.c @@ -115,6 +115,22 @@ 
_dc_dp_dump_raw_data(char *buf, int len, char *prefix_str) * Direct Connect Driver Common Interface APIs * ======================================================================== */ +int32_t +dc_dp_get_peripheral_config(enum dc_dp_dev_type dev_id, struct dc_dp_dev_config *dev_config) +{ + int32_t ret = -1; + + if (NULL == dev_config) + goto out; + + memset(dev_config, 0, sizeof(struct dc_dp_dev_config)); + ret = dc_dp_dcmode_wrapper_get_peripheral_config(dev_id, dev_config); + +out: + return ret; +} +EXPORT_SYMBOL(dc_dp_get_peripheral_config); + int32_t dc_dp_get_host_capability(struct dc_dp_host_cap *cap, uint32_t flags) { @@ -197,6 +213,10 @@ dc_dp_alloc_port(struct module *owner, uint32_t dev_port, p_devinfo->dev = dev; p_devinfo->dev_port = dev_port; p_devinfo->port_id = ep_id; + if ( (flags & DC_DP_F_MULTI_PORT) && (flags & DC_DP_F_SHARED_RES) ) + p_devinfo->ref_port_id = port_id; + else + p_devinfo->ref_port_id = ep_id; p_devinfo->alloc_flags = flags; p_devinfo->flags = DC_DP_DEV_PORT_ALLOCATED; @@ -533,6 +553,26 @@ out: } EXPORT_SYMBOL(dc_dp_handle_ring_sw); +int32_t +dc_dp_return_bufs(uint32_t port_id, void **buflist, + uint32_t buflist_len, uint32_t flags) +{ + int32_t ret = DC_DP_FAILURE; + struct dc_dp_priv_dev_info *p_devinfo = NULL; + + ret = dc_dp_get_device_by_port(port_id, &p_devinfo); + if (ret != 0) { + DC_DP_ERROR("failed to register/de-register subif as port_id=%d is not allocated yet!!!\n", port_id); + goto out; + } + + ret = dc_dp_dcmode_wrapper_return_bufs(p_devinfo, port_id, buflist, buflist_len, flags); + +out: + return ret; +} +EXPORT_SYMBOL(dc_dp_return_bufs); + struct sk_buff * dc_dp_alloc_skb(uint32_t len, struct dp_subif *subif, uint32_t flags) { @@ -652,7 +692,9 @@ dc_dp_disconn_if(struct net_device *netif, struct dp_subif *subif_id, out: /* Remove the MAC from Linux bridge forwarding table */ if (netif) { +#if IS_ENABLED(CONFIG_BRIDGE) dc_dp_br_fdb_delete(netif, mac_addr); +#endif /* #if IS_ENABLED(CONFIG_BRIDGE) */ #if 
defined(CONFIG_MCAST_HELPER_MODULE) || defined(CONFIG_MCAST_HELPER) if (mac_addr) { memcpy(hw_addr, mac_addr, MAX_ETH_ALEN); @@ -685,7 +727,7 @@ dc_dp_rx(struct net_device *rxif, struct net_device *txif, struct dp_subif *subi struct sk_buff *tx_skb = NULL; struct dp_subif subif = {0}; struct dp_subif tx_subif = {0}; - uint32_t pkt_subif_id; + //uint32_t pkt_subif_id; int32_t subif_idx; //DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Entry.\n"); @@ -747,6 +789,7 @@ dc_dp_rx(struct net_device *rxif, struct net_device *txif, struct dp_subif *subi if (ret > 0 && *pskb != NULL) { tx_skb = *pskb; +#if 0 if (txif) { /* Received from LITEPATH, so transmit to SWPATH; In any case, Lower layer should free the skb */ if (dc_dp_get_mark_pkt(tx_subif.port_id, tx_skb, 0)) { @@ -767,9 +810,13 @@ dc_dp_rx(struct net_device *rxif, struct net_device *txif, struct dp_subif *subi /* Received from SWPATH, so transmit to LITEPATH; In any case, Lower layer should free the skb */ ret = dc_dp_dcmode_wrapper_xmit(p_devinfo, rxif, &subif, &tx_subif, tx_skb, tx_skb->len, DC_DP_F_XMIT_LITEPATH); } - } else { + } else +#endif + { + DC_DP_ERROR("Dropping packet (dev=%s), no shortcut transmit handling!!!\n", + ((*pskb)->dev ? (*pskb)->dev->name : NULL)); ret = 0; - goto drop; + dev_kfree_skb_any(tx_skb); } } else if (*pskb != NULL) DC_DP_ERROR("Client driver (dev=%s) should consume the skb. 
skb is freed???\n", @@ -982,7 +1029,9 @@ dc_dp_mark_pkt_devqos(int32_t port_id, struct net_device *dst_netif, struct sk_b { int32_t ret; struct dc_dp_priv_dev_info *p_devinfo = NULL; +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) struct dma_tx_desc_1 *desc_1 = (struct dma_tx_desc_1 *) &skb->DW1; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ int32_t class_prio; int32_t devqos; bool is_class = 1; @@ -1005,7 +1054,9 @@ dc_dp_mark_pkt_devqos(int32_t port_id, struct net_device *dst_netif, struct sk_b devqos = _dc_dp_get_class2devqos(p_devinfo, class_prio, is_class); +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) desc_1->field.resv1 = devqos; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ return devqos; } diff --git a/common/directconnect_dp_dcmode_wrapper.c b/common/directconnect_dp_dcmode_wrapper.c index ed52434..18c9d59 100644 --- a/common/directconnect_dp_dcmode_wrapper.c +++ b/common/directconnect_dp_dcmode_wrapper.c @@ -18,14 +18,43 @@ #include "directconnect_dp_device.h" #include "directconnect_dp_swpath.h" #include "directconnect_dp_litepath.h" +#include "directconnect_dp_platform_cfg.h" -#ifdef HAVE_DATAPATH_API /* NOTE : Generic Datapath driver MUST have a common macro. 
*/ -#define DP_F_FASTPATH DP_F_FAST_WLAN -#endif /* #ifdef HAVE_DATAPATH_API */ +#if defined(HAVE_DATAPATH_EXT_API) + #define DP_F_FASTPATH DP_F_ACA +#elif defined(HAVE_DATAPATH_API) + #define DP_F_FASTPATH DP_F_FAST_WLAN +#endif extern struct dc_dp_dcmode **dc_dp_get_dcmode_head(void); +int32_t +dc_dp_dcmode_wrapper_get_peripheral_config(enum dc_dp_dev_type dev_id, + struct dc_dp_dev_config *dev_config) +{ + dev_config->txin_ring_size = DC_DP_CFG_MAX_TXIN_RING_SZ; + dev_config->txout_ring_size = DC_DP_CFG_MAX_TXOUT_RING_SZ; + dev_config->rxin_ring_size = DC_DP_CFG_MAX_RXIN_RING_SZ; + dev_config->rxout_ring_size = DC_DP_CFG_MAX_RXOUT_RING_SZ; + + if (dev_id == DC_DP_DEV_WAV_600_CDB) { + dev_config->rx_bufs = DC_DP_CFG_MAX_RX_BUFS_WAV_600_CDB; + dev_config->tx_bufs = DC_DP_CFG_MAX_TX_BUFS_WAV_600_CDB; + } else if (dev_id == DC_DP_DEV_WAV_600_24G) { + dev_config->rx_bufs = DC_DP_CFG_MAX_RX_BUFS_WAV_600_24G; + dev_config->tx_bufs = DC_DP_CFG_MAX_TX_BUFS_WAV_600_24G; + } else if (dev_id == DC_DP_DEV_WAV_600_5G) { + dev_config->rx_bufs = DC_DP_CFG_MAX_RX_BUFS_WAV_600_5G; + dev_config->tx_bufs = DC_DP_CFG_MAX_TX_BUFS_WAV_600_5G; + } else { + dev_config->rx_bufs = DC_DP_CFG_MAX_RX_BUFS_WAV_500; + dev_config->tx_bufs = DC_DP_CFG_MAX_TX_BUFS_WAV_500; + } + + return 0; +} + int32_t dc_dp_dcmode_wrapper_get_host_capability(struct dc_dp_host_cap *cap, uint32_t flags) { @@ -52,15 +81,42 @@ dc_dp_dcmode_wrapper_alloc_port(struct dc_dp_priv_dev_info *dev_ctx, struct net_device *dev, int32_t port_id, uint32_t flags) { int32_t ret = -1; + uint32_t dp_alloc_flags = 0; /* Allocate a port (HW or SW) */ -#ifdef HAVE_DATAPATH_API /* FIXME : DP_F_FAST_DSL flag use in DP API? 
*/ - if ( (flags & (DC_DP_F_FASTPATH | DC_DP_F_FAST_DSL)) ) - ret = dp_alloc_port(owner, dev, dev_port, port_id, NULL, DP_F_FASTPATH); -#endif /* #ifdef HAVE_DATAPATH_API */ + if ( (flags & (DC_DP_F_FASTPATH | DC_DP_F_FAST_DSL)) ) { + dp_alloc_flags = DP_F_FASTPATH; + + /* Allocate a DCDP port for Fastpath multiport/shared device */ + if ( (flags & DC_DP_F_MULTI_PORT) && (flags & DC_DP_F_SHARED_RES) ) + ret = dp_alloc_sw_port(owner, dev, dev_port, port_id, flags); + + /* Otherwise, allocate a DP port */ + else { +#if defined(HAVE_DATAPATH_EXT_API) + /* Convert the alloc flags */ + if ( (flags & DC_DP_F_FAST_WLAN) ) + dp_alloc_flags |= DP_F_FAST_WLAN; + else if ( (flags & DC_DP_F_FAST_DSL) ) + dp_alloc_flags |= DP_F_FAST_DSL; + else + dp_alloc_flags |= DP_F_FAST_WLAN; + + ret = dp_alloc_port_ext(0, owner, dev, dev_port, port_id, NULL, NULL, dp_alloc_flags); + +#elif defined(HAVE_DATAPATH_API) /* #if defined(HAVE_DATAPATH_EXT_API) */ + /* DP alloc flags */ + if ( (flags & DC_DP_F_FAST_DSL) ) + dp_alloc_flags |= DP_F_FAST_DSL; + + ret = dp_alloc_port(owner, dev, dev_port, port_id, NULL, dp_alloc_flags); +#endif /* #elif defined(HAVE_DATAPATH_API) */ + } + } + /* Allocate a DCDP port for Litepath/Swpath device */ if ((ret < 0) && (flags & (DC_DP_F_LITEPATH | DC_DP_F_SWPATH))) ret = dp_alloc_sw_port(owner, dev, dev_port, port_id, flags); @@ -69,17 +125,19 @@ dc_dp_dcmode_wrapper_alloc_port(struct dc_dp_priv_dev_info *dev_ctx, int32_t dc_dp_dcmode_wrapper_dealloc_port(struct dc_dp_priv_dev_info *dev_ctx, - struct module *owner, uint32_t dev_port, - struct net_device *dev, int32_t port_id, uint32_t flags) + struct module *owner, uint32_t dev_port, + struct net_device *dev, int32_t port_id, uint32_t flags) { int32_t ret = -1; /* De-allocate a port (HW or SW) */ -#ifdef HAVE_DATAPATH_API - if (is_hw_port(port_id)) + if (is_hw_port(port_id)) { +#if defined(HAVE_DATAPATH_EXT_API) + ret = dp_alloc_port_ext(0, owner, dev, dev_port, port_id, NULL, NULL, DP_F_DEREGISTER); +#elif 
defined(HAVE_DATAPATH_API) ret = dp_alloc_port(owner, dev, dev_port, port_id, NULL, DP_F_DEREGISTER); - else #endif /* #ifdef HAVE_DATAPATH_API */ + } else ret = dp_dealloc_sw_port(owner, dev, dev_port, port_id, 0); return ret; @@ -98,13 +156,16 @@ dc_dp_dcmode_wrapper_register_dev(struct dc_dp_priv_dev_info *dev_ctx, if (!dev_ctx) goto err_out; - ret = dc_dp_get_dcmode((void *)dev_ctx, port_id, resources, &dcmode, flags); + ret = dc_dp_get_dcmode((void *)dev_ctx, dev_ctx->ref_port_id, resources, &dcmode, flags); if (ret) goto litepath_reg_dev; dev_ctx->dcmode = dcmode; - if (dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev) + if (dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev_ex) + ret = dev_ctx->dcmode->dcmode_ops->register_dev_ex(dev_ctx->priv, owner, port_id, dev, datapathcb, resources, devspec, + dev_ctx->ref_port_id, dev_ctx->alloc_flags, flags); + else if (dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev) ret = dev_ctx->dcmode->dcmode_ops->register_dev(dev_ctx->priv, owner, port_id, dev, datapathcb, resources, devspec, flags); litepath_reg_dev: @@ -133,7 +194,10 @@ dc_dp_dcmode_wrapper_deregister_dev(struct dc_dp_priv_dev_info *dev_ctx, if (is_litepath_dev_registered(dev_ctx, port_id, dev)) ret = dc_dp_litepath_deregister_dev(dev_ctx, owner, port_id, dev, datapathcb, devspec, flags); - if (dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev) + if (dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev_ex) + ret = dev_ctx->dcmode->dcmode_ops->register_dev_ex(dev_ctx->priv, owner, port_id, dev, datapathcb, resources, devspec, + dev_ctx->ref_port_id, dev_ctx->alloc_flags, flags); + else if (dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->register_dev) ret = dev_ctx->dcmode->dcmode_ops->register_dev(dev_ctx->priv, owner, port_id, dev, datapathcb, 
resources, devspec, flags); err_out: @@ -238,6 +302,18 @@ dc_dp_dcmode_wrapper_handle_ring_sw(struct dc_dp_priv_dev_info *dev_ctx, struct return ret; } +int32_t +dc_dp_dcmode_wrapper_return_bufs(struct dc_dp_priv_dev_info *dev_ctx, uint32_t port_id, + void **buflist, uint32_t buflist_len, uint32_t flags) +{ + int32_t ret = 0; + + if (dev_ctx && dev_ctx->dcmode && dev_ctx->dcmode->dcmode_ops && dev_ctx->dcmode->dcmode_ops->return_bufs) + ret = dev_ctx->dcmode->dcmode_ops->return_bufs(dev_ctx->priv, port_id, buflist, buflist_len, flags); + + return ret; +} + int32_t dc_dp_dcmode_wrapper_add_session_shortcut_forward(struct dc_dp_priv_dev_info *dev_ctx, struct dp_subif *subif, struct sk_buff *skb, uint32_t flags) { diff --git a/common/directconnect_dp_dcmode_wrapper.h b/common/directconnect_dp_dcmode_wrapper.h index db9e250..92b6ec8 100644 --- a/common/directconnect_dp_dcmode_wrapper.h +++ b/common/directconnect_dp_dcmode_wrapper.h @@ -24,6 +24,12 @@ dc_dp_dcmode_wrapper_get_host_capability ( uint32_t flags ); +int32_t +dc_dp_dcmode_wrapper_get_peripheral_config ( + enum dc_dp_dev_type dev_id, + struct dc_dp_dev_config *dev_config +); + extern int32_t dc_dp_dcmode_wrapper_alloc_port ( struct dc_dp_priv_dev_info *dev_ctx, @@ -109,6 +115,15 @@ dc_dp_dcmode_wrapper_handle_ring_sw ( uint32_t flags ); +extern int32_t +dc_dp_dcmode_wrapper_return_bufs ( + struct dc_dp_priv_dev_info *dev_ctx, + uint32_t port_id, + void **buflist, + uint32_t buflist_len, + uint32_t flags +); + extern int32_t dc_dp_dcmode_wrapper_add_session_shortcut_forward ( struct dc_dp_priv_dev_info *dev_ctx, diff --git a/common/directconnect_dp_debug.h b/common/directconnect_dp_debug.h deleted file mode 100644 index e69de29..0000000 diff --git a/common/directconnect_dp_device.c b/common/directconnect_dp_device.c index afc785f..00d5579 100644 --- a/common/directconnect_dp_device.c +++ b/common/directconnect_dp_device.c @@ -1,5 +1,5 @@ /* - * DirectConnect provides a common interface for the network devices 
to achieve the full or partial + * DirectConnect provides a common interface for the network devices to achieve the full or partial acceleration services from the underlying packet acceleration engine * Copyright (c) 2017, Intel Corporation. * @@ -21,7 +21,7 @@ #define MAX(x, y) (x > y ? x : y) /*! - \brief DC DP device block synchronization + \brief DC DP device block synchronization */ struct dc_dp_dev_lock { spinlock_t lock; /*!< DC DP lock */ @@ -49,7 +49,7 @@ dc_dp_lock_destroy(struct dc_dp_dev_lock *p_lock) static inline void dc_dp_lock_device_list(struct dc_dp_dev_lock *p_lock) -{ +{ spin_lock_bh(&p_lock->lock); } @@ -246,14 +246,14 @@ dc_dp_alloc_device(int32_t port_id, struct net_device *dev, struct dc_dp_priv_de } dc_dp_unlock_device_list(&g_dc_dp_dev_lock); - + if (pp_devinfo) - *pp_devinfo = p_devinfo; - + *pp_devinfo = p_devinfo; + return ret; } -void +void dc_dp_free_device(int32_t port_id, struct net_device *dev) { dc_dp_lock_device_list(&g_dc_dp_dev_lock); @@ -277,11 +277,11 @@ dc_dp_alloc_subif(struct dc_dp_priv_dev_info *p_devinfo, struct net_device *dev, } dc_dp_unlock_device_list(&g_dc_dp_dev_lock); - + return ret; } -void +void dc_dp_free_subif(struct dc_dp_priv_subif_info *p_subifinfo) { dc_dp_lock_device_list(&g_dc_dp_dev_lock); @@ -298,7 +298,7 @@ dc_dp_get_device_by_module_port(struct module *owner, int32_t port_id, struct dc struct dc_dp_priv_dev_info *p_devinfo = NULL; if ( pp_devinfo ) - *pp_devinfo = NULL; + *pp_devinfo = NULL; dc_dp_lock_device_list(&g_dc_dp_dev_lock); @@ -363,7 +363,7 @@ dc_dp_get_device_by_subif_netif(struct net_device *netif, struct dc_dp_priv_dev_ { if (p_devinfo->subif_info[subif_idx].flags == DC_DP_SUBIF_FREE) continue; - + if ( p_devinfo->subif_info[subif_idx].netif == netif ) { *pp_devinfo = p_devinfo; @@ -387,7 +387,7 @@ dc_dp_get_device_by_subif_netif(struct net_device *netif, struct dc_dp_priv_dev_ { if (p_devinfo->subif_info[subif_idx].flags == DC_DP_SUBIF_FREE) continue; - + if ( 
p_devinfo->subif_info[subif_idx].netif == netif ) { *pp_devinfo = p_devinfo; @@ -432,7 +432,7 @@ dc_dp_get_device_by_subif_ifname(char *ifname, struct dc_dp_priv_dev_info **pp_d { if (p_devinfo->subif_info[subif_idx].flags == DC_DP_SUBIF_FREE) continue; - + if ( strncmp(p_devinfo->subif_info[subif_idx].device_name, ifname, IFNAMSIZ) == 0 ) { *pp_devinfo = p_devinfo; @@ -456,7 +456,7 @@ dc_dp_get_device_by_subif_ifname(char *ifname, struct dc_dp_priv_dev_info **pp_d { if (p_devinfo->subif_info[subif_idx].flags == DC_DP_SUBIF_FREE) continue; - + if ( strncmp(p_devinfo->subif_info[subif_idx].device_name, ifname, IFNAMSIZ) == 0 ) { *pp_devinfo = p_devinfo; diff --git a/common/directconnect_dp_device.h b/common/directconnect_dp_device.h index aeed8a2..c3e4dcc 100644 --- a/common/directconnect_dp_device.h +++ b/common/directconnect_dp_device.h @@ -27,7 +27,7 @@ #define DC_DP_SUBIFID_MASK 0xF #define DC_DP_MAX_HW_DEVICE 16 /*< Applicable for DC fastpath device */ -#define DC_DP_MAX_SW_DEVICE 16 /*< Applicable for DC litepath/swpath device */ +#define DC_DP_MAX_SW_DEVICE 4 /*< Applicable for DC litepath/swpath device */ #define DC_DP_HW_PORT_RANGE_START 0 #define DC_DP_HW_PORT_RANGE_END ((DC_DP_HW_PORT_RANGE_START + DC_DP_MAX_HW_DEVICE) - 1) #define DC_DP_SW_PORT_RANGE_START 16 @@ -63,6 +63,7 @@ typedef struct dc_dp_priv_dev_info { struct net_device *dev; uint32_t dev_port; int32_t port_id; + int32_t ref_port_id; struct dc_dp_cb cb; /*! Callback Pointer to DIRECTPATH_CB */ uint32_t litepath_used; uint32_t litepath_port; diff --git a/common/directconnect_dp_platform_cfg.h b/common/directconnect_dp_platform_cfg.h new file mode 100644 index 0000000..e8e29b1 --- /dev/null +++ b/common/directconnect_dp_platform_cfg.h @@ -0,0 +1,83 @@ +/* + * DirectConnect provides a common interface for the network devices to achieve the full or partial + acceleration services from the underlying packet acceleration engine + * Copyright (c) 2017, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _DIRECTCONNECT_DP_PLATFORM_CFG_H_ +#define _DIRECTCONNECT_DP_PLATFORM_CFG_H_ + +#if defined(CONFIG_X86_INTEL_LGM) + /* LGM */ + #define DC_DP_CFG_MAX_TXIN_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_TXOUT_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_RXIN_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_RXOUT_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_CDB 0xA000 /* 40K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_CDB 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_24G 0xA000 /* 40K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_24G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_5G 0xA000 /* 40K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_5G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_500 0xA000 /* 40K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_500 0x8800 /* 34K */ + +#elif defined(CONFIG_X86_PUMA7) + /* CgR/PUMA/GRX750 */ + #define DC_DP_CFG_MAX_TXIN_RING_SZ 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_TXOUT_RING_SZ 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RXOUT_RING_SZ 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RXIN_RING_SZ 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_CDB 0xC000 /* 48K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_CDB 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_24G 0xC000 /* 48K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_24G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_5G 0xC000 /* 48K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_5G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_500 0xC000 /* 48K */ + #define 
DC_DP_CFG_MAX_TX_BUFS_WAV_500 0x8800 /* 34K */ + +#elif defined(CONFIG_PRX300_CQM) + /* PRX */ + #define DC_DP_CFG_MAX_TXIN_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_TXOUT_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_RXOUT_RING_SZ 0x0FFF /* 4K - 1 */ + #define DC_DP_CFG_MAX_RXIN_RING_SZ DC_DP_CFG_MAX_RXOUT_RING_SZ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_CDB 0x0FFF /* 4K - 1 */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_CDB 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_24G 0x0FFF /* 4K - 1 */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_24G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_5G 0x0FFF /* 4K - 1 */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_5G 0x8800 /* 34K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_500 0x0FFF /* 4K - 1 */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_500 0x8800 /* 34K */ + +#elif defined(CONFIG_SOC_GRX500) + /* GRX500 */ + #define DC_DP_CFG_MAX_TXIN_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_TXOUT_RING_SZ 0x20 /* 32 */ + #define DC_DP_CFG_MAX_RXOUT_RING_SZ 0x0800 /* 2K */ + #define DC_DP_CFG_MAX_RXIN_RING_SZ DC_DP_CFG_MAX_RXOUT_RING_SZ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_CDB 0x0800 /* 2K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_CDB 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_24G 0x0800 /* 2K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_24G 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_600_5G 0x0800 /* 2K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_600_5G 0x1000 /* 4K */ + #define DC_DP_CFG_MAX_RX_BUFS_WAV_500 0x0800 /* 2K */ + #define DC_DP_CFG_MAX_TX_BUFS_WAV_500 0x1000 /* 4K */ + +#else + #error "Not supported" +#endif + +#endif /* _DIRECTCONNECT_DP_PLATFORM_CFG_H_ */ diff --git a/common/directconnect_dp_proc.c b/common/directconnect_dp_proc.c index 472a815..30e8b76 100644 --- a/common/directconnect_dp_proc.c +++ b/common/directconnect_dp_proc.c @@ -206,6 +206,11 @@ static int proc_read_dc_dp_dbg(struct seq_file *seq, void *v) { int i; + if (!capable(CAP_NET_ADMIN)) { + printk("Read Permission denied"); + return 0; 
+ } + seq_printf(seq, "dbg_flag=0x%08x\n", dc_dp_get_dbg_flag()); seq_printf(seq, "Supported Flags =%d\n", _dc_dp_get_dbg_flag_str_size()); @@ -232,6 +237,11 @@ static ssize_t proc_write_dc_dp_dbg(struct file *file, const char __user *buf, s char *p; int f_enable; + if (!capable(CAP_NET_ADMIN)) { + printk("Write Permission denied"); + return 0; + } + len = min(count, (size_t)(sizeof(str) - 1)); len -= copy_from_user(str, buf, len); while ( len && str[len - 1] <= ' ' ) @@ -277,6 +287,11 @@ static int proc_read_dc_dp_dev_show(struct seq_file *seq, void *v) struct dc_dp_priv_dev_info *p_sw_dev_info = dc_dp_get_sw_device_head(); struct dc_dp_priv_dev_info *dev_info = NULL; + if (!capable(CAP_NET_ADMIN)) { + printk ("Read Permission denied"); + return 0; + } + for (dev_idx = 0; dev_idx < DC_DP_MAX_HW_DEVICE + DC_DP_MAX_SW_DEVICE; dev_idx++) { if (dev_idx < DC_DP_MAX_HW_DEVICE) dev_info = &p_hw_dev_info[dev_idx]; @@ -289,8 +304,8 @@ static int proc_read_dc_dp_dev_show(struct seq_file *seq, void *v) } seq_printf(seq, - "%d: module=0x%08x(name:%8s) dev_port=%02d dp_port=%02d\n", - dev_idx, (uint32_t)dev_info->owner, dev_info->owner->name, + "%d: module=0x%px(name:%8s) dev_port=%02d dp_port=%02d\n", + dev_idx, dev_info->owner, dev_info->owner->name, dev_info->dev_port, dev_info->port_id); seq_printf(seq, " status: %s\n", g_dc_dp_port_status_str[dev_info->flags]); @@ -302,22 +317,22 @@ static int proc_read_dc_dp_dev_show(struct seq_file *seq, void *v) } seq_printf(seq, "\n"); - seq_printf(seq, " cb->rx_fn: 0x%0x\n", - (uint32_t) dev_info->cb.rx_fn); - seq_printf(seq, " cb->stop_fn: 0x%0x\n", - (uint32_t) dev_info->cb.stop_fn); - seq_printf(seq, " cb->restart_fn: 0x%0x\n", - (uint32_t) dev_info->cb.restart_fn); - seq_printf(seq, " cb->get_subif_fn: 0x%0x\n", - (uint32_t) dev_info->cb.get_subif_fn); - seq_printf(seq, " cb->get_desc_info_fn: 0x%0x\n", - (uint32_t) dev_info->cb.get_desc_info_fn); - seq_printf(seq, " cb->reset_mib_fn: 0x%0x\n", - (uint32_t) 
dev_info->cb.reset_mib_fn); - seq_printf(seq, " cb->get_mib_fn: 0x%0x\n", - (uint32_t) dev_info->cb.reset_mib_fn); - seq_printf(seq, " cb->recovery_fn: 0x%0x\n", - (uint32_t) dev_info->cb.recovery_fn); + seq_printf(seq, " cb->rx_fn: 0x%px\n", + dev_info->cb.rx_fn); + seq_printf(seq, " cb->stop_fn: 0x%px\n", + dev_info->cb.stop_fn); + seq_printf(seq, " cb->restart_fn: 0x%px\n", + dev_info->cb.restart_fn); + seq_printf(seq, " cb->get_subif_fn: 0x%px\n", + dev_info->cb.get_subif_fn); + seq_printf(seq, " cb->get_desc_info_fn: 0x%px\n", + dev_info->cb.get_desc_info_fn); + seq_printf(seq, " cb->reset_mib_fn: 0x%px\n", + dev_info->cb.reset_mib_fn); + seq_printf(seq, " cb->get_mib_fn: 0x%px\n", + dev_info->cb.reset_mib_fn); + seq_printf(seq, " cb->recovery_fn: 0x%px\n", + dev_info->cb.recovery_fn); seq_printf(seq, " class2prio: "); for (i = 0; i < DC_DP_MAX_SOC_CLASS; i++) { @@ -336,10 +351,10 @@ static int proc_read_dc_dp_dev_show(struct seq_file *seq, void *v) for (i = 0; i < DC_DP_MAX_SUBIF_PER_DEV; i++) { if (dev_info->subif_info[i].flags) { seq_printf(seq, - " [%02d]: subif=0x%04x(vap=%d) netif=0x%0x(name=%s), device_name=%s\n", + " [%02d]: subif=0x%04x(vap=%d) netif=0x%px(name=%s), device_name=%s\n", i, dev_info->subif_info[i].subif, (dev_info->subif_info[i].subif >> DC_DP_SUBIFID_OFFSET) & DC_DP_SUBIFID_MASK, - (uint32_t)dev_info->subif_info[i].netif, + dev_info->subif_info[i].netif, dev_info->subif_info[i].netif ? 
dev_info->subif_info[i].netif->name : "NULL", dev_info->subif_info[i].device_name); } @@ -523,6 +538,11 @@ static int proc_read_dc_dp_dcmode_show(struct seq_file *seq, void *v) int32_t dcmode_idx; struct dc_dp_dcmode **dcmode = dc_dp_get_dcmode_head(); + if (!capable(CAP_NET_ADMIN)) { + printk("Read Permission denied"); + return 0; + } + for (dcmode_idx = 0; dcmode_idx < DC_DP_DCMODE_MAX; dcmode_idx++) { if (dcmode[dcmode_idx] == NULL) { seq_printf(seq, "%d: Not registered\n", dcmode_idx); @@ -532,42 +552,42 @@ static int proc_read_dc_dp_dcmode_show(struct seq_file *seq, void *v) seq_printf(seq, " dcmode_cap: 0x%0x\n", (uint32_t) dcmode[dcmode_idx]->dcmode_cap); - seq_printf(seq, " register_dev: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->register_dev); - seq_printf(seq, " register_subif: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->register_subif); - seq_printf(seq, " xmit: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->xmit); - seq_printf(seq, " handle_ring_sw: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->handle_ring_sw); - seq_printf(seq, " add_session_shortcut_forward: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->add_session_shortcut_forward); - seq_printf(seq, " disconn_if: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->disconn_if); - seq_printf(seq, " get_netif_stats: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->get_netif_stats); - seq_printf(seq, " clear_netif_stats: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->clear_netif_stats); - seq_printf(seq, " register_qos_class2prio_cb: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->register_qos_class2prio_cb); - seq_printf(seq, " map_class2devqos: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->map_class2devqos); - seq_printf(seq, " alloc_skb: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->alloc_skb); - seq_printf(seq, " free_skb: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->free_skb); - seq_printf(seq, " 
change_dev_status: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->change_dev_status); - seq_printf(seq, " get_wol_cfg: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->get_wol_cfg); - seq_printf(seq, " set_wol_cfg: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->set_wol_cfg); - seq_printf(seq, " set_wol_ctrl: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->set_wol_ctrl); - seq_printf(seq, " get_wol_ctrl_status: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->get_wol_ctrl_status); - seq_printf(seq, " dump_proc: 0x%0x\n", - (uint32_t) dcmode[dcmode_idx]->dcmode_ops->dump_proc); + seq_printf(seq, " register_dev: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->register_dev); + seq_printf(seq, " register_subif: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->register_subif); + seq_printf(seq, " xmit: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->xmit); + seq_printf(seq, " handle_ring_sw: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->handle_ring_sw); + seq_printf(seq, " add_session_shortcut_forward: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->add_session_shortcut_forward); + seq_printf(seq, " disconn_if: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->disconn_if); + seq_printf(seq, " get_netif_stats: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->get_netif_stats); + seq_printf(seq, " clear_netif_stats: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->clear_netif_stats); + seq_printf(seq, " register_qos_class2prio_cb: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->register_qos_class2prio_cb); + seq_printf(seq, " map_class2devqos: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->map_class2devqos); + seq_printf(seq, " alloc_skb: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->alloc_skb); + seq_printf(seq, " free_skb: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->free_skb); + seq_printf(seq, " change_dev_status: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->change_dev_status); + seq_printf(seq, " get_wol_cfg: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->get_wol_cfg); + 
seq_printf(seq, " set_wol_cfg: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->set_wol_cfg); + seq_printf(seq, " set_wol_ctrl: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->set_wol_ctrl); + seq_printf(seq, " get_wol_ctrl_status: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->get_wol_ctrl_status); + seq_printf(seq, " dump_proc: 0x%px\n", + dcmode[dcmode_idx]->dcmode_ops->dump_proc); } return 0; diff --git a/dc_mode/Makefile b/dc_mode/Makefile index d1b8d04..ffdd0e0 100644 --- a/dc_mode/Makefile +++ b/dc_mode/Makefile @@ -1 +1 @@ -obj-m += dc_mode0/ dc_mode1/ +obj-m += dc_mode0/ dc_mode1/ dc_modex/ diff --git a/dc_mode/dc_mode0/dc_mode0_xrx500.c b/dc_mode/dc_mode0/dc_mode0_xrx500.c index 3d3ab00..2f72d63 100644 --- a/dc_mode/dc_mode0/dc_mode0_xrx500.c +++ b/dc_mode/dc_mode0/dc_mode0_xrx500.c @@ -1,5 +1,5 @@ /* - * DirectConnect provides a common interface for the network devices to achieve the full or partial + * DirectConnect provides a common interface for the network devices to achieve the full or partial acceleration services from the underlying packet acceleration engine * Copyright (c) 2017, Intel Corporation. * @@ -39,25 +39,30 @@ #define XRX500_DCMODE0_DRV_MODULE_VERSION "1.1.2" #define DEV2SOC_FRAG_EXCEPTION_HANDLING /* Dev2SoC Fragmentation exception handling */ -#define XRX500_DCMODE0_BRIDGE_FLOW_LEARNING 1 #define XRX500_DCMODE0_MIB_WORKAROUND 1 #define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) #define SWITCH_DEV "/dev/switch_api/1" +#define XRX500_DCMODE0_MAX_PORT 20 /* 16 DP Port + 4 DCDP Port */ #define XRX500_DCMODE0_MAX_SUBIF_PER_DEV 16 #define XRX500_DCMODE0_SUBIFID_OFFSET 8 #define XRX500_DCMODE0_SUBIFID_MASK 0xF #define XRX500_DCMODE0_GET_SUBIFIDX(subif_id) \ ((subif_id >> XRX500_DCMODE0_SUBIFID_OFFSET) & XRX500_DCMODE0_SUBIFID_MASK) +#define MULTIPORT_WORKAROUND_MAX_SUBIF_NUM 8 +#define MULTIPORT_WORKAROUND_SUBIFID_MASK \ + (MULTIPORT_WORKAROUND_MAX_SUBIF_NUM << XRX500_DCMODE0_SUBIFID_OFFSET) #define DC_DP_LOCK spin_lock_bh #define DC_DP_UNLOCK spin_unlock_bh #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) - #define XRX500_DCMODE0_MAX_DEV_NUM 4 + #define XRX500_DCMODE0_MAX_DEV_NUM 4 + #define XRX500_DCMODE0_MAX_DEV_PORT_NUM 6 #else /* #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) */ - #define XRX500_DCMODE0_MAX_DEV_NUM 1 + #define XRX500_DCMODE0_MAX_DEV_NUM 1 + #define XRX500_DCMODE0_MAX_DEV_PORT_NUM 2 #endif /* #else */ #define XRX500_DCMODE0_UMT_PERIOD_DEFAULT 200 /* in micro second (GRX350/550) */ @@ -75,11 +80,11 @@ #define DATA_LEN(desc_p) (desc_p->status.field.data_len) #define DATA_POINTER(desc_p) (desc_p->data_pointer) -#define CHAN_SOC2DEV_P(ctx) (&((ctx)->ch[DCMODE_CHAN_SOC2DEV])) -#define CHAN_SOC2DEV_RET_P(ctx) (&((ctx)->ch[DCMODE_CHAN_SOC2DEV_RET])) -#define CHAN_DEV2SOC_P(ctx) (&((ctx)->ch[DCMODE_CHAN_DEV2SOC])) -#define CHAN_DEV2SOC_RET_P(ctx) (&((ctx)->ch[DCMODE_CHAN_DEV2SOC_RET])) -#define CHAN_DEV2SOC_EX_P(ctx) (&((ctx)->ch[DCMODE_CHAN_DEV2SOC_EXCEPT])) +#define CHAN_SOC2DEV_P(ctx) (&((ctx)->shared_info->ch[DCMODE_CHAN_SOC2DEV])) +#define CHAN_SOC2DEV_RET_P(ctx) (&((ctx)->shared_info->ch[DCMODE_CHAN_SOC2DEV_RET])) +#define CHAN_DEV2SOC_P(ctx) (&((ctx)->shared_info->ch[DCMODE_CHAN_DEV2SOC])) +#define CHAN_DEV2SOC_RET_P(ctx) (&((ctx)->shared_info->ch[DCMODE_CHAN_DEV2SOC_RET])) +#define CHAN_DEV2SOC_EX_P(ctx) (&((ctx)->shared_info->ch[DCMODE_CHAN_DEV2SOC_EXCEPT])) /*Global Variable*/ uint32_t g_inst = 0; @@ -140,31 +145,51 @@ typedef 
struct } dcmode_chan_t; #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ -struct xrx500_dcmode0_dev_info +struct xrx500_dcmode0_subif_info { + struct net_device *netif; /*! pointer to net_device*/ +}; + +struct xrx500_dcmode0_dev_shared_info { #define XRX500_DCMODE0_DEV_STATUS_FREE 0x0 #define XRX500_DCMODE0_DEV_STATUS_USED 0x1 int32_t status; int32_t port_id; + int32_t alt_port_id; /* Multiport reference port id */ + int32_t ref_count; int32_t cbm_pid; int32_t umt_id; int32_t umt_period; uint32_t dma_ctrlid; uint32_t dma_cid; uint32_t dma_ch; - - /* Resources */ - int32_t num_subif; int32_t num_bufpools; struct dc_dp_buf_pool *buflist; #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING dcmode_chan_t ch[DCMODE_CHAN_MAX]; // Array of Rings. Typically 4 #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ + int32_t num_subif; + struct xrx500_dcmode0_subif_info subif_info[XRX500_DCMODE0_MAX_SUBIF_PER_DEV]; +}; + +struct xrx500_dcmode0_dev_info +{ +#define XRX500_DCMODE0_DEV_STATUS_FREE 0x0 +#define XRX500_DCMODE0_DEV_STATUS_USED 0x1 + int32_t status; + int32_t port_id; + uint32_t alloc_flags; + int32_t num_subif; + + /* shared info */ + struct xrx500_dcmode0_dev_shared_info *shared_info; /* mib */ struct dc_dp_dev_mib subif_mib[XRX500_DCMODE0_MAX_SUBIF_PER_DEV]; }; -static struct xrx500_dcmode0_dev_info g_dcmode0_dev[XRX500_DCMODE0_MAX_DEV_NUM]; +static struct xrx500_dcmode0_dev_shared_info g_dcmode0_dev_shinfo[XRX500_DCMODE0_MAX_DEV_NUM]; +static struct xrx500_dcmode0_dev_info g_dcmode0_dev[XRX500_DCMODE0_MAX_DEV_PORT_NUM]; +static struct xrx500_dcmode0_dev_info *g_dcmode0_dev_p[XRX500_DCMODE0_MAX_PORT] = {NULL} ; spinlock_t g_dcmode0_dev_lock; static int32_t g_dcmode0_init_ok = 0; @@ -172,10 +197,15 @@ static int32_t g_dcmode0_init_ok = 0; /* Local */ static int32_t xrx500_dcmode0_rx_cb(struct net_device *rxif, struct net_device *txif, - struct sk_buff *skb, int32_t len); + struct sk_buff *skb, int32_t len); +static int32_t +xrx500_dcmode0_get_netif_subifid_cb(struct 
net_device *netif, + struct sk_buff *skb, void *subif_data, + uint8_t dst_mac[MAX_ETH_ALEN], + dp_subif_t *subif, uint32_t flags); static inline int32_t -xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags); +xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, int32_t ref_port_id, uint32_t dev_cap_req, uint32_t flags); static inline int32_t xrx500_alloc_ring_buffers(uint32_t num_bufs_req, int32_t *num_bufpools, struct dc_dp_buf_pool **buflist); static inline void @@ -200,7 +230,7 @@ _dc_dp_get_class2devqos(uint8_t *class2prio, uint8_t *prio2devqos, uint8_t class static void _dc_dp_dump_raw_data(char *buf, int len, char *prefix_str); static void _dc_dp_dump_rx_pmac(struct pmac_rx_hdr *pmac); #endif /* #if defined(CONFIG_DIRECTCONNECT_DP_DBG) && CONFIG_DIRECTCONNECT_DP_DBG */ -#endif +#endif /* #if 0 */ #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING #define _MMB_OPS_ADDR(a) (volatile void __iomem *)(a) @@ -208,50 +238,26 @@ static void _dc_dp_dump_rx_pmac(struct pmac_rx_hdr *pmac); static inline void dcmode_raw_writel(uint32_t val, volatile void __iomem *addr) { -#if 0 - writel(le32_to_cpu(val), _MMB_OPS_ADDR(addr)); -#else /* #if 0 */ writel(val, _MMB_OPS_ADDR(addr)); -#endif /* #else */ } static inline uint32_t dcmode_raw_readl(const volatile void __iomem *addr) { uint32_t val = readl(_MMB_OPS_ADDR(addr)); -#if 0 - return cpu_to_le32(val); -#else /* #if 0 */ return val; -#endif /* #else */ } static inline uint32_t dcmode_read_cntr(dcmode_chan_t *ch) { -#if 0 - if ( (ch->read_cntr.rw_endian & DC_DP_F_DCCNTR_MODE_BIG_ENDIAN) ) - //return (be32_to_cpu(*(uint32_t *)ch->read_cntr.virt)); - return (be32_to_cpu(*(uint32_t *)ch->read_cntr.phys)); - else - //return (le32_to_cpu(*(uint32_t *)ch->read_cntr.virt)); - return (le32_to_cpu(*(uint32_t *)ch->read_cntr.phys)); -#else /* #if 0 */ return dcmode_raw_readl(ch->read_cntr.virt); -#endif /* #else */ } static inline void dcmode_write_cntr(dcmode_chan_t *ch, uint32_t write_val) { -#if 0 - if ( 
(ch->write_cntr.rw_endian & DC_DP_F_DCCNTR_MODE_BIG_ENDIAN) ) - *(uint32_t *)ch->write_cntr.virt = cpu_to_be32(write_val); - else - *(uint32_t *)ch->write_cntr.virt = cpu_to_le32(write_val); -#else /* #if 0 */ return dcmode_raw_writel(write_val, ch->write_cntr.virt); -#endif /* #else */ } #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ @@ -261,8 +267,168 @@ dcmode_write_cntr(dcmode_chan_t *ch, uint32_t write_val) * ======================================================================== */ +static inline bool is_multiport(uint32_t alloc_flags) +{ + return (0 != (alloc_flags & DC_DP_F_MULTI_PORT)); +} + +static inline bool is_multiport_main(uint32_t alloc_flags) +{ + return ( (0 != (alloc_flags & DC_DP_F_MULTI_PORT)) && + (0 == (alloc_flags & DC_DP_F_SHARED_RES)) ); +} + +static inline bool is_multiport_sub(uint32_t alloc_flags) +{ + return ( (0 != (alloc_flags & DC_DP_F_MULTI_PORT)) && + (0 != (alloc_flags & DC_DP_F_SHARED_RES)) ); +} + +static inline bool is_multiport_main_by_subif(uint16_t subif_id) +{ + return (XRX500_DCMODE0_GET_SUBIFIDX(subif_id) >= MULTIPORT_WORKAROUND_MAX_SUBIF_NUM); +} + +static inline bool is_multiport_sub_by_subif(uint16_t subif_id) +{ + return (XRX500_DCMODE0_GET_SUBIFIDX(subif_id) < MULTIPORT_WORKAROUND_MAX_SUBIF_NUM); +} + +static inline void +multiport_wa_forward_map_subifid(struct xrx500_dcmode0_dev_info *dev_ctx, struct dp_subif *subif_id) +{ + /* Multiport 1st device? */ + if ( is_multiport_main(dev_ctx->alloc_flags) ) + subif_id->subif |= MULTIPORT_WORKAROUND_SUBIFID_MASK; + + /* Multiport sub-sequent device? */ + else + subif_id->port_id = dev_ctx->shared_info->port_id; +} + +static inline void +multiport_wa_reverse_map_subifid(struct xrx500_dcmode0_dev_info *dev_ctx, struct dp_subif *subif_id) +{ + /* Multiport 1st device? */ + if ( is_multiport_main_by_subif(subif_id->subif) ) + subif_id->subif &= ~MULTIPORT_WORKAROUND_SUBIFID_MASK; + + /* Multiport sub-sequent device? 
*/ + else + subif_id->port_id = dev_ctx->shared_info->alt_port_id; +} + +static struct xrx500_dcmode0_dev_shared_info * +xrx500_dcmode0_alloc_dev_shared_ctx(int32_t port_id, int32_t ref_port_id, uint32_t alloc_flags) +{ + int32_t dev_shinfo_idx; + struct xrx500_dcmode0_dev_shared_info *dev_shinfo_ctx = NULL; + struct xrx500_dcmode0_dev_info *ref_dev_ctx = NULL; + + /* Multiport sub-sequent device? */ + if ( is_multiport_sub(alloc_flags) && + (NULL != (ref_dev_ctx = g_dcmode0_dev_p[ref_port_id])) ) { + dev_shinfo_ctx = ref_dev_ctx->shared_info; + if (NULL == dev_shinfo_ctx) + goto err_out; + + dev_shinfo_ctx->alt_port_id = port_id; + dev_shinfo_ctx->ref_count++; + } else { + /* Find a free device shinfo index */ + for (dev_shinfo_idx = 0; dev_shinfo_idx < XRX500_DCMODE0_MAX_DEV_NUM; dev_shinfo_idx++) { + if (g_dcmode0_dev_shinfo[dev_shinfo_idx].status != XRX500_DCMODE0_DEV_STATUS_USED) { + break; + } + } + + if (dev_shinfo_idx >= XRX500_DCMODE0_MAX_DEV_NUM) { + DC_DP_ERROR("failed to allocate port as it reaches maximum directconnect device limit - %d!!!\n", XRX500_DCMODE0_MAX_DEV_NUM); + goto err_out; + } + dev_shinfo_ctx = &g_dcmode0_dev_shinfo[dev_shinfo_idx]; + + memset(dev_shinfo_ctx, 0, sizeof(struct xrx500_dcmode0_dev_shared_info)); + dev_shinfo_ctx->status = XRX500_DCMODE0_DEV_STATUS_USED; + + /* Multiport sub-sequent device? 
*/ + if ( is_multiport_sub(alloc_flags) ) { + dev_shinfo_ctx->port_id = ref_port_id; + dev_shinfo_ctx->alt_port_id = port_id; + } else + dev_shinfo_ctx->port_id = port_id; + + dev_shinfo_ctx->ref_count = 1; + } + +err_out: + return dev_shinfo_ctx; +} + +static inline void +xrx500_dcmode0_free_dev_shared_ctx(struct xrx500_dcmode0_dev_shared_info *dev_shinfo_ctx) +{ + if (NULL != dev_shinfo_ctx) { + dev_shinfo_ctx->ref_count--; + if (0 == dev_shinfo_ctx->ref_count) + memset(dev_shinfo_ctx, 0, sizeof(struct xrx500_dcmode0_dev_shared_info)); + } +} + +static struct xrx500_dcmode0_dev_info * +xrx500_dcmode0_alloc_dev_ctx(int32_t port_id, int32_t ref_port_id, uint32_t alloc_flags) +{ + int32_t dev_idx; + struct xrx500_dcmode0_dev_info *dev_ctx = NULL; + struct xrx500_dcmode0_dev_shared_info *dev_shinfo_ctx = NULL; + + /* Find a free device index */ + for (dev_idx = 0; dev_idx < XRX500_DCMODE0_MAX_DEV_PORT_NUM; dev_idx++) { + if (g_dcmode0_dev[dev_idx].status != XRX500_DCMODE0_DEV_STATUS_USED) { + break; + } + } + + if (dev_idx >= XRX500_DCMODE0_MAX_DEV_PORT_NUM) { + DC_DP_ERROR("failed to allocate port as it reaches maximum directconnect device limit - %d!!!\n", + XRX500_DCMODE0_MAX_DEV_PORT_NUM); + goto out; + } + + /* Allocate device shared context */ + dev_shinfo_ctx = xrx500_dcmode0_alloc_dev_shared_ctx(port_id, ref_port_id, alloc_flags); + if (NULL == dev_shinfo_ctx) + goto out; + + dev_ctx = &g_dcmode0_dev[dev_idx]; + + /* Reset DC Mode0 device structure */ + memset(dev_ctx, 0, sizeof(struct xrx500_dcmode0_dev_info)); + dev_ctx->status = XRX500_DCMODE0_DEV_STATUS_USED; + dev_ctx->port_id = port_id; + dev_ctx->alloc_flags = alloc_flags; + dev_ctx->shared_info = dev_shinfo_ctx; + + g_dcmode0_dev_p[port_id] = dev_ctx; + +out: + return dev_ctx; +} + +static inline void +xrx500_dcmode0_free_dev_ctx(struct xrx500_dcmode0_dev_info *dev_ctx) +{ + if (NULL != dev_ctx) { + /* Free device shared context */ + xrx500_dcmode0_free_dev_shared_ctx(dev_ctx->shared_info); + + 
g_dcmode0_dev_p[dev_ctx->port_id] = NULL; + memset(dev_ctx, 0, sizeof(struct xrx500_dcmode0_dev_info)); + } +} + static inline int32_t -xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags) +xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, int32_t ref_port_id, uint32_t dev_cap_req, uint32_t flags) { u8 i = 0, j = 0; GSW_API_HANDLE gswr; @@ -270,9 +436,6 @@ xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags) GSW_PMAC_Ig_Cfg_t igCfg; GSW_register_t regCfg; - memset((void *)&egCfg, 0x00, sizeof(egCfg)); - memset((void *)&igCfg, 0x00, sizeof(igCfg)); - /* Do the GSW-R configuration */ gswr = gsw_api_kopen(SWITCH_DEV); if (gswr == 0) { @@ -286,15 +449,22 @@ xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags) DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "PMAC_EG_CFG_SET for GSW-R.\n"); for (i = 0; i <= 15; i++) { for (j = 0; j <= 3; j++) { + memset((void *)&egCfg, 0x00, sizeof(egCfg)); egCfg.nRxDmaChanId = 0; - egCfg.bPmacEna = 0; - egCfg.bFcsEna = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_TX_PMAC) ) + egCfg.bPmacEna = 1; + else + egCfg.bPmacEna = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_TX_FCS) ) + egCfg.bFcsEna = 1; + else + egCfg.bFcsEna = 0; egCfg.bRemL2Hdr = 0; egCfg.numBytesRem = 0; egCfg.nResDW1 = 0; egCfg.nRes1DW0 = 0; egCfg.nRes2DW0 = 0; - egCfg.nDestPortId = port_id; + egCfg.nDestPortId = ref_port_id; egCfg.nTrafficClass = i; egCfg.bMpe1Flag = 0; egCfg.bMpe2Flag = 0; @@ -303,14 +473,18 @@ xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags) egCfg.nFlowIDMsb = j; egCfg.bTCEnable = 1; - gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, (unsigned int)&egCfg); + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, &egCfg); } } /* GSWIP-R PMAC Ingress Configuration Table */ DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "PMAC_IG_CFG_SET for GSW-R.\n"); + memset((void *)&igCfg, 0x00, sizeof(igCfg)); igCfg.nTxDmaChanId = dma_cid; - igCfg.bPmacPresent = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_PMAC) ) + igCfg.bPmacPresent = 
1; + else + igCfg.bPmacPresent = 0; igCfg.bSpIdDefault = 1; igCfg.eSubId = GSW_PMAC_IG_CFG_SRC_DMA_DESC; igCfg.bClassDefault = 0; @@ -322,31 +496,60 @@ xrx500_setup_pmac_port(int32_t port_id, int32_t dma_cid, uint32_t flags) igCfg.defPmacHdr[0] = 0; igCfg.defPmacHdr[1] = 0; - igCfg.defPmacHdr[2] = port_id << 4; + igCfg.defPmacHdr[2] = (ref_port_id << 4); igCfg.defPmacHdr[3] = 0x80; igCfg.defPmacHdr[4] = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_FCS) ) + igCfg.defPmacHdr[4] |= 0x80; + else + igCfg.defPmacHdr[4] &= ~0x80; igCfg.defPmacHdr[5] = 0; igCfg.defPmacHdr[6] = 0xFF; igCfg.defPmacHdr[7] = 0xFF; - gsw_api_kioctl(gswr, GSW_PMAC_IG_CFG_SET, (unsigned int)&igCfg); + gsw_api_kioctl(gswr, GSW_PMAC_IG_CFG_SET, &igCfg); - /* FIMXE : setup based on DSL or not */ - /* Allow traffic from one VAP to any VAP */ + /* No Rx FCS, then allow any short packet without padding */ + /* SDMA_PRIO:USIGN */ + #define SDMA_PRIO_REG_BASE 0xBC1 + #define SDMA_PRIO_REG_PORT_STEP 6 + #define SDMA_PRIO_REG_USIGN 0x4 - /* PCE_PCTRL_3 */ memset((void *)®Cfg, 0x00, sizeof(regCfg)); - regCfg.nRegAddr = 0x483 + (10 * port_id); - gsw_api_kioctl(gswr, GSW_REGISTER_GET, (unsigned int)®Cfg); - regCfg.nData |= 0x4000; - gsw_api_kioctl(gswr, GSW_REGISTER_SET, (unsigned int)®Cfg); + regCfg.nRegAddr = SDMA_PRIO_REG_BASE + (SDMA_PRIO_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, ®Cfg); - /* PCE_IGPTRM */ - memset((void *)®Cfg, 0x00, sizeof(regCfg)); - regCfg.nRegAddr = 0x544 + (16 * port_id); - gsw_api_kioctl(gswr, GSW_REGISTER_GET, (unsigned int)®Cfg); - regCfg.nData |= 0xFFFF; - gsw_api_kioctl(gswr, GSW_REGISTER_SET, (unsigned int)®Cfg); + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_FCS) ) + regCfg.nData &= ~SDMA_PRIO_REG_USIGN; + else + regCfg.nData |= SDMA_PRIO_REG_USIGN; + + gsw_api_kioctl(gswr, GSW_REGISTER_SET, ®Cfg); + + /* FIMXE : setup based on DSL or not */ + /* Allow traffic from one VAP to any VAP */ + + /* PCE_PCTRL_3:IGPTRM */ + #define PCE_PCTRL_3_REG_BASE 0x483 + 
#define PCE_PCTRL_3_REG_PORT_STEP 10 + #define PCE_PCTRL_3_REG_IGPTRM 0x4000 + + memset((void *)®Cfg, 0x00, sizeof(regCfg)); + regCfg.nRegAddr = PCE_PCTRL_3_REG_BASE + (PCE_PCTRL_3_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, ®Cfg); + regCfg.nData |= PCE_PCTRL_3_REG_IGPTRM; + gsw_api_kioctl(gswr, GSW_REGISTER_SET, ®Cfg); + + /* PCE_IGPTRM:SUBx */ + #define PCE_IGPTRM_REG_BASE 0x544 + #define PCE_IGPTRM_REG_PORT_STEP 16 + #define PCE_IGPTRM_REG_SUB_ALL 0xFFFF + + memset((void *)®Cfg, 0x00, sizeof(regCfg)); + regCfg.nRegAddr = PCE_IGPTRM_REG_BASE + (PCE_IGPTRM_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, ®Cfg); + regCfg.nData |= PCE_IGPTRM_REG_SUB_ALL; + gsw_api_kioctl(gswr, GSW_REGISTER_SET, ®Cfg); gsw_api_kclose(gswr); @@ -548,22 +751,22 @@ xrx500_setup_ring_resources(struct xrx500_dcmode0_dev_info *dev_ctx, int32_t por cbm_dq_port_res_t cbm_res = {0}; /* Allocate DMA1-TX DMA channel */ - ret = ltq_request_dma(dev_ctx->dma_ch, "dma1tx dc"); + ret = ltq_request_dma(dev_ctx->shared_info->dma_ch, "dma1tx dc"); if (ret) { - DC_DP_ERROR("failed to allocate DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->dma_ch); + DC_DP_ERROR("failed to allocate DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->shared_info->dma_ch); goto err_out; } /* Set DMA1-TX DMA channel descriptors */ - ret = ltq_dma_chan_desc_alloc(dev_ctx->dma_ch, res->rings.dev2soc.size); + ret = ltq_dma_chan_desc_alloc(dev_ctx->shared_info->dma_ch, res->rings.dev2soc.size); if (ret) { DC_DP_ERROR("failed to allocate %d descriptors for DMA1-TX DMA channel 0x%x!!!\n", - res->rings.dev2soc.size, dev_ctx->dma_ch); + res->rings.dev2soc.size, dev_ctx->shared_info->dma_ch); goto err_out_free_dma; } /* Update returned 'resource' structure */ - res->rings.dev2soc.phys_base = (void *)ltq_dma_chan_get_desc_phys_base(dev_ctx->dma_ch); + res->rings.dev2soc.phys_base = (void *)ltq_dma_chan_get_desc_phys_base(dev_ctx->shared_info->dma_ch); res->rings.dev2soc.base = (void 
*)CKSEG1ADDR((uint32_t)res->rings.dev2soc.phys_base); /* res->rings.dev2soc.size unchanged */ res->rings.dev2soc.desc_dwsz = 4; @@ -584,7 +787,7 @@ xrx500_setup_ring_resources(struct xrx500_dcmode0_dev_info *dev_ctx, int32_t por res->rings.soc2dev_ret.base = (void *)cbm_res.cbm_buf_free_base; res->rings.soc2dev_ret.phys_base = (void *)RPHYSADDR((uint32_t)cbm_res.cbm_buf_free_base); res->rings.soc2dev_ret.size = 32; // FIXME : cbm_res.num_free_entries; - res->rings.soc2dev_ret.desc_dwsz = 4; + res->rings.soc2dev_ret.desc_dwsz = 1; if (cbm_res.num_deq_ports) { res->rings.soc2dev.base = (void *)cbm_res.deq_info[0].cbm_dq_port_base; res->rings.soc2dev.phys_base = (void *)RPHYSADDR((uint32_t)cbm_res.deq_info[0].cbm_dq_port_base); @@ -610,8 +813,8 @@ xrx500_setup_ring_resources(struct xrx500_dcmode0_dev_info *dev_ctx, int32_t por DC_DP_ERROR("failed to register dev as tx buffer allocation failure!!!\n"); goto err_out_free_desc; } - dev_ctx->num_bufpools = num_bufpools; - dev_ctx->buflist = buflist; + dev_ctx->shared_info->num_bufpools = num_bufpools; + dev_ctx->shared_info->buflist = buflist; res->num_bufpools = res->num_bufs_req; res->buflist = (struct dc_dp_buf_pool *) kmalloc((res->num_bufpools * sizeof(struct dc_dp_buf_pool)), GFP_KERNEL); @@ -644,10 +847,10 @@ err_out_free_buf: xrx500_free_ring_buffers(num_bufpools, &buflist); err_out_free_desc: - ltq_dma_chan_desc_free(dev_ctx->dma_ch); + ltq_dma_chan_desc_free(dev_ctx->shared_info->dma_ch); err_out_free_dma: - ltq_free_dma(dev_ctx->dma_ch); + ltq_free_dma(dev_ctx->shared_info->dma_ch); err_out: return ret; @@ -664,19 +867,19 @@ xrx500_cleanup_ring_resources(struct xrx500_dcmode0_dev_info *dev_ctx, int32_t p #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ /* Reset the DMA1-TX DMA channel */ - ltq_dma_chan_reset(dev_ctx->dma_ch); + ltq_dma_chan_reset(dev_ctx->shared_info->dma_ch); /* De-allocate Tx buffer pool */ - xrx500_free_ring_buffers(dev_ctx->num_bufpools, &dev_ctx->buflist); + 
xrx500_free_ring_buffers(dev_ctx->shared_info->num_bufpools, &dev_ctx->shared_info->buflist); /* Free the DMA1-TX DMA channel descriptors */ - if (ltq_dma_chan_desc_free(dev_ctx->dma_ch)) { - DC_DP_ERROR("failed to free descriptors for DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->dma_ch); + if (ltq_dma_chan_desc_free(dev_ctx->shared_info->dma_ch)) { + DC_DP_ERROR("failed to free descriptors for DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->shared_info->dma_ch); } /* Free DMA1-TX DMA channel */ - if (ltq_free_dma(dev_ctx->dma_ch)) { - DC_DP_ERROR("failed to free DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->dma_ch); + if (ltq_free_dma(dev_ctx->shared_info->dma_ch)) { + DC_DP_ERROR("failed to free DMA1-TX DMA channel 0x%x!!!\n", dev_ctx->shared_info->dma_ch); } #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING @@ -842,12 +1045,12 @@ xrx500_setup_umt_port(struct xrx500_dcmode0_dev_info *dev_ctx, int32_t port_id, #endif /* #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) */ /* Initialize */ - dev_ctx->cbm_pid = cbm_pid; - dev_ctx->umt_id = umt_id; - dev_ctx->dma_ctrlid = dma_ctrlid; - dev_ctx->dma_cid = dma_cid; - dev_ctx->dma_ch = _DMA_C(dma_ctrlid, DMA1TX_PORT, dma_cid); - dev_ctx->umt_period = XRX500_DCMODE0_UMT_PERIOD_DEFAULT; + dev_ctx->shared_info->cbm_pid = cbm_pid; + dev_ctx->shared_info->umt_id = umt_id; + dev_ctx->shared_info->dma_ctrlid = dma_ctrlid; + dev_ctx->shared_info->dma_cid = dma_cid; + dev_ctx->shared_info->dma_ch = _DMA_C(dma_ctrlid, DMA1TX_PORT, dma_cid); + dev_ctx->shared_info->umt_period = XRX500_DCMODE0_UMT_PERIOD_DEFAULT; return ret; @@ -1132,14 +1335,14 @@ xrx500_dcmode0_get_host_capability(struct dc_dp_host_cap *cap, uint32_t flags) } static int32_t -xrx500_dcmode0_register_dev(void *ctx, +xrx500_dcmode0_register_dev_ex(void *ctx, struct module *owner, uint32_t port_id, struct net_device *dev, struct dc_dp_cb *datapathcb, - struct dc_dp_res *resources, struct dc_dp_dev *devspec, uint32_t flags) + struct dc_dp_res *resources, struct dc_dp_dev *devspec, + int32_t ref_port_id, 
uint32_t alloc_flags, uint32_t flags) { int32_t ret; struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; - int32_t dev_idx; dp_cb_t dp_cb = {0}; DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "dev_ctx=%p, owner=%p, port_id=%u, dev=%p, datapathcb=%p, " @@ -1151,7 +1354,7 @@ xrx500_dcmode0_register_dev(void *ctx, DC_DP_LOCK(&g_dcmode0_dev_lock); - if ( (NULL == dev_ctx) ) { + if ( !((NULL != dev_ctx) && (NULL != dev_ctx->shared_info)) ) { ret = DC_DP_FAILURE; goto err_unlock_out; } @@ -1160,18 +1363,23 @@ xrx500_dcmode0_register_dev(void *ctx, dc_dp_register_dcmode_device(owner, port_id, dev, dev_ctx, DC_DP_DCMODE_DEV_DEREGISTER); /* De-register device from DP Lib */ - dp_register_dev(owner, port_id, &dp_cb, DP_F_DEREGISTER); - - /* Cleanup ring resources */ - DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "De-configuring DMA1-Tx channel 0x%x.\n", - dev_ctx->dma_ch); - xrx500_cleanup_ring_resources(dev_ctx, port_id, resources, flags); - - /* Cleanup UMT resources */ - xrx500_cleanup_umt_port(port_id, dev_ctx->umt_id); + /* Skip, iff Multiport sub-sequent device? 
*/ + if ( !is_multiport_sub(dev_ctx->alloc_flags) ) + dp_register_dev(owner, port_id, &dp_cb, DP_F_DEREGISTER); + + /* For the last device */ + if ( (1 == dev_ctx->shared_info->ref_count) ) { + /* Cleanup ring resources */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "De-configuring DMA1-Tx channel 0x%x.\n", + dev_ctx->shared_info->dma_ch); + xrx500_cleanup_ring_resources(dev_ctx, dev_ctx->shared_info->port_id, resources, flags); + + /* Cleanup UMT resources */ + xrx500_cleanup_umt_port(dev_ctx->shared_info->port_id, dev_ctx->shared_info->umt_id); + } - /* Reset DC Mode0 device structure */ - memset(dev_ctx, 0, sizeof(struct xrx500_dcmode0_dev_info)); + /* Free DC Mode0 device context */ + xrx500_dcmode0_free_dev_ctx(dev_ctx); DC_DP_UNLOCK(&g_dcmode0_dev_lock); @@ -1188,62 +1396,66 @@ xrx500_dcmode0_register_dev(void *ctx, DC_DP_LOCK(&g_dcmode0_dev_lock); - /* Find a free device index */ - for (dev_idx = 0; dev_idx < XRX500_DCMODE0_MAX_DEV_NUM; dev_idx++) { - if (g_dcmode0_dev[dev_idx].status != XRX500_DCMODE0_DEV_STATUS_USED) { - break; - } + if (NULL != g_dcmode0_dev_p[port_id]) { + ret = DC_DP_FAILURE; + goto err_unlock_out; } - if (dev_idx >= XRX500_DCMODE0_MAX_DEV_NUM) { - DC_DP_ERROR("failed to allocate port as it reaches maximum directconnect device limit - %d!!!\n", XRX500_DCMODE0_MAX_DEV_NUM); + dev_ctx = xrx500_dcmode0_alloc_dev_ctx(port_id, ref_port_id, alloc_flags); + if (NULL == dev_ctx) { ret = DC_DP_FAILURE; goto err_unlock_out; } - dev_ctx = &g_dcmode0_dev[dev_idx]; - - /* Reset DC Mode0 device structure */ - memset(dev_ctx, 0, sizeof(struct xrx500_dcmode0_dev_info)); - dev_ctx->port_id = port_id; - /* Datapath Library callback registration */ - dp_cb.rx_fn = xrx500_dcmode0_rx_cb; - dp_cb.stop_fn = datapathcb->stop_fn; - dp_cb.restart_fn = datapathcb->restart_fn; - dp_cb.get_subifid_fn = dc_dp_get_netif_subifid; // Exported from DC common layer - dp_cb.reset_mib_fn = datapathcb->reset_mib_fn; - dp_cb.get_mib_fn = datapathcb->get_mib_fn; + /* Skip, iff 
multiport sub-sequent device? */ + if ( !is_multiport_sub(alloc_flags) ) { + /* Datapath Library callback registration */ + dp_cb.rx_fn = xrx500_dcmode0_rx_cb; + dp_cb.stop_fn = datapathcb->stop_fn; + dp_cb.restart_fn = datapathcb->restart_fn; + if ( is_multiport(alloc_flags) ) + dp_cb.get_subifid_fn = xrx500_dcmode0_get_netif_subifid_cb; + else + dp_cb.get_subifid_fn = dc_dp_get_netif_subifid; // Exported from DC common layer + dp_cb.reset_mib_fn = datapathcb->reset_mib_fn; + dp_cb.get_mib_fn = datapathcb->get_mib_fn; - ret = dp_register_dev(owner, port_id, &dp_cb, 0); - if (ret != DP_SUCCESS) { - DC_DP_ERROR("failed to register dev to Datapath Library/Core!!!\n"); - goto err_unlock_out; + ret = dp_register_dev(owner, port_id, &dp_cb, 0); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to register dev to Datapath Library/Core!!!\n"); + goto err_out_free_dev; + } } - /* Setup UMT resources */ - ret = xrx500_setup_umt_port(dev_ctx, port_id, resources, flags); - if (ret != DP_SUCCESS) { - DC_DP_ERROR("failed to setup UMT resources!!!\n"); - goto err_out_dereg_dev; - } + /* For the first device */ + if ( (1 == dev_ctx->shared_info->ref_count) ) { + /* Setup UMT resources */ + ret = xrx500_setup_umt_port(dev_ctx, dev_ctx->shared_info->port_id, resources, flags); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to setup UMT resources!!!\n"); + goto err_out_dereg_dev; + } - /* Setup Port resources */ - ret = xrx500_setup_pmac_port(port_id, dev_ctx->dma_cid, flags); - if (ret != DP_SUCCESS) { - DC_DP_ERROR("failed to setup UMT resources!!!\n"); - goto err_out_rel_umt; - } + /* Setup Port resources */ + ret = xrx500_setup_pmac_port(port_id, dev_ctx->shared_info->dma_cid, dev_ctx->shared_info->port_id, devspec->dev_cap_req, flags); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to setup UMT resources!!!\n"); + goto err_out_rel_umt; + } - /* Setup ring resources */ - DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Configuring DMA1-Tx channel 0x%x.\n", - dev_ctx->dma_ch); - ret = 
xrx500_setup_ring_resources(dev_ctx, port_id, resources, flags); - if (ret) { - DC_DP_ERROR("failed to configure DMA1-TX DMA channel 0x%x!!!\n", - dev_ctx->dma_ch); - goto err_out_res_pmac; + /* Setup ring resources */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Configuring DMA1-Tx channel 0x%x.\n", + dev_ctx->shared_info->dma_ch); + ret = xrx500_setup_ring_resources(dev_ctx, dev_ctx->shared_info->port_id, resources, flags); + if (ret) { + DC_DP_ERROR("failed to configure DMA1-TX DMA channel 0x%x!!!\n", + dev_ctx->shared_info->dma_ch); + goto err_out_res_pmac; + } } + /* FIXME : prepare returned resources */ + devspec->dc_accel_used = DC_DP_ACCEL_FULL_OFFLOAD; devspec->dc_tx_ring_used = DC_DP_RING_HW_MODE0; devspec->dc_rx_ring_used = DC_DP_RING_HW_MODE0; @@ -1256,19 +1468,21 @@ xrx500_dcmode0_register_dev(void *ctx, goto err_out_res_pmac; } - /* Initialize DC Mode0 device structure */ - dev_ctx->status = XRX500_DCMODE0_DEV_STATUS_USED; - DC_DP_UNLOCK(&g_dcmode0_dev_lock); return ret; err_out_res_pmac: err_out_rel_umt: - xrx500_cleanup_umt_port(port_id, dev_ctx->umt_id); + xrx500_cleanup_umt_port(dev_ctx->shared_info->port_id, dev_ctx->shared_info->umt_id); err_out_dereg_dev: - dp_register_dev(owner, port_id, &dp_cb, DP_F_DEREGISTER); + /* Skip, iff Multiport sub-sequent device? */ + if ( !is_multiport_sub(alloc_flags) ) + dp_register_dev(owner, port_id, &dp_cb, DP_F_DEREGISTER); + +err_out_free_dev: + xrx500_dcmode0_free_dev_ctx(dev_ctx); err_unlock_out: DC_DP_UNLOCK(&g_dcmode0_dev_lock); @@ -1285,6 +1499,7 @@ xrx500_dcmode0_register_subif(void *ctx, { int32_t ret = DC_DP_FAILURE; struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + struct dp_subif subif = *subif_id; DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "dev_ctx=%p, owner=%p, dev=%p, subif_id=%p, flags=0x%08X\n", dev_ctx, owner, dev, subif_id, flags); @@ -1294,43 +1509,49 @@ xrx500_dcmode0_register_subif(void *ctx, DC_DP_LOCK(&g_dcmode0_dev_lock); + /* Multiport device? 
*/ + if ( is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + /* For the last subif */ - if (dev_ctx->num_subif == 1) { + if (dev_ctx->shared_info->num_subif == 1) { /* Disable DMA1-TX DMA channel */ - if (ltq_dma_chan_off(dev_ctx->dma_ch)) { - DC_DP_ERROR("failed to close dma1-tx dma channel 0x%x!!!\n", dev_ctx->dma_ch); + if (ltq_dma_chan_off(dev_ctx->shared_info->dma_ch)) { + DC_DP_ERROR("failed to close dma1-tx dma channel 0x%x!!!\n", dev_ctx->shared_info->dma_ch); } #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) /* Disable UMT port */ DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Disabling UMT.\n"); - if (ltq_umt_enable(dev_ctx->umt_id, subif_id->port_id, UMT_DISABLE)) { + if (ltq_umt_enable(dev_ctx->shared_info->umt_id, dev_ctx->shared_info->port_id, UMT_DISABLE)) { DC_DP_ERROR("failed to disable umt_id=%d, port_id=%d!!!\n", - dev_ctx->umt_id, subif_id->port_id); + dev_ctx->shared_info->umt_id, dev_ctx->shared_info->port_id); } #endif /* #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) */ /* Delay for ongoing UMT transfer */ udelay(XRX500_DCMODE0_UMT_PERIOD_DEFAULT * 2); /* Disable and Flush CBM/TMU Queue(s) */ - xrx500_flush_cbm_queue(subif_id->port_id); + xrx500_flush_cbm_queue(subif.port_id); } /* De-register subif from Datapath Library/Core */ - ret = dp_register_subif(owner, dev, (uint8_t *)subif_name, subif_id, DP_F_DEREGISTER); + ret = dp_register_subif(owner, dev, (uint8_t *)subif_name, &subif, DP_F_DEREGISTER); if (ret != DP_SUCCESS) { DC_DP_ERROR("failed to de-register subif from Datapath Library/Core!!!\n"); goto err_unlock_out; } /* For the last subif, Restore TMU queue map */ - if (dev_ctx->num_subif == 1) - xrx500_restore_tmu_queuemap(subif_id->port_id); + if (dev_ctx->shared_info->num_subif == 1) + xrx500_restore_tmu_queuemap(subif.port_id); /* Reset device mib structure */ memset(&dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(subif_id->subif)], 0, sizeof(struct dc_dp_dev_mib)); dev_ctx->num_subif--; + 
dev_ctx->shared_info->num_subif--; + dev_ctx->shared_info->subif_info[XRX500_DCMODE0_GET_SUBIFIDX(subif.subif)].netif = NULL; DC_DP_UNLOCK(&g_dcmode0_dev_lock); @@ -1340,38 +1561,57 @@ xrx500_dcmode0_register_subif(void *ctx, DC_DP_LOCK(&g_dcmode0_dev_lock); + /* Multiport device? */ + if ( is_multiport(dev_ctx->alloc_flags) ) { + /* Single port should not have more than 8 Subif */ + if ( XRX500_DCMODE0_GET_SUBIFIDX(subif_id->subif) >= MULTIPORT_WORKAROUND_MAX_SUBIF_NUM ) { + DC_DP_ERROR("failed to register subif, subif_id_num=0x%04x reaches maximum limit 8!!!\n", subif_id->subif); + goto err_unlock_out; + } + + multiport_wa_forward_map_subifid(dev_ctx, &subif); + } + /* For the first subif, enable DMA1-TX DMA channel */ - if (dev_ctx->num_subif == 0) { + if (dev_ctx->shared_info->num_subif == 0) { #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) /* Enable UMT hw */ DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Enabling UMT HW.\n"); - ret = ltq_umt_enable(dev_ctx->umt_id, subif_id->port_id, UMT_ENABLE); + ret = ltq_umt_enable(dev_ctx->shared_info->umt_id, dev_ctx->shared_info->port_id, UMT_ENABLE); if (ret) { DC_DP_ERROR("failed to enable umt_id=%d, port_id=%d!!!\n", - dev_ctx->umt_id, subif_id->port_id); + dev_ctx->shared_info->umt_id, dev_ctx->shared_info->port_id); goto err_unlock_out; } #endif /* #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) */ /* Enable DMA1-TX DMA channel */ - ret = ltq_dma_chan_on(dev_ctx->dma_ch); + ret = ltq_dma_chan_on(dev_ctx->shared_info->dma_ch); if (ret) { - DC_DP_ERROR("failed to open dma1-tx dma channel=0x%x!!!\n", dev_ctx->dma_ch); + DC_DP_ERROR("failed to open dma1-tx dma channel=0x%x!!!\n", dev_ctx->shared_info->dma_ch); goto err_out_disable_umt; } } /* Register subif to Datapath Library/Core */ - ret = dp_register_subif(owner, dev, (uint8_t *)subif_name, subif_id, 0); + ret = dp_register_subif(owner, dev, (uint8_t *)subif_name, &subif, 0); if (ret != DP_SUCCESS) { DC_DP_ERROR("failed to register subif to Datapath Library/Core!!!\n"); goto 
err_out_disable_dmach; } + /* Multiport 1st device? */ + if ( is_multiport_main(dev_ctx->alloc_flags) ) + subif_id->subif = (subif.subif & ~MULTIPORT_WORKAROUND_SUBIFID_MASK); + else + subif_id->subif = subif.subif; + /* Reset device mib structure */ memset(&dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(subif_id->subif)], 0, sizeof(struct dc_dp_dev_mib)); + dev_ctx->shared_info->subif_info[XRX500_DCMODE0_GET_SUBIFIDX(subif.subif)].netif = dev; + dev_ctx->shared_info->num_subif++; dev_ctx->num_subif++; DC_DP_UNLOCK(&g_dcmode0_dev_lock); @@ -1379,11 +1619,11 @@ xrx500_dcmode0_register_subif(void *ctx, goto out; err_out_disable_dmach: - ltq_dma_chan_off(dev_ctx->dma_ch); + ltq_dma_chan_off(dev_ctx->shared_info->dma_ch); err_out_disable_umt: #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) - ltq_umt_enable(dev_ctx->umt_id, subif_id->port_id, UMT_DISABLE); + ltq_umt_enable(dev_ctx->shared_info->umt_id, dev_ctx->shared_info->port_id, UMT_DISABLE); #endif /* #if defined(CONFIG_LTQ_UMT_EXPAND_MODE) */ err_unlock_out: @@ -1401,17 +1641,30 @@ xrx500_dcmode0_xmit(void *ctx, struct net_device *rx_if, struct dp_subif *rx_sub int32_t ret; uint32_t dp_flags = 0; struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + struct dp_subif subif = *tx_subif; + struct dma_tx_desc_0 *desc_0 = (struct dma_tx_desc_0 *) &skb->DW0; + struct dma_tx_desc_1 *desc_1 = (struct dma_tx_desc_1 *) &skb->DW1; + + /* Multiport device? 
*/ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + + /* skb->DWx */ + desc_1->field.ep = subif.port_id; + desc_0->field.dest_sub_if_id = subif.subif; if ( (skb->ip_summed == CHECKSUM_PARTIAL) ) dp_flags = DP_TX_CAL_CHKSUM; /* Send it to Datapath library for transmit */ - ret = dp_xmit(skb->dev, tx_subif, skb, skb->len, dp_flags); - if ( (DP_SUCCESS == ret) ) { - dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_pkts ++; - dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_bytes += len; - } else - dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_errs ++; + ret = dp_xmit(skb->dev, &subif, skb, skb->len, dp_flags); + if (dev_ctx) { + if ( (DP_SUCCESS == ret) ) { + dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_pkts ++; + dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_bytes += len; + } else + dev_ctx->subif_mib[XRX500_DCMODE0_GET_SUBIFIDX(tx_subif->subif)].tx_errs ++; + } return ret; } @@ -1466,7 +1719,37 @@ out: } #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ -#if (defined(XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) && XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) +static int32_t +xrx500_dcmode0_return_bufs(void *ctx, uint32_t port_id, + void **buflist, uint32_t buflist_len, uint32_t flags) +{ + int32_t ret = DC_DP_FAILURE; + int32_t buf_idx; + uint32_t **tmp_buflist; + uint32_t buf_free_count = 0; + + if ( (buflist_len > 0) && (NULL != buflist) ) { + tmp_buflist = (uint32_t **)buflist; + for (buf_idx = 0; buf_idx < buflist_len; buf_idx++) { + /* + * flag=0, for buffer virtual address + * flag=1, for buffer physical address (used) + */ + ret = cbm_buffer_free(smp_processor_id(), (void *)tmp_buflist[buf_idx], 1); + if (ret == DP_SUCCESS) + buf_free_count++; + } + if (buf_free_count == buflist_len) + ret = DC_DP_SUCCESS; + else + ret = DC_DP_FAILURE; + } + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "cbm buffer returned = %d/%d\n", 
buf_free_count, buflist_len); + + return ret; +} + static int32_t xrx500_dcmode0_add_session_shortcut_forward(void *ctx, struct dp_subif *subif, struct sk_buff *skb, uint32_t flags) { @@ -1502,7 +1785,6 @@ xrx500_dcmode0_add_session_shortcut_forward(void *ctx, struct dp_subif *subif, s return 0; } -#endif /* #if (defined(XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) && XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) */ static int32_t xrx500_dcmode0_disconn_if(void *ctx, struct net_device *netif, struct dp_subif *subif_id, @@ -1511,9 +1793,16 @@ xrx500_dcmode0_disconn_if(void *ctx, struct net_device *netif, struct dp_subif * int32_t ret = DC_DP_SUCCESS; #if IS_ENABLED(CONFIG_PPA) + struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + struct dp_subif subif = *subif_id; + + /* Multiport device? */ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + /* Remove all the sessions from PPA */ if (ppa_hook_disconn_if_fn) - ret = ppa_hook_disconn_if_fn(netif, subif_id, mac_addr, flags); + ret = ppa_hook_disconn_if_fn(netif, &subif, mac_addr, flags); #endif /* #if IS_ENABLED(CONFIG_PPA) */ return ret; @@ -1531,8 +1820,8 @@ xrx500_dcmode0_rx_cb(struct net_device *rxif, struct net_device *txif, int32_t ret; struct pmac_rx_hdr *pmac; struct dp_subif rx_subif = {0}; - int32_t dev_idx; int32_t subif_idx; + struct xrx500_dcmode0_dev_info *dev_ctx = NULL; if (!skb) { DC_DP_ERROR("failed to receive as skb=%p!!!\n", skb); @@ -1553,23 +1842,95 @@ xrx500_dcmode0_rx_cb(struct net_device *rxif, struct net_device *txif, len -= sizeof(struct pmac_rx_hdr); skb_pull(skb, sizeof(struct pmac_rx_hdr)); + dev_ctx = g_dcmode0_dev_p[rx_subif.port_id]; + if (NULL == dev_ctx) { + DC_DP_ERROR("failed to receive as dev_ctx=%p for port_id=%d!!!\n", dev_ctx, rx_subif.port_id); + dev_kfree_skb_any(skb); + return -1; + } + + /* Multiport device? 
*/ + if ( is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_reverse_map_subifid(dev_ctx, &rx_subif); + if ( (rxif->features & NETIF_F_RXCSUM) ) skb->ip_summed = CHECKSUM_UNNECESSARY; ret = dc_dp_rx(rxif, txif, &rx_subif, skb, skb->len, 0); if ( !ret ) { /* Update Rx mib */ - for (dev_idx = 0; dev_idx < XRX500_DCMODE0_MAX_DEV_NUM; dev_idx++) { - if ( (g_dcmode0_dev[dev_idx].status != XRX500_DCMODE0_DEV_STATUS_FREE) && - (g_dcmode0_dev[dev_idx].port_id == rx_subif.port_id)) { - subif_idx = XRX500_DCMODE0_GET_SUBIFIDX(rx_subif.subif); - g_dcmode0_dev[dev_idx].subif_mib[subif_idx].rx_fn_rxif_pkts ++; - g_dcmode0_dev[dev_idx].subif_mib[subif_idx].rx_fn_rxif_bytes += len; + subif_idx = XRX500_DCMODE0_GET_SUBIFIDX(rx_subif.subif); + dev_ctx->subif_mib[subif_idx].rx_fn_rxif_pkts ++; + dev_ctx->subif_mib[subif_idx].rx_fn_rxif_bytes += len; + } + + return ret; +} + +static int32_t +xrx500_dcmode0_get_netif_subifid_cb(struct net_device *netif, + struct sk_buff *skb, void *subif_data, + uint8_t dst_mac[MAX_ETH_ALEN], + dp_subif_t *subif, uint32_t flags) /*! get subifid */ +{ + int32_t ret = 1; + struct dp_subif subif_id = {0}; + struct xrx500_dcmode0_dev_info *dev_ctx; + struct xrx500_dcmode0_dev_shared_info *dev_shared_ctx; + int32_t subif_idx; + + /* Validate input argument */ + if (!netif) { + DC_DP_ERROR("failed to get subifid as netif=%p!!!\n", netif); + goto out; + } + + if (!subif) { + DC_DP_ERROR("failed to get subifid as subif=%p!!!\n", subif); + goto out; + } + + /* Validate device context */ + dev_ctx = g_dcmode0_dev_p[subif->port_id]; + if (!dev_ctx) { + DC_DP_ERROR("port_id=%d not registered!!!\n", subif->port_id); + goto out; + } + + dev_shared_ctx = dev_ctx->shared_info; + if (!dev_shared_ctx) { + DC_DP_ERROR("port_id=%d not registered!!!\n", subif->port_id); + goto out; + } + + /* FIXME : No valid subif from DPLib in get_subif_fn CB? 
*/ + subif_idx = XRX500_DCMODE0_GET_SUBIFIDX(subif->subif); + if (dev_shared_ctx->subif_info[subif_idx].netif != netif) { + for (subif_idx = 0; subif_idx < XRX500_DCMODE0_MAX_SUBIF_PER_DEV; subif_idx++) { + if (dev_shared_ctx->subif_info[subif_idx].netif == netif) { + subif->subif &= ~(XRX500_DCMODE0_SUBIFID_MASK << XRX500_DCMODE0_SUBIFID_OFFSET); + subif->subif |= (subif_idx << XRX500_DCMODE0_SUBIFID_OFFSET); break; } } + + if (subif_idx == XRX500_DCMODE0_MAX_SUBIF_PER_DEV) { + DC_DP_ERROR("No matching netif=%p!!!\n", netif); + goto out; + } } + subif_id = *subif; + multiport_wa_reverse_map_subifid(dev_ctx, &subif_id); + + ret = dc_dp_get_netif_subifid(netif, skb, subif_data, dst_mac, &subif_id, flags); + if (ret) + goto out; + + subif->subif &= ~0x00FF; + subif->subif |= (subif_id.subif & 0x00FF); + +out: return ret; } @@ -1587,6 +1948,12 @@ xrx500_dcmode0_register_qos_class2prio_cb(void *ctx, int32_t port_id, struct net int32_t ret = DC_DP_SUCCESS; #if IS_ENABLED(CONFIG_PPA) + struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + + /* Multiport sub-sequent device? */ + if ( dev_ctx && is_multiport_sub(dev_ctx->alloc_flags) ) + return DC_DP_SUCCESS; + if (ppa_register_qos_class2prio_hook_fn) ret = ppa_register_qos_class2prio_hook_fn(port_id, netif, cb, flags); #endif /* #if IS_ENABLED(CONFIG_PPA) */ @@ -1598,6 +1965,7 @@ static int32_t xrx500_dcmode0_map_class2devqos(void *ctx, int32_t port_id, struct net_device *netif, uint8_t class2prio[], uint8_t prio2devqos[]) { + struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; uint8_t devqos; /* Configure the egress PMAC table to mark the WMM/TID in the descriptor DW1[7:4] */ @@ -1605,6 +1973,10 @@ xrx500_dcmode0_map_class2devqos(void *ctx, int32_t port_id, struct net_device *n GSW_PMAC_Eg_Cfg_t egcfg; GSW_API_HANDLE gswr; + /* Multiport sub-sequent device? 
*/ + if ( dev_ctx && is_multiport_sub(dev_ctx->alloc_flags) ) + return DC_DP_SUCCESS; + /* Do the GSW-R configuration */ gswr = gsw_api_kopen(SWITCH_DEV); if (gswr == 0) { @@ -1621,10 +1993,10 @@ xrx500_dcmode0_map_class2devqos(void *ctx, int32_t port_id, struct net_device *n egcfg.nDestPortId = port_id; egcfg.nTrafficClass = i; egcfg.nFlowIDMsb = j; - gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_GET, (unsigned int)&egcfg); + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_GET, &egcfg); egcfg.nResDW1 = devqos; - gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, (unsigned int)&egcfg); + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, &egcfg); } } @@ -1639,14 +2011,19 @@ xrx500_dcmode0_get_netif_stats(void *ctx, struct rtnl_link_stats64 *if_stats, uint32_t flags) { int32_t ret; + struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + struct dp_subif subif = *subif_id; uint32_t dp_flags = 0x0; int32_t subif_idx; - struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + + /* Multiport device? */ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); if ( (flags & DC_DP_F_SUBIF_LOGICAL) ) dp_flags = DP_F_STATS_SUBIF; - ret = dp_get_netif_stats(netif, subif_id, if_stats, dp_flags); + ret = dp_get_netif_stats(netif, &subif, if_stats, dp_flags); if ( (NULL != dev_ctx) ) { if ( (flags & DC_DP_F_SUBIF_LOGICAL) ) { @@ -1690,8 +2067,13 @@ xrx500_dcmode0_clear_netif_stats(void *ctx, struct net_device *netif, struct dp_subif *subif_id, uint32_t flags) { - uint32_t dp_flags = 0x0; struct xrx500_dcmode0_dev_info *dev_ctx = (struct xrx500_dcmode0_dev_info *)ctx; + struct dp_subif subif = *subif_id; + uint32_t dp_flags = 0x0; + + /* Multiport device? 
*/ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); if ( (flags & DC_DP_F_SUBIF_LOGICAL) ) { /* Clear local subif MIB statistics */ @@ -1705,7 +2087,7 @@ xrx500_dcmode0_clear_netif_stats(void *ctx, memset(&dev_ctx->subif_mib[0], 0, (XRX500_DCMODE0_MAX_SUBIF_PER_DEV * sizeof(struct dc_dp_dev_mib))); } - return dp_clear_netif_stats(netif, subif_id, dp_flags); + return dp_clear_netif_stats(netif, &subif, dp_flags); } static int32_t @@ -1724,10 +2106,10 @@ xrx500_dcmode0_change_dev_status(void *ctx, int32_t port_id, uint32_t flags) if (flags & DC_DP_F_DEQ_ENABLE) { /* Enable DMA1-TX DMA channel */ - ret = ltq_dma_chan_on(dev_ctx->dma_ch); + ret = ltq_dma_chan_on(dev_ctx->shared_info->dma_ch); } else if (flags & DC_DP_F_DEQ_DISABLE) { /* Disable DMA1-TX DMA channel */ - ret = ltq_dma_chan_off(dev_ctx->dma_ch); + ret = ltq_dma_chan_off(dev_ctx->shared_info->dma_ch); } return ret; @@ -1748,7 +2130,7 @@ xrx500_dcmode0_get_wol_cfg(void *ctx, int32_t port_id, struct dc_dp_wol_cfg *cfg } memset(&wol_cfg, 0, sizeof(GSW_WoL_Cfg_t)); - ret = gsw_api_kioctl(gswr, GSW_WOL_CFG_GET, (unsigned int)&wol_cfg); + ret = gsw_api_kioctl(gswr, GSW_WOL_CFG_GET, &wol_cfg); memcpy(cfg->wol_mac, wol_cfg.nWolMAC, sizeof(wol_cfg.nWolMAC)); memcpy(cfg->wol_passwd, wol_cfg.nWolPassword, sizeof(cfg->wol_passwd)); cfg->wol_passwd_enable = wol_cfg.bWolPasswordEnable; @@ -1776,7 +2158,7 @@ xrx500_dcmode0_set_wol_cfg(void *ctx, int32_t port_id, struct dc_dp_wol_cfg *cfg memcpy(wol_cfg.nWolMAC, cfg->wol_mac, sizeof(wol_cfg.nWolMAC)); memcpy(wol_cfg.nWolPassword, cfg->wol_passwd, sizeof(wol_cfg.nWolPassword)); wol_cfg.bWolPasswordEnable = cfg->wol_passwd_enable; - ret = gsw_api_kioctl(gswr, GSW_WOL_CFG_SET, (unsigned int)&wol_cfg); + ret = gsw_api_kioctl(gswr, GSW_WOL_CFG_SET, &wol_cfg); gsw_api_kclose(gswr); @@ -1800,7 +2182,7 @@ xrx500_dcmode0_set_wol_ctrl(void *ctx, int32_t port_id, uint32_t enable) memset(&wol_port_cfg, 0, 
sizeof(GSW_WoL_PortCfg_t)); wol_port_cfg.nPortId = port_id; wol_port_cfg.bWakeOnLAN_Enable = enable; - ret = gsw_api_kioctl(gswr, GSW_WOL_PORT_CFG_SET, (unsigned int)&wol_port_cfg); + ret = gsw_api_kioctl(gswr, GSW_WOL_PORT_CFG_SET, &wol_port_cfg); gsw_api_kclose(gswr); @@ -1823,7 +2205,7 @@ xrx500_dcmode0_get_wol_ctrl_status(void *ctx, int32_t port_id) memset(&wol_port_cfg, 0, sizeof(GSW_WoL_PortCfg_t)); wol_port_cfg.nPortId = port_id; - ret = gsw_api_kioctl(gswr, GSW_WOL_PORT_CFG_GET, (unsigned int)&wol_port_cfg); + ret = gsw_api_kioctl(gswr, GSW_WOL_PORT_CFG_GET, &wol_port_cfg); if (ret == 0) { if (wol_port_cfg.bWakeOnLAN_Enable) ret = 1; @@ -1846,24 +2228,24 @@ xrx500_dcmode0_dump_proc(void *ctx, struct seq_file *seq) return; seq_printf(seq, " cbm_pid: %d\n", - dev_ctx->cbm_pid); + dev_ctx->shared_info->cbm_pid); seq_printf(seq, " dma_ch: %d\n", - _DMA_CHANNEL(dev_ctx->dma_ch)); + _DMA_CHANNEL(dev_ctx->shared_info->dma_ch)); seq_printf(seq, " num_bufpools: %02d\n", - dev_ctx->num_bufpools); + dev_ctx->shared_info->num_bufpools); #if 0 - for (i = 0; i < dev_ctx->num_bufpools; i++) { + for (i = 0; i < dev_ctx->shared_info->num_bufpools; i++) { seq_printf(seq, " buflist %d:\n", (i + 1)); seq_printf(seq, " virtual range: 0x%p-0x%p (%d KB)\n", - dev_ctx->buflist[i].pool, - ((uint8_t *)dev_ctx->buflist[i].pool + dev_ctx->buflist[i].size), - (dev_ctx->buflist[i].size >> 10)); + dev_ctx->shared_info->buflist[i].pool, + ((uint8_t *)dev_ctx->shared_info->buflist[i].pool + dev_ctx->shared_info->buflist[i].size), + (dev_ctx->shared_info->buflist[i].size >> 10)); seq_printf(seq, " physical range: 0x%p-0x%p (%d KB)\n", - dev_ctx->buflist[i].phys_pool, - ((uint8_t *)dev_ctx->buflist[i].phys_pool + dev_ctx->buflist[i].size), - (dev_ctx->buflist[i].size >> 10)); + dev_ctx->shared_info->buflist[i].phys_pool, + ((uint8_t *)dev_ctx->shared_info->buflist[i].phys_pool + dev_ctx->shared_info->buflist[i].size), + (dev_ctx->shared_info->buflist[i].size >> 10)); } #endif @@ -1881,39 
+2263,37 @@ xrx500_dcmode0_dump_proc(void *ctx, struct seq_file *seq) seq_printf(seq, " Ring: dev2soc_frag_except:\n"); seq_printf(seq, " Ring Base: P:0x%p, V:0x%p\n", - dev_ctx->ch[i].ring.phys, dev_ctx->ch[i].ring.virt); + dev_ctx->shared_info->ch[i].ring.phys, dev_ctx->shared_info->ch[i].ring.virt); seq_printf(seq, " Ring Size: %d, each of size %d DW\n", - dev_ctx->ch[i].ring.size, dev_ctx->ch[i].ring.desc_dwsz); + dev_ctx->shared_info->ch[i].ring.size, dev_ctx->shared_info->ch[i].ring.desc_dwsz); seq_printf(seq, " Ring Track: rp=%#x wp=%#x\n", - dev_ctx->ch[i].ring.trck.rp, dev_ctx->ch[i].ring.trck.wp); + dev_ctx->shared_info->ch[i].ring.trck.rp, dev_ctx->shared_info->ch[i].ring.trck.wp); seq_printf(seq, " Soc To Dev Write Counter: P:0x%p, V:0x%p (Size %d Bytes)\n", - dev_ctx->ch[i].write_cntr.phys, dev_ctx->ch[i].write_cntr.virt, - dev_ctx->ch[i].write_cntr.size); + dev_ctx->shared_info->ch[i].write_cntr.phys, dev_ctx->shared_info->ch[i].write_cntr.virt, + dev_ctx->shared_info->ch[i].write_cntr.size); seq_printf(seq, " Soc From Dev Read Counter: P:0x%p, V:0x%p (Size %d Bytes)\n", - dev_ctx->ch[i].read_cntr.phys, dev_ctx->ch[i].read_cntr.virt, - dev_ctx->ch[i].read_cntr.size); + dev_ctx->shared_info->ch[i].read_cntr.phys, dev_ctx->shared_info->ch[i].read_cntr.virt, + dev_ctx->shared_info->ch[i].read_cntr.size); } #endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ seq_printf(seq, " umt_id: %d\n", - dev_ctx->umt_id); + dev_ctx->shared_info->umt_id); seq_printf(seq, " umt_period: %d (in micro second)\n", - dev_ctx->umt_period); + dev_ctx->shared_info->umt_period); } static struct dc_dp_dcmode_ops xrx500_dcmode0_ops = { .get_host_capability = xrx500_dcmode0_get_host_capability, - .register_dev = xrx500_dcmode0_register_dev, + .register_dev = NULL, + .register_dev_ex = xrx500_dcmode0_register_dev_ex, .register_subif = xrx500_dcmode0_register_subif, .xmit = xrx500_dcmode0_xmit, #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING .handle_ring_sw = xrx500_dcmode0_handle_ring_sw, 
#endif /* #ifdef DEV2SOC_FRAG_EXCEPTION_HANDLING */ -#if (defined(XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) && XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) + .return_bufs = xrx500_dcmode0_return_bufs, .add_session_shortcut_forward = xrx500_dcmode0_add_session_shortcut_forward, -#else /* #if (defined(XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) && XRX500_DCMODE0_BRIDGE_FLOW_LEARNING) */ - .add_session_shortcut_forward = NULL, -#endif /* #else */ .disconn_if = xrx500_dcmode0_disconn_if, .get_netif_stats = xrx500_dcmode0_get_netif_stats, .clear_netif_stats = xrx500_dcmode0_clear_netif_stats, diff --git a/dc_mode/dc_modex/Makefile b/dc_mode/dc_modex/Makefile new file mode 100644 index 0000000..1de399c --- /dev/null +++ b/dc_mode/dc_modex/Makefile @@ -0,0 +1,9 @@ +ifeq ($(CONFIG_DIRECTCONNECT_DP_LGM),y) + obj-m += dc_mode1-lgm.o + dc_mode1-lgm-objs += dc_modex_ext.o +endif + +ifeq ($(CONFIG_DIRECTCONNECT_DP_PRX300),y) + obj-m += dc_mode1-prx300.o + dc_mode1-prx300-objs += dc_modex_ext.o +endif diff --git a/dc_mode/dc_modex/dc_modex_ext.c b/dc_mode/dc_modex/dc_modex_ext.c new file mode 100644 index 0000000..cd69359 --- /dev/null +++ b/dc_mode/dc_modex/dc_modex_ext.c @@ -0,0 +1,1693 @@ +/* + * DirectConnect provides a common interface for the network devices to achieve the full or partial + acceleration services from the underlying packet acceleration engine + * Copyright (c) 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +/* Includes */ +#include <linux/version.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#if defined(CONFIG_X86_INTEL_LGM) +#include <linux/pp_api.h> +#endif + +#include <net/switch_api/lantiq_gsw_api.h> +#if IS_ENABLED(CONFIG_PPA) +#include <net/ppa/ppa_api.h> +#endif /* #if IS_ENABLED(CONFIG_PPA) */ + +#include <directconnect_dp_api.h> +#include <directconnect_dp_dcmode_api.h> +#include <directconnect_dp_debug.h> + +/* Defines */ +#define DCMODE_EXT_DRV_MODULE_NAME "dc_mode-ext" +#define DCMODE_EXT_DRV_MODULE_VERSION "1.0" + +#define DCMODE_EXT_BRIDGE_FLOW_LEARNING 1 + +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define SWITCH_DEV "/dev/switch_api/1" + +#define DCMODE_EXT_MAX_PORT 20 +#define DCMODE_EXT_MAX_SUBIF_PER_DEV 16 +#define DCMODE_EXT_SUBIFID_OFFSET 8 +#define DCMODE_EXT_SUBIFID_MASK 0xF +#define DCMODE_EXT_GET_SUBIFIDX(subif_id) \ + ((subif_id >> DCMODE_EXT_SUBIFID_OFFSET) & DCMODE_EXT_SUBIFID_MASK) +#define MULTIPORT_WORKAROUND_MAX_SUBIF_NUM 8 +#define MULTIPORT_WORKAROUND_SUBIFID_MASK \ + (MULTIPORT_WORKAROUND_MAX_SUBIF_NUM << DCMODE_EXT_SUBIFID_OFFSET) + +#if defined(CONFIG_PRX300_CQM) +#define DC_DP_DEFINE_LOCK(lock) DEFINE_MUTEX(lock) +#define DC_DP_LOCK mutex_lock +#define DC_DP_UNLOCK mutex_unlock +#else /* #if defined(CONFIG_PRX300_CQM) */ +#define DC_DP_DEFINE_LOCK(lock) DEFINE_SPINLOCK(lock) +#define DC_DP_LOCK spin_lock_bh +#define DC_DP_UNLOCK spin_unlock_bh +#endif /* #else */ + +#define DCMODE_EXT_MAX_DEV_NUM 4 +#define DCMODE_EXT_MAX_DEV_PORT_NUM (DCMODE_EXT_MAX_DEV_NUM << 1) +#define DCMODE_EXT_DEF_UMT_PERIOD 200 /* in micro second */ + +#define DC_DP_MAX_SOC_CLASS 16 + +struct dcmode_ext_subif_info { + struct net_device *netif; /*! 
pointer to net_device*/ +}; + +struct dcmode_ext_dev_shared_info +{ +#define DCMODE_DEV_STATUS_FREE 0x0 +#define DCMODE_DEV_STATUS_USED 0x1 + int32_t status; + int32_t port_id; + int32_t alt_port_id; /* Multiport reference port id */ + int32_t ref_count; + int32_t cqm_pid; + int32_t umt_id; + int32_t umt_period; + uint32_t dma_ctrlid; + uint32_t dma_cid; + uint32_t dma_ch; + int32_t num_bufpools; + struct dc_dp_buf_pool *buflist; + int32_t num_subif; + struct dcmode_ext_subif_info subif_info[DCMODE_EXT_MAX_SUBIF_PER_DEV]; +}; + +struct dcmode_ext_dev_info +{ +#define DCMODE_EXT_DEV_STATUS_FREE 0x0 +#define DCMODE_EXT_DEV_STATUS_USED 0x1 + int32_t status; + int32_t port_id; + uint32_t alloc_flags; + int32_t num_subif; + + /* shared info */ + struct dcmode_ext_dev_shared_info *shared_info; +}; +static struct dcmode_ext_dev_shared_info g_dcmode_ext_dev_shinfo[DCMODE_EXT_MAX_DEV_NUM]; +static struct dcmode_ext_dev_info g_dcmode_ext_dev[DCMODE_EXT_MAX_DEV_PORT_NUM]; +static struct dcmode_ext_dev_info *g_dcmode_ext_dev_p[DCMODE_EXT_MAX_PORT] = {NULL} ; +static struct dp_cap g_dp_cap = {0}; +DC_DP_DEFINE_LOCK(g_dcmode_ext_dev_lock); +static int32_t g_dcmode_ext_init_ok = 0; + +/* Function prototypes */ +/* Local */ +static int32_t +dcmode_ext_rx_cb(struct net_device *rxif, struct net_device *txif, + struct sk_buff *skb, int32_t len); +static int32_t +dcmode_ext_get_netif_subifid_cb(struct net_device *netif, + struct sk_buff *skb, void *subif_data, + uint8_t dst_mac[MAX_ETH_ALEN], + dp_subif_t *subif, uint32_t flags); + +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) +static inline int32_t +dcmode_ext_setup_pmac_port(int32_t port_id, int32_t dma_cid, int32_t ref_port_id, uint32_t dev_cap_req, uint32_t flags); +static inline int32_t +dcmode_ext_alloc_ring_buffers(uint32_t num_bufs_req, int32_t *num_bufpools, struct dc_dp_buf_pool **buflist); +static inline void +dcmode_ext_free_ring_buffers(int32_t num_bufpools, struct dc_dp_buf_pool **buflist); +static 
inline void +dcmode_ext_cleanup_ring_resources(struct dcmode_ext_dev_info *dev_info, int32_t port_id, struct dc_dp_res *res, uint32_t flags); +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) */ +static inline int32_t +dcmode_ext_setup_ring_resources(struct dcmode_ext_dev_info *dev_info, int32_t port_id, + struct dp_dev_data *dp_device, struct dc_dp_res *res, uint32_t flags); +static inline uint8_t +_dc_dp_get_class2devqos(uint8_t *class2prio, uint8_t *prio2devqos, uint8_t class); + +#if 0 +#if defined(CONFIG_DIRECTCONNECT_DP_DBG) && CONFIG_DIRECTCONNECT_DP_DBG +static void _dc_dp_dump_raw_data(char *buf, int len, char *prefix_str); +static void _dc_dp_dump_rx_pmac(struct pmac_rx_hdr *pmac); +#endif /* #if defined(CONFIG_DIRECTCONNECT_DP_DBG) && CONFIG_DIRECTCONNECT_DP_DBG */ +#endif /* #if 0 */ + +/* + * ======================================================================== + * Local Interface API + * ======================================================================== + */ + +static inline bool is_multiport(uint32_t alloc_flags) +{ + return (0 != (alloc_flags & DC_DP_F_MULTI_PORT)); +} + +static inline bool is_multiport_main(uint32_t alloc_flags) +{ + return ( (0 != (alloc_flags & DC_DP_F_MULTI_PORT)) && + (0 == (alloc_flags & DC_DP_F_SHARED_RES)) ); +} + +static inline bool is_multiport_sub(uint32_t alloc_flags) +{ + return ( (0 != (alloc_flags & DC_DP_F_MULTI_PORT)) && + (0 != (alloc_flags & DC_DP_F_SHARED_RES)) ); +} + +static inline bool is_multiport_main_by_subif(struct dp_subif *subif_id) +{ + return (DCMODE_EXT_GET_SUBIFIDX(subif_id->subif) >= MULTIPORT_WORKAROUND_MAX_SUBIF_NUM); +} + +static inline void +multiport_wa_forward_map_subifid(struct dcmode_ext_dev_info *dev_ctx, struct dp_subif *subif_id) +{ + /* Multiport 1st device? */ + if ( is_multiport_main(dev_ctx->alloc_flags) ) + subif_id->subif |= MULTIPORT_WORKAROUND_SUBIFID_MASK; + /* Multiport sub-sequent device? 
*/ + else + subif_id->port_id = dev_ctx->shared_info->port_id; +} + +static inline void +multiport_wa_reverse_map_subifid(struct dcmode_ext_dev_info *dev_ctx, struct dp_subif *subif_id) +{ + /* Multiport 1st device? */ + if ( is_multiport_main_by_subif(subif_id) ) + subif_id->subif &= ~MULTIPORT_WORKAROUND_SUBIFID_MASK; + + /* Multiport sub-sequent device? */ + else + subif_id->port_id = dev_ctx->shared_info->alt_port_id; +} + +static inline bool is_multiport_sub_by_subif(dp_subif_t *subif_id) +{ + return (DCMODE_EXT_GET_SUBIFIDX(subif_id->subif) < MULTIPORT_WORKAROUND_MAX_SUBIF_NUM); +} + +#if defined(CONFIG_X86_INTEL_LGM) +static inline bool +is_required_auto_detect_buffer_return(struct dc_dp_dev *devspec, uint32_t alloc_flags) +{ + bool ret = false; + + if ( (devspec->dev_cap_req & DC_DP_F_DEV_REQ_AUTO_DETECT_BUFFER_RETURN) ) { + ret = true; + + /* Default to be set for FAST_WLAN */ + } else if ( (alloc_flags & DC_DP_F_FAST_WLAN) || + !(alloc_flags & DC_DP_F_FAST_DSL) ) { + ret = true; + } + + return ret; +} + +static inline bool +is_required_buffer_marking(struct dc_dp_dev *devspec, uint32_t alloc_flags) +{ + bool ret = false; + + if ( (devspec->dev_cap_req & DC_DP_F_DEV_REQ_BUFFER_MARKING) ) { + ret = true; + + /* Default to be set for FAST_WLAN */ + } else if ( (alloc_flags & DC_DP_F_FAST_WLAN) || + !(alloc_flags & DC_DP_F_FAST_DSL) ) { + ret = true; + } + + return ret; +} +#endif /* #if defined(CONFIG_X86_INTEL_LGM) */ + +static struct dcmode_ext_dev_shared_info * +dcmode_ext_alloc_dev_shared_ctx(int32_t port_id, int32_t ref_port_id, uint32_t alloc_flags) +{ + int32_t dev_shinfo_idx; + struct dcmode_ext_dev_shared_info *dev_shinfo_ctx = NULL; + struct dcmode_ext_dev_info *ref_dev_ctx = NULL; + + /* Multiport 2nd device? 
*/ + if ( is_multiport_sub(alloc_flags) && + (NULL != (ref_dev_ctx = g_dcmode_ext_dev_p[ref_port_id])) ) { + dev_shinfo_ctx = ref_dev_ctx->shared_info; + if (NULL == dev_shinfo_ctx) + goto err_out; + + dev_shinfo_ctx->alt_port_id = port_id; + dev_shinfo_ctx->ref_count++; + } else { + /* Find a free device shinfo index */ + for (dev_shinfo_idx = 0; dev_shinfo_idx < DCMODE_EXT_MAX_DEV_NUM; dev_shinfo_idx++) { + if (g_dcmode_ext_dev_shinfo[dev_shinfo_idx].status != DCMODE_EXT_DEV_STATUS_USED) { + break; + } + } + + if (dev_shinfo_idx >= DCMODE_EXT_MAX_DEV_NUM) { + DC_DP_ERROR("failed to allocate port as it reaches maximum directconnect device limit - %d!!!\n", DCMODE_EXT_MAX_DEV_NUM); + goto err_out; + } + dev_shinfo_ctx = &g_dcmode_ext_dev_shinfo[dev_shinfo_idx]; + + memset(dev_shinfo_ctx, 0, sizeof(struct dcmode_ext_dev_shared_info)); + dev_shinfo_ctx->status = DCMODE_EXT_DEV_STATUS_USED; + + /* Multiport 2nd radio? */ + if ( is_multiport_sub(alloc_flags) ) { + dev_shinfo_ctx->port_id = ref_port_id; + dev_shinfo_ctx->alt_port_id = port_id; + } else + dev_shinfo_ctx->port_id = port_id; + + dev_shinfo_ctx->ref_count = 1; + } + +err_out: + return dev_shinfo_ctx; +} + +static inline void +dcmode_ext_free_dev_shared_ctx(struct dcmode_ext_dev_shared_info *dev_shinfo_ctx) +{ + if (NULL != dev_shinfo_ctx) { + dev_shinfo_ctx->ref_count--; + if (0 == dev_shinfo_ctx->ref_count) + memset(dev_shinfo_ctx, 0, sizeof(struct dcmode_ext_dev_shared_info)); + } +} + +static struct dcmode_ext_dev_info * +dcmode_ext_alloc_dev_ctx(int32_t port_id, int32_t ref_port_id, uint32_t alloc_flags) +{ + int32_t dev_idx; + struct dcmode_ext_dev_info *dev_ctx = NULL; + struct dcmode_ext_dev_shared_info *dev_shinfo_ctx = NULL; + + /* Find a free device index */ + for (dev_idx = 0; dev_idx < DCMODE_EXT_MAX_DEV_PORT_NUM; dev_idx++) { + if (g_dcmode_ext_dev[dev_idx].status != DCMODE_EXT_DEV_STATUS_USED) { + break; + } + } + + if (dev_idx >= DCMODE_EXT_MAX_DEV_PORT_NUM) { + DC_DP_ERROR("failed to 
allocate port as it reaches maximum directconnect device limit - %d!!!\n", DCMODE_EXT_MAX_DEV_PORT_NUM); + goto out; + } + + /* Allocate device shared context */ + dev_shinfo_ctx = dcmode_ext_alloc_dev_shared_ctx(port_id, ref_port_id, alloc_flags); + if (NULL == dev_shinfo_ctx) + goto out; + + dev_ctx = &g_dcmode_ext_dev[dev_idx]; + + /* Reset DC ModeX device structure */ + memset(dev_ctx, 0, sizeof(struct dcmode_ext_dev_info)); + dev_ctx->status = DCMODE_EXT_DEV_STATUS_USED; + dev_ctx->port_id = port_id; + dev_ctx->alloc_flags = alloc_flags; + dev_ctx->shared_info = dev_shinfo_ctx; + + g_dcmode_ext_dev_p[port_id] = dev_ctx; + +out: + return dev_ctx; +} + +static inline void +dcmode_ext_free_dev_ctx(struct dcmode_ext_dev_info *dev_ctx) +{ + if (NULL != dev_ctx) { + /* Free device shared context */ + dcmode_ext_free_dev_shared_ctx(dev_ctx->shared_info); + + g_dcmode_ext_dev_p[dev_ctx->port_id] = NULL; + memset(dev_ctx, 0, sizeof(struct dcmode_ext_dev_info)); + } +} + +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) +static inline int32_t +dcmode_ext_setup_pmac_port(int32_t port_id, int32_t dma_cid, int32_t ref_port_id, uint32_t dev_cap_req, uint32_t flags) +{ + u8 i = 0, j = 0; + GSW_API_HANDLE gswr; + GSW_PMAC_Eg_Cfg_t egCfg; + GSW_PMAC_Ig_Cfg_t igCfg; + GSW_register_t regCfg; + + /* Do the GSW-R configuration */ + gswr = gsw_api_kopen(SWITCH_DEV); + if (gswr == 0) { + DC_DP_ERROR("Open SWAPI device FAILED!!!\n"); + return -EIO; + } + + /* FIXME : setup FCS/PMAC setting based on device request */ + + /* GSWIP-R PMAC Egress Configuration Table */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "PMAC_EG_CFG_SET for GSW-R.\n"); + for (i = 0; i <= 15; i++) { + for (j = 0; j <= 3; j++) { + memset((void *)&egCfg, 0x00, sizeof(egCfg)); + egCfg.nRxDmaChanId = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_TX_PMAC) ) + egCfg.bPmacEna = 1; + else + egCfg.bPmacEna = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_TX_FCS) ) + egCfg.bFcsEna = 1; + else + egCfg.bFcsEna = 0; + 
egCfg.bRemL2Hdr = 0; + egCfg.numBytesRem = 0; + egCfg.nResDW1 = 0; + egCfg.nRes1DW0 = 0; + egCfg.nRes2DW0 = 0; + egCfg.nDestPortId = port_id; + egCfg.nTrafficClass = i; + egCfg.bMpe1Flag = 0; + egCfg.bMpe2Flag = 0; + egCfg.bEncFlag = 0; + egCfg.bDecFlag = 0; + egCfg.nFlowIDMsb = j; + egCfg.bTCEnable = 1; + + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, (unsigned int)&egCfg); + } + } + + /* GSWIP-R PMAC Ingress Configuration Table */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "PMAC_IG_CFG_SET for GSW-R.\n"); + memset((void *)&igCfg, 0x00, sizeof(igCfg)); + igCfg.nTxDmaChanId = dma_cid; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_PMAC) ) + igCfg.bPmacPresent = 1; + else + igCfg.bPmacPresent = 0; + igCfg.bSpIdDefault = 1; + igCfg.eSubId = GSW_PMAC_IG_CFG_SRC_DMA_DESC; + igCfg.bClassDefault = 0; + igCfg.bClassEna = 0; + igCfg.bErrPktsDisc = 1; + + igCfg.bPmapDefault = 1; + igCfg.bPmapEna = 0; + + igCfg.defPmacHdr[0] = 0; + igCfg.defPmacHdr[1] = 0; + igCfg.defPmacHdr[2] = (ref_port_id << 4); + igCfg.defPmacHdr[3] = 0x80; + igCfg.defPmacHdr[4] = 0; + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_FCS) ) + igCfg.defPmacHdr[4] |= 0x80; + else + igCfg.defPmacHdr[4] &= ~0x80; + igCfg.defPmacHdr[5] = 0; + igCfg.defPmacHdr[6] = 0xFF; + igCfg.defPmacHdr[7] = 0xFF; + + gsw_api_kioctl(gswr, GSW_PMAC_IG_CFG_SET, (unsigned int)&igCfg); + + /* No Rx FCS, then allow any short packet without padding */ + /* SDMA_PRIO:USIGN */ + #define SDMA_PRIO_REG_BASE 0xBC1 + #define SDMA_PRIO_REG_PORT_STEP 6 + #define SDMA_PRIO_REG_USIGN 0x4 + + memset((void *)®Cfg, 0x00, sizeof(regCfg)); + regCfg.nRegAddr = SDMA_PRIO_REG_BASE + (SDMA_PRIO_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, (unsigned int)®Cfg); + + if ( (dev_cap_req & DC_DP_F_DEV_REQ_RX_FCS) ) + regCfg.nData &= ~SDMA_PRIO_REG_USIGN; + else + regCfg.nData |= SDMA_PRIO_REG_USIGN; + + gsw_api_kioctl(gswr, GSW_REGISTER_SET, (unsigned int)®Cfg); + + /* FIMXE : setup based on DSL or not */ + /* Allow traffic from one VAP to any VAP */ + + /* 
PCE_PCTRL_3:IGPTRM */ + #define PCE_PCTRL_3_REG_BASE 0x483 + #define PCE_PCTRL_3_REG_PORT_STEP 10 + #define PCE_PCTRL_3_REG_IGPTRM 0x4000 + + memset((void *)®Cfg, 0x00, sizeof(regCfg)); + regCfg.nRegAddr = PCE_PCTRL_3_REG_BASE + (PCE_PCTRL_3_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, (unsigned int)®Cfg); + regCfg.nData |= PCE_PCTRL_3_REG_IGPTRM; + gsw_api_kioctl(gswr, GSW_REGISTER_SET, (unsigned int)®Cfg); + + /* PCE_IGPTRM:SUBx */ + #define PCE_IGPTRM_REG_BASE 0x544 + #define PCE_IGPTRM_REG_PORT_STEP 16 + #define PCE_IGPTRM_REG_SUB_ALL 0xFFFF + + memset((void *)®Cfg, 0x00, sizeof(regCfg)); + regCfg.nRegAddr = PCE_IGPTRM_REG_BASE + (PCE_IGPTRM_REG_PORT_STEP * port_id); + gsw_api_kioctl(gswr, GSW_REGISTER_GET, (unsigned int)®Cfg); + regCfg.nData |= PCE_IGPTRM_REG_SUB_ALL; + gsw_api_kioctl(gswr, GSW_REGISTER_SET, (unsigned int)®Cfg); + + gsw_api_kclose(gswr); + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "GSW PMAC Init Done.\n"); + return 0; +} + +static inline int32_t +dcmode_ext_alloc_ring_buffers(uint32_t num_bufs_req, int32_t *num_bufpools, struct dc_dp_buf_pool **buflist) +{ + int32_t i; + uint32_t order; + uint32_t max_buf_pool_num; + uint32_t max_bufs_req_sz; + struct dc_dp_buf_pool *buflist_base = NULL; + size_t buflist_sz; + size_t num_buflist_entries; + uint32_t num_buf_req_rem; + uint32_t tmp_num_bufs_req; + uint32_t tmp_buf_pool_sz; + uint8_t *buf_addr_base = NULL; + + if (num_bufs_req <= 0) { + return -1; + } + + if (DC_DP_PKT_BUF_SIZE_DEFAULT < PAGE_SIZE) + max_buf_pool_num = PAGE_SIZE / DC_DP_PKT_BUF_SIZE_DEFAULT; + else + max_buf_pool_num = 1; + + max_bufs_req_sz = num_bufs_req * DC_DP_PKT_BUF_SIZE_DEFAULT; + num_buflist_entries = (num_bufs_req + (max_buf_pool_num - 1)) / max_buf_pool_num; + + buflist_sz = (num_buflist_entries * sizeof(struct dc_dp_buf_pool)); /* virt buflist size */ + + /* Allocate Tx buffers */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Allocating %d DMA1-Tx buffer lists.\n", (uint32_t)num_buflist_entries); + buflist_base = 
(struct dc_dp_buf_pool *) kmalloc(buflist_sz, GFP_KERNEL); + if (!buflist_base) { + DC_DP_ERROR("failed to allocate %d buffer lists!!!\n", (uint32_t)num_buflist_entries); + return -ENOMEM; + } + memset((void *)buflist_base, 0, buflist_sz); + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Allocating %d DMA1-Tx buffer pools.\n", (uint32_t)num_buflist_entries); + num_buf_req_rem = num_bufs_req; + for (i = 0; i < num_buflist_entries; i++) { + tmp_num_bufs_req = MIN(num_buf_req_rem, max_buf_pool_num); + tmp_buf_pool_sz = tmp_num_bufs_req * DC_DP_PKT_BUF_SIZE_DEFAULT; + order = get_order(tmp_buf_pool_sz); + + buf_addr_base = (uint8_t *)__get_free_pages(GFP_KERNEL, order); + if (!buf_addr_base) { + DC_DP_ERROR("failed to allocate pool %d of size %d KB!!!\n", + (i + 1), (tmp_buf_pool_sz >> 10)); + goto err_out_free_buf; + } + + /* Buffer pool */ + buflist_base[i].pool = (void *)buf_addr_base; + buflist_base[i].phys_pool = (void *)virt_to_phys(buf_addr_base); + buflist_base[i].size = tmp_buf_pool_sz; + + num_buf_req_rem -= tmp_num_bufs_req; + } + + /* Return */ + *num_bufpools = num_buflist_entries; + *buflist = buflist_base; + + return 0; + +err_out_free_buf: + dcmode_ext_free_ring_buffers(num_buflist_entries, &buflist_base); + + return -ENOMEM; +} + +static inline void +dcmode_ext_free_ring_buffers(int32_t num_bufpools, struct dc_dp_buf_pool **buflist_base) +{ + int32_t i; + uint32_t order; + struct dc_dp_buf_pool *buflist; + + if (NULL == buflist_base) + return; + + buflist = *buflist_base; + + /* De-allocate Tx buffer pool */ + if (buflist) { + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "De-allocating %d DMA1-Tx buffer pools.\n", num_bufpools); + for (i = 0; i < num_bufpools; i++) { + if (buflist[i].pool && buflist[i].size) { + order = get_order(buflist[i].size); + free_pages((unsigned long)buflist[i].pool, order); + buflist[i].pool = NULL; + buflist[i].size = 0; + } + } + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "De-allocating %d buffer lists.\n", num_bufpools); + kfree(buflist); + *buflist_base = 
NULL; + } +} + +static inline void +dcmode_ext_cleanup_ring_resources(struct dcmode_ext_dev_info *dev_ctx, int32_t port_id, struct dc_dp_res *res, uint32_t flags) +{ + /* De-allocate Tx buffer pool */ + dcmode_ext_free_ring_buffers(dev_ctx->shared_info->num_bufpools, &dev_ctx->shared_info->buflist); +} +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) */ + +static inline int32_t +dcmode_ext_setup_ring_resources(struct dcmode_ext_dev_info *dev_ctx, int32_t port_id, + struct dp_dev_data *dp_device, struct dc_dp_res *res, uint32_t flags) +{ + int32_t ret = DC_DP_SUCCESS; +#if !defined(CONFIG_X86_INTEL_LGM) + int32_t buflist_idx; +#if defined(CONFIG_PRX300_CQM) + uint32_t **v_buflist_base = NULL; + uint32_t **p_buflist_base = NULL; +#else /* #if defined(CONFIG_PRX300_CQM) */ + int32_t num_bufpools = 0; + struct dc_dp_buf_pool *buflist = NULL; + int32_t i; + int32_t j; +#endif /* #else */ +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + /* rx_out ring */ + res->rings.dev2soc.phys_base = dp_device->rx_ring[0].out_enq_paddr; + res->rings.dev2soc.base = phys_to_virt((phys_addr_t)dp_device->rx_ring[0].out_enq_paddr); + res->rings.dev2soc.size = dp_device->rx_ring[0].out_enq_ring_size; + res->rings.dev2soc.desc_dwsz = 4; +#if defined(CONFIG_X86_INTEL_LGM) + res->rings.dev2soc.policy_base = dp_device->rx_ring[0].rx_policy_base; + //res->rings.dev2soc.pool_id = dp_device->rx_ring[0].rx_poolid; + //res->rings.dev2soc.high_4bits = dp_device->rx_ring[0].; + res->rings.rxout_temp_dw3 = 0; + /* Pool_Policy[23:16] */ + res->rings.rxout_temp_dw3 |= ((res->rings.dev2soc.policy_base & 0xFF) << 14); + /* Source_Pool[27:24] */ + res->rings.rxout_temp_dw3 |= (res->rings.dev2soc.pool_id & 0xF); +#endif + + /* rx_in ring is being used? 
*/ + if (dp_device->rx_ring[0].in_alloc_paddr) { + res->rings.dev2soc_ret.phys_base = dp_device->rx_ring[0].in_alloc_paddr; + res->rings.dev2soc_ret.base = phys_to_virt((phys_addr_t)dp_device->rx_ring[0].in_alloc_paddr); + res->rings.dev2soc_ret.size = dp_device->rx_ring[0].in_alloc_ring_size; +#if defined(CONFIG_X86_INTEL_LGM) + res->rings.dev2soc_ret.desc_dwsz = 2; +#else /* #if defined(CONFIG_X86_INTEL_LGM) */ + res->rings.dev2soc_ret.desc_dwsz = 4; +#endif /* #else */ + + /* rx_in ring same as rx_out ring */ + } else { + res->rings.dev2soc_ret = res->rings.dev2soc; + } + + dev_ctx->shared_info->cqm_pid = dp_device->rx_ring[0].out_enq_port_id; + dev_ctx->shared_info->dma_ch = dp_device->rx_ring[0].out_dma_ch_to_gswip; + + /* tx_in ring */ + res->rings.soc2dev.phys_base = dp_device->tx_ring[0].in_deq_paddr; + res->rings.soc2dev.base = phys_to_virt((phys_addr_t)dp_device->tx_ring[0].in_deq_paddr); + res->rings.soc2dev.size = dp_device->tx_ring[0].in_deq_ring_size; + res->rings.soc2dev.desc_dwsz = 4; + + /* tx_out ring */ + res->rings.soc2dev_ret.phys_base = dp_device->tx_ring[0].out_free_paddr; + res->rings.soc2dev_ret.base = phys_to_virt((phys_addr_t)dp_device->tx_ring[0].out_free_paddr); + res->rings.soc2dev_ret.size = dp_device->tx_ring[0].out_free_ring_size; +#if defined(CONFIG_X86_INTEL_LGM) || defined(CONFIG_PRX300_CQM) + res->rings.soc2dev_ret.desc_dwsz = 2; +#else /* #if defined(CONFIG_X86_INTEL_LGM) || defined(CONFIG_PRX300_CQM) */ + res->rings.soc2dev_ret.desc_dwsz = 1; +#endif /* #else */ + res->rings.soc2dev_ret.policy_base = dp_device->tx_ring[0].txout_policy_base; + res->rings.soc2dev_ret.pool_id = dp_device->tx_ring[0].tx_poolid; +#if defined(CONFIG_X86_INTEL_LGM) + //res->rings.soc2dev_ret.high_4bits = dp_device->tx_ring[0].; + res->rings.txout_temp_dw3 = 0; + /* Buffer Pointer[26:23] */ + res->rings.txout_temp_dw3 |= ((res->rings.soc2dev_ret.high_4bits & 0xF) << 23); + /* Pool_Policy[21:14] */ + res->rings.txout_temp_dw3 |= 
((res->rings.soc2dev_ret.policy_base & 0xFF) << 14); + /* Source_Pool[3:0] */ + res->rings.txout_temp_dw3 |= (res->rings.soc2dev_ret.pool_id & 0xF); +#elif defined(CONFIG_PRX300_CQM) /* #if defined(CONFIG_X86_INTEL_LGM) */ + res->rings.txout_temp_dw3 = 0; + /* Policy[22:20] */ + res->rings.txout_temp_dw3 |= ((res->rings.soc2dev_ret.policy_base & 0x7) << 20); + /* Pool[18:16] */ + res->rings.txout_temp_dw3 |= ((res->rings.soc2dev_ret.pool_id & 0x7) << 16); +#endif /* #elif defined(CONFIG_PRX300_CQM) */ + + /* FIXME : Tx buffer allocation */ + + /* Allocate Rx buffers */ + res->num_bufpools = dp_device->rx_ring[0].num_pkt; + dev_ctx->shared_info->num_bufpools = res->num_bufpools; + /* NOTE : In LGM, no buflist resource update required */ + +#if defined(CONFIG_PRX300_CQM) + res->buflist = (struct dc_dp_buf_pool *) kmalloc((res->num_bufpools * sizeof(struct dc_dp_buf_pool)), GFP_KERNEL); + if (!res->buflist) { + DC_DP_ERROR("failed to allocate %d buffer lists!!!\n", res->num_bufpools); + ret = DC_DP_FAILURE; + goto err_out; + } + + v_buflist_base = (uint32_t **)dp_device->rx_ring[0].pkt_list_vaddr; + p_buflist_base = (uint32_t **)dp_device->rx_ring[0].pkt_base_vaddr; + for (buflist_idx = 0; buflist_idx < res->num_bufpools; buflist_idx++) { + res->buflist[buflist_idx].pool = (void *)v_buflist_base[buflist_idx]; + res->buflist[buflist_idx].phys_pool = (void *)p_buflist_base[buflist_idx]; + res->buflist[buflist_idx].size = dp_device->rx_ring[0].rx_pkt_size; + } +#elif !defined(CONFIG_X86_INTEL_LGM) /* #if defined(CONFIG_PRX300_CQM) */ + ret = dcmode_ext_alloc_ring_buffers(res->num_bufs_req, &num_bufpools, &buflist); + if (ret) { + DC_DP_ERROR("failed to register dev as tx buffer allocation failure!!!\n"); + goto err_out; + } + dev_ctx->shared_info->num_bufpools = num_bufpools; + dev_ctx->shared_info->buflist = buflist; + + res->num_bufpools = res->num_bufs_req; + res->buflist = (struct dc_dp_buf_pool *) kmalloc((res->num_bufpools * sizeof(struct dc_dp_buf_pool)), 
GFP_KERNEL); + if (!res->buflist) { + DC_DP_ERROR("failed to allocate %d buffer lists!!!\n", res->num_bufs_req); + ret = DC_DP_FAILURE; + goto err_out_free_buf; + } + + buflist_idx = 0; + for (i = 0; i < num_bufpools; i++) { + for (j = 0; j < buflist[i].size; j += DC_DP_PKT_BUF_SIZE_DEFAULT) { + res->buflist[buflist_idx].pool = buflist[i].pool + j; + res->buflist[buflist_idx].phys_pool = buflist[i].phys_pool + j; + res->buflist[buflist_idx].size = MIN(buflist[i].size, DC_DP_PKT_BUF_SIZE_DEFAULT); + buflist_idx++; + } + } +#endif /* #else */ + + return ret; + +#if !defined(CONFIG_X86_INTEL_LGM) +#if !defined(CONFIG_PRX300_CQM) +err_out_free_buf: + dcmode_ext_free_ring_buffers(num_bufpools, &buflist); +#endif /* #if !defined(CONFIG_PRX300_CQM) */ + +err_out: + return ret; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ +} + +#if 0 +#if defined(CONFIG_DIRECTCONNECT_DP_DBG) && CONFIG_DIRECTCONNECT_DP_DBG +static void +_dc_dp_dump_rx_pmac(struct pmac_rx_hdr *pmac) +{ + int i; + unsigned char *p = (char *)pmac; + + if (!pmac) { + pr_err("dump_rx_pmac pmac NULL ??\n"); + return ; + } + + pr_info("PMAC at 0x%p: ", p); + for (i = 0; i < 8; i++) + pr_info("0x%02x ", p[i]); + pr_info("\n"); + + /*byte 0 */ + pr_info(" byte 0:res=%d ver_done=%d ip_offset=%d\n", pmac->res1, + pmac->ver_done, pmac->ip_offset); + /*byte 1 */ + pr_info(" byte 1:tcp_h_offset=%d tcp_type=%d\n", pmac->tcp_h_offset, + pmac->tcp_type); + /*byte 2 */ + pr_info(" byte 2:ppid=%d class=%d\n", pmac->sppid, pmac->class); + /*byte 3 */ + pr_info(" byte 3:res=%d pkt_type=%d\n", pmac->res2, pmac->pkt_type); + /*byte 4 */ + pr_info(" byte 4:res=%d redirect=%d res2=%d src_sub_inf_id=%d\n", + pmac->res3, pmac->redirect, pmac->res4, pmac->src_sub_inf_id); + /*byte 5 */ + pr_info(" byte 5:src_sub_inf_id2=%d\n", pmac->src_sub_inf_id2); + /*byte 6 */ + pr_info(" byte 6:port_map=%d\n", pmac->port_map); + /*byte 7 */ + pr_info(" byte 7:port_map2=%d\n", pmac->port_map2); +} +#endif /* #if 
defined(CONFIG_DIRECTCONNECT_DP_DBG) && CONFIG_DIRECTCONNECT_DP_DBG */ +#endif /* #if 0 */ + +#define DC_DP_DEV_CLASS_MASK 0x7 +static inline uint8_t +_dc_dp_get_class2devqos(uint8_t class2prio[], uint8_t prio2devqos[], uint8_t class) +{ + uint8_t devqos; + uint8_t prio; + + class = (class & 0x0F); + prio = class2prio[class]; + + prio = (prio & DC_DP_DEV_CLASS_MASK); + devqos = prio2devqos[prio]; + + return devqos; +} + +static inline void +get_soc_capability(uint32_t *cap) +{ + /* Linux offload capability */ + *cap = DC_DP_F_HOST_CAP_SG; + if (g_dp_cap.tx_hw_chksum) + *cap |= DC_DP_F_HOST_CAP_HW_CSUM; + if (g_dp_cap.rx_hw_chksum) + *cap |= DC_DP_F_HOST_CAP_RXCSUM; + if (g_dp_cap.hw_tso) { + *cap |= DC_DP_F_HOST_CAP_TSO; + *cap |= DC_DP_F_HOST_CAP_TSO6; + } + +#if !defined(CONFIG_X86_INTEL_LGM) +#ifdef CONFIG_PPA_LRO + *cap |= DC_DP_F_HOST_CAP_LRO; +#endif /* #ifdef CONFIG_PPA_LRO */ + + /* FCS capability */ + *cap |= DC_DP_F_HOST_CAP_TX_FCS; + *cap |= DC_DP_F_HOST_CAP_RX_FCS; + *cap |= DC_DP_F_HOST_CAP_TX_WO_FCS; + *cap |= DC_DP_F_HOST_CAP_RX_WO_FCS; + + /* PMAC capability */ + *cap |= DC_DP_F_HOST_CAP_TX_PMAC; + *cap |= DC_DP_F_HOST_CAP_RX_PMAC; + *cap |= DC_DP_F_HOST_CAP_RX_WO_PMAC; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + /* QoS */ + *cap |= DC_DP_F_HOST_CAP_HW_QOS | DC_DP_F_HOST_CAP_HW_QOS_WAN; + *cap |= DC_DP_F_HOST_CAP_DEVQOS; + +#if defined(CONFIG_X86_INTEL_LGM) + *cap |= DC_DP_F_HOST_CAP_AUTO_DETECT_BUFFER_RETURN; + *cap |= DC_DP_F_HOST_CAP_BUFFER_MARKING; +#endif /* #if defined(CONFIG_X86_INTEL_LGM) */ +} + +#if defined(CONFIG_X86_INTEL_LGM) +static int32_t +dcmode_ext_get_subif_by_dev(struct net_device *dev, struct dp_subif *subif) +{ + int32_t ret = DC_DP_FAILURE; + int32_t dev_idx; + int32_t subif_idx; + + DC_DP_LOCK(&g_dcmode_ext_dev_lock); + for (dev_idx = 0; dev_idx < DCMODE_EXT_MAX_DEV_NUM; dev_idx++) { + if (g_dcmode_ext_dev_shinfo[dev_idx].status == DCMODE_DEV_STATUS_FREE) + continue; + + for (subif_idx = 0; subif_idx < 
DCMODE_EXT_MAX_SUBIF_PER_DEV; subif_idx++) { + if (g_dcmode_ext_dev_shinfo[dev_idx].subif_info[subif_idx].netif == dev) { + subif->port_id = g_dcmode_ext_dev_shinfo[dev_idx].port_id; + subif->subif = (subif_idx << DCMODE_EXT_SUBIFID_OFFSET); + + ret = DC_DP_SUCCESS; + goto out; + } + } + } + +out: + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + return ret; +} +#endif /* #if defined(CONFIG_X86_INTEL_LGM) */ + +/* + * ======================================================================== + * DirectConnect Driver Interface API (SoC specific) + * ======================================================================== + */ +static int32_t +dcmode_ext_get_host_capability(struct dc_dp_host_cap *cap, uint32_t flags) +{ + int32_t ret = DC_DP_FAILURE; + + if (cap) { + cap->fastpath.support = 1; +#if defined(CONFIG_X86_INTEL_LGM) + cap->fastpath.hw_dcmode = DC_DP_MODE_TYPE_1_EXT; +#elif defined(CONFIG_PRX300_CQM) /* #if defined(CONFIG_X86_INTEL_LGM) */ + cap->fastpath.hw_dcmode = DC_DP_MODE_TYPE_0_EXT; +#else /* #elif defined(CONFIG_PRX300_CQM) */ + cap->fastpath.hw_dcmode = DC_DP_MODE_TYPE_0; +#endif /* #else */ + + if (g_dp_cap.umt.umt_hw_auto.enable == 1) { + if (g_dp_cap.umt.umt_hw_auto.rx_accumulate == 1) + cap->fastpath.hw_cmode.dev2soc_write = DC_DP_F_DCCNTR_MODE_CUMULATIVE; + if (g_dp_cap.umt.umt_hw_auto.rx_incremental == 1) + cap->fastpath.hw_cmode.dev2soc_write |= DC_DP_F_DCCNTR_MODE_INCREMENTAL; + if (g_dp_cap.umt.umt_hw_auto.tx_accumulate == 1) + cap->fastpath.hw_cmode.soc2dev_write |= DC_DP_F_DCCNTR_MODE_CUMULATIVE; + if (g_dp_cap.umt.umt_hw_auto.tx_incremental == 1) + cap->fastpath.hw_cmode.soc2dev_write |= DC_DP_F_DCCNTR_MODE_INCREMENTAL; + } + + cap->fastpath.hw_cmode.soc2dev_write |= DC_DP_F_DCCNTR_MODE_LITTLE_ENDIAN; + cap->fastpath.hw_cmode.dev2soc_write |= DC_DP_F_DCCNTR_MODE_LITTLE_ENDIAN; + + get_soc_capability(&cap->fastpath.hw_cap); + ret = DC_DP_SUCCESS; + } + + return ret; +} + +static int32_t +dcmode_ext_register_dev_ex(void *ctx, + struct module 
*owner, uint32_t port_id, + struct net_device *dev, struct dc_dp_cb *datapathcb, + struct dc_dp_res *resources, struct dc_dp_dev *devspec, + int32_t ref_port_id, uint32_t alloc_flags, uint32_t flags) +{ + int32_t ret; + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + dp_cb_t dp_cb = {0}; + struct dp_dev_data dp_device = {0}; + int32_t umt_idx; + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "dev_ctx=%p, owner=%p, port_id=%u, dev=%p, datapathcb=%p, " + "resources=%p, dev_spec=%p, flags=0x%08X\n", + dev_ctx, owner, port_id, dev, datapathcb, resources, devspec, flags); + + /* De-register */ + if (flags & DC_DP_F_DEREGISTER) { + + DC_DP_LOCK(&g_dcmode_ext_dev_lock); + + if ( !((NULL != dev_ctx) && (NULL != dev_ctx->shared_info)) ) { + ret = DC_DP_FAILURE; + goto err_unlock_out; + } + + /* De-register DC ModeX device from DC Common layer */ + dc_dp_register_dcmode_device(owner, port_id, dev, dev_ctx, DC_DP_DCMODE_DEV_DEREGISTER); + + /* De-register device from DP Lib */ + /* Skip, iff sub-sequent Multiport device? 
*/ + if ( !is_multiport_sub(dev_ctx->alloc_flags) ) + dp_register_dev_ext(0, owner, port_id, &dp_cb, &dp_device, DP_F_DEREGISTER); + +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) + /* For the last device */ + if ( (1 == dev_ctx->shared_info->ref_count) ) { + /* Cleanup ring resources */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "De-configuring DMA1-Tx channel 0x%x.\n", + dev_ctx->shared_info->dma_ch); + dcmode_ext_cleanup_ring_resources(dev_ctx, dev_ctx->shared_info->port_id, resources, flags); + } +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) */ + + /* Free DC ModeX device context */ + dcmode_ext_free_dev_ctx(dev_ctx); + + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Success, returned %d.\n", DC_DP_SUCCESS); + return DC_DP_SUCCESS; + } + + /* Validate input arguments */ + if (resources->num_dccntr != 1 && !resources->dccntr) { + DC_DP_ERROR("failed to register device for the port_id %d!!!\n", port_id); + ret = DC_DP_FAILURE; + goto err_out; + } + + DC_DP_LOCK(&g_dcmode_ext_dev_lock); + + if (NULL != g_dcmode_ext_dev_p[port_id]) { + ret = DC_DP_FAILURE; + goto err_unlock_out; + } + + dev_ctx = dcmode_ext_alloc_dev_ctx(port_id, ref_port_id, alloc_flags); + if (NULL == dev_ctx) { + ret = DC_DP_FAILURE; + goto err_unlock_out; + } + + /* Skip, iff sub-sequent Multiport device? 
*/ + if ( !is_multiport_sub(alloc_flags) ) { + + /* Datapath Library callback registration */ + dp_cb.rx_fn = dcmode_ext_rx_cb; + dp_cb.stop_fn = datapathcb->stop_fn; + dp_cb.restart_fn = datapathcb->restart_fn; + if ( is_multiport(alloc_flags) ) + dp_cb.get_subifid_fn = dcmode_ext_get_netif_subifid_cb; + else + dp_cb.get_subifid_fn = dc_dp_get_netif_subifid; // Exported from DC common layer + dp_cb.reset_mib_fn = datapathcb->reset_mib_fn; + dp_cb.get_mib_fn = datapathcb->get_mib_fn; + + /* Update DP device structure */ + dp_device.num_rx_ring = 1; + dp_device.num_tx_ring = 1; + dp_device.num_umt_port = resources->num_dccntr; + + /* Rx ring */ + dp_device.rx_ring[0].out_enq_ring_size = resources->rings.dev2soc.size; + dp_device.rx_ring[0].num_pkt = resources->num_bufs_req; + dp_device.rx_ring[0].rx_pkt_size = DC_DP_PKT_BUF_SIZE_DEFAULT; +#if defined(CONFIG_X86_INTEL_LGM) + dp_device.rx_ring[0].prefill_pkt_num = 0; +#else /* #if defined(CONFIG_X86_INTEL_LGM) */ + dp_device.rx_ring[0].prefill_pkt_num = dp_device.rx_ring[0].num_pkt; +#endif /* #else */ + dp_device.rx_ring[0].out_msg_mode = UMT_RXOUT_MSG_SUB; + dp_device.rx_ring[0].out_qos_mode = DP_RXOUT_BYPASS_QOS_ONLY; + + /* Tx ring */ + dp_device.tx_ring[0].in_deq_ring_size = resources->rings.soc2dev.size; + dp_device.tx_ring[0].num_tx_pkt = resources->tx_num_bufs_req; + dp_device.tx_ring[0].tx_pkt_size = DC_DP_PKT_BUF_SIZE_DEFAULT; + //dp_device.tx_ring[0].gpid_info. 
= ; + +#if defined(CONFIG_X86_INTEL_LGM) + if (is_required_auto_detect_buffer_return(devspec, alloc_flags) ) { + dp_device.tx_ring[0].f_out_auto_free = 1; + devspec->dc_cap |= DC_DP_F_HOST_CAP_AUTO_DETECT_BUFFER_RETURN; + } +#endif /* #if defined(CONFIG_X86_INTEL_LGM) */ + + /* TODO : GPID info */ + + /* UMT info */ + for (umt_idx = 0; umt_idx < dp_device.num_umt_port; umt_idx++) { + dp_device.umt[umt_idx].ctl.enable = 1; + if (resources->dccntr[umt_idx].dev2soc_dccntr_timer > 0) + dp_device.umt[umt_idx].ctl.msg_interval = resources->dccntr[umt_idx].dev2soc_dccntr_timer; + else + dp_device.umt[umt_idx].ctl.msg_interval = DCMODE_EXT_DEF_UMT_PERIOD; + + pr_info("%s: umt_mode = %d \n", __func__, resources->dccntr[umt_idx].umt_mode); + if ( resources->dccntr[umt_idx].umt_mode == DC_DP_UMT_MODE_HW_AUTO) { + dp_device.umt[umt_idx].ctl.daddr = (dma_addr_t)resources->dccntr[umt_idx].dev2soc_ret_enq_phys_base; + dp_device.umt[umt_idx].ctl.msg_mode = UMT_MSG_SELFCNT; + } else if (resources->dccntr[umt_idx].umt_mode == DC_DP_UMT_MODE_SW) { + dp_device.umt[umt_idx].ctl.daddr = (dma_addr_t)resources->dccntr[umt_idx].dev2soc_ret_enq_base; + dp_device.umt[umt_idx].ctl.msg_mode = UMT_MSG_SELFCNT; + //dp_device.umt[umt_idx].usr_msg = ; + } else if (resources->dccntr[umt_idx].umt_mode == DC_DP_UMT_MODE_HW_USER) { + dp_device.umt[umt_idx].ctl.daddr = (dma_addr_t)resources->dccntr[umt_idx].dev2soc_ret_enq_phys_base; + dp_device.umt[umt_idx].ctl.msg_mode = UMT_MSG_USER_MODE; + } + + if ( (resources->dccntr[umt_idx].soc_write_dccntr_mode & DC_DP_F_DCCNTR_MODE_CUMULATIVE) ) + dp_device.umt[umt_idx].ctl.cnt_mode = UMT_CNT_ACC; + else + dp_device.umt[umt_idx].ctl.cnt_mode = UMT_CNT_INC; + } + +#if defined(CONFIG_X86_INTEL_LGM) + if (is_required_buffer_marking(devspec, alloc_flags)) { + dp_device.enable_cqm_meta = 1; + devspec->dc_cap |= DC_DP_F_HOST_CAP_BUFFER_MARKING; + } +#endif /* #if defined(CONFIG_X86_INTEL_LGM) */ + dp_device.max_ctp = DCMODE_EXT_MAX_SUBIF_PER_DEV; + + ret = 
dp_register_dev_ext(0, owner, port_id, &dp_cb, &dp_device, 0); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to register dev to Datapath Library/Core!!!\n"); + goto err_out_free_dev; + } + } + + /* For the first device */ + if ( (1 == dev_ctx->shared_info->ref_count) ) { +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) + /* Setup Port resources */ + ret = dcmode_ext_setup_pmac_port(port_id, dev_ctx->shared_info->dma_cid, dev_ctx->shared_info->port_id, devspec->dev_cap_req, flags); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to setup UMT resources!!!\n"); + goto err_out_dereg_dev; + } +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) */ + + /* Setup ring resources */ + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Preparing the resources to be returned for the port=%d.\n", port_id); + ret = dcmode_ext_setup_ring_resources(dev_ctx, dev_ctx->shared_info->port_id, &dp_device, resources, flags); + if (ret) { + DC_DP_ERROR("failed to prepare the resources for the port_id=%d!!!\n", port_id); + goto err_out_dereg_dev; + } + } + + devspec->dc_accel_used = DC_DP_ACCEL_FULL_OFFLOAD; +#if defined(CONFIG_X86_INTEL_LGM) + devspec->dc_tx_ring_used = DC_DP_RING_HW_MODE1_EXT; + devspec->dc_rx_ring_used = DC_DP_RING_HW_MODE1_EXT; +#elif defined(CONFIG_PRX300_CQM) /* #if defined(CONFIG_X86_INTEL_LGM) */ + devspec->dc_tx_ring_used = DC_DP_RING_HW_MODE0_EXT; + devspec->dc_rx_ring_used = DC_DP_RING_HW_MODE0_EXT; +#else /* #elif defined(CONFIG_PRX300_CQM) */ + devspec->dc_tx_ring_used = DC_DP_RING_HW_MODE0; + devspec->dc_rx_ring_used = DC_DP_RING_HW_MODE0; +#endif /* #else */ + get_soc_capability(&devspec->dc_cap); + + /* Register DC ModeX device to DC common layer */ + ret = dc_dp_register_dcmode_device(owner, port_id, dev, dev_ctx, 0); + if (ret) { + DC_DP_ERROR("failed to register device to DC common layer!!!\n"); + goto err_out_dereg_dev; + } + + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + + return ret; + +err_out_dereg_dev: + /* Skip, iff 
sub-sequent Multiport device? */ + if ( !is_multiport_sub(alloc_flags) ) + dp_register_dev_ext(0, owner, port_id, &dp_cb, &dp_device, DP_F_DEREGISTER); + +err_out_free_dev: + dcmode_ext_free_dev_ctx(dev_ctx); + +err_unlock_out: + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + +err_out: + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Failure, returned %d.\n", ret); + return ret; +} + +static int32_t +dcmode_ext_register_subif(void *ctx, + struct module *owner, struct net_device *dev, + const uint8_t *subif_name, struct dp_subif *subif_id, uint32_t flags) +{ + int32_t ret = DC_DP_FAILURE; + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + struct dp_subif subif = {0}; + struct dp_subif_data subif_data = {0}; + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "dev_ctx=%p, owner=%p, dev=%p, subif_id=%p, flags=0x%08X\n", + dev_ctx, owner, dev, subif_id, flags); + + memcpy(&subif, subif_id, sizeof(struct dp_subif)); + + /* De-register */ + if (flags & DC_DP_F_DEREGISTER) { + + DC_DP_LOCK(&g_dcmode_ext_dev_lock); + + /* Multiport device? */ + if ( is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + + /* De-register subif from Datapath Library/Core */ + ret = dp_register_subif_ext(0, owner, dev, (uint8_t *)subif_name, &subif, &subif_data, DP_F_DEREGISTER); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to de-register subif from Datapath Library/Core!!!\n"); + goto err_unlock_out; + } + + dev_ctx->num_subif--; + dev_ctx->shared_info->num_subif--; + dev_ctx->shared_info->subif_info[DCMODE_EXT_GET_SUBIFIDX(subif.subif)].netif = NULL; + + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Success, returned %d.\n", DC_DP_SUCCESS); + return DC_DP_SUCCESS; + } + + DC_DP_LOCK(&g_dcmode_ext_dev_lock); + + /* Multiport device? 
*/ + if ( is_multiport(dev_ctx->alloc_flags) ) { + /* Single port should not have more than 8 Subif */ + if ( DCMODE_EXT_GET_SUBIFIDX(subif_id->subif) >= MULTIPORT_WORKAROUND_MAX_SUBIF_NUM ) { + DC_DP_ERROR("failed to register subif, subif_id_num=0x%04x reaches maximum limit 8!!!\n", subif_id->subif); + goto err_unlock_out; + } + + multiport_wa_forward_map_subifid(dev_ctx, &subif); + } + + /* TODO : subif_data */ + //subif_data.deq_port_idx = ; + //subif_data.flag_ops = ; + + /* Register subif to Datapath Library/Core */ + ret = dp_register_subif_ext(0, owner, dev, (uint8_t *)subif_name, &subif, &subif_data, 0); + if (ret != DP_SUCCESS) { + DC_DP_ERROR("failed to register subif to Datapath Library/Core!!!\n"); + goto err_unlock_out; + } + + /* Multiport 1st device? */ + if ( is_multiport_main(dev_ctx->alloc_flags) ) + subif_id->subif = (subif.subif & ~MULTIPORT_WORKAROUND_SUBIFID_MASK); + else + subif_id->subif = subif.subif; + + dev_ctx->shared_info->subif_info[DCMODE_EXT_GET_SUBIFIDX(subif.subif)].netif = dev; + dev_ctx->shared_info->num_subif++; + dev_ctx->num_subif++; + + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + + goto out; + +err_unlock_out: + DC_DP_UNLOCK(&g_dcmode_ext_dev_lock); + +out: + DC_DP_DEBUG(DC_DP_DBG_FLAG_DBG, "Returned %d.\n", ret); + return ret; +} + +static int32_t +dcmode_ext_xmit(void *ctx, struct net_device *rx_if, struct dp_subif *rx_subif, struct dp_subif *tx_subif, + struct sk_buff *skb, int32_t len, uint32_t flags) +{ + int32_t ret = DC_DP_FAILURE; + struct dp_subif subif = {0}; + uint32_t dp_flags = 0; + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; +#if !defined(CONFIG_X86_INTEL_LGM) + struct dma_tx_desc_0 *desc_0 = (struct dma_tx_desc_0 *) &skb->DW0; + struct dma_tx_desc_1 *desc_1 = (struct dma_tx_desc_1 *) &skb->DW1; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + if (!tx_subif) { + DC_DP_ERROR("tx_subif is NULL!!!\n"); + goto drop; + } + + subif = *tx_subif; + + /* Multiport device? 
*/ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + +#if !defined(CONFIG_X86_INTEL_LGM) + /* skb->DWx */ + desc_1->field.ep = subif.port_id; + desc_0->field.dest_sub_if_id = subif.subif; +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + if ( (skb->ip_summed == CHECKSUM_PARTIAL) ) + dp_flags = DP_TX_CAL_CHKSUM; + + /* Send it to Datapath library for transmit */ + return dp_xmit(skb->dev, &subif, skb, skb->len, dp_flags); + +drop: + if (skb) + dev_kfree_skb_any(skb); + + return ret; +} + +#if (defined(DCMODE_EXT_BRIDGE_FLOW_LEARNING) && DCMODE_EXT_BRIDGE_FLOW_LEARNING) +static int32_t +dcmode_ext_add_session_shortcut_forward(void *ctx, struct dp_subif *subif, struct sk_buff *skb, uint32_t flags) +{ + int32_t ret = DC_DP_SUCCESS; + + /* FIXME : For LGM, need to review. */ +#if !defined(CONFIG_X86_INTEL_LGM) +#if IS_ENABLED(CONFIG_PPA) && defined(CONFIG_PPA_BR_SESS_LEARNING) + struct ethhdr *eth; + + if (!skb) + return DC_DP_FAILURE; + + /* FIXME : Enabled bridge flow learning globally for all netif (mainly appicable for WLAN netif) */ + if ( (flags & DC_DP_F_PREFORWARDING) ) { + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + if (unlikely(is_multicast_ether_addr(eth->h_dest)) || + unlikely(ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr))) { + /* Skipping, as no acceleration is possible */ + return 0; + } + + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = ntohs(eth->h_proto); + skb_set_network_header(skb, ETH_HLEN); + + if ( NULL != ppa_hook_session_add_fn ) + ret = ppa_hook_session_add_fn(skb, NULL, (PPA_F_BRIDGED_SESSION | PPA_F_BEFORE_NAT_TRANSFORM)); + } else if ( (flags & DC_DP_F_POSTFORWARDING) ) { + if ( NULL != ppa_hook_session_add_fn ) + ret = ppa_hook_session_add_fn(skb, NULL, PPA_F_BRIDGED_SESSION); + } +#endif /* #if IS_ENABLED(CONFIG_PPA) && defined(CONFIG_PPA_BR_SESS_LEARNING) */ +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + return ret; +} +#endif /* #if 
(defined(DCMODE_EXT_BRIDGE_FLOW_LEARNING) && DCMODE_EXT_BRIDGE_FLOW_LEARNING) */ + +static int32_t +dcmode_ext_disconn_if(void *ctx, struct net_device *netif, struct dp_subif *subif_id, + uint8_t mac_addr[MAX_ETH_ALEN], uint32_t flags) +{ + int32_t ret = DC_DP_SUCCESS; + + /* FIXME : For LGM, need to review. */ +#if !defined(CONFIG_X86_INTEL_LGM) +#if IS_ENABLED(CONFIG_PPA) + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + struct dp_subif subif = {0}; + + subif = *subif_id; + + /* Multiport device? */ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + + /* Remove all the sessions from PPA */ + if (ppa_hook_disconn_if_fn) + ret = ppa_hook_disconn_if_fn(netif, &subif, mac_addr, flags); +#endif /* #if IS_ENABLED(CONFIG_PPA) */ +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + return ret; +} + +/* + * ======================================================================== + * Callbacks Registered to Datapath Library/Core + * ======================================================================== + */ +static int32_t +dcmode_ext_rx_cb(struct net_device *rxif, struct net_device *txif, + struct sk_buff *skb, int32_t len) +{ + int32_t ret; +#if defined(CONFIG_X86_INTEL_LGM) + struct dma_rx_desc_0 *desc_0 = (struct dma_rx_desc_0 *)&skb->DW0; +#elif defined(CONFIG_PRX300_CQM) + struct dma_rx_desc_1 *desc_1 = (struct dma_rx_desc_1 *)&skb->DW1; +#else /* #if defined(CONFIG_X86_INTEL_LGM) */ + struct pmac_rx_hdr *pmac; +#endif /* #else */ + struct dp_subif rx_subif = {0}; + struct dcmode_ext_dev_info *dev_ctx = NULL; + + if (!skb) { + DC_DP_ERROR("failed to receive as skb=%p!!!\n", skb); + goto err_out; + } + + if (!rxif) { + DC_DP_ERROR("failed to receive as rxif=%p!!!\n", rxif); + goto err_out_drop; + } + +#if defined(CONFIG_X86_INTEL_LGM) + ret = dcmode_ext_get_subif_by_dev(rxif, &rx_subif); + if (ret != DC_DP_SUCCESS) { + DC_DP_ERROR("Why <port_id=%d, subif_id=0x%x> for rxif=%s???\n", 
+ rx_subif.port_id, rx_subif.subif, rxif->name); + goto err_out_drop; + } + rx_subif.subif = desc_0->field.dest_sub_if_id; + +#elif defined(CONFIG_PRX300_CQM) /* #if defined(CONFIG_X86_INTEL_LGM) */ + rx_subif.port_id = desc_1->field.ip; + rx_subif.subif = desc_1->field.session_id; + + len -= sizeof(struct pmac_rx_hdr); + skb_pull(skb, sizeof(struct pmac_rx_hdr)); +#else /* #elif defined(CONFIG_PRX300_CQM) */ + pmac = (struct pmac_rx_hdr *)(skb->data); + rx_subif.port_id = pmac->sppid; + rx_subif.subif = (pmac->src_sub_inf_id << 8); + rx_subif.subif |= (pmac->src_sub_inf_id2 & 0xFF); + + len -= sizeof(struct pmac_rx_hdr); + skb_pull(skb, sizeof(struct pmac_rx_hdr)); +#endif /* #else */ + + dev_ctx = g_dcmode_ext_dev_p[rx_subif.port_id]; + if (NULL == dev_ctx) { + DC_DP_ERROR("failed to receive as dev_ctx=%p for port_id=%d!!!\n", dev_ctx, rx_subif.port_id); + goto err_out_drop; + } + + /* Multiport device? */ + if ( is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_reverse_map_subifid(dev_ctx, &rx_subif); + + if ( (rxif->features & NETIF_F_RXCSUM) ) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + ret = dc_dp_rx(rxif, txif, &rx_subif, skb, skb->len, 0); + + return ret; + +err_out_drop: + dev_kfree_skb_any(skb); + +err_out: + return DP_FAILURE; +} + +static int32_t +dcmode_ext_get_netif_subifid_cb(struct net_device *netif, + struct sk_buff *skb, void *subif_data, + uint8_t dst_mac[MAX_ETH_ALEN], + dp_subif_t *subif, uint32_t flags) /*! 
get subifid */ +{ + int32_t ret = 1; + struct dp_subif subif_id = {0}; + struct dcmode_ext_dev_info *dev_ctx; + struct dcmode_ext_dev_shared_info *dev_shared_ctx; + int32_t subif_idx; + + /* Validate input argument */ + if (!netif) { + DC_DP_ERROR("failed to get subifid as netif=%p!!!\n", netif); + goto out; + } + + if (!subif) { + DC_DP_ERROR("failed to get subifid as subif=%p!!!\n", subif); + goto out; + } + + /* Validate device context */ + dev_ctx = g_dcmode_ext_dev_p[subif->port_id]; + if (!dev_ctx) { + DC_DP_ERROR("port_id=%d not registered!!!\n", subif->port_id); + goto out; + } + + dev_shared_ctx = dev_ctx->shared_info; + if (!dev_shared_ctx) { + DC_DP_ERROR("port_id=%d not registered!!!\n", subif->port_id); + goto out; + } + + /* FIXME : No valid subif from DPLib in get_subif_fn CB? */ + subif_idx = DCMODE_EXT_GET_SUBIFIDX(subif->subif); + if (dev_shared_ctx->subif_info[subif_idx].netif != netif) { + for (subif_idx = 0; subif_idx < DCMODE_EXT_MAX_SUBIF_PER_DEV; subif_idx++) { + if (dev_shared_ctx->subif_info[subif_idx].netif == netif) { + subif->subif &= ~(DCMODE_EXT_SUBIFID_MASK << DCMODE_EXT_SUBIFID_OFFSET); + subif->subif |= (subif_idx << DCMODE_EXT_SUBIFID_OFFSET); + break; + } + } + + if (subif_idx == DCMODE_EXT_MAX_SUBIF_PER_DEV) { + DC_DP_ERROR("No matching netif=%p!!!\n", netif); + goto out; + } + } + + memcpy(&subif_id, subif, sizeof(struct dp_subif)); + + multiport_wa_reverse_map_subifid(dev_ctx, &subif_id); + + ret = dc_dp_get_netif_subifid(netif, skb, subif_data, dst_mac, &subif_id, flags); + if (ret) + goto out; + + subif->subif &= ~0x00FF; + subif->subif |= (subif_id.subif & 0x00FF); + +out: + return ret; +} + +/* + * ======================================================================== + * Misclleneous API + * ======================================================================== + */ + +static int32_t +dcmode_ext_register_qos_class2prio_cb(void *ctx, int32_t port_id, struct net_device *netif, + int (*cb)(int32_t port_id, struct 
net_device *netif, uint8_t class2prio[]), + int32_t flags) +{ + int32_t ret = DC_DP_SUCCESS; + + /* FIXME : For LGM, need to review. */ +#if !defined(CONFIG_X86_INTEL_LGM) +#if IS_ENABLED(CONFIG_PPA) + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + + /* Multiport sub-sequent device? */ + if ( dev_ctx && is_multiport_sub(dev_ctx->alloc_flags) ) + return DC_DP_SUCCESS; + + if (ppa_register_qos_class2prio_hook_fn) + ret = ppa_register_qos_class2prio_hook_fn(port_id, netif, cb, flags); +#endif /* #if IS_ENABLED(CONFIG_PPA) */ +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) */ + + return ret; +} + +static int32_t +dcmode_ext_map_class2devqos(void *ctx, int32_t port_id, struct net_device *netif, + uint8_t class2prio[], uint8_t prio2devqos[]) +{ + /* FIXME : For LGM, is it still required? */ +#if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) + uint8_t devqos; + + /* Configure the egress PMAC table to mark the WMM/TID in the descriptor DW1[7:4] */ + uint8_t i = 0, j = 0; + GSW_PMAC_Eg_Cfg_t egcfg; + GSW_API_HANDLE gswr; + + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + + /* Multiport sub-sequent device? 
*/ + if ( dev_ctx && is_multiport_sub(dev_ctx->alloc_flags) ) + return DC_DP_SUCCESS; + + /* Do the GSW-R configuration */ + gswr = gsw_api_kopen(SWITCH_DEV); + if (gswr == 0) { + DC_DP_ERROR("failed to open SWAPI device!!!\n"); + return -EIO; + } + + /* GSWIP-R PMAC Egress Configuration Table */ + for (i = 0; i < DC_DP_MAX_SOC_CLASS; i++) { + devqos = _dc_dp_get_class2devqos(class2prio, prio2devqos, i); + + for (j = 0; j <= 3; j++) { + memset((void *)&egcfg, 0x00, sizeof(egcfg)); + egcfg.nDestPortId = port_id; + egcfg.nTrafficClass = i; + egcfg.nFlowIDMsb = j; + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_GET, (unsigned int)&egcfg); + + egcfg.nResDW1 = devqos; + gsw_api_kioctl(gswr, GSW_PMAC_EG_CFG_SET, (unsigned int)&egcfg); + } + } + + gsw_api_kclose(gswr); +#endif /* #if !defined(CONFIG_X86_INTEL_LGM) && !defined(CONFIG_PRX300_CQM) */ + + return DC_DP_SUCCESS; +} + +static int32_t +dcmode_ext_get_netif_stats(void *ctx, + struct net_device *netif, struct dp_subif *subif_id, + struct rtnl_link_stats64 *if_stats, uint32_t flags) +{ + //int32_t ret; + uint32_t dp_flags = 0x0; + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + struct dp_subif subif = {0}; + + subif = *subif_id; + + /* Multiport radio? */ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + + if ( (flags & DC_DP_F_SUBIF_LOGICAL) ) + dp_flags = DP_F_STATS_SUBIF; + + return dp_get_netif_stats(netif, &subif, if_stats, dp_flags); +} + +static int32_t +dcmode_ext_clear_netif_stats(void *ctx, + struct net_device *netif, struct dp_subif *subif_id, + uint32_t flags) +{ + uint32_t dp_flags = 0x0; + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + struct dp_subif subif = {0}; + + subif = *subif_id; + + /* Multiport radio? 
*/ + if ( dev_ctx && is_multiport(dev_ctx->alloc_flags) ) + multiport_wa_forward_map_subifid(dev_ctx, &subif); + + if ( (flags & DC_DP_F_SUBIF_LOGICAL) ) + dp_flags = DP_F_STATS_SUBIF; + + return dp_clear_netif_stats(netif, &subif, dp_flags); +} + +static void +dcmode_ext_dump_proc(void *ctx, struct seq_file *seq) +{ + struct dcmode_ext_dev_info *dev_ctx = (struct dcmode_ext_dev_info *)ctx; + + if (!dev_ctx) + return; + + seq_printf(seq, " cqm_pid: %d\n", + dev_ctx->shared_info->cqm_pid); + + seq_printf(seq, " dma_ch: %d\n", + dev_ctx->shared_info->dma_ch); + + seq_printf(seq, " num_bufpools: %02d\n", + dev_ctx->shared_info->num_bufpools); + + seq_printf(seq, " umt_id: %d\n", + dev_ctx->shared_info->umt_id); + seq_printf(seq, " umt_period: %d (in micro second)\n", + dev_ctx->shared_info->umt_period); +} + +static struct dc_dp_dcmode_ops dcmode_ext_ops = { + .get_host_capability = dcmode_ext_get_host_capability, + .register_dev = NULL, + .register_dev_ex = dcmode_ext_register_dev_ex, + .register_subif = dcmode_ext_register_subif, + .xmit = dcmode_ext_xmit, + .handle_ring_sw = NULL, + .add_session_shortcut_forward = dcmode_ext_add_session_shortcut_forward, + .disconn_if = dcmode_ext_disconn_if, + .get_netif_stats = dcmode_ext_get_netif_stats, + .clear_netif_stats = dcmode_ext_clear_netif_stats, + .register_qos_class2prio_cb = dcmode_ext_register_qos_class2prio_cb, + .map_class2devqos = dcmode_ext_map_class2devqos, + .alloc_skb = NULL, + .free_skb = NULL, + .change_dev_status = NULL, + .get_wol_cfg = NULL, + .set_wol_cfg = NULL, + .get_wol_ctrl_status = NULL, + .set_wol_ctrl = NULL, + .dump_proc = dcmode_ext_dump_proc +}; + +static struct dc_dp_dcmode dcmode_ext = { + .dcmode_cap = DC_DP_F_DCMODE_HW | DC_DP_F_DCMODE_0, + .dcmode_ops = &dcmode_ext_ops +}; + +static __init int dcmode_ext_init_module(void) +{ + int32_t ret = 0; + + if (!g_dcmode_ext_init_ok) { + /* Get Platform capability */ + ret = dp_get_cap(&g_dp_cap, 0); + if (ret) + DC_DP_ERROR("failed to get DP 
capability!!!\n"); + + memset(g_dcmode_ext_dev, 0, sizeof(g_dcmode_ext_dev)); + + /* Register DCMODE */ + ret = dc_dp_register_dcmode(&dcmode_ext, 0); + +#if IS_ENABLED(CONFIG_PPA) + ppa_check_if_netif_fastpath_fn = dc_dp_check_if_netif_fastpath; +#endif /* #if IS_ENABLED(CONFIG_PPA) */ + + g_dcmode_ext_init_ok = 1; + } + + return ret; +} + +static __exit void dcmode_ext_exit_module(void) +{ + if (g_dcmode_ext_init_ok) { +#if IS_ENABLED(CONFIG_PPA) + ppa_check_if_netif_fastpath_fn = NULL; +#endif /* #if IS_ENABLED(CONFIG_PPA) */ + + /* De-register DCMODE */ + dc_dp_register_dcmode(&dcmode_ext, DC_DP_F_DCMODE_DEREGISTER); + + /* Reset private data structure */ + memset(g_dcmode_ext_dev, 0, sizeof(g_dcmode_ext_dev)); + g_dcmode_ext_init_ok = 0; + } +} + +module_init(dcmode_ext_init_module); +module_exit(dcmode_ext_exit_module); + +MODULE_AUTHOR("Anath Bandhu Garai"); +MODULE_DESCRIPTION("Extended DC Mode support for any platform."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DCMODE_EXT_DRV_MODULE_VERSION); diff --git a/include/directconnect_dp_api.h b/include/directconnect_dp_api.h index 148efcd..5bb6c21 100644 --- a/include/directconnect_dp_api.h +++ b/include/directconnect_dp_api.h @@ -1,5 +1,5 @@ /* - * DirectConnect provides a common interface for the network devices to achieve the full or partial + * DirectConnect provides a common interface for the network devices to achieve the full or partial acceleration services from the underlying packet acceleration engine * Copyright (c) 2017, Intel Corporation. 
* @@ -16,9 +16,9 @@ #ifndef _UGW_DIRECTCONNECT_DP_API_H_ #define _UGW_DIRECTCONNECT_DP_API_H_ -#ifdef CONFIG_LTQ_DATAPATH /* for GRX500/GRX750 */ +#if defined(CONFIG_INTEL_DATAPATH) || defined(CONFIG_LTQ_DATAPATH) /* for LGM/GRX500/GRX750 */ #include <net/datapath_api.h> -#else /* #ifdef CONFIG_LTQ_DATAPATH */ +#else /* #if defined(CONFIG_INTEL_DATAPATH) || defined(CONFIG_LTQ_DATAPATH) */ #include <datapath_api_common.h> #endif /* #ifndef CONFIG_LTQ_DATAPATH */ @@ -27,453 +27,488 @@ /** \defgroup DirectConnect_Datapath_Lib_Defines Direct Connect Defines \brief This section groups all the constant defines used in DirectConnect data structures. -*/ + */ /** \defgroup DirectConnect_Datapath_Lib_Enums Direct Connect Enums \brief This section groups all the enumeration definitions used in DirectConnect data structures. -*/ + */ /** \defgroup DirectConnect_Datapath_Lib_Unions Direct Connect Unions \brief This section groups all the Union type data structures definitions used in DirectConnect. -*/ + */ /** \defgroup DirectConnect_Datapath_Lib_Structs Direct Connect Structures \brief This section groups all the Struct type data structures definitions used in DirectConnect. -*/ + */ /** \defgroup DirectConnect_Datapath_Lib_APIs Direct Connect APIs \brief This section groups all the APIs definitions used in DirectConnect. -*/ + */ /** \defgroup DirectConnect_Datapath_Lib_Power_Management_Wrapper_APIs Direct Connect Power Management APIs \brief This section provides all the Power Management Wrapper API (Applicable if UGW CoC framework is used). 
-*/ + */ /** \addtogroup DirectConnect_Datapath_Lib_Defines */ /* @{ */ /** \brief DC DP API version code -*/ -#define DC_DP_API_VERSION_CODE 0x040103 + */ +#define DC_DP_API_VERSION_CODE 0x050000 /** \brief DC DP API version -*/ -#define DC_DP_API_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + */ +#define DC_DP_API_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) /** \brief Deregister -*/ + */ #define DC_DP_F_DEREGISTER 0x00000001 /** \brief Alloc flag as FASTPATH -*/ + */ #define DC_DP_F_FASTPATH 0x00000010 /** \brief Alloc flag as LITEPATH -*/ + */ #define DC_DP_F_LITEPATH 0x00000020 /** \brief Alloc flag as SWPATH -*/ + */ #define DC_DP_F_SWPATH 0x00000040 +/** + \brief Alloc flag as FASTPATH WLAN + */ +#define DC_DP_F_FAST_WLAN 0x00000100 + /** \brief Alloc flag as FASTPATH DSL -*/ -#define DC_DP_F_FAST_DSL 0x00000100 + */ +#define DC_DP_F_FAST_DSL 0x00000200 + +/** + \brief Alloc flag as Multiport support + */ +#define DC_DP_F_MULTI_PORT 0x00010000 /** \brief Alloc flag as shared resource -*/ -#define DC_DP_F_SHARED_RES 0x00000200 + */ +#define DC_DP_F_SHARED_RES 0x00020000 /** \brief Register device flag; Specify if peripheral driver want to allocate SW Tx ring -*/ + */ #define DC_DP_F_ALLOC_SW_TX_RING 0x00000010 /** \brief Register device flag; Specify if peripheral driver want to allocate SW Rx ring -*/ + */ #define DC_DP_F_ALLOC_SW_RX_RING 0x00000020 /** \brief Register device flag; Specify if peripheral driver dont want to allocate HW Tx ring (may be for debugging) -*/ + */ #define DC_DP_F_DONT_ALLOC_HW_TX_RING 0x00000040 /** \brief Register device flag; Specify if peripheral driver dont want to allocate HW Rx ring (may be for debugging) -*/ + */ #define DC_DP_F_DONT_ALLOC_HW_RX_RING 0x00000080 /** \brief Register device flags; Specify it if peripheral/dev QoS class is required in DC peripheral -*/ + */ #define DC_DP_F_QOS 0x00001000 /** \brief Register device flags: Specify it if peripheral driver requires to disable inter-subif forwarding (not in 
use) -*/ + */ #define DC_DP_F_DISABLE_INTER_SUBIF_FORWARDING 0x00002000 /** \brief Register_device flags: Specify it if peripheral driver requires to disable intra-subif forwarding (not in use) -*/ + */ #define DC_DP_F_DISABLE_INTRA_SUBIF_FORWARDING 0x00004000 /** \brief DC ring flags: Specify it if the ring is used only for buffer enqueuing in Tx/Rx direction -*/ + */ #define DC_DP_F_PKTDESC_ENQ 0x00000001 /** \brief DC ring flags: Specify it if the ring is used only for buffer return in Tx/Rx direction -*/ + */ #define DC_DP_F_PKTDESC_RET 0x00000002 /** \brief Dc ring flags: Specify it if the ring is used for both buffer enqueuing or buffer return in Tx/Rx direction -*/ + */ #define DC_DP_F_RET_RING_SAME_AS_ENQ 0x00000003 /** \brief DC Counter mode flags: Specify it if incremental DC counter is to be used. -*/ + */ #define DC_DP_F_DCCNTR_MODE_INCREMENTAL 0x01 /** \brief DC Counter mode flags: Specify it if cumulative DC counter is to be used. -*/ + */ #define DC_DP_F_DCCNTR_MODE_CUMULATIVE 0x02 /** \brief DC Counter mode flags: Specify it if DC counter in Little-endian mode is to be used. -*/ + */ #define DC_DP_F_DCCNTR_MODE_LITTLE_ENDIAN 0x04 /** \brief DC Counter mode flags: Specify it if DC counter in Big-endian mode is to be used. 
-*/ + */ #define DC_DP_F_DCCNTR_MODE_BIG_ENDIAN 0x08 /** \brief Host capability: Scatter/gather IO; Mapped to NETIF_F_SG -*/ + */ #define DC_DP_F_HOST_CAP_SG 0x00000001 /** \brief Host capability: Can checksum TCP/UDP over IPv4; Mapped to NETIF_F_IP_CSUM -*/ + */ #define DC_DP_F_HOST_CAP_IP_CSUM 0x00000002 /** \brief Host capability: Can checksum all the packets; Mapped to NETIF_F_HW_CSUM -*/ + */ #define DC_DP_F_HOST_CAP_HW_CSUM 0x00000004 /** \brief Host capability: Can checksum TCP/UDP over IPv6; Mapped NETIF_F_IPV6_CSUM -*/ + */ #define DC_DP_F_HOST_CAP_IPV6_CSUM 0x00000008 /** \brief Host capability: Receive checksumming offload; Mapped NETIF_F_RXCSUM -*/ + */ #define DC_DP_F_HOST_CAP_RXCSUM 0x00000010 /** \brief Host capability: TCPv4 segmentation; Mapped to NETIF_F_TSO -*/ + */ #define DC_DP_F_HOST_CAP_TSO 0x00000020 /** \brief Host capability: TCPv6 segmentation; Mapped to NETIF_F_TSO6 -*/ + */ #define DC_DP_F_HOST_CAP_TSO6 0x00000040 /** \brief Host capability: large receive offload; Mapped to NETIF_F_LRO -*/ + */ #define DC_DP_F_HOST_CAP_LRO 0x00000080 /** \brief Host capability: Tx FCS calculation and insertion -*/ + */ #define DC_DP_F_HOST_CAP_TX_FCS 0x00000100 /** \brief Host capability: Rx FCS automatic verfication -*/ + */ #define DC_DP_F_HOST_CAP_RX_FCS 0x00000200 /** \brief Host capability: Specify when host can transmit frame without Tx FCS -*/ + */ #define DC_DP_F_HOST_CAP_TX_WO_FCS 0x00000400 /** \brief Host capability: Specify when host can accept frame without Rx FCS -*/ + */ #define DC_DP_F_HOST_CAP_RX_WO_FCS 0x00000800 /** \brief Host capability: Tx PMAC insertion -*/ + */ #define DC_DP_F_HOST_CAP_TX_PMAC 0x00001000 /** \brief Host capability: Specify when host can accept with Rx PMAC -*/ + */ #define DC_DP_F_HOST_CAP_RX_PMAC 0x00002000 /** \brief Host capability: Sepcify when host can accept without RX PMAC -*/ + */ #define DC_DP_F_HOST_CAP_RX_WO_PMAC 0x00004000 /** \brief Host capability: Specify when host can do HW QoS -*/ + */ #define 
DC_DP_F_HOST_CAP_HW_QOS 0x00008000 /** \brief Host capability: Specify when host can do HW QoS on WAN interface -*/ + */ #define DC_DP_F_HOST_CAP_HW_QOS_WAN 0x00010000 /** \brief Host capability: Fragementation exception handling (Applicable for GRX350/550 FASTPATH) -*/ -#define DC_DP_F_HOST_CAP_RX_FRAG_HANDLING_RESTRICTED 0x00020000 + */ +#define DC_DP_F_HOST_CAP_RX_FRAG_HANDLING_RESTRICTED 0x00020000 /** \brief Host capability: Specify when host supports 2DW desc format (Applicable for GRX330 FASTPATH) -*/ + */ #define DC_DP_F_HOST_CAP_DESC_2DW 0x00040000 /** \brief Host capability: Specify when host supports device QoS, like - WMM for WLAN etc. -*/ + */ #define DC_DP_F_HOST_CAP_DEVQOS 0x00080000 /** \brief Host capability: Specify when host cpu is BIG endian. -*/ + */ #define DC_DP_F_HOST_CAP_CPU_BIG_ENDIAN 0x00100000 /** \brief Host capability: Specify when host support SKB CHAIN (for future LITEPATH/SWPATH performance boost). -*/ + */ #define DC_DP_F_HOST_CAP_SKB_CHAIN 0x00200000 /** - \brief Device request: Specify when device expects without Tx FCS. -*/ -#define DC_DP_F_DEV_REQ_TX_WO_FCS 0x00000001 + \brief Host capability: Specify when host support to enable Buffer Marking + to set Policy ID in the Packet Buffer meta data. This is used by host to free up + the non-returned buffers to the CQM buffer pool in the event of peripheral FW crash. + */ +#define DC_DP_F_HOST_CAP_BUFFER_MARKING 0x00400000 /** - \brief Device request: Specify when device expects without Rx FCS. -*/ -#define DC_DP_F_DEV_REQ_RX_WO_FCS 0x00000002 + \brief Host capability: Specify when host support to enable auto detection of policy and buffer pool + based on the address range. SoC returns the buffers to the appropriate buffer pool based on this. + To be set by the peripheral driver only if the corresponding ACA is not able to return + the Policy ID and Pool info in the return Descriptor. 
+ */ +#define DC_DP_F_HOST_CAP_AUTO_DETECT_BUFFER_RETURN 0x00800000 + +/** + \brief Device request: Specify when device expects with Tx FCS. + */ +#define DC_DP_F_DEV_REQ_TX_FCS 0x00000001 + +/** + \brief Device request: Specify when device expects with Rx FCS. + */ +#define DC_DP_F_DEV_REQ_RX_FCS 0x00000002 /** \brief Device request: Specify when device expects Tx PMAC. -*/ + */ #define DC_DP_F_DEV_REQ_TX_PMAC 0x00000004 /** \brief Device request: Specify when device expects with Rx PMAC. -*/ + */ #define DC_DP_F_DEV_REQ_RX_PMAC 0x00000008 /** \brief Device capability: Specify when device supports SKB chain on receive. -*/ -#define DC_DP_F_DEV_CAP_RX_SKB_CHAIN 0x00000010 + */ +#define DC_DP_F_DEV_REQ_RX_SKB_CHAIN 0x00000010 /** \brief Device request: Specify when device expects Rx fragmentation handling. -*/ + */ #define DC_DP_F_DEV_REQ_RX_FRAG_HANDLING 0x00000020 /** \brief Device request: Specify when device expects 2DW desc (Applicable in GRX330 FASTPATH). -*/ + */ #define DC_DP_F_DEV_REQ_DESC_2DW 0x00000040 /** \brief Device request: Specify when device expects Rx in little endian (Applicatable in SW ring handling). -*/ + */ #define DC_DP_F_DEV_REQ_RX_LTTLE_ENDIAN 0x00000080 /** \brief Device request: Specify when device expects Tx in little endian (Applicable in SW ring handling). -*/ + */ #define DC_DP_F_DEV_REQ_TX_LTTLE_ENDIAN 0x00000100 /** \brief Device request: Specify when device expects SWPATH->LITEPATH internal forwarding. -*/ + */ #define DC_DP_F_DEV_REQ_LITEPATH_TX_SHORTCUT 0x00000200 /** \brief Device request: Specify when device expects LITEPATH->SWPATH internal forwarding. -*/ + */ #define DC_DP_F_DEV_REQ_LITEPATH_RX_SHORTCUT 0x00000400 +/** + \brief Device request: Specify when device expects the buffer marking. + */ +#define DC_DP_F_DEV_REQ_BUFFER_MARKING DC_DP_F_HOST_CAP_BUFFER_MARKING + +/** + \brief Device request: Specify when device expects auto detetction of policy and buffer pool. 
+ */ +#define DC_DP_F_DEV_REQ_AUTO_DETECT_BUFFER_RETURN DC_DP_F_HOST_CAP_AUTO_DETECT_BUFFER_RETURN + /** \brief Register subif flags; Specify to register already registered subif/vap as LitePath Partial offload -*/ + */ #define DC_DP_F_REGISTER_LITEPATH 0x00000100 /** \brief Register subif flags; De-register already registered LitePath subif/logical netdev from LitePath -*/ + */ #define DC_DP_F_DEREGISTER_LITEPATH 0x00000200 /** \brief Xmit flags: Specify it as dc_dp_xmit() flags value if xmit to litepath -*/ + */ #define DC_DP_F_XMIT_LITEPATH 0x01 /** \brief Xmit flags: Specify it as dc_dp_xmit() flags value if xmit to ATM interface, to be used by DP Lib -*/ + */ #define DC_DP_F_XMIT_OAM 0x02 /** \brief Rx CB flags: Specify it as dc_dp_rx_fn_t flags value if received from litepath -*/ + */ #define DC_DP_F_RX_LITEPATH 0x01 /** \brief Specify to enable SoC->peripheral enqueue -*/ + */ #define DC_DP_F_ENQ_ENABLE 0x00000001 /** \brief Specify to disable SoC->peripheral enqueue -*/ + */ #define DC_DP_F_ENQ_DISABLE 0x00000002 /** \brief Specify to enable peripheral->SoC dequeue -*/ + */ #define DC_DP_F_DEQ_ENABLE 0x00000004 /** \brief Specify to disable peripheral->SoC dequeue -*/ + */ #define DC_DP_F_DEQ_DISABLE 0x00000008 /** \brief Specify it if intra subif session is per-forwarding stage -*/ + */ #define DC_DP_F_PREFORWARDING 0x01 /** \brief Specify it if intra subif session is post-forwarding stage -*/ + */ #define DC_DP_F_POSTFORWARDING 0x02 /** \brief Handle ring sw flags: Specify it if SWPATH ring handling for Tx return -*/ + */ #define DC_DP_F_TX_COMPLETE 0x0001 /** \brief Handle ring sw flags: Specify it if SWPATH ring handling for Rx -*/ + */ #define DC_DP_F_RX 0x0002 /** \brief Handle ring sw flags: Specify it if fragmentation exception ring handling for Rx -*/ + */ #define DC_DP_F_RX_FRAG_EXCEPT 0x0004 /** \brief Peripheral to SoC path (e.g. DMA1-TX for GRX500) default packet buffer size. 
-*/ + */ #define DC_DP_PKT_BUF_SIZE_DEFAULT 2048 /** \brief Multicast (MC) module register request (if applicable to peripheral driver). -*/ + */ #define DC_DP_F_MC_REGISTER 0x01 /** \brief Multicast module de-register request (if applicable to peripheral driver). -*/ + */ #define DC_DP_F_MC_DEREGISTER 0x02 /** \brief Multicast module register update MACs request -*/ + */ #define DC_DP_F_MC_UPDATE_MAC_ADDR 0x08 /** \brief Multicast module cleanup request (if applicable to peripheral driver). -*/ + */ #define DC_DP_F_MC_FW_RESET 0x10 /** \brief Multicast module re-learning request (if applicable to peripheral driver). -*/ + */ #define DC_DP_F_MC_NEW_STA 0x20 /** \brief A new multicast group membership add request. -*/ + */ #define DC_DP_MC_F_ADD 0x01 /** \brief An existing multicast group membership delete request -*/ + */ #define DC_DP_MC_F_DEL 0x02 /** - \brief An existing multicast group membership update request -*/ + \brief An existing multicast group membership update request + */ #define DC_DP_MC_F_UPD 0x03 /** - \brief Maximum number of mac address -*/ + \brief Maximum number of mac address + */ #define DC_DP_MAX_MAC 64 /** \brief Number of Device QoS class/WiFi WMM Class/TID -*/ + */ #define DC_DP_MAX_DEV_CLASS 8 /** \brief stats flags: specify it if request for subif specific stats -*/ + */ #define DC_DP_F_SUBIF_LOGICAL 0x00001000 /** \brief Power Saving (Cpufreq) callback register request -*/ + */ #define DC_DP_F_PS_REGISTER 0x01 /** \brief Power Saving (PS) callback de-register request -*/ + */ #define DC_DP_F_PS_DEREGISTER 0x02 /** \brief Power Saving (PS) notifier list type - Transition Notifier -*/ + */ #define DC_DP_PS_TRANSITION_NOTIFIER (0) /** \brief Power Saving (PS) notifier list type - Policy Notifier -*/ + */ #define DC_DP_PS_POLICY_NOTIFIER (1) /** \brief PS (CPUFreq) Operation Success */ @@ -487,11 +522,11 @@ /** \brief PS (Cpufreq) notifier list Add Operation -*/ + */ #define DC_DP_PS_LIST_ADD 1 /** \brief PS (Cpufreq) notifier list Delete 
Operation -*/ + */ #define DC_DP_PS_LIST_DEL 0 /* @} */ @@ -501,25 +536,25 @@ /* @{ */ /** \brief Definition of the DC DP API status. -*/ + */ enum dc_dp_api_status { DC_DP_FAILURE = -1, DC_DP_SUCCESS = 0, }; /** \brief Definition of the IP Type used. -*/ + */ enum dc_dp_iptype { - /** IPv4 Type selector */ + /** IPv4 Type selector */ DC_DP_IPV4 = 0, - /** IPv6 Type selector */ + /** IPv6 Type selector */ DC_DP_IPV6 = 1, - /** None IP selector */ + /** None IP selector */ INVALID, }; /** \brief Definition of DC acceleration type -*/ + */ enum dc_dp_accel_type { /** No Offload/Soft Path */ DC_DP_ACCEL_NO_OFFLOAD, @@ -530,7 +565,7 @@ enum dc_dp_accel_type { }; /** \brief Definition of DC Tx/Rx ring type -*/ + */ enum dc_dp_ring_type { /** Tx/Rx ring not used (e.g., Lite Path only) */ DC_DP_RING_NONE = 0, @@ -538,22 +573,38 @@ enum dc_dp_ring_type { DC_DP_RING_SW_MODE1, /** Tx/Rx ring as HW DC Mode0 (e.g., GRX350 Fast Path) */ DC_DP_RING_HW_MODE0, + /** Tx/Rx ring as HW DC Mode0 Ext (e.g., FMX Fast Path) */ + DC_DP_RING_HW_MODE0_EXT, /** Tx/Rx ring as HW DC Mode1 (e.g., GRX750 Fast Path) */ DC_DP_RING_HW_MODE1, + /** Tx/Rx ring as HW DC Mode1 Ext (e.g., LGM Fast Path) */ + DC_DP_RING_HW_MODE1_EXT, +}; + +/** \brief Definition of DC UMT mode + */ +enum dc_dp_umt_mode { + DC_DP_UMT_MODE_HW_AUTO = 0, + DC_DP_UMT_MODE_HW_USER, + DC_DP_UMT_MODE_SW }; /** - \brief Definition of DC Mode type. -*/ + \brief Definition of DC Mode type. 
+ */ enum dc_dp_mode_type { /** DC Mode 0 */ DC_DP_MODE_TYPE_0 = 0, + /** DC Mode 0 Ext */ + DC_DP_MODE_TYPE_0_EXT, /** DC Mode 1 */ DC_DP_MODE_TYPE_1, + /** DC Mode 1 Ext */ + DC_DP_MODE_TYPE_1_EXT, }; /** \brief Definition of DC direction type -*/ + */ enum dc_dp_dir_type { /** Dev to SoC direction */ DC_DP_DIR_DEV2SOC = 0, @@ -561,14 +612,23 @@ enum dc_dp_dir_type { DC_DP_DIR_SOC2DEV, }; +/** \brief Definition of DC device type + */ +enum dc_dp_dev_type { + DC_DP_DEV_WAV_500 = 0, + DC_DP_DEV_WAV_600_24G, + DC_DP_DEV_WAV_600_5G, + DC_DP_DEV_WAV_600_CDB, +}; + /** \brief Definition of Power Management module -*/ + */ enum dc_dp_power_module { DC_DP_MAX_PM = -1 }; /** \brief Definition of Power Management state -*/ + */ enum dc_dp_power_state { /** Power State Invalid. */ DC_DP_PS_UNDEF, @@ -604,33 +664,33 @@ struct dc_dp_dev { }; /** - \brief Ring recovery stats Structure - \note If stats difference exceeds pre-configured threshold the system would be rebooted to recover from loss of descriptiors. - FIXME : will be generalzed for 4 ring. -*/ + \brief Ring recovery stats Structure + \note If stats difference exceeds pre-configured threshold the system would be rebooted to recover from loss of descriptors. + FIXME : will be generalized for 4 ring. + */ struct dc_dp_recovery_stats { uint32_t soc2dev_announced_desc_cum; /*!< Announced cumulative desc count by DC Counter */ uint32_t soc2dev_to_be_pulled_counter; /*!< Desc count to be pulled from DC SoC (e.g. CBM for GRX500) Dequeue port */ uint32_t soc2dev_to_be_freed_buffer; /*!< Buffer count to be freed to DC SoC (e.g. CBM for GRX500) Dequeue Free port */ uint32_t dc_rx_outstanding_desc; /*!< DC client driver/FW HD ring outstanding decsriptors- - Add num_desc when written to HD ring and subtract num_desc based on DC counter value received. */ + Add num_desc when written to HD ring and subtract num_desc based on DC counter value received. */ //FIXME : For non-GRX500 : Expand dev2soc fields.?? 
}; /** \brief Rx function callback - basic data struct for ACA Peripherals. - \param[in] rx_if Rx If netdevice pointer - \param[in] tx_if Tx If netdevice pointer - optional - \param[in] subif Tx/Rx SubIf pointer - \param[in] skb Pointer to pointer to packet buffer, like sk_buff - \param[in] len Length of the packet (optional as also present in skb->len) - \param[in] flags: - DC_DP_F_RX_LITEPATH - recieved packet through litepath - \return 0 if OK / -1 if error / > 0, if DC DP layer needs to do LitePath Rx - \note The receive callback is MUST to register and is invoked by DirectConnect datapath driver to pass the packets to the peripheral driver - \note If the Peripheral Driver does not pass the packet to stack, and does - not free the Rx buffer, it needs to return > 0 to indicate to DC DP Library to - send packet to accelerator - Valid for LitePath Rx case only. -*/ + \param[in] rx_if Rx If netdevice pointer + \param[in] tx_if Tx If netdevice pointer - optional + \param[in] subif Tx/Rx SubIf pointer + \param[in] skb Pointer to pointer to packet buffer, like sk_buff + \param[in] len Length of the packet (optional as also present in skb->len) + \param[in] flags: + - DC_DP_F_RX_LITEPATH : received packet through litepath + \return 0 if OK / -1 if error / > 0, if DC DP layer needs to do LitePath Rx + \note The receive callback is MUST to register and is invoked by DirectConnect datapath driver to pass the packets to the peripheral driver + \note If the Peripheral Driver does not pass the packet to stack, and does + not free the Rx buffer, it needs to return > 0 to indicate to DC DP Library to + send packet to accelerator - Valid for LitePath Rx case only. + */ typedef int32_t (*dc_dp_rx_fn_t) ( struct net_device *rx_if, struct net_device *tx_if, @@ -641,16 +701,16 @@ typedef int32_t (*dc_dp_rx_fn_t) ( ); /*!< Rx function callback */ /** 
- \param[in] netif Network interface through whireturn packet to dst MAC address mac_addr will be transmitted - \param[in] skb Pointer to pointer to packet buffer, like sk_buff - \param[in] subif_data VCC information for DSL nas interface, must provide valid subif_data, otherwise set to NULL - \param[in] mac_addr MAC Address of Station - \param[in,out] subif subif->port_id as input and sub-interface including Meta Sub Interface Id (e.g. STA Id) returned - \param[in] flags Reserved for future use - \return 0 if OK, -1 on ERROR - \note : Optional function - Not needed in case Client driver does not have peripheral specific mapping of Meta Sub Interface in the sub-interfce field. -*/ + \brief Get Meta-SubInterface Integer Identifier (e.g. Station Id) callback. Mostly applicable to DC WLAN peripheral only. + \param[in] netif Network interface through whireturn packet to dst MAC address mac_addr will be transmitted + \param[in] skb Pointer to pointer to packet buffer, like sk_buff + \param[in] subif_data VCC information for DSL nas interface, must provide valid subif_data, otherwise set to NULL + \param[in] mac_addr MAC Address of Station + \param[in,out] subif subif->port_id as input and sub-interface including Meta Sub Interface Id (e.g. STA Id) returned + \param[in] flags Reserved for future use + \return 0 if OK, -1 on ERROR + \note : Optional function - Not needed in case Client driver does not have peripheral specific mapping of Meta Sub Interface in the sub-interfce field. + */ typedef int32_t(*dc_dp_get_netif_subinterface_fn_t) ( struct net_device *netif, struct sk_buff *skb, @@ -696,16 +756,16 @@ struct dc_dp_fields_value_dw { }; /** \brief Get DC Peripheral specific Desc Information callback, which is - invoked by DC library to the Peripheral driver. 
Peripheral driver indicates - the field value and mask it wants to set in the desc/HD DWORDs - \param[in] port_id Port Number for which information is sought - \param[in] skb Pointer to skbuff for which Peripheral specific info is - requested/ - \param[out] desc_fields Pointer to dc_dp_fields_value_dw structure - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \remark The Acceleration layer can treat this info as 'black box' if required -*/ + invoked by DC library to the Peripheral driver. Peripheral driver indicates + the field value and mask it wants to set in the desc/HD DWORDs + \param[in] port_id Port Number for which information is sought + \param[in] skb Pointer to skbuff for which Peripheral specific info is + requested/ + \param[out] desc_fields Pointer to dc_dp_fields_value_dw structure + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \remark The Acceleration layer can treat this info as 'black box' if required + */ typedef int32_t (*dc_dp_get_dev_specific_desc_info_t) ( int32_t port_id, /* Port Identifier */ struct sk_buff *skb, @@ -714,16 +774,16 @@ typedef int32_t (*dc_dp_get_dev_specific_desc_info_t) ( ); /** \brief Get DC Peripheral specific Desc Information, that will be passed in - the HD to the peripheral (and read back from HD from the peripheral) - it - indicates the mapping of the fields as well (not in use) - \param[in] port_id Port Number for which information is sought - \param[in] skb Pointer to skbuff for which Peripheral specific info is requested - \param[out] desc_fields Pointer to dc_dp_field_value structure - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \remark The Acceleration layer can treat this info as 'black box' if required - and combine it into a single peripheral/vendor specific field if required. 
-*/ + the HD to the peripheral (and read back from HD from the peripheral) - it + indicates the mapping of the fields as well (not in use) + \param[in] port_id Port Number for which information is sought + \param[in] skb Pointer to skbuff for which Peripheral specific info is requested + \param[out] desc_fields Pointer to dc_dp_field_value structure + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \remark The Acceleration layer can treat this info as 'black box' if required + and combine it into a single peripheral/vendor specific field if required. + */ int32_t dc_dp_get_dev_specific_desc_fields ( int32_t port_id, /* Port Identifier */ struct sk_buff *skb, @@ -732,34 +792,50 @@ int32_t dc_dp_get_dev_specific_desc_fields ( ); /** - \brief Get ring/Buffer recovery stats callback + \brief Get ring/Buffer recovery stats callback + \param[in] netif Pointer to Linux netdevice structure + \param[in] port_id Port Identifier [either netdevice or port_id - one value to be passed]. + \param[out] stats Pointer to dc_dp_recovery_stats structure + \param[in] flags Reserved for future use + \return 0 if OK, -1 on ERROR + \note Optional recovery stats callback. The DC datapath driver can tally up the buffer and + ring stats on the DC peripheral and on the host SoC side, and trigger recovery - for example, + through rebooting system if significant buffers lost + */ +typedef int32_t (*dc_dp_get_recovery_stats_fn_t) ( + struct net_device *netif, + int32_t port_id, + struct dc_dp_recovery_stats *stats, + uint32_t flags +); + +/** + \brief DMA Rx handler callback \param[in] netif Pointer to Linux netdevice structure \param[in] port_id Port Identifier [either netdevice or port_id - one value to be passed]. - \param[out] stats Pointer to dc_dp_recovery_stats structure \param[in] flags Reserved for future use \return 0 if OK, -1 on ERROR \note Optional recovery stats callback. 
The DC datapath driver can tally up the buffer and ring stats on the DC peripheral and on the host SoC side, and trigger recovery - for exampe, through rebooting system if significant buffrs lost */ -typedef int32_t (*dc_dp_get_recovery_stats_fn_t) ( +typedef int32_t (*dc_dp_irq_handler_fn_t) ( struct net_device *netif, int32_t port_id, - struct dc_dp_recovery_stats *stats, uint32_t flags ); /** \brief Multicast module callback to add/delete a mcast group membership to/from a DirectConnect interface. - \param[in] grp_id Multicast group id. - \param[in] dev Registered net device. - \param[in] mc_stream Multicast stream information. - \param[in] flags : - DC_DP_MC_F_ADD - Add a new mcast group membership to a DirectConnect interface. - DC_DP_MC_F_UPD - Update an existing mcast group membership to a DirectConnect interface. - DC_DP_MC_F_DEL - Delete an existing mcast group membership from a DirectConnect interface. - \return none - \note Group Identifier is allocated and managed by Multicast Subsystem. -*/ + \param[in] grp_id Multicast group id. + \param[in] dev Registered net device. + \param[in] mc_stream Multicast stream information. + \param[in] flags : + DC_DP_MC_F_ADD - Add a new mcast group membership to a DirectConnect interface. + DC_DP_MC_F_UPD - Update an existing mcast group membership to a DirectConnect interface. + DC_DP_MC_F_DEL - Delete an existing mcast group membership from a DirectConnect interface. + \return none + \note Group Identifier is allocated and managed by Multicast Subsystem. + */ typedef void (*dc_dp_mcast_callback_fn_t) ( uint32_t grp_id, struct net_device *dev, @@ -768,8 +844,8 @@ typedef void (*dc_dp_mcast_callback_fn_t) ( ); /** - \brief IP address data structure - used in Multicast registration. -*/ + \brief IP address data structure - used in Multicast registration. 
+ */ struct dc_dp_ip_addr { enum dc_dp_iptype ip_type; /*!< IPv4 or IPv6 Type */ union { @@ -779,13 +855,13 @@ struct dc_dp_ip_addr { }; /** - \brief Mutlicast stream (5-tuple) structure -*/ + \brief Multicast stream (5-tuple) structure + */ struct dc_dp_mcast_stream { - struct net_device *mem_dev; /*!< Member Netdevice */ + struct net_device *mem_dev; /*!< Member Netdevice */ struct dc_dp_ip_addr src_ip; /*!< Source ip : can be ipv4 or ipv6 */ struct dc_dp_ip_addr dst_ip; /*!< Destination ip - GA : can be ipv4 or ipv6 */ - uint32_t proto; /*!< Protocol type : Mostly UDP for Multicast */ + uint32_t proto; /*!< Protocol type : Mostly UDP for Multicast */ uint32_t src_port; /*!< Source port */ uint32_t dst_port; /*!< Destination port */ union { @@ -797,8 +873,8 @@ struct dc_dp_mcast_stream { }; /** - \brief DirectConnect Power Saving (CoC) Thresholds values for different power states. -*/ + \brief DirectConnect Power Saving (CoC) Thresholds values for different power states. + */ struct dc_dp_ps_threshold { int32_t th_d0; /*!< Power State D0 (Highest Power) Threshold Level */ int32_t th_d1; /*!< Power State D1 Threshold Level */ @@ -807,54 +883,57 @@ struct dc_dp_ps_threshold { };
+ */ struct dc_dp_ps_module_info { - char *module_name; /*!< Power State registeirng module Name */ - enum dc_dp_power_state power_feature_state; /*!< Current Power State */ + char *module_name; /*!< Power State registeirng module Name */ + enum dc_dp_power_state power_feature_state; /*!< Current Power State */ int32_t (*dc_dp_ps_state_get) (enum dc_dp_power_state *pmcu_state); /*!< Callback to query current power state */ int32_t (*dc_dp_ps_state_switch) (int32_t pmcu_pwr_state_ena); /*!< Callback to enable power state */ }; /** - \brief Wake-on-LAN Config Structure -*/ + \brief Wake-on-LAN Config Structure + */ struct dc_dp_wol_cfg { uint8_t wol_mac[MAX_ETH_ALEN]; /*!< Wake-on-LAN MAC address - part of Magic packet (16 times repeat)*/ bool wol_passwd_enable; /*!< Wake-on-LAN password enable */ uint8_t wol_passwd[MAX_ETH_ALEN]; /*!< Wake-on-LAN password */ }; -/* @} */ - -/** \addtogroup DirectConnect_Datapath_Lib_Structs */ -/* @{ */ - /** - \brief DirectConnect Datapath Lib Registration Callback. - It is supplied by individual DC peripheral driver. -*/ + \brief DirectConnect Datapath Lib Registration Callback. - It is supplied by individual DC peripheral driver. 
+ */ struct dc_dp_cb { dc_dp_rx_fn_t rx_fn; /*!< Rx function callback */ dp_stop_tx_fn_t stop_fn; /*!< Stop Tx function callback for Tx flow control - Optional (NULL) */ dp_restart_tx_fn_t restart_fn; /*!< Start Tx function callback for Tx flow control - Optional (NULL)*/ dc_dp_get_netif_subinterface_fn_t get_subif_fn; /*!< Get Subinterface metaid callback - Optional (for non-WLAN) (NULL) */ dc_dp_get_dev_specific_desc_info_t get_desc_info_fn; /*!< Get device/peripheral specific field info for - desc/HD */ + desc/HD */ dp_reset_mib_fn_t reset_mib_fn; /*!< reset registered device's network mib counters - Optional (NULL) */ dp_get_mib_fn_t get_mib_fn; /*!< Retrieve registered device's network mib counters */ dc_dp_get_recovery_stats_fn_t recovery_fn; /*!< Get Recovery related stats - Optional (NULL) */ + dc_dp_irq_handler_fn_t dma_rx_handler_fn; /*!< DMA RX IRQ Handler */ }; /** - \brief DirectConnect Buffer pools/Chunks Data structure. - \\FIXME : For Rx direction only. Max Size in a pool : 4 MB (get_freepages). -*/ + \brief DirectConnect Buffer pools/Chunks Data structure. + \\FIXME : For Rx direction only. Max Size in a pool : 4 MB (get_freepages). + */ struct dc_dp_buf_pool { void *pool; /*!< Pointer to pool */ void *phys_pool; /*!< Physical address to pool */ uint32_t size; /*!< Size of pool in bytes - Must be multiple of individual buffer size */ }; +/** \brief This data structure describes the ring meta-information. + */ +struct dc_dp_ring_metadata { + void *credit_add; /*!< [out] Peripheral writes the credit add value to this address */ + void *credit_left; /*!< [out] Peripheral reads the credit left value from this address */ +}; + /** \brief This data structure describes the ring attributes. */ /* FIXME : Any budget for burst handling mainly for sw ring handling? in form of num of desc or in fastpath it could be timer based for umt hw? 
*/ @@ -863,10 +942,13 @@ struct dc_dp_ring { void *phys_base; /*!< [out] Physical Base address of ring */ int32_t size; /*!< [in/out] Size of ring in terms of entries : FIXME : Zero implies ring is unused. */ int32_t desc_dwsz; /*!< [in/out] Size of descriptor in DW. */ - int32_t flags; /*!< [in] DC_DP_F_PKTDESC_ENQ, DC_DP_F_PKTDESC_RET - DC_DP_F_RET_RING_SAME_AS_ENQ*/ + int32_t flags; /*!< [in] DC_DP_F_PKTDESC_ENQ, DC_DP_F_PKTDESC_RET + DC_DP_F_RET_RING_SAME_AS_ENQ*/ void *ring; /*!< [out] Place holder for ring meta-structure - only used - internally in the lower layers */ + internally in the lower layers */ + uint8_t policy_base; /*!< [out] Buffer Manager Policy Base for this ring*/ + uint8_t pool_id; /*!< [out] Buffer Manager Pool Id for this ring*/ + uint8_t high_4bits; /*!< [out] High 4 bits of 36bit buffer address */ }; /** @@ -880,10 +962,10 @@ struct dc_dp_ring { * ----------------------------------- * * soc2dev - CBM Dequeue Port Base. SoC -> Peripheral direction and enqueue - * counter update in Peripheral memory - incremental + * counter update in Peripheral memory - incremental * * soc2dev_ret is CBM Buffer return port base, Peripheral -> SoC. SoC detects in HW buffer return and does not - * need any enqueue counter update + * need any enqueue counter update * * Above Ring Sizes are fixed * @@ -891,10 +973,10 @@ struct dc_dp_ring { * ----------------------------------- * * dev2soc - It is the DMA1-Tx channel ring (Peripheral -> SoC). Enqueue counter is ignored as HW DMA engine polls. - * Peripheral may track on its own for its own writes! + * Peripheral may track on its own for its own writes! * * dev2soc_ret - Same as dev2soc_ring and points to DMA1-TX channel base. Can be ignored - * in that mode. Size is also the same + * in that mode. 
Size is also the same * * For DC Mode 1: (GRX750/PUMA7) * @@ -907,7 +989,7 @@ struct dc_dp_ring { * soc2dev_ret - Peripheral -> SoC direction enqueue (desc/buffer return) and enqueue counter * update (incremental) in SoC memory. * - * Above Ring Sizes are equal. Currently, GRX750/PUMA7 may allocate all peripheral rings as equal + * Above Ring Sizes are equal. Currently, GRX750/PUMA7 may allocate all peripheral rings as equal * * Peripheral -> SoC Pkt direction (Rx) * ----------------------------------- @@ -934,14 +1016,14 @@ struct dc_dp_ring { * update (incremental) in SoC memory. This ring may not be allocated and * then the return desc/pkt is written on the soc2dev ring. * - * Above Ring Sizes are equal. Currently, GRX750/PUMA7 may allocate all peripheral rings as equal + * Above Ring Sizes are equal. Currently, GRX750/PUMA7 may allocate all peripheral rings as equal * * Peripheral -> SoC Pkt direction (Rx) * ----------------------------------- * * dev2soc - Peripheral -> SoC direction enqueue (desc/buffer) and enqueue counter * update (incremental) in SoC memory - * + * * dev2soc_ret - SoC -> Peripheral direction enqueue (desc/buffer return) and enqueue counter * update (incremental) in Peripheral memory. This ring may not be allocated and * then the return desc/pkt is written on the dev2soc ring. 
@@ -954,6 +1036,11 @@ struct dc_dp_ring_res { struct dc_dp_ring dev2soc; /*!< Params of Rx ring - Dev to SoC */ struct dc_dp_ring dev2soc_ret; /*!< Params of Rx Return ring - Dev to SoC (Optional : Mode0 / Mode2) */ struct dc_dp_ring dev2soc_except; /*!< Params of Rx exception ring - Dev to SoC (Optional : Mode1 / Mode2) */ + uint32_t txout_temp_dw3; /*!< [out] TXOUT(soc2dev_ret) Template DW3 - 32bit + LGM - DW contains Policy, Pool, 4bit High Address + PRX (Falcon) - DW contains Policy, Pool */ + uint32_t rxout_temp_dw3; /*!< [out] RXOUT (dev2soc) Template DW3 - 32bit + LGM - DW contains Policy, Pool */ }; /** @@ -963,11 +1050,13 @@ struct dc_dp_ring_res { * recommended to write to SoC memory for generic handling for all periherals, * and for Driver to not have to read over PCIe which is slower.. * \remark Some SoC Accelerators may need 2 separate counter update locations, while others need 1. Hence 2 distinct counter bases in Dev2SoC direction. -*/ + */ /* FIXME : Is it better in more sturutured form? */ struct dc_dp_dccntr { int32_t soc_write_dccntr_mode; /*!< [in] DC Counter Write mode in peripheral memory : Incremental/Cumulative and Big/Little endian */ int32_t dev_write_dccntr_mode; /*!< [out] Dc Counter Read mode from SoC memory: Incremental/Cumulative and Big/Little endian */ + int32_t dev2soc_dccntr_timer; /*! <in] UMT Message timer for RXOUT ring */ + enum dc_dp_umt_mode umt_mode; /*! <in] UMT mode: DC_DP_UMT_MODE_HW_AUTO, DC_DP_UMT_MODE_HW_USER, DC_DP_UMT_MODE_SW */ void *soc2dev_enq_phys_base; /*!< [in] Physical Base address of DC Counter write location (in DC peripheral memory) for soc2dev ring enq counter. It MUST be provided by peripheral driver. */ void *soc2dev_enq_base; /*!< [in] Base address of DC Counter write location (in DC peripheral memory) for soc2dev ring enq counter. */ @@ -1013,38 +1102,38 @@ struct dc_dp_dccntr { }; /** - \brief DirectConnect Datapath Lib Resource structure. - Caller needs to allocate the DC counter structure array. 
-*/ + \brief DirectConnect Datapath Lib Resource structure. + Caller needs to allocate the DC counter structure array. + */ struct dc_dp_res { uint32_t num_bufs_req; /*!< [in] Number of buffers to allocate (Directconnect Peripheral -> SoC direction) */ int32_t num_bufpools; /*!< [out] Number of buffer pools/chunks allocated for the desired no of buffers */ struct dc_dp_buf_pool *buflist; /*!< [out] Allocated list of buffer chunks from which packet buffers are carved out. - Caller needs to free the memory given by buflist pointer. */ + Caller needs to free the memory given by buflist pointer. */ uint32_t tx_num_bufs_req; /*!< [in] Number of buffers to allocate (Soc -> Directconnect Peripheral direction) */ int32_t tx_num_bufpools; /*!< [out] Number of buffer pools/chunks allocated for the desired no of buffers */ struct dc_dp_buf_pool *tx_buflist; /*!< [out] Allocated list of buffer chunks from which packet buffers are carved out. - Caller needs to free the memory given by buflist pointer. */ + Caller needs to free the memory given by buflist pointer. */ uint32_t tx_desc_start_idx; /*!< [out] Tx Prefill counter to be set */ struct dc_dp_ring_res rings; /*!< [in/out] All the communication rings depending on DCMode0/DCMode1/SWModes */ - int32_t num_dccntr; /*!< [in] Number of DC counter units used - Could be HW or SW . Eg. VRX518 DSL Bonding will use 2 x DC counter. */ - struct dc_dp_dccntr *dccntr; /*!< [in/out] array of number of DC counter structures. Caller needs to allocate and free memory of dccntr pointer. */ - void *dev_priv_res; /*!< [in] Pointer to any dev specific resource structure */ + int32_t num_dccntr; /*!< [in] Number of DC counter units used - Could be HW or SW . Eg. VRX518 DSL Bonding will use 2 x DC counter. */ + struct dc_dp_dccntr *dccntr; /*!< [in/out] array of number of DC counter structures. Caller needs to allocate and free memory of dccntr pointer. 
*/ + void *dev_priv_res; /*!< [in] Pointer to any dev specific resource structure */ }; /** - \brief DirectConnect Datapath Lib Counter write/read mode structure. -*/ + \brief DirectConnect Datapath Lib Counter write/read mode structure. + */ struct dc_dp_counter_mode { - uint32_t soc2dev_write; /*!< Supported DC Counter write mode, as defined in DC_DP_F_DCCNTR_MODE flags */ - uint32_t dev2soc_write; /*!< Supported DC Counter read mode, as defined in DC_DP_F_DCCNTR_MODE flags */ + uint32_t soc2dev_write; /*!< Supported DC Counter write mode, as defined in DC_DP_F_DCCNTR_MODE flags */ + uint32_t dev2soc_write; /*!< Supported DC Counter read mode, as defined in DC_DP_F_DCCNTR_MODE flags */ }; /** - \brief DirectConnect Datapath Lib SoC capability structure. -*/ + \brief DirectConnect Datapath Lib SoC capability structure. + */ /* FIXME : can we use same object for all path? */ struct dc_dp_host_cap { uint32_t version; /*!< DC API version */ @@ -1066,17 +1155,46 @@ struct dc_dp_host_cap { } swpath; }; +/** \brief DirectConnect Data Path subif registration attributes. 
+ */ +struct dc_dp_res_ext { + int32_t deq_port_base; /*!< Base CQM port obtained for this peripheral */ + struct dc_dp_ring soc2dev; /*!< soc2dev/TXIN ring */ +}; + +/** \brief DirectConnect Datapath Device configuration in SoC + */ +struct dc_dp_dev_config { + uint32_t txin_ring_size; /*!< [out] Max TX_IN Ring Size supported by SoC */ + uint32_t txout_ring_size; /*!< [out] Max TX_OUT Ring Size supported by SoC */ + uint32_t rxin_ring_size; /*!< [out] Max RX_IN Ring Size supported by SoC */ + uint32_t rxout_ring_size; /*!< [out] Max RX_OUT Ring Size supported by SoC */ + uint32_t rx_bufs; /*!< [out] Max Rx Bufs requested by Peripheral for prefetching in RXIN */ + uint32_t tx_bufs; /*!< [out] Max Tx Bufs requested by Peripheral for prefetching in TXIN */ +}; + /* @} */ /** \addtogroup DirectConnect_Datapath_Lib_APIs */ /* @{ */ -/** \brief DirectConnect Datapath Allocate Data structure port may map to an exclusive netdevice - \param[out] cap Pointer to DC SoC capability structure - \param[in] flags : Reserved - \return 0 if OK / -1 if error -*/ +/** \brief Obtain device capability in SoC. + \param[in] dev_id enum indicating device type + \param[out] dev_config Device configuration + \return 0 if OK / -1 if error + */ +int32_t +dc_dp_get_peripheral_config ( + enum dc_dp_dev_type dev_id, + struct dc_dp_dev_config *dev_config +); + +/** \brief Obtain SoC capability. + \param[out] cap Pointer to DC SoC capability structure + \param[in] flags : Reserved + \return 0 if OK / -1 if error + */ /* FIXME : can we add port specific capability as well? */ int32_t dc_dp_get_host_capability ( @@ -1085,21 +1203,24 @@ dc_dp_get_host_capability ( ); /** \brief DirectConnect Datapath Allocate Data structure port may map to an exclusive netdevice - like in the case of ethernet LAN ports. In other cases like WLAN, the physical port is a Radio port, - while netdevices are Virtual Access Points (VAPs). In this case, the AP netdevice can be passed. 
- Alternately, driver_port & driver_id will be used to identify this port. - \param[in] owner Kernel module pointer which owns the port - \param[in] dev_port Physical Port Number of this device managed by the driver - \param[in] dev Pointer to Linux netdevice structure (optional) - \param[in] port_id Optional port_id number requested. Usually, 0 and allocated by driver - \param[in] flags : Use Datapath driver flags for Datapath Port Alloc - - DC_DP_F_FASTPATH : Allocate the port as h/w acclerable - - DC_DP_F_FAST_DSL : Allocate the DSL port as h/w acclerable - - DC_DP_F_LITEPATH : Allocate the port as partial accelerable/offload - - DC_DP_F_SWPATH : Allocate the port as non-accelerable - - DC_DP_F_DEREGISTER : Deallocate the already allocated port - \return Returns allocated Port number for given netdevice. / -1 if error -*/ + like in the case of ethernet LAN ports. In other cases like WLAN, the physical port is a Radio port, + while netdevices are Virtual Access Points (VAPs). In this case, the AP netdevice can be passed. + Alternately, driver_port & driver_id will be used to identify this port. + \param[in] owner Kernel module pointer which owns the port + \param[in] dev_port Physical Port Number of this device managed by the driver + \param[in] dev Pointer to Linux netdevice structure (optional) + \param[in] port_id Optional port_id number requested. 
Usually, 0 and allocated by driver + \param[in] flags : Use Datapath driver flags for Datapath Port Alloc + - DC_DP_F_DEREGISTER : Deallocate the already allocated port + - DC_DP_F_FASTPATH : Allocate the port as h/w acclerable + - DC_DP_F_LITEPATH : Allocate the port as partial accelerable/offload + - DC_DP_F_SWPATH : Allocate the port as non-accelerable + - DC_DP_F_FAST_WLAN : Allocate the WLAN port as h/w acclerable + - DC_DP_F_FAST_DSL : Allocate the DSL port as h/w acclerable + - DC_DP_F_MULTI_PORT : Allocate the port as multiport device + - DC_DP_F_SHARED_RES : Allocate the port as shared resources + \return Returns allocated Port number for given netdevice. / -1 if error + */ int32_t dc_dp_alloc_port ( struct module *owner, @@ -1110,15 +1231,15 @@ dc_dp_alloc_port ( ); /** \brief Higher Layer Driver reservation API - \param[in] owner Kernel module pointer which owns the port - \param[in] port_id Datapath Port Identifier on which to reserve - \param[in] dev Pointer to Linux netdevice structure (Optional) - \param[in] dev_req Peripheral Device Requirements and Capability, as defined in DC_DP_F_DEV_* flags - \param[in] flags : Special input flags to reserve mode routine - - DC_DP_F_DEREGISTER : Free the reservation, if any. - \return 0 if OK / -1 if error - \note Optionally, DC peripheral driver may invoke this API to reserve DC fastpath mode. -*/ + \param[in] owner Kernel module pointer which owns the port + \param[in] port_id Datapath Port Identifier on which to reserve + \param[in] dev Pointer to Linux netdevice structure (Optional) + \param[in] dev_req Peripheral Device Requirements and Capability, as defined in DC_DP_F_DEV_* flags + \param[in] flags : Special input flags to reserve mode routine + - DC_DP_F_DEREGISTER : Free the reservation, if any. + \return 0 if OK / -1 if error + \note Optionally, DC peripheral driver may invoke this API to reserve DC fastpath mode. 
+ */ int32_t dc_dp_reserve_fastpath ( struct module *owner, @@ -1129,23 +1250,23 @@ dc_dp_reserve_fastpath ( ); /** \brief Higher layer Driver Datapath registration API - \param[in] owner Kernel module pointer which owns the port - \param[in] port_id Datapath Port Identifier on which to register - \param[in] dev Pointer to Linux netdevice structure - \param[in] datapathcb Callback registration structure - \param[in,out] resources Buffer, Tx/Rx ring and DC counter resources. - \param[in,out] devspec Device Specification - netdevice name, DC Mode, Context Pointer. - \param[in] flags : Special input flags to register device routine - - DC_DP_F_DEREGISTER : Deregister the device - - DC_DP_F_ALLOC_SW_TX_RING : Specify it if peripheral driver want to allocate sw tx ring - - DC_DP_F_ALLOC_SW_RX_RING : Specify it if peripheral driver want to allocate sw rx ring - - DC_DP_F_DONT_ALLOC_HW_TX_RING : Specify it if peripheral driver don't want to allocate hw tx ring - - DC_DP_F_DONT_ALLOC_HW_RX_RING : Specify it if peripheral driver don't want to allocate hw rx ring - - DC_DP_F_QOS : Specify it if peripheral/dev QoS class is required in DC peripheral - - DC_DP_F_SHARED_RES : Specify it if peripheral driver want to re-use/update dc resources, multi-port/bodning case respectively - \return 0 if OK / -1 if error - \note This is the first registration to be invoked by any DC peripheral. Subsequently additional registrations like Multicast, Ring-Recovery or Power Saving (PS) to be done. -*/ + \param[in] owner Kernel module pointer which owns the port + \param[in] port_id Datapath Port Identifier on which to register + \param[in] dev Pointer to Linux netdevice structure + \param[in] datapathcb Callback registration structure + \param[in,out] resources Buffer, Tx/Rx ring and DC counter resources. + \param[in,out] devspec Device Specification - netdevice name, DC Mode, Context Pointer. 
+ \param[in] flags : Special input flags to register device routine + - DC_DP_F_DEREGISTER : Deregister the device + - DC_DP_F_ALLOC_SW_TX_RING : Specify it if peripheral driver want to allocate sw tx ring + - DC_DP_F_ALLOC_SW_RX_RING : Specify it if peripheral driver want to allocate sw rx ring + - DC_DP_F_DONT_ALLOC_HW_TX_RING : Specify it if peripheral driver don't want to allocate hw tx ring + - DC_DP_F_DONT_ALLOC_HW_RX_RING : Specify it if peripheral driver don't want to allocate hw rx ring + - DC_DP_F_QOS : Specify it if peripheral/dev QoS class is required in DC peripheral + - DC_DP_F_SHARED_RES : Specify it if peripheral driver want to re-use/update dc resources, multi-port/bodning case respectively + \return 0 if OK / -1 if error + \note This is the first registration to be invoked by any DC peripheral. Subsequently additional registrations like Multicast, Ring-Recovery or Power Saving (PS) to be done. + */ int32_t dc_dp_register_dev ( struct module *owner, @@ -1158,21 +1279,21 @@ dc_dp_register_dev ( ); /** \brief Allocates datapath subif number to a sub-interface netdevice. - Sub-interface value must be passed to the driver. - The port may map to an exclusive netdevice like in the case of ethernet LAN ports. - \param[in] owner Kernel module pointer which owns the port - \param[in] dev Pointer to Linux netdevice structure. Optional for VRX518(ATM) driver. - \param[in] subif_name Pointer to device name, this is MUST if Linux netdevice is NULL. - \param[in,out] subif_id Pointer to subif_id structure including port_id - \param[in] flags : - DC_DP_F_DEREGISTER - De-register already registered subif/vap - DC_DP_F_REGISTER_LITEPATH - Register already registered subif/vap as LitePath Partial offload - DC_DP_F_DEREGISTER_LITEPATH - De-register already registered LitePath subif/logical netdev from LitePath - \return 0 if OK / -1 if error - \note Sub-Interface is applicable for logical or virtual interfaces like VAP (SSID) or VLAN. 
- \note LITEPATH register/deregister only works on Partial offload or NOT, does deregister subif. - \note if subinterface has to be deregistered completely, DC_DP_F_DEREGISTER flag must be passed. -*/ + Sub-interface value must be passed to the driver. + The port may map to an exclusive netdevice like in the case of ethernet LAN ports. + \param[in] owner Kernel module pointer which owns the port + \param[in] dev Pointer to Linux netdevice structure. Optional for VRX518(ATM) driver. + \param[in] subif_name Pointer to device name, this is MUST if Linux netdevice is NULL. + \param[in,out] subif_id Pointer to subif_id structure including port_id + \param[in] flags : + - DC_DP_F_DEREGISTER : De-register already registered subif/vap + - DC_DP_F_REGISTER_LITEPATH : Register already registered subif/vap as LitePath Partial offload + - DC_DP_F_DEREGISTER_LITEPATH : De-register already registered LitePath subif/logical netdev from LitePath + \return 0 if OK / -1 if error + \note Sub-Interface is applicable for logical or virtual interfaces like VAP (SSID) or VLAN. + \note LITEPATH register/deregister only works on Partial offload or NOT, does deregister subif. + \note if subinterface has to be deregistered completely, DC_DP_F_DEREGISTER flag must be passed. + */ int32_t dc_dp_register_subif ( struct module *owner, @@ -1182,19 +1303,46 @@ dc_dp_register_subif ( uint32_t flags ); +/** \brief Allocates datapath subif number to a sub-interface netdevice. + Sub-interface value must be passed to the driver. + The port may map to an exclusive netdevice like in the case of ethernet LAN ports. + \param[in] owner Kernel module pointer which owns the port + \param[in] dev Pointer to Linux netdevice structure. Optional for VRX518(ATM) driver. + \param[in] subif_name Pointer to device name, this is MUST if Linux netdevice is NULL. 
+ \param[in,out] subif_id Pointer to subif_id structure including port_id + \param[in,out] res_ext Pointer to dc_dp_res_ext structure + \param[in] flags : + - DC_DP_F_DEREGISTER : De-register already registered subif/vap + - DC_DP_F_REGISTER_LITEPATH : Register already registered subif/vap as LitePath Partial offload + - DC_DP_F_DEREGISTER_LITEPATH : De-register already registered LitePath subif/logical netdev from LitePath + \return 0 if OK / -1 if error + \note Sub-Interface is applicable for logical or virtual interfaces like VAP (SSID) or VLAN. + \note LITEPATH register/deregister only works on Partial offload or NOT, does deregister subif. + \note if subinterface has to be deregistered completely, DC_DP_F_DEREGISTER flag must be passed. + */ +int32_t +dc_dp_register_subif_ext ( + struct module *owner, + struct net_device *dev, + const uint8_t *subif_name, + struct dp_subif *subif_id, + struct dc_dp_res_ext *res_ext, + uint32_t flags +); + /** \brief Transmit packet to low-level Datapath driver - \param[in] rx_if Rx If netdevice pointer (optional)- MUST be set when received net_device is known. - \param[in] rx_subif Rx SubIf pointer (Optional) - MUST be set when atleast received {PortId, <SubifId>} are known. - \param[in] tx_subif Tx SubIf pointer - \param[in] skb Pointer to packet buffer like sk_buff - \param[in] len Length of packet to transmit - \param[in] flags : - DC_DP_F_XMIT_LITEPATH - send packet through litepath - DC_DP_F_XMIT_OAM - Send packet through ATM only - \return 0 if OK / -1 if error - \note Either rx_if or rx_subif would be passed in this routine if LitePath - \note skb is freed by underlying layer if error -*/ + \param[in] rx_if Rx If netdevice pointer (optional)- MUST be set when received net_device is known. + \param[in] rx_subif Rx SubIf pointer (Optional) - MUST be set when atleast received {PortId, <SubifId>} are known. 
+ \param[in] tx_subif Tx SubIf pointer + \param[in] skb Pointer to packet buffer like sk_buff + \param[in] len Length of packet to transmit + \param[in] flags : + - DC_DP_F_XMIT_LITEPATH : send packet through litepath + - DC_DP_F_XMIT_OAM : Send packet through ATM only + \return 0 if OK / -1 if error + \note Either rx_if or rx_subif would be passed in this routine if LitePath + \note skb is freed by underlying layer if error + */ int32_t dc_dp_xmit ( struct net_device *rx_if, @@ -1205,18 +1353,35 @@ dc_dp_xmit ( uint32_t flags ); +/** \brief Enable DMA IRQ + \param[in] owner + \param[in] port_id + \param[in] dev + \param[in] subif + \param[in] flags : + \return 0 if OK / -1 if error + */ +int32_t +dc_dp_enable_dma_irq ( + struct module *owner, + uint32_t port_id, + struct net_device *dev, + struct dp_subif *subif, + uint32_t flags +); + /** \brief Handle Tx-Confirm and/or Rx interrupts (for SWPATH - w/ or w/o LITEPATH only) - \param[in] owner Kernel module pointer which owns the port. - \param[in] port_id Datapath Port Id (PMAC Port No). - \param[in] dev Pointer to Linux netdevice structure - (Optional). - \param[in] ring Pointer to Ring which needs processing. - \param[in] flags Special input flags to interrupt handling routine - - DC_DP_F_TX_COMPLETE : To handle Tx return interrupt - - DC_DP_F_RX : To handle Rx interrupt - - DC_DP_F_RX_FRAG_EXCEPT : To handle Rx exception (fragmentation) interrupt - \return 0 if OK / -1 if error - \note This processing will be called from a tasklet (Device specific) -*/ + \param[in] owner Kernel module pointer which owns the port. + \param[in] port_id Datapath Port Id (PMAC Port No). + \param[in] dev Pointer to Linux netdevice structure - (Optional). + \param[in] ring Pointer to Ring which needs processing. 
+ \param[in] flags Special input flags to interrupt handling routine + - DC_DP_F_TX_COMPLETE : To handle Tx return interrupt + - DC_DP_F_RX : To handle Rx interrupt + - DC_DP_F_RX_FRAG_EXCEPT : To handle Rx exception (fragmentation) interrupt + \return 0 if OK / -1 if error + \note This processing will be called from a tasklet (Device specific) + */ int32_t dc_dp_handle_ring_sw ( struct module *owner, @@ -1226,39 +1391,54 @@ dc_dp_handle_ring_sw ( uint32_t flags ); +/** \brief Return the HW allocated buffers + \param[in] port_id Port Id + \param[in] buflist List of buffers to be returned + \param[in] buflist_len Number of buffers in the buflist + \param[in] flags Reserved for future use + \return 0 if OK / -1 if error + */ +int32_t +dc_dp_return_bufs ( + uint32_t port_id, + void **buflist, + uint32_t buflist_len, + uint32_t flags +); + /** \brief Allocate skbuff buffer in DDR/SRAM - \param[in] len Length of the buffer required - \param[in] subif Pointer to sub-interface for which allocation is requried - \param[in] flags Reserved - \return skb pointer if OK / NULL if error -*/ -struct sk_buff* dc_dp_alloc_skb ( + \param[in] len Length of the buffer required + \param[in] subif Pointer to sub-interface for which allocation is requried + \param[in] flags Reserved + \return skb pointer if OK / NULL if error + */ +struct sk_buff * dc_dp_alloc_skb ( uint32_t len, struct dp_subif *subif, uint32_t flags ); /** \brief Free the allocated buffer - \param[in] subif Pointer to sub-interface for which free is requried - \param[in] skb Pointer to packet sk_buff structure to be freed - \return 0 if OK / -1 if error -*/ + \param[in] subif Pointer to sub-interface for which free is requried + \param[in] skb Pointer to packet sk_buff structure to be freed + \return 0 if OK / -1 if error + */ int32_t dc_dp_free_skb ( struct dp_subif *subif, struct sk_buff *skb ); /** \brief Allocate buffers in HW/FW/SW - \param[in] subif Pointer to sub-interface for which allocation is requried - 
\param[in] buf_len Length of the buffer required - \param[in] num_bufs_req Number of buffers to allocate - \param[out] num_bufpools Number of buffer pools/chunks allocated for the desired no of buffers - \param[out] buflist Allocated list of buffer chunks from which packet buffers are carved out. - Caller needs to free the memory given by buflist pointer. - \param[in] dir direction, where enum values are as descrbed in enum dc_dp_dir_type. - \param[in] flags Reserved - \return 0 if OK / -1 if error -*/ + \param[in] subif Pointer to sub-interface for which allocation is requried + \param[in] buf_len Length of the buffer required + \param[in] num_bufs_req Number of buffers to allocate + \param[out] num_bufpools Number of buffer pools/chunks allocated for the desired no of buffers + \param[out] buflist Allocated list of buffer chunks from which packet buffers are carved out. + Caller needs to free the memory given by buflist pointer. + \param[in] dir direction, where enum values are as descrbed in enum dc_dp_dir_type. + \param[in] flags Reserved + \return 0 if OK / -1 if error + */ /* FIXME : Any use? */ int32_t dc_dp_alloc_bufs ( struct dp_subif *subif, @@ -1271,14 +1451,14 @@ int32_t dc_dp_alloc_bufs ( ); /** \brief Free the allocated buffers from HW/FW/SW - \param[in] subif Pointer to sub-interface for which allocation is requried - \param[in] num_bufpools Number of buffer pools/chunks allocated for the desired no of buffers - \param[in] buflist Allocated list of buffer chunks from which packet buffers are carved out. - Caller needs to free the memory given by buflist pointer. - \param[in] dir direction, where enum values are as descrbed in enum dc_dp_dir_type. 
- \param[in] flags Reserved - \return 0 if OK / -1 if error -*/ + \param[in] subif Pointer to sub-interface for which allocation is requried + \param[in] num_bufpools Number of buffer pools/chunks allocated for the desired no of buffers + \param[in] buflist Allocated list of buffer chunks from which packet buffers are carved out. + Caller needs to free the memory given by buflist pointer. + \param[in] dir direction, where enum values are as descrbed in enum dc_dp_dir_type. + \param[in] flags Reserved + \return 0 if OK / -1 if error + */ /* FIXME : Any use? */ int32_t dc_dp_free_bufs ( struct dp_subif *subif, @@ -1289,17 +1469,17 @@ int32_t dc_dp_free_bufs ( ); /** \brief Allow Acceleration subsystem to learn about session when driver shortcuts - forwarding without going to stack. - \param[in] subif Pointer to sub-interface for which free is requried - \param[in] skb Pointer to packet with skb->dev pointing to RxIf in PREFORWARDING - and TxIf in POSTFORWARDING - \param[in] flags : DC_DP_F_PREFORWARDING/DC_DP_F_POSTFORWARDING - \return 0 if OK / -1 if error - \remark skb->dev needs to be filled based on Rx and Tx Netif in the call - \remark Peripheral driver needs to call once in PREFORWARDING and once in - POSTFORWARDING - \remark this will call PPA/HIL learning hook as required -*/ + forwarding without going to stack. 
+ \param[in] subif Pointer to sub-interface for which free is requried + \param[in] skb Pointer to packet with skb->dev pointing to RxIf in PREFORWARDING + and TxIf in POSTFORWARDING + \param[in] flags : DC_DP_F_PREFORWARDING/DC_DP_F_POSTFORWARDING + \return 0 if OK / -1 if error + \remark skb->dev needs to be filled based on Rx and Tx Netif in the call + \remark Peripheral driver needs to call once in PREFORWARDING and once in + POSTFORWARDING + \remark this will call PPA/HIL learning hook as required + */ int32_t dc_dp_add_session_shortcut_forward ( struct dp_subif *subif, struct sk_buff *skb, @@ -1307,16 +1487,19 @@ int32_t dc_dp_add_session_shortcut_forward ( ); /** \brief Set port id in Host SoC DMA Descriptor. - \param[in,out] skb Pointer to sk_buff structure - \param[in] port_id port identifier value - \param[in] flags Reserved - \return 0 if OK / -1 if error - \note This utility function will set and return DMA Decriptor's from given port value. -*/ + \param[in,out] skb Pointer to sk_buff structure + \param[in] port_id port identifier value + \param[in] flags Reserved + \return 0 if OK / -1 if error + \note This utility function will set and return DMA Decriptor's from given port value. + */ +#if defined(CONFIG_INTEL_DATAPATH) +#define dc_dp_set_ep_pkt(...) +#else static inline int32_t dc_dp_set_ep_pkt ( struct sk_buff *skb, - int32_t port_id, + int32_t port_id, uint32_t flags ) { @@ -1331,14 +1514,15 @@ dc_dp_set_ep_pkt ( return DC_DP_SUCCESS; } +#endif /** \brief Get port id from DMA Desc DWORD 1 (DW1). - \param[in] skb Pointer to sk_buff structure - \param[out] port_id Port identifier value - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \note This utility funciton will extract port identifier from given DMA Descriptor's DWord-1. 
-*/ + \param[in] skb Pointer to sk_buff structure + \param[out] port_id Port identifier value + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \note This utility funciton will extract port identifier from given DMA Descriptor's DWord-1. + */ static inline int32_t dc_dp_get_ep_pkt ( struct sk_buff *skb, @@ -1359,16 +1543,19 @@ dc_dp_get_ep_pkt ( } /** \brief Set sub-interface id in DMA Desc DWORD 0 (DW0). - \param[in] port_id port identifier value - \param[in,out] skb Pointer to sk_buff structure - \param[in] subif_id subinterface id value - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \note This funciton will return DMA Descriptior's DWord-0 by setting the given sub-interface id value. -*/ + \param[in] port_id port identifier value + \param[in,out] skb Pointer to sk_buff structure + \param[in] subif_id subinterface id value + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \note This funciton will return DMA Descriptior's DWord-0 by setting the given sub-interface id value. + */ +#if defined(CONFIG_INTEL_DATAPATH) +#define dc_dp_set_subifid_pkt(...) +#else static inline int32_t dc_dp_set_subifid_pkt ( - int32_t port_id, + int32_t port_id, struct sk_buff *skb, int32_t subif_id, uint32_t flags @@ -1384,18 +1571,19 @@ dc_dp_set_subifid_pkt ( return DC_DP_SUCCESS; } +#endif /** \brief Get sub-interface id from DMA Desc DWORD 0 (DW0). - \param[in] port_id port identifier value - \param[in] skb Pointer to sk_buff structure - \param[out] subif_id subinterface id value - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \note This utility function will extract sub-interface id from given DMA Descriptor DWord-0. -*/ + \param[in] port_id port identifier value + \param[in] skb Pointer to sk_buff structure + \param[out] subif_id subinterface id value + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \note This utility function will extract sub-interface id from given DMA Descriptor DWord-0. 
+ */ static inline int32_t dc_dp_get_subifid_pkt ( - int32_t port_id, + int32_t port_id, struct sk_buff *skb, int32_t *subif_id, uint32_t flags @@ -1413,16 +1601,19 @@ dc_dp_get_subifid_pkt ( } /** \brief Set complete mark into DMA Desc DWORD 3 (DW3). - \param[in] port_id port identifier value - \param[in,out] skb Pointer to sk_buff structure - \param[in] flags : Reserved - \return 0 if OK / -1 if error - \note This utility macro will set complete mark into DMA Descriptor DWord-3, - to indicate that the peripheral specific desc fileds are all set. -*/ + \param[in] port_id port identifier value + \param[in,out] skb Pointer to sk_buff structure + \param[in] flags : Reserved + \return 0 if OK / -1 if error + \note This utility macro will set complete mark into DMA Descriptor DWord-3, + to indicate that the peripheral specific desc fileds are all set. + */ +#if defined(CONFIG_INTEL_DATAPATH) +#define dc_dp_set_mark_pkt(...) +#else static inline int32_t dc_dp_set_mark_pkt ( - int32_t port_id, + int32_t port_id, struct sk_buff *skb, uint32_t flags ) @@ -1437,18 +1628,19 @@ dc_dp_set_mark_pkt ( return DC_DP_SUCCESS; } +#endif /** \brief Get special mark from DMA Desc DWORD 3 (DW3). - \param[in] port_id port identifier value - \param[in] skb Pointer to sk_buff structure - \param[in] flags : Reserved - \return 1 if already marked / 0 if not marked / -1 if error - \note This utility macro will get complete mark into DMA Descriptor DWord-3, - to indicate that the peripheral specific desc fileds are all set. -*/ + \param[in] port_id port identifier value + \param[in] skb Pointer to sk_buff structure + \param[in] flags : Reserved + \return 1 if already marked / 0 if not marked / -1 if error + \note This utility macro will get complete mark into DMA Descriptor DWord-3, + to indicate that the peripheral specific desc fileds are all set. 
+ */ static inline int32_t dc_dp_get_mark_pkt ( - int32_t port_id, + int32_t port_id, struct sk_buff *skb, uint32_t flags ) @@ -1463,14 +1655,14 @@ dc_dp_get_mark_pkt ( } /** \brief Disconnect a particular MAC addr or an network device - remove all MAC table entries - and/or routing sessions which use the specified MAC address. - \param[in] netif netdevice pointer through which all entries must be removed from acceleration - optional - \param[in] subif_id Sub-interface identifier to remove on netif - \param[in] mac_addr MAC address to remove - \param[in] flags Reserved for future use - \return 0 if OK / -1 if error - \note One of subif_id, mac_addr or netif must be specified -*/ + and/or routing sessions which use the specified MAC address. + \param[in] netif netdevice pointer through which all entries must be removed from acceleration - optional + \param[in] subif_id Sub-interface identifier to remove on netif + \param[in] mac_addr MAC address to remove + \param[in] flags Reserved for future use + \return 0 if OK / -1 if error + \note One of subif_id, mac_addr or netif must be specified + */ int32_t dc_dp_disconn_if ( struct net_device *netif, @@ -1480,20 +1672,20 @@ dc_dp_disconn_if ( ); /** \brief Register/De-register a DirectConnect interface to MCAST helper module - \param[in] dev Net device to be registered, e.g., wlan0_0. - \param[in] owner Kernel module pointer which owns the port. - \param[in] cb Multicast callback function. - \param[in] flags : - DC_DP_F_MC_REGISTER - Register a DirectConnect interface. - DC_DP_F_MC_DEREGISTER - De-register already registered DirectConnect interface. - DC_DP_F_MC_UPDATE_MAC_ADDR - Register a DirectConnect interface to get mcast SSM support. - DC_DP_F_MC_FW_RESET - Cleanup request on already learned entries on the registered DirectConnect interface. - DC_DP_F_MC_NEW_STA - Re-learn request on the registered DirectConnect interface. 
- \return 0 if OK / -1 if error - \note It can be skipped for specific peripheral driver if there is no notion - of host connect/disconnect on the peripheral network/interface. - \note Eg. WIFI has station association and disassociation which can be used -*/ + \param[in] dev Net device to be registered, e.g., wlan0_0. + \param[in] owner Kernel module pointer which owns the port. + \param[in] cb Multicast callback function. + \param[in] flags : + - DC_DP_F_MC_REGISTER : Register a DirectConnect interface. + - DC_DP_F_MC_DEREGISTER : De-register already registered DirectConnect interface. + - DC_DP_F_MC_UPDATE_MAC_ADDR : Register a DirectConnect interface to get mcast SSM support. + - DC_DP_F_MC_FW_RESET : Cleanup request on already learned entries on the registered DirectConnect interface. + - DC_DP_F_MC_NEW_STA : Re-learn request on the registered DirectConnect interface. + \return 0 if OK / -1 if error + \note It can be skipped for specific peripheral driver if there is no notion + of host connect/disconnect on the peripheral network/interface. + \note Eg. 
WIFI has station association and disassociation which can be used + */ int32_t dc_dp_register_mcast_module ( struct net_device *dev, @@ -1504,42 +1696,42 @@ dc_dp_register_mcast_module ( /** \brief Provide a Priority (802.1D Priority) to Device QoS/WMM Class/TID map for the * given WiFi Radio/net_device - \param[in] port_id Port Id on which mapping is to be updated - \param[in] netif Pointer to stack network interface structure on which mapping is to be updated - \param[in] prio2qos Array of priority to QoS/WMM Class/TID mapping values - \return 0 if OK / -1 if error - \note One of port_id or netif must be specified - \note The DC driver must configure the Egress PMAC table to mark the WMM Class/TID in the descriptor DW1[7:4] -*/ + \param[in] port_id Port Id on which mapping is to be updated + \param[in] netif Pointer to stack network interface structure on which mapping is to be updated + \param[in] prio2qos Array of priority to QoS/WMM Class/TID mapping values + \return 0 if OK / -1 if error + \note One of port_id or netif must be specified + \note The DC driver must configure the Egress PMAC table to mark the WMM Class/TID in the descriptor DW1[7:4] + */ int32_t dc_dp_map_prio_devqos_class ( - int32_t port_id, + int32_t port_id, struct net_device *netif, uint8_t prio2qos[DC_DP_MAX_DEV_CLASS] ); /** \brief Mark Dev QoS Class/WMM AC/TID in given packet - \param[in] port_id Port Id on which mapping is to be updated - \param[in] dst_netif Pointer to stack network interface structure - \param[in] skb : pointer to network packet/sk_buff structure - \return Dev Class/WMM value marked if successful, -1 on error - \note The WAVE/ DC Datapath driver needs to use skb->extmark/skb->priority to mark and return WMM Class/TID -*/ + \param[in] port_id Port Id on which mapping is to be updated + \param[in] dst_netif Pointer to stack network interface structure + \param[in] skb : pointer to network packet/sk_buff structure + \return Dev Class/WMM value marked if successful, -1 on 
error + \note The WAVE/ DC Datapath driver needs to use skb->extmark/skb->priority to mark and return WMM Class/TID + */ int32_t dc_dp_mark_pkt_devqos ( - int32_t port_id, + int32_t port_id, struct net_device *dst_netif, struct sk_buff *skb ); /** \brief Get DirectConnect interface statistics. Either netdevice or subif_id has to be passed to this API. - \param[in] netif Pointer to Linux netdevice structure - \param[in] subif_id Datapath Port Number and Sub-Interface (if applicable else -1). - \param[out] if_stats Pointer to Linux rtnl_link_stats64 structure - \param[in] flags : - DC_DP_F_SUBIF_LOGICAL - to be used when the interface is subif type - \return 0 if OK / -1 if error -*/ + \param[in] netif Pointer to Linux netdevice structure + \param[in] subif_id Datapath Port Number and Sub-Interface (if applicable else -1). + \param[out] if_stats Pointer to Linux rtnl_link_stats64 structure + \param[in] flags : + - DC_DP_F_SUBIF_LOGICAL : to be used when the interface is subif type + \return 0 if OK / -1 if error + */ int32_t dc_dp_get_netif_stats ( struct net_device *netif, @@ -1549,12 +1741,12 @@ dc_dp_get_netif_stats ( ); /** \brief Clear DirectConnect interface statistics. Either netdevice or subif_id has to be passed. - \param[in] netif Pointer to Linux netdevice structure - \param[in] subif_id Datapath Port Number and Sub-Interface (if applicable else -1). - \param[in] flags Flag Type to pass additional info such as - DC_DP_F_SUBIF_LOGICAL - to be used when the interface is subif type - \return 0 if OK / -1 if error -*/ + \param[in] netif Pointer to Linux netdevice structure + \param[in] subif_id Datapath Port Number and Sub-Interface (if applicable else -1). 
+ \param[in] flags Flag Type to pass additional info such as + - DC_DP_F_SUBIF_LOGICAL : to be used when the interface is subif type + \return 0 if OK / -1 if error + */ int32_t dc_dp_clear_netif_stats ( struct net_device *netif, @@ -1564,13 +1756,13 @@ dc_dp_clear_netif_stats ( /** * \brief Disable or Enable Enqueue and/or Dequeue to/from peripheral in SoC. - \param[in] port_id Datapath Port Id - \param[in] flags : What to enable or disable - DC_DP_F_ENQ_ENABLE, - DC_DP_F_ENQ_DISABLE, - DC_DP_F_DEQ_DISABLE, - DC_DP_F_DEQ_ENABLE - \return 0 if OK / -1 if error + \param[in] port_id Datapath Port Id + \param[in] flags : What to enable or disable + - DC_DP_F_ENQ_ENABLE + - DC_DP_F_ENQ_DISABLE + - DC_DP_F_DEQ_DISABLE + - DC_DP_F_DEQ_ENABLE + \return 0 if OK / -1 if error */ int32_t dc_dp_change_dev_status_in_soc ( @@ -1580,10 +1772,10 @@ dc_dp_change_dev_status_in_soc ( /** * \brief Flush the ring. - \param[in] owner Kernel module pointer which owns the port - \param[in] port_id Datapath Port Id - \param[in] flags : Reserved - \return 0 if OK / -1 if error + \param[in] owner Kernel module pointer which owns the port + \param[in] port_id Datapath Port Id + \param[in] flags : Reserved + \return 0 if OK / -1 if error */ /* FIXME : Any use? */ int32_t @@ -1594,11 +1786,11 @@ dc_dp_flush_ring ( ); /** \brief Get Wake-on-LAN configuration. 
- \param[in] port_id Datapath Port Id - \param[out] cfg Wake-on-LAN Configuration - \param[in] flags : Reserved - \return 0 if OK / -1 if error -*/ + \param[in] port_id Datapath Port Id + \param[out] cfg Wake-on-LAN Configuration + \param[in] flags : Reserved + \return 0 if OK / -1 if error + */ int32_t dc_dp_get_wol_cfg ( int32_t port_id, @@ -1607,11 +1799,11 @@ dc_dp_get_wol_cfg ( ); /** \brief Set Wake-on-LAN config - \param[in] port_id Datapath Port Id - \param[in] cfg Wake-on-LAN Configuration - \param[in] flags : Reserved - \return 0 if OK / -1 if error -*/ + \param[in] port_id Datapath Port Id + \param[in] cfg Wake-on-LAN Configuration + \param[in] flags : Reserved + \return 0 if OK / -1 if error + */ int32_t dc_dp_set_wol_cfg ( int32_t port_id, @@ -1620,10 +1812,10 @@ dc_dp_set_wol_cfg ( ); /** \brief Enable/Disable Wake-on-LAN functionality - \param[in] port_id Datapath Port Id - \param[in] enable Enable Wake-on-LAN if 1, disable Wake-on-LAN if 0 - \return 0 if OK / -1 if error -*/ + \param[in] port_id Datapath Port Id + \param[in] enable Enable Wake-on-LAN if 1, disable Wake-on-LAN if 0 + \return 0 if OK / -1 if error + */ int32_t dc_dp_set_wol_ctrl ( int32_t port_id, @@ -1631,9 +1823,9 @@ dc_dp_set_wol_ctrl ( ); /** \brief Get Wake-on-LAN status - \param[in] port_id Datapath Port Id - \return 1 if Enabled/ 0 if Disabled / -1 if error -*/ + \param[in] port_id Datapath Port Id + \return 1 if Enabled/ 0 if Disabled / -1 if error + */ int32_t dc_dp_get_wol_ctrl_status ( int32_t port_id @@ -1646,13 +1838,13 @@ dc_dp_get_wol_ctrl_status ( /* @{ */ /** \brief Register/De-register to/from the CPUFreq framework - \param[in] nb Notifier function to register - \param[in] notify_type Notifier list type (DC_DP_PS_TRANSITION_NOTIFIER / DC_DP_PS_POLICY_NOTIFIER) - \param[in] flags : - - DC_DP_F_PS_REGISTER : Register to CPUFreq framework - - DC_DP_F_PS_DEREGISTER : De-register from CPUFreq framework - \return 0 if OK / < 0 if error -*/ + \param[in] nb Notifier function 
to register + \param[in] notify_type Notifier list type (DC_DP_PS_TRANSITION_NOTIFIER / DC_DP_PS_POLICY_NOTIFIER) + \param[in] flags : + - DC_DP_F_PS_REGISTER : Register to CPUFreq framework + - DC_DP_F_PS_DEREGISTER : De-register from CPUFreq framework + \return 0 if OK / < 0 if error + */ int32_t dc_dp_register_power_notifier ( struct notifier_block *nb, @@ -1661,11 +1853,11 @@ dc_dp_register_power_notifier ( ); /** \brief Request new system power state - \param[in] module Module identifier - \param[in] module_nr Module Number - \param[in] new_state Power state as defined in dc_dp_power_state - \return DC_DP_PS_SUCCESS / DC_DP_PS_DENIED / DC_DP_PS_NOACTIVITY / DC_DP_PS_NOTDEFINED -*/ + \param[in] module Module identifier + \param[in] module_nr Module Number + \param[in] new_state Power state as defined in dc_dp_power_state + \return DC_DP_PS_SUCCESS / DC_DP_PS_DENIED / DC_DP_PS_NOACTIVITY / DC_DP_PS_NOTDEFINED + */ int32_t dc_dp_req_power_state ( enum dc_dp_power_module module, @@ -1674,10 +1866,10 @@ dc_dp_req_power_state ( ); /** \brief Get Power Mgmt polling period per module - \param[in] module Module identifier - \param[in] module_nr - \return Polling period -*/ + \param[in] module Module identifier + \param[in] module_nr + \return Polling period + */ int32_t dc_dp_get_ps_poll_period ( enum dc_dp_power_module module, @@ -1685,10 +1877,10 @@ dc_dp_get_ps_poll_period ( ); /** \brief Get power state related threshold values per module - \param[in] module Module identifier - \param[in] module_nr - \return dc_dp_ps_threshold pointer if OK / NULL if error -*/ + \param[in] module Module identifier + \param[in] module_nr + \return dc_dp_ps_threshold pointer if OK / NULL if error + */ struct dc_dp_ps_threshold * dc_dp_get_ps_threshold ( enum dc_dp_power_module module, @@ -1696,10 +1888,10 @@ dc_dp_get_ps_threshold ( ); /** \brief Add/Delete module from CPUFReq module list - \param[in] head Module list head pointer - \param[in] add DC_DP_PS_LIST_ADD / 
DC_DP_PS_LIST_DEL - \return 0 if OK / < 0 if error -*/ + \param[in] head Module list head pointer + \param[in] add DC_DP_PS_LIST_ADD / DC_DP_PS_LIST_DEL + \return 0 if OK / < 0 if error + */ int32_t dc_dp_mod_ps_list ( struct list_head *head, @@ -1707,9 +1899,9 @@ dc_dp_mod_ps_list ( ); /** \brief Convert frequency to power state - \param[in] freq_khz frequency (in khz) - \return power state if OK / < 0 if error -*/ + \param[in] freq_khz frequency (in khz) + \return power state if OK / < 0 if error + */ enum dc_dp_power_state dc_dp_get_ps_from_khz ( uint32_t freq_khz diff --git a/include/directconnect_dp_dcmode_api.h b/include/directconnect_dp_dcmode_api.h index 75c19d6..2e3e564 100644 --- a/include/directconnect_dp_dcmode_api.h +++ b/include/directconnect_dp_dcmode_api.h @@ -43,6 +43,11 @@ struct dc_dp_dcmode_ops { struct module *owner, uint32_t port_id, struct net_device *dev, struct dc_dp_cb *datapathcb, struct dc_dp_res *resources, struct dc_dp_dev *devspec, uint32_t flags); + int32_t (*register_dev_ex)(void *dev_ctx, + struct module *owner, uint32_t port_id, + struct net_device *dev, struct dc_dp_cb *datapathcb, + struct dc_dp_res *resources, struct dc_dp_dev *devspec, + int32_t ref_port_id, uint32_t alloc_flags, uint32_t flags); int32_t (*register_subif)(void *dev_ctx, struct module *owner, struct net_device *dev, const uint8_t *subif_name, struct dp_subif *subif_id, uint32_t flags); @@ -50,6 +55,7 @@ struct dc_dp_dcmode_ops { struct sk_buff *skb, int32_t len, uint32_t flags); int32_t (*handle_ring_sw)(void *dev_ctx, struct module *owner, uint32_t port_id, struct net_device *dev, struct dc_dp_ring *ring, uint32_t flags); + int32_t (*return_bufs)(void *dev_ctx, uint32_t port_id, void **buflist, uint32_t buflist_len, uint32_t flags); int32_t (*add_session_shortcut_forward)(void *dev_ctx, struct dp_subif *subif, struct sk_buff *skb, uint32_t flags); int32_t (*disconn_if)(void *dev_ctx, struct net_device *netif, struct dp_subif *subif_id, uint8_t 
mac_addr[MAX_ETH_ALEN], uint32_t flags); diff --git a/test/Makefile b/test/Makefile index e69de29..799a0a0 100644 --- a/test/Makefile +++ b/test/Makefile @@ -0,0 +1,3 @@ +obj-m += test-wave600_drv.o +test-wave600_drv-objs += test_wave600_proc_api.o +test-wave600_drv-objs += test_wave600_drv.o diff --git a/test/README b/test/README new file mode 100644 index 0000000..8996aff --- /dev/null +++ b/test/README @@ -0,0 +1,101 @@ +#======================================================== +# Usage +#======================================================== + +1) Install driver: + The following entries are created under /proc/net directory + Under /proc/net/testwlan/ directory, we get the follows + dev datapath cputx + +2) Add dev under /proc/net/testwlan/dev + echo "add <index>" > /proc/net/testwlan/dev + example: echo "add 0" > /proc/net/testwlan/dev + +3) Delete dev from /proc/net/testwlan/dev + echo "<del> [device name]" > /proc/net/testwlan/dev + example: echo "del testwlan0" > /proc/net/testwlan/dev + example: echo "del" > /proc/net/testwlan/dev + +4) Register dev under /proc/net/testwlan/datapath + echo "reg dev <Dev_name> <Dev_port> <Type>" > /proc/net/testwlan/datapath + example: echo "reg dev testwlan0 1 DIRECTPATH" > /proc/net/testwlan/datapath + +5) Unegister dev from /proc/net/testwlan/datapath + echo "unreg dev <dev_name>" > /proc/net/testwlan/datapath + example: echo "unreg dev testwlan0 1 DIRECTPATH" > /proc/net/testwlan/datapath + +6) Register subif under /proc/net/testwlan/datapath + echo "reg subif <dev_name> <Dev_port>" > /proc/net/testwlan/datapath + example: echo "reg subif testwlan4 1" > /proc/net/testwlan/datapath + +7) Unregister subif under /proc/net/testwlan/datapath + echo unregister subif <dev_name> > /proc/net/testwlan/datapath + example: echo "unreg subif testwlan4" > /proc/net/testwlan/datapath + +8) cputx + TX and RX packet: (procedure) + echo "add <index>" /proc/net/testwlan/dev + echo "reg dev <Dev_name> <Dev_port> <Type>" > 
/proc/net/testwlan/datapath + echo "dev <name>" > /proc/net/testwlan/cputx + ifconfig <IF name(e.g; testwlan0)> up + echo "start/stop" > /proc/net/testwlan/cputx + + echo "intcnt <num>" /proc/net/testwlan/cputx + echo "pktsize <num>" /proc/net/testwlan/cputx + + echo "count <num>" > /proc/net/testwlan/cputx + +#======================================================== +# Sample Log +#======================================================== + +1) insmod ./test-wave600_drv.ko; dmesg -c + [ 6822.936812] Loading test_wlan_drv driver ...... Succeeded! + +2) echo "add 0" > /proc/net/testwlan/dev; dmesg -c + [ 4923.805381] add "testwlan0" successfully + +3) echo "reg dev testwlan0 0 FAST_WAVE5x" > /proc/net/testwlan/datapath; dmesg -c + [ 5078.029875] dev_port_str=1 + [ 5078.029880] flag_str=FAST_WLAN + [ 5078.029881] Try to register dev testwlan0 to datapath + [ 5078.029885] Succeed to register dev testwlan0 dev_port 1 to datapath with dp_port 1 + +4) echo "reg subif testwlan0 0" > /proc/net/testwlan/datapath; dmesg -c + [ 5150.210919] Try to register subif testwlan0 to datapath + [ 5150.210924] Succeed to register subitf testwlan0 dev port 1: dp_port 0 subif 0 dp_flag=2 + +5) echo "dev testwlan0" > /proc/net/testwlan/cputx + +6) cat /proc/net/testwlan/cputx + frame count: 0x1 + frame mib count: 0x0 + TX dev name: testwlan0 + Tx pause num: 20 + frame size: 128(Not including CRC) + +7) ifconfig testwlan0 up + +8) echo "start" > /proc/net/testwlan/cputx + ifconfig -a + testwlan0 Link encap:Ethernet HWaddr 00:20:DA:86:23:70 + inet6 addr: fe80::220:daff:fe86:2370/64 Scope:Link + UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 + RX packets:0 errors:0 dropped:0 overruns:0 frame:0 + TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:0 (0.0 b) TX bytes:648 (648.0 b) + +9) echo "stop" > /proc/net/testwlan/cputx + +10) echo "unreg subif testwlan0 0" > /proc/net/testwlan/datapath; dmesg -c + [ 5363.979926] Try to unregister subif 
testwlan0 from datapath + +11) echo "unreg dev testwlan0" > /proc/net/testwlan/datapath; dmesg -c + [ 5220.588540] Try to register dev testwlan0 from datapath + +12) echo "del testwlan0" > /proc/net/testwlan/dev; dmesg -c + +13) rmmod test-wave600_drv; dmesg -c + [ 7285.181610] Unloading test_wlan_drv driver ...... Succeeded! + diff --git a/test/test_wave600_drv.c b/test/test_wave600_drv.c new file mode 100644 index 0000000..1933caf --- /dev/null +++ b/test/test_wave600_drv.c @@ -0,0 +1,2647 @@ +/* +* #################################### +* Head File + +cd /proc/net/testwlan + +#create testwlan devices +echo add > dev + +#dynamically register testwlan1 to datapath +echo register dev testwlan1 1 DIRECTPATH > directpath +echo register subif testwlan1 > directpath + +#dynamically register testwlan2 to datapath +echo register dev testwlan2 2 DIRECTPATH > directpath +echo register subif testwlan2 > directpath + +#dynamically register testwlan3 to datapath +echo register dev testwlan3 3 DIRECTPATH > directpath +echo register subif testwlan3 > directpath + +#register testwlan4 to datapath with specified port_id 12 +echo register dev testwlan4 4 DIRECTPATH 12 > directpath +echo register subif testwlan4 > directpath +echo register subif testwlan5 12 > directpath + +#unregister +echo unregister subif testwlan5 > directpath +echo unregister subif testwlan4 > directpath +echo unregister subif testwlan4 > directpath +* #################################### +*/ + +/* Common Head File*/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/version.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/atmdev.h> +#include <linux/init.h> +#include <linux/etherdevice.h> /* eth_type_trans */ +#include <linux/ethtool.h> /* ethtool_cmd */ +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include 
<linux/inetdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/icmp.h> +#include <net/tcp.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/irq.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <asm/checksum.h> +#include <linux/errno.h> +#ifdef CONFIG_XFRM +#include <net/xfrm.h> +#endif + +//#define CONFIG_DIRECTCONNECT_DP_API 1 // FIXME +#define CONFIG_WAVE600_USING_DP_API 0 +//#define PUMA_DC_MODE1 + +//#include <net/ppa_stack_al.h> +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) +#include <net/directconnect_dp_api.h> +#elif (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) +#include <net/datapath_api.h> +#endif +//#include <net/datapath_proc_api.h> +//#include "../datapath/datapath.h" +#include "test_wlan_proc_api.h" +#include "test_wlan_drv.h" + +/*#################################### +* Definition +* #################################### +*/ + +#define ENABLE_DEBUG 1 + +#define ENABLE_ASSERT 1 + +#define DEBUG_DUMP_SKB 1 + +#if defined(ENABLE_DEBUG) && ENABLE_DEBUG +#define ENABLE_DEBUG_PRINT 1 +#define DISABLE_INLINE 1 +#else +#define ENABLE_DEBUG_PRINT 0 +#define DISABLE_INLINE 0 +#endif + +#if !defined(DISABLE_INLINE) || !DISABLE_INLINE +#define INLINE inline +#else +#define INLINE +#endif + +#define err(format, arg...) do {;\ + PRINTK(KERN_ERR __FILE__ ":%d:%s: " format "\n", \ + __LINE__, __FUNCTION__, ##arg); } \ + while (0) + +#if defined(ENABLE_DEBUG_PRINT) && ENABLE_DEBUG_PRINT +#undef dbg +#define dbg(format, arg...) do { if ((g_dbg_enable &\ + DBG_ENABLE_MASK_DEBUG_PRINT)) \ + PRINTK(KERN_WARNING __FILE__ ":%d:%s: " format "\n",\ + __LINE__, __FUNCTION__, ##arg); } \ + while (0) +#else +#if !defined(dbg) +#define dbg(format, arg...) 
+#endif +#endif + +#if defined(ENABLE_ASSERT) && ENABLE_ASSERT +#define ASSERT(cond, format, arg...) do { \ + if ((g_dbg_enable & DBG_ENABLE_MASK_ASSERT) && !(cond)) \ + PRINTK(KERN_ERR __FILE__ ":%d:%s: " format "\n", \ + __LINE__, __FUNCTION__, ##arg); } \ + while (0) +#else +#define ASSERT(cond, format, arg...) +#endif + +#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB +#define DUMP_SKB_LEN ~0 +#endif + +#if (defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB) \ +|| (defined(ENABLE_DEBUG_PRINT) && ENABLE_DEBUG_PRINT) \ +|| (defined(ENABLE_ASSERT) && ENABLE_ASSERT) +#define ENABLE_DBG_PROC 1 +#else +#define ENABLE_DBG_PROC 0 +#endif + +#define ENABLE_TXDBG 0 + +/* Debug Print Mask*/ +#define DBG_ENABLE_MASK_ERR 0x0001 +#define DBG_ENABLE_MASK_DEBUG_PRINT 0x0002 +#define DBG_ENABLE_MASK_ASSERT 0x0004 +#define DBG_ENABLE_MASK_DUMP_SKB_RX 0x0008 +#define DBG_ENABLE_MASK_DUMP_SKB_TX 0x0010 +#define DBG_ENABLE_MASK_ALL (DBG_ENABLE_MASK_ERR | \ + DBG_ENABLE_MASK_DEBUG_PRINT |\ + DBG_ENABLE_MASK_ASSERT |\ + DBG_ENABLE_MASK_DUMP_SKB_RX |\ + DBG_ENABLE_MASK_DUMP_SKB_TX) + +/* Constant Definition*/ +#define ETH_WATCHDOG_TIMEOUT (10 * HZ) +#define MAX_RX_QUEUE_LENGTH 100 +#define TASKLET_HANDLE_BUDGET 25 + +/* Ethernet Frame Definitions*/ +#define ETH_CRC_LENGTH 4 +#define ETH_MAX_DATA_LENGTH ETH_DATA_LEN +#define ETH_MIN_TX_PACKET_LENGTH ETH_ZLEN + +/* #################################### +* Data Type +* #################################### +*/ + +/* Internal Structure of Devices (ETH/ATM)*/ +#define TESTWLAN_F_FREE 0 +#define TESTWLAN_F_REGISTER_DEV 1 +#define TESTWLAN_F_REGISTER_SUBIF 2 + +struct test_wlan_priv_data { + int id; + struct net_device_stats stats; + unsigned int rx_preprocess_drop; + struct sk_buff_head rx_queue; + struct tasklet_struct rx_tasklet; + int f_tx_queue_stopped; + unsigned char dev_addr[MAX_ADDR_LEN]; + unsigned int dp_pkts_to_ppe; + unsigned int dp_pkts_to_ppe_fail; + unsigned int dp_pkts_from_ppe; + unsigned int dp_pkts_tx; + +#if 
CONFIG_WAVE600_USING_DP_API + dp_cb_t dp_cb; +#endif +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + struct dc_dp_cb wave500_cb; + struct dc_dp_dccntr dccntr; + struct dc_dp_dev devspec; + struct dc_dp_res resources; +#endif + + struct module *owner; +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + dp_subif_t dp_subif; +#endif + int32_t dev_port; /*dev instance */ + //int32_t flag_index; + uint8_t flag_str[64]; + int32_t f_dp; /* status for register to datapath */ +}; + +int32_t g_resvd_port = -1; +int32_t g_wave6x_radio_num = 0; + +static const char *const dbg_enable_mask_str[] = { + "err", /*DBG_ENABLE_MASK_ERR */ + "dbg", /*DBG_ENABLE_MASK_DEBUG_PRINT */ + "assert", /*DBG_ENABLE_MASK_ASSERT */ + "rx", /*DBG_ENABLE_MASK_DUMP_SKB_RX */ + "tx" /*DBG_ENABLE_MASK_DUMP_SKB_TX */ +}; + +/*#################################### +* Local Variable +* #################################### +*/ + +#define MAX_TESTWLAN_NUM ((PMAC_MAX_NUM*MAX_SUBIF_PER_PORT)+2) /* 16 PMAC and each port support 16 subif */ +static struct net_device *g_test_wlan_dev[MAX_TESTWLAN_NUM] = { 0 }; + +static struct module g_test_wlan_module[MAX_TESTWLAN_NUM]; + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC +static int g_dbg_enable = DBG_ENABLE_MASK_ERR | DBG_ENABLE_MASK_ASSERT; +#endif + +/* Network Operations*/ +static void eth_setup(struct net_device *); +static struct net_device_stats *eth_get_stats(struct net_device *); +static int eth_open(struct net_device *); +static int eth_stop(struct net_device *); +static int eth_hard_start_xmit(struct sk_buff *, struct net_device *); +static int eth_ioctl(struct net_device *, struct ifreq *, int); +static void eth_tx_timeout(struct net_device *); + +/* skb management functions */ +static struct sk_buff *skb_break_away_from_protocol(struct sk_buff *); + +/* RX path 
functions*/ +static INLINE int eth_rx_preprocess(struct sk_buff *, int); +static INLINE void eth_rx_handler(struct sk_buff *, int); +static void do_test_wlan_rx_tasklet(unsigned long); + +/* Datapath directpath functions*/ +static int32_t dp_fp_stop_tx(struct net_device *); +static int32_t dp_fp_restart_tx(struct net_device *); +static int32_t dp_fp_rx(struct net_device *, struct net_device *, + struct dp_subif *subif, struct sk_buff **, int32_t, uint32_t); +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) +static int32_t dp_fp_get_staid(char *, uint32_t *); +#ifdef PUMA_DC_MODE1 +static int32_t dp_fp_get_desc_info(int32_t, struct sk_buff *, + struct dc_dp_fields_value_dw *, uint32_t); +#endif +#endif + +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ +static void proc_read_cputx(struct seq_file *s); +static ssize_t proc_write_cputx(struct file *file, const char *buf, + size_t count, loff_t *data); + +//#endif +/*Debug functions*/ +#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB +static INLINE void dump_skb(struct sk_buff *, u32, char *, int, int); +#else +static INLINE void dump_skb(struct sk_buff *skb, u32 len, char *title, + int ch, int is_tx) +{ +} +#endif + +static const struct net_device_ops test_wlan_netdev_ops = { + .ndo_open = eth_open, + .ndo_stop = eth_stop, + .ndo_start_xmit = eth_hard_start_xmit, + .ndo_do_ioctl = eth_ioctl, + .ndo_tx_timeout = eth_tx_timeout, + .ndo_get_stats = eth_get_stats, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ + +#define MAX_SKB_SIZE 1600 +#define MAX_PKT_SIZE 1500 +#define MIN_PKT_SIZE 60 +#define DEFAULT_PKT_SIZE 128 +#define MAX_PAUSE_CNT 50 +#define DEFAULT_PAUSE_CNT 20 + +enum { + TX_STOP = 0, + TX_START = 1 +}; + +static uint32_t g_tx_count = 1; +static uint32_t g_tx_start = TX_STOP; +static spinlock_t 
g_tx_start_lock; +static uint32_t g_tx_mib; +static struct sk_buff *g_tx_skb; +static uint32_t g_pkt_size = DEFAULT_PKT_SIZE; +static char g_tx_dev[IFNAMSIZ] = { 0 }; + +static struct task_struct *g_tx_ts; +static uint32_t g_tx_pause_cnt = 20; + +//#endif + +/*################################### +* Global Variable +* #################################### +*/ + +/*#################################### +* Declaration +* #################################### +*/ + +/* Wrapper for Different Kernel Version +*/ +static inline struct net_device *ltq_dev_get_by_name(const char *name) +{ + return dev_get_by_name(&init_net, name); +} + +/*find the testwlan index via its device name +return -1: not found +>=0: index + +*/ +static int find_testwlan_index_via_name(char *ifname) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (g_test_wlan_dev[i] + && ltq_strcmpi(ifname, g_test_wlan_dev[i]->name) == 0) { + return i; + } + } + return -1; +} + +static inline unsigned long ltq_get_xmit_fn(struct net_device *dev) +{ + return (unsigned long)dev->netdev_ops->ndo_start_xmit; +} + +/*#################################### +* Local Function +* #################################### +*/ + +static void eth_setup(struct net_device *dev) +{ + int id = -1; + struct test_wlan_priv_data *priv = netdev_priv(dev); + int i; + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) + if (!g_test_wlan_dev[i]) { + id = i; + break; + } + if (id < 0) + return; + ether_setup(dev); /* assign some members */ + dev->netdev_ops = &test_wlan_netdev_ops; + dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT; + dev->dev_addr[0] = 0x00; + dev->dev_addr[1] = 0x20; + dev->dev_addr[2] = 0xda; + dev->dev_addr[3] = 0x86; + dev->dev_addr[4] = 0x23; + dev->dev_addr[5] = 0x70 + id; + priv->id = id; +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id = -1; +#endif + skb_queue_head_init(&priv->rx_queue); + 
tasklet_init(&priv->rx_tasklet, do_test_wlan_rx_tasklet, (unsigned long)id);
+	return;
+}
+
+struct net_device_stats *eth_get_stats(struct net_device *dev)
+{
+	struct test_wlan_priv_data *priv = netdev_priv(dev);
+	return &priv->stats;
+}
+
+int eth_open(struct net_device *dev)
+{
+	dbg("open %s", dev->name);
+	netif_start_queue(dev);
+	return 0;
+}
+
+int eth_stop(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+int eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned long sysflag;
+	struct test_wlan_priv_data *priv = netdev_priv(dev);
+	int rx_queue_len;
+	struct sk_buff *old_skb = skb;
+	skb = skb_break_away_from_protocol(skb);
+	dev_kfree_skb_any(old_skb);
+	if (!skb)
+		return 0;
+	dump_skb(skb, DUMP_SKB_LEN, "eth_hard_start_xmit", 0, 1);
+	ASSERT(skb->prev == NULL
+	       && skb->next == NULL,
+	       "skb on list: prev = 0x%px, next = 0x%px",
+	       skb->prev, skb->next);
+	skb->dev = g_test_wlan_dev[priv->id];
+	spin_lock_irqsave(&priv->rx_queue.lock, sysflag);
+	rx_queue_len = skb_queue_len(&priv->rx_queue);
+	if (rx_queue_len < MAX_RX_QUEUE_LENGTH) {
+		__skb_queue_tail(&priv->rx_queue, skb);
+		if (rx_queue_len == 0)
+			tasklet_schedule(&priv->rx_tasklet);
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len;
+	} else if (skb_queue_len(&priv->rx_queue) >= MAX_RX_QUEUE_LENGTH) {
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_dropped++;
+		netif_stop_queue(g_test_wlan_dev[priv->id]);
+		//} else {
+		//	dev_kfree_skb_any(skb);
+		//	priv->stats.tx_dropped++;
+	}
+	spin_unlock_irqrestore(&priv->rx_queue.lock, sysflag);
+	return 0;
+}
+
+int eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+void eth_tx_timeout(struct net_device *dev)
+{
+	/*TODO:must restart the TX channels */
+	struct test_wlan_priv_data *priv = netdev_priv(dev);
+	priv->stats.tx_errors++;
+	netif_wake_queue(dev);
+	return;
+}
+
+struct sk_buff *skb_break_away_from_protocol(struct sk_buff 
*skb) +{ + struct sk_buff *new_skb; + if (skb_shared(skb)) { + new_skb = skb_clone(skb, GFP_ATOMIC); + if (new_skb == NULL) + return NULL; + } else + new_skb = skb_get(skb); + skb_dst_drop(new_skb); + +#ifdef CONFIG_XFRM + secpath_put(new_skb->sp); + new_skb->sp = NULL; +#endif +#if 0 // TODO +#if defined(CONFIG_NETFILTER) + nf_conntrack_put(new_skb->nfct); + new_skb->nfct = NULL; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put_reasm(new_skb->nfct_reasm); + new_skb->nfct_reasm = NULL; +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + nf_bridge_put(new_skb->nf_bridge); + new_skb->nf_bridge = NULL; +#endif +#endif +#endif + return new_skb; +} + +/* Ethernet frame types according to RFC 2516 */ +#define ETH_PPPOE_DISCOVERY 0x8863 +#define ETH_PPPOE_SESSION 0x8864 + +/* A PPPoE Packet, including Ethernet headers */ +typedef struct pppoe_pkt { +#ifdef PACK_BITFIELDS_REVERSED + unsigned int type:4; /* PPPoE Type (must be 1) */ + unsigned int ver:4; /* PPPoE Version (must be 1) */ +#else + unsigned int ver:4; /* PPPoE Version (must be 1) */ + unsigned int type:4; /* PPPoE Type (must be 1) */ +#endif + unsigned int code:8; /* PPPoE code */ + unsigned int session:16; /* PPPoE session */ + unsigned int length:16; /* Payload length */ + unsigned char payload[ETH_DATA_LEN]; /* A bit of room to spare, here just for space holder only */ +} pppoe_pkt; + +/* PPPoE Tag */ + +static unsigned char ppp_ipv4_proto[2] = { + 0x00, 0x21 +}; + +static unsigned char ppp_ipv6_proto[2] = { + 0x00, 0x57 +}; + +#define VLAN_HEAD_SIZE 4 +#define PPPOE_HEAD_SIZE 8 + +static INLINE int eth_rx_preprocess(struct sk_buff *skb, int id) +{ + unsigned char *p = skb->data; + unsigned char mac[6]; + unsigned char ip[4]; + unsigned char port[2]; + unsigned char ip_templ[4] = {0}; + struct iphdr *iph; + struct icmphdr *icmph; + struct tcphdr *tcph; + uint32_t t, off_t, *opt; + int csum; + pppoe_pkt *pppoe; + int offset = 0; + int vlan_num = 0; + unsigned char 
*p_new_src_mac; + static unsigned char zero_mac[4] = { + 0 + }; + struct in_device __rcu *in_dev; + struct in_ifaddr *if_info; + __u8 *addr; + + read_lock_bh(&dev_base_lock); + in_dev = (struct in_device *)skb->dev->ip_ptr; + if (!in_dev) { + read_unlock_bh(&dev_base_lock); + return 0; + } + if_info = in_dev->ifa_list; + + if (if_info) { + memcpy(ip_templ, (char *)&if_info->ifa_address, 4); + + addr = (char *)&if_info->ifa_local; + + PRINTK("Device %s ifa_local: %u.%u.%u.%u\n", + skb->dev->name, + (__u32) addr[0], (__u32) addr[1], + (__u32) addr[2], (__u32) addr[3]); + + addr = (char *)&if_info->ifa_address; + PRINTK("Device %s ifa_address: %u.%u.%u.%u\n", + skb->dev->name, + (__u32) addr[0], (__u32) addr[1], + (__u32) addr[2], (__u32) addr[3]); + + addr = (char *)&if_info->ifa_mask; + PRINTK("Device %s ifa_mask: %u.%u.%u.%u\n", + skb->dev->name, + (__u32) addr[0], (__u32) addr[1], + (__u32) addr[2], (__u32) addr[3]); + + addr = (char *)&if_info->ifa_broadcast; + PRINTK("Device %s ifa_broadcast: %u.%u.%u.%u\n", + skb->dev->name, (__u32) addr[0], + (__u32) addr[1], (__u32) addr[2], (__u32) addr[3]); + } + read_unlock_bh(&dev_base_lock); + + dump_skb(skb, DUMP_SKB_LEN, "eth_rx_preprocess", id, 0); + if (p[offset + 12] == 0x81 && p[offset + 13] == 0x00) { /*VLAN header */ + offset += VLAN_HEAD_SIZE; + vlan_num++; + dbg("Found VLAN%d\n", vlan_num); + } + if (p[offset + 12] == 0x88 && p[offset + 13] == 0x63) { + /*pppoe Discover(0x9)/Offer(0x7)/request(0x19)/Confirm(0x65) */ + return 0; + } + if (p[offset + 12] == 0x88 && p[offset + 13] == 0x64) { /*ppp */ + pppoe = (pppoe_pkt *) (p + offset + 14); + if ((pppoe->payload[0] == ppp_ipv4_proto[0] + && pppoe->payload[1] == ppp_ipv4_proto[1]) /*PPP IPv4 */ || + ((pppoe->payload[0] == ppp_ipv6_proto[0]) + && (pppoe->payload[1] == + ppp_ipv6_proto[1])) /* PPP IPv6 */) { + offset += PPPOE_HEAD_SIZE; /* skip 8 bytes ppp header */ + dbg("Found PPP IP packet\n"); + } else { + return 0; + } + } + /*swap dst/src mac address */ + 
memcpy(mac, p, 6); + memcpy(p, p + 6, 6); + memcpy(p + 6, mac, 6); + p_new_src_mac = p + 6; + p += offset; /*Note, now p[12~13] points to protocol */ + if (p[12] == 0x08 && p[13] == 0x06) { + /* arp */ + if (p[14] == 0x00 && p[15] == 0x01 && p[16] == 0x08 + && p[17] == 0x00 && p[20] == 0x00 && p[21] == 0x01) { + dbg("arp request:%d.%d.%d.%d\n", p[38], p[39], + p[40], p[41]); + /* Ethernet IP - 10.10.xx.xx */ + if ((p[38] == ip_templ[0] && p[39] == ip_templ[1]) + || memcmp(ip_templ, zero_mac, + sizeof(ip_templ)) == 0) { + /*fill in spoof mac address */ + memcpy(p_new_src_mac, + g_test_wlan_dev[id]->dev_addr, 4); + p[8] = p[38]; + p[9] = p[39]; + p[10] = p[40]; + p[11] = p[41]; + /* arp reply */ + p[21] = 0x02; + /* sender mac */ + memcpy(mac, p + 22, 6); /*save orignal sender mac */ + memcpy(p + 22, p_new_src_mac, 6); /*set new sender mac */ + /* sender IP */ + memcpy(ip, p + 28, 4); /*save original sender ip address */ + memcpy(p + 28, p + 38, 4); /*set new sender ip address */ + /* target mac */ + memcpy(p + 32, mac, 6); + /* target IP */ + memcpy(p + 38, ip, 4); + return 1; + } + } + } else if (((p[12] == 0x08) && (p[13] == 0x00)) /*Normal IPV4 */ || + ((p[12] == ppp_ipv4_proto[0]) && + (p[13] == ppp_ipv4_proto[1])) /*PPP IPV4 */ || + ((p[12] == ppp_ipv6_proto[0] + && p[13] == ppp_ipv6_proto[1])) /*PPP IPV6 */) { + /* IP */ + switch ((int)p[23]) { + case 0x01: + /* ICMP - request */ + if (p[34] == 0x08) { + /* src IP */ + memcpy(ip, p + 26, 4); + memcpy(p + 26, p + 30, 4); + /* dest IP */ + memcpy(p + 30, ip, 4); + /* ICMP reply */ + p[34] = 0x00; + /* IP checksum */ + iph = (struct iphdr *)(p + 14); + iph->check = 0; + iph->check = + ip_fast_csum((unsigned char *)iph, + iph->ihl); + /* ICMP checksum */ + icmph = (struct icmphdr *)(p + 34); + icmph->checksum = 0; + csum = csum_partial((unsigned char *) + icmph, skb->len - 34, 0); + icmph->checksum = csum_fold(csum); +#if 0 + atomic_inc(&skb->users); + skb->dev = ltq_dev_get_by_name("eth1"); + dev_put(skb->dev); + 
dev_queue_xmit(skb); + return 0; +#endif + return 1; + } + break; + case 0x11: + /* UDP */ + case 0x06: + /* TCP */ + /*swap src/dst ip */ + /* src IP */ + memcpy(ip, p + 26, 4); + memcpy(p + 26, p + 30, 4); + /* dest IP */ + memcpy(p + 30, ip, 4); + /*shaoguoh remove below checksum item since we + just swap ip and port only + */ +#if 0 + /* IP checksum */ + iph = (struct iphdr *)(p + 14); + iph->check = 0; + iph->check = + ip_fast_csum((unsigned char *)iph, iph->ihl); + /* no UDP checksum */ + p[40] = p[41] = 0x00; +#endif + /*shaoguoh add below to swap src/dst port 34~35 36~37 */ + /*save src port to port array and copy original dest + port to new src port + */ + memcpy(port, p + 34, 2); + memcpy(p + 34, p + 36, 2); + /* copy original src port to dest port */ + memcpy(p + 36, port, 2); + /*return if UDP */ + if ((int)p[23] == 0x11) + return 1; + iph = (struct iphdr *)(p + 14); + tcph = (struct tcphdr *)(p + 34); + if (tcph->syn == 1) { /*set syn & ack, set seq NO same as the incoming syn TCP packet, set ack seq NO as seq NO + 1 */ + tcph->ack = 1; + tcph->ack_seq = tcph->seq + 1; + } else if (tcph->fin == 1) { /*set fin & ack */ + tcph->ack = 1; + t = tcph->ack_seq; + tcph->ack_seq = tcph->seq + 1; + tcph->seq = t; + } else if (tcph->rst == 1 || (tcph->psh == 0 && tcph->ack == 1)) { /*rest or only ack, we ignore it. */ + return 0; + } else if (tcph->psh == 1) { + t = tcph->ack_seq; + if (iph->tot_len < 40) { /*corrupt packet, ignore it. 
*/ + return -1; + } + tcph->ack_seq = + tcph->seq + iph->tot_len - + (iph->ihl * 4) - (tcph->doff * 4); + tcph->seq = t; + } + /*check timestamp */ + off_t = 14 + 20 + 20; /*mac + ip + tcp */ + + while ((tcph->doff << 2) > (off_t - 34)) { /*tcp option compare tcp header length */ + + switch (p[off_t]) { + case 0x0: /*Option End */ + break; + case 0x1: /* NO Operation */ + off_t += 1; + continue; + case 0x2: /*Max Segment Size */ + off_t += 4; + continue; + case 0x3: /* Window Scale */ + off_t += 3; + continue; + case 0x4: /*TCP Sack permitted */ + off_t += 2; + continue; + case 0x8: /*TCP timestamp */ +#if 1 + opt = (uint32_t *) (p + off_t + 2); + //*(opt + 1) = htons(tcp_time_stamp); + *(opt + 1) = 0; /*htonl(tcp_time_stamp(sock))*/ + t = *opt; + *opt = *(opt + 1); + *(opt + 1) = t; + +#else + for (t = 0; t < 10; t++) + *(p + off_t + t) = 1; +#endif + off_t += 10; /*option max is 64-20 */ + continue; + default: + off_t += 64; + break; + } + } + /* IP checksum */ + iph = (struct iphdr *)(p + 14); + iph->check = 0; + iph->check = + ip_fast_csum((unsigned char *)iph, iph->ihl); + /* TCP checksum */ + tcph->check = 0; + t = iph->tot_len - (iph->ihl * 4); + /*tcph->check = csum_partial((unsigned char *)tcph, iph->tot_len - 20, 0); */ + tcph->check = + csum_tcpudp_magic(iph->saddr, iph->daddr, + t, IPPROTO_TCP, + csum_partial(tcph, t, 0)); + return 1; + } + } + return 0; +} + +/* TODO : anath */ +static INLINE void eth_rx_handler(struct sk_buff *skb, int id) +{ + struct test_wlan_priv_data *priv = netdev_priv(g_test_wlan_dev[id]); + int pktlen; + dump_skb(skb, DUMP_SKB_LEN, "eth_rx_handler", 0, 0); + if (!netif_running(g_test_wlan_dev[id])) { + dev_kfree_skb_any(skb); + priv->stats.rx_dropped++; + return; + } +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + if (priv->dp_subif.port_id > 0) { +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + dc_dp_set_ep_pkt(skb, 
priv->dp_subif.port_id, 0); + dc_dp_set_subifid_pkt(priv->dp_subif.port_id, skb, priv->dp_subif.subif, 0); + if (dp_xmit(skb->dev, &priv->dp_subif, skb, skb->len, 0) == DP_SUCCESS) { + priv->dp_pkts_to_ppe++; + return; + } else { + priv->dp_pkts_to_ppe_fail++; + return; + } + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x") || + !ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + uint32_t sta_id = 0; + dp_subif_t dp_subif = priv->dp_subif; + + dp_fp_get_staid(eth_hdr(skb)->h_dest, &sta_id); + dp_subif.subif |= (sta_id & 0xFF); + /* TODO : Unicast/broadcast */ +#define dc_dp_set_portid_pkt(desc_dw, port_id, flag) { \ + struct dma_tx_desc_1 *desc_1 = (struct dma_tx_desc_1 *) desc_dw; \ + desc_1->field.ep = port_id; \ +} + dc_dp_set_portid_pkt(&skb->DW1, priv->dp_subif.port_id, 0); + dc_dp_set_subifid_pkt(priv->dp_subif.port_id, skb, dp_subif.subif, 0); + + if (dc_dp_xmit + (NULL /*g_test_wlan_dev[id]*/, NULL, &priv->dp_subif, skb, skb->len, + 0) == DP_SUCCESS) { + priv->dp_pkts_to_ppe++; + return; + } else { + priv->dp_pkts_to_ppe_fail++; + return; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } + } +#endif + pktlen = skb->len; + skb->dev = g_test_wlan_dev[id]; + skb->protocol = eth_type_trans(skb, g_test_wlan_dev[id]); + if (netif_rx(skb) == NET_RX_DROP) + priv->stats.rx_dropped++; + else { + priv->stats.rx_packets++; + priv->stats.rx_bytes += pktlen; + } +} + +static void do_test_wlan_rx_tasklet(unsigned long id) +{ + struct test_wlan_priv_data *priv = NULL; + struct sk_buff *skb = NULL; + int i = 0; + + if (!g_test_wlan_dev[id]) + return; + priv = netdev_priv(g_test_wlan_dev[id]); + if (!priv) + return; + + while (1) { + if (i >= TASKLET_HANDLE_BUDGET) { + tasklet_schedule(&priv->rx_tasklet); + break; + } + skb = skb_dequeue(&priv->rx_queue); + if (!skb) + 
break; + netif_wake_queue(g_test_wlan_dev[id]); + if (eth_rx_preprocess(skb, (int)id)) + eth_rx_handler(skb, (int)id); + else { + priv->rx_preprocess_drop++; + dev_kfree_skb_any(skb); + } + i++; + } +} + +static int32_t dp_fp_stop_tx(struct net_device *netif) +{ + return 0; +} + +static int32_t dp_fp_restart_tx(struct net_device *netif) +{ + return 0; +} + +static int32_t dp_fp_rx(struct net_device *rxif, struct net_device *txif, struct dp_subif *subif, + struct sk_buff **pskb, int32_t len, uint32_t flags) +{ + struct test_wlan_priv_data *priv; + int pktlen; + struct sk_buff *skb; + + if (!pskb) + return -1; + + skb = *pskb; + if (rxif) { + dump_skb(skb, DUMP_SKB_LEN, "callback: rx", 0, 0); + if (netif_running(rxif)) { + priv = netdev_priv(rxif); + pktlen = skb->len; + skb->dev = rxif; + skb->protocol = eth_type_trans(skb, rxif); + if (netif_rx(skb) == NET_RX_DROP) + priv->stats.rx_dropped++; + else { + priv->stats.rx_packets++; + priv->stats.rx_bytes += pktlen; + } + priv->dp_pkts_from_ppe++; + return 0; + } + } else if (txif) { + priv = netdev_priv(txif); + skb->dev = txif; + dev_queue_xmit(skb); + priv->dp_pkts_tx++; + return 0; + } + dev_kfree_skb_any(skb); + *pskb = NULL; + return 0; +} + +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) +static int32_t dp_fp_get_staid(char *mac_addr, uint32_t *sta_id) +{ + /* TODO : convert staid */ + + *sta_id = 0; + + return 0; +} + + +#ifdef PUMA_DC_MODE1 +static int32_t dp_fp_get_desc_info(int32_t port_id, struct sk_buff *skb, + struct dc_dp_fields_value_dw *desc_fields, uint32_t flags) +{ + struct test_wlan_priv_data *priv = NULL; + + if (!skb || (skb && !skb->dev) || !desc_fields || (desc_fields && !desc_fields->dw)) + return -1; + + priv = netdev_priv(skb->dev); + + desc_fields->num_dw = 2; + desc_fields->dw[0].dw = 0; + desc_fields->dw[0].desc_val = (uint32_t)priv->dp_subif.subif; + 
desc_fields->dw[0].desc_mask = 0xFFFF; + + desc_fields->dw[1].dw = 3; + desc_fields->dw[1].desc_val = 0; + desc_fields->dw[1].desc_mask = (0xF << 24); + + return 0; +} +#endif +#endif + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC +static void proc_read_dbg(struct seq_file *s) +{ + int i; + + SEQ_PRINTF(s, "g_dbg_enable=0x%08x\n. \tEnabled Flags:", + g_dbg_enable); + for (i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++) + if ((g_dbg_enable & (1 << i))) + SEQ_PRINTF(s, "%s ", dbg_enable_mask_str[i]); + SEQ_PRINTF(s, "\n"); +} + +static ssize_t proc_write_dbg(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + char str[100]; + int len, rlen; + int f_enable = 0; + int i, j; + int num; + char *param_list[30]; + + len = count < sizeof(str) ? count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + str[rlen] = 0; + + num = ltq_split_buffer(str, param_list, ARRAY_SIZE(param_list)); + if (num < 1) + goto help; + + if (ltq_strcmpi(param_list[0], "enable") == 0) + f_enable = 1; + else if (ltq_strcmpi(param_list[0], "disable") == 0) + f_enable = -1; + else + goto help; + + if (!param_list[1]) + set_ltq_dbg_flag(g_dbg_enable, f_enable, -1); + else { + for (i = 1; i < num; i++) { + for (j = 0; j < ARRAY_SIZE(dbg_enable_mask_str); j++) { + if (ltq_strcmpi + (param_list[i], + dbg_enable_mask_str[j]) == 0) { + set_ltq_dbg_flag(g_dbg_enable, + f_enable, (1 << j)); + + break; + } + } + } + } + + return count; + help: + PRINTK("echo <enable/disable> ["); + for (i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++) { + if (i == 0) + PRINTK("%s", dbg_enable_mask_str[i]); + else + PRINTK("/%s", dbg_enable_mask_str[i]); + } + PRINTK("] > /proc/net/testwlan/dbg\n"); + return count; +} +#endif + +static int proc_read_dev(struct seq_file *s, int pos) +{ + if (g_test_wlan_dev[pos]) + SEQ_PRINTF(s, " %s\n", g_test_wlan_dev[pos]->name); + pos++; + + if (pos >= MAX_TESTWLAN_NUM) + pos = -1; + return pos; +} + +static int32_t get_port_info_via_dev_port(struct 
test_wlan_priv_data *priv_dev, int dev_port) +{ + int i; + struct test_wlan_priv_data *priv = NULL; + + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (!g_test_wlan_dev[i]) /* Skip NULL devices */ + continue; + priv = netdev_priv(g_test_wlan_dev[i]); + if (priv && priv->f_dp != TESTWLAN_F_FREE && (priv->dev_port == dev_port)) { + if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + /* Determine the registered intf and return the func ptr and etc */ + priv_dev->dev_port = dev_port; + priv_dev->owner = priv->owner; +#if (defined(CONFIG_LTQ_PPA_GRX500) && CONFIG_LTQ_PPA_GRX500) && \ + (defined(CONFIG_LTQ_PPA_API_DIRECTPATH) && CONFIG_LTQ_PPA_API_DIRECTPATH) + //memcpy(&priv_dev->dp_cb, &priv->dp_cb, sizeof(dp_cb_t)); +#endif +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv_dev->dp_subif.port_id = priv->dp_subif.port_id; +#endif + //priv_dev->flag_index = priv->flag_index; + strncpy(priv_dev->flag_str, priv->flag_str, (sizeof(priv_dev->flag_str) - 1)); + priv_dev->flag_str[(sizeof(priv_dev->flag_str) - 1)] = '\0'; + } + else if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x") || + !ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + /* Determine the registered intf and return the func ptr and etc */ + priv_dev->dev_port = dev_port; + priv_dev->owner = priv->owner; +#if CONFIG_WAVE600_USING_DP_API + memcpy(&priv_dev->dp_cb, &priv->dp_cb, sizeof(struct dp_cb)); +#endif +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + memcpy(&priv_dev->wave500_cb, &priv->wave500_cb, sizeof(struct dc_dp_cb)); +#endif +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv_dev->dp_subif.port_id = priv->dp_subif.port_id; +#endif + //priv_dev->flag_index = priv->flag_index; + strncpy(priv_dev->flag_str, priv->flag_str, 
(sizeof(priv_dev->flag_str) - 1)); + priv_dev->flag_str[(sizeof(priv_dev->flag_str) - 1)] = '\0'; + } + return 0; + } + } + return -1; +} + +/* Get number of subif */ +static int get_subif_cnt_via_dev_port(int32_t dev_port) +{ + struct test_wlan_priv_data *priv; + int num_subif = 0, i; + + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (!g_test_wlan_dev[i]) + continue; + priv = netdev_priv(g_test_wlan_dev[i]); + if (priv && (priv->f_dp == TESTWLAN_F_REGISTER_SUBIF) && (dev_port == priv->dev_port)) + num_subif ++; + } + return num_subif; +} + +static int register_dev(int i) +{ + struct test_wlan_priv_data *priv; + int32_t dp_port_id; + int32_t port_id; + uint32_t alloc_flags = 0; + + if (!g_test_wlan_dev[i]) + return -1; + + priv = netdev_priv(g_test_wlan_dev[i]); + + /* TODO : Check for unique priv->dev_port */ + +#if 0 + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { + /* TODO : Check for maximum 3 devices (using priv->dev_port) */ + port_id = 0; + dp_port_id = dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + port_id, NULL, NULL, DP_F_FAST_WLAN); + if (dp_port_id <= 0) { + PRINTK + ("failed in register fastpath for %s\n", + g_test_wlan_dev[i]->name); + return -1; + } + PRINTK("dp_alloc_port_ext get port %d for %s\n", + dp_port_id, g_test_wlan_dev[i]->name); + priv->dp_cb.stop_fn = (dp_stop_tx_fn_t) dp_fp_stop_tx; + priv->dp_cb.restart_fn = (dp_restart_tx_fn_t) dp_fp_restart_tx; + priv->dp_cb.rx_fn = (dp_rx_fn_t) dp_fp_rx; + priv->dp_subif.port_id = dp_port_id; + if (dp_register_dev(priv->owner, dp_port_id, &priv->dp_cb, 0) != DP_SUCCESS) { + PRINTK + ("dp_register_dev failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, dp_port_id); + dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + dp_port_id, NULL, NULL, DP_F_DEREGISTER); + return -1; + } + } else +#endif +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + // 1st radio + if (g_resvd_port < 0) { + PRINTK + 
("Port reservation failed g_resvd_port=%d, not allowed to reg dev=%s as FAST_WAVE6x?\n", + g_resvd_port, g_test_wlan_dev[i]->name); + return -1; + } + if (g_wave6x_radio_num > 2) { + PRINTK + ("Port reservation failed g_wave6x_radio_num=%d, not allowed to reg dev=%s as FAST_WAVE6x?\n", + g_wave6x_radio_num, g_test_wlan_dev[i]->name); + return -1; + } + + // 1st radio + if (g_wave6x_radio_num == 0) { + port_id = g_resvd_port; + /* TODO : Check for maximum 3 devices (using priv->dev_port) */ + dp_port_id = dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + port_id, NULL, NULL, (DP_F_FAST_WLAN | DP_F_SHARED_RES)); + + // 2nd radio + } else { + struct dp_port_data dp_p_data = {0}; + dp_p_data.flag_ops = DP_F_DATA_ALLOC; + dp_p_data.start_port_no = g_resvd_port; + port_id = g_resvd_port + 1; + dp_port_id = dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + port_id, NULL, &dp_p_data, (DP_F_FAST_WLAN | DP_F_SHARED_RES)); + } + if (dp_port_id <= 0) { + PRINTK + ("failed in register fastpath for %s\n", + g_test_wlan_dev[i]->name); + return -1; + } + PRINTK("dp_alloc_port_ext get port %d for %s\n", + dp_port_id, g_test_wlan_dev[i]->name); + priv->dp_cb.stop_fn = (dp_stop_tx_fn_t) dp_fp_stop_tx; + priv->dp_cb.restart_fn = (dp_restart_tx_fn_t) dp_fp_restart_tx; + priv->dp_cb.rx_fn = (dp_rx_fn_t) dp_fp_rx; + priv->dp_subif.port_id = dp_port_id; + if (dp_register_dev(priv->owner, dp_port_id, &priv->dp_cb, 0) != DP_SUCCESS) { + PRINTK + ("dp_register_dev failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, dp_port_id); + dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + dp_port_id, NULL, NULL, DP_F_DEREGISTER); + return -1; + } + g_wave6x_radio_num ++; + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { // How to know it is allready allocated +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && 
CONFIG_DIRECTCONNECT_DP_API_MODULE)) + //dma_addr_t dccntr_phys_base; + + /* TODO : Check for maximum 3 devices (using priv->dev_port) */ + dp_port_id = dc_dp_alloc_port(priv->owner, priv->dev_port, g_test_wlan_dev[i], + 0, DC_DP_F_FASTPATH); + if (dp_port_id <= 0) { + PRINTK + ("failed in register fastpath for %s\n", + g_test_wlan_dev[i]->name); + return -1; + } + PRINTK("dc_dp_alloc_port get port %d for %s\n", + dp_port_id, g_test_wlan_dev[i]->name); + priv->wave500_cb.stop_fn = (dp_stop_tx_fn_t) dp_fp_stop_tx; + priv->wave500_cb.restart_fn = (dp_restart_tx_fn_t) dp_fp_restart_tx; + priv->wave500_cb.rx_fn = (dc_dp_rx_fn_t) dp_fp_rx; + priv->wave500_cb.get_subif_fn = (dc_dp_get_netif_subinterface_fn_t) dp_fp_get_staid; +#ifdef PUMA_DC_MODE1 + priv->wave500_cb.get_desc_info_fn = (dc_dp_get_dev_specific_desc_info_t) dp_fp_get_desc_info; +#endif + priv->dp_subif.port_id = dp_port_id; +#ifdef PUMA_DC_MODE1 + priv->dccntr.soc_write_dccntr_mode = (DC_DP_F_DCCNTR_MODE_INCREMENTAL | DC_DP_F_DCCNTR_MODE_BIG_ENDIAN); +#else + priv->dccntr.soc_write_dccntr_mode = (DC_DP_F_DCCNTR_MODE_INCREMENTAL | DC_DP_F_DCCNTR_MODE_LITTLE_ENDIAN); +#endif + //priv->dccntr.dev2soc_ret_enq_base = (void *)dma_alloc_coherent(NULL, 8, &dccntr_phys_base, GFP_DMA); + //priv->dccntr.dev2soc_ret_enq_phys_base = (void *)dccntr_phys_base; +#ifdef PUMA_DC_MODE1 + priv->dccntr.dev2soc_ret_enq_base = (void *)kmalloc(16, GFP_DMA); +#else + priv->dccntr.dev2soc_ret_enq_base = (void *)kmalloc(8, GFP_DMA); +#endif + priv->dccntr.dev2soc_ret_enq_phys_base = (void *)virt_to_phys(priv->dccntr.dev2soc_ret_enq_base); + priv->dccntr.dev2soc_ret_enq_dccntr_len = 4; + priv->dccntr.soc2dev_enq_base = (void *)(priv->dccntr.dev2soc_ret_enq_base + 4); + priv->dccntr.soc2dev_enq_phys_base = (void *)(priv->dccntr.dev2soc_ret_enq_phys_base + 4); + priv->dccntr.soc2dev_enq_dccntr_len = 4; +#ifdef PUMA_DC_MODE1 + priv->dccntr.dev2soc_deq_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_base + 8); + 
priv->dccntr.dev2soc_deq_phys_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_phys_base + 8);
+		priv->dccntr.dev2soc_deq_dccntr_len = 4;
+		priv->dccntr.soc2dev_ret_deq_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_base + 12);
+		priv->dccntr.soc2dev_ret_deq_phys_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_phys_base + 12);
+		priv->dccntr.soc2dev_ret_deq_dccntr_len = 4;
+#endif
+#ifdef CONFIG_X86_INTEL_LGM
+		priv->resources.num_bufs_req = 64;
+		priv->resources.rings.soc2dev.size = 64;
+		priv->resources.rings.soc2dev_ret.size = 64;
+		priv->resources.rings.dev2soc.size = 64;
+		priv->resources.rings.dev2soc_ret.size = 64;
+#else
+		priv->resources.num_bufs_req = 32;
+		priv->resources.rings.soc2dev.size = 32;
+		priv->resources.rings.soc2dev_ret.size = 32;
+		priv->resources.rings.dev2soc.size = 32;
+		priv->resources.rings.dev2soc_ret.size = 32;
+#endif
+		priv->resources.rings.soc2dev.desc_dwsz = 4;
+		priv->resources.rings.soc2dev.flags = DC_DP_F_PKTDESC_ENQ;
+		priv->resources.rings.soc2dev_ret.desc_dwsz = 4;
+		priv->resources.rings.soc2dev_ret.flags = DC_DP_F_PKTDESC_RET;
+		priv->resources.rings.dev2soc.desc_dwsz = 4;
+		priv->resources.rings.dev2soc.flags = DC_DP_F_PKTDESC_ENQ;
+		priv->resources.rings.dev2soc_ret.desc_dwsz = 4;
+		priv->resources.rings.dev2soc_ret.flags = DC_DP_F_RET_RING_SAME_AS_ENQ;
+		priv->resources.num_dccntr = 1;
+		priv->resources.dccntr = &priv->dccntr;
+		if (dc_dp_register_dev(priv->owner, dp_port_id, g_test_wlan_dev[i],
+			&priv->wave500_cb, &priv->resources, &priv->devspec, 0) != DC_DP_SUCCESS) {
+			PRINTK
+			    ("dc_dp_register_dev failed for %s\n and port_id %d",
+			     g_test_wlan_dev[i]->name, dp_port_id);
+			//dma_free_coherent(NULL, 8, priv->dccntr.dev2soc_ret_enq_base, (dma_addr_t)priv->dccntr.dev2soc_ret_enq_phys_base);
+			kfree(priv->dccntr.dev2soc_ret_enq_base);
+			dc_dp_alloc_port(priv->owner, priv->dev_port, g_test_wlan_dev[i],
+				dp_port_id, DC_DP_F_DEREGISTER);
+			return -1;
+		}
+#endif
+	}
+	else if 
(!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { // How to know it is allready allocated +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + //dma_addr_t dccntr_phys_base; + if (g_wave6x_radio_num > 2) { + PRINTK + ("Port reservation failed g_wave6x_radio_num=%d, not allowed to reg dev=%s as FAST_WAVE6x?\n", + g_wave6x_radio_num, g_test_wlan_dev[i]->name); + return -1; + } + + // 1st radio + if (g_wave6x_radio_num == 0) { + port_id = 0; + alloc_flags |= DC_DP_F_MULTI_PORT; + + } else { + port_id = g_resvd_port; + alloc_flags |= DC_DP_F_MULTI_PORT | DC_DP_F_SHARED_RES; + } + + /* TODO : Check for maximum 3 devices (using priv->dev_port) */ + dp_port_id = dc_dp_alloc_port(THIS_MODULE, priv->dev_port, g_test_wlan_dev[i], + port_id, (alloc_flags | DC_DP_F_FASTPATH)); + if (dp_port_id <= 0) { + PRINTK + ("failed in register fastpath for %s\n", + g_test_wlan_dev[i]->name); + return -1; + } + if (g_wave6x_radio_num == 0) + g_resvd_port = dp_port_id; + g_wave6x_radio_num++; + PRINTK("dc_dp_alloc_port get port %d for %s\n", + dp_port_id, g_test_wlan_dev[i]->name); + priv->wave500_cb.stop_fn = (dp_stop_tx_fn_t) dp_fp_stop_tx; + priv->wave500_cb.restart_fn = (dp_restart_tx_fn_t) dp_fp_restart_tx; + priv->wave500_cb.rx_fn = (dc_dp_rx_fn_t) dp_fp_rx; + priv->wave500_cb.get_subif_fn = (dc_dp_get_netif_subinterface_fn_t) dp_fp_get_staid; +#ifdef PUMA_DC_MODE1 + priv->wave500_cb.get_desc_info_fn = (dc_dp_get_dev_specific_desc_info_t) dp_fp_get_desc_info; +#endif + priv->dp_subif.port_id = dp_port_id; +#ifdef PUMA_DC_MODE1 + priv->dccntr.soc_write_dccntr_mode = (DC_DP_F_DCCNTR_MODE_INCREMENTAL | DC_DP_F_DCCNTR_MODE_BIG_ENDIAN); +#else + priv->dccntr.soc_write_dccntr_mode = (DC_DP_F_DCCNTR_MODE_INCREMENTAL | DC_DP_F_DCCNTR_MODE_LITTLE_ENDIAN); +#endif + //priv->dccntr.dev2soc_ret_enq_base = (void *)dma_alloc_coherent(NULL, 8, &dccntr_phys_base, GFP_DMA); + 
//priv->dccntr.dev2soc_ret_enq_phys_base = (void *)dccntr_phys_base; +#ifdef PUMA_DC_MODE1 + priv->dccntr.dev2soc_ret_enq_base = (void *)kmalloc(16, GFP_DMA); +#else + priv->dccntr.dev2soc_ret_enq_base = (void *)kmalloc(8, GFP_DMA); +#endif + priv->dccntr.dev2soc_ret_enq_phys_base = (void *)virt_to_phys(priv->dccntr.dev2soc_ret_enq_base); + priv->dccntr.dev2soc_ret_enq_dccntr_len = 4; + priv->dccntr.soc2dev_enq_base = (void *)(priv->dccntr.dev2soc_ret_enq_base + 4); + priv->dccntr.soc2dev_enq_phys_base = (void *)(priv->dccntr.dev2soc_ret_enq_phys_base + 4); + priv->dccntr.soc2dev_enq_dccntr_len = 4; +#ifdef PUMA_DC_MODE1 + priv->dccntr.dev2soc_deq_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_base + 8); + priv->dccntr.dev2soc_deq_phys_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_phys_base + 8); + priv->dccntr.dev2soc_deq_dccntr_len = 4; + priv->dccntr.soc2dev_ret_deq_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_base + 12); + priv->dccntr.soc2dev_ret_deq_phys_base = (void *)((uint32_t)priv->dccntr.dev2soc_ret_enq_phys_base + 12); + priv->dccntr.soc2dev_ret_deq_dccntr_len = 4; +#endif + priv->resources.num_bufs_req = 64; + priv->resources.rings.soc2dev.size = 64; + priv->resources.rings.soc2dev.desc_dwsz = 4; + priv->resources.rings.soc2dev.flags = DC_DP_F_PKTDESC_ENQ; + priv->resources.rings.soc2dev_ret.size = 64; + priv->resources.rings.soc2dev_ret.desc_dwsz = 4; + priv->resources.rings.soc2dev_ret.flags = DC_DP_F_PKTDESC_RET; + priv->resources.rings.dev2soc.size = 64; + priv->resources.rings.dev2soc.desc_dwsz = 4; + priv->resources.rings.dev2soc.flags = DC_DP_F_PKTDESC_ENQ; + priv->resources.rings.dev2soc_ret.size = 64; + priv->resources.rings.dev2soc_ret.desc_dwsz = 4; + priv->resources.rings.dev2soc_ret.flags = DC_DP_F_RET_RING_SAME_AS_ENQ; + priv->resources.num_dccntr = 1; + priv->resources.dccntr = &priv->dccntr; + if (dc_dp_register_dev(THIS_MODULE, dp_port_id, g_test_wlan_dev[i], + &priv->wave500_cb, &priv->resources, &priv->devspec, 
0) != DC_DP_SUCCESS) { + PRINTK + ("dc_dp_register_dev failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, dp_port_id); + //dma_free_coherent(NULL, 8, priv->dccntr.dev2soc_ret_enq_base, (dma_addr_t)priv->dccntr.dev2soc_ret_enq_phys_base); + kfree(priv->dccntr.dev2soc_ret_enq_base); + dc_dp_alloc_port(THIS_MODULE, priv->dev_port, g_test_wlan_dev[i], + dp_port_id, DC_DP_F_DEREGISTER); + if (g_wave6x_radio_num == 1) + g_resvd_port = -1; + g_wave6x_radio_num--; + return -1; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } + + priv->f_dp = TESTWLAN_F_REGISTER_DEV; + + return 0; +} + +static int unregister_dev(int i) +{ + struct test_wlan_priv_data *priv; + int k; + int32_t dev_port = -1; /*dev instance */ + + if (!g_test_wlan_dev[i]) + return -1; + + priv = netdev_priv(g_test_wlan_dev[i]); + dev_port = priv->dev_port; + if (priv->f_dp == TESTWLAN_F_REGISTER_SUBIF) { +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + dp_register_subif(priv->owner, g_test_wlan_dev[i], g_test_wlan_dev[i]->name, + &priv->dp_subif, + DP_F_DEREGISTER); + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + dc_dp_register_subif(priv->owner, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + DC_DP_F_DEREGISTER); +#endif + } else if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + dc_dp_register_subif(THIS_MODULE, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + DC_DP_F_DEREGISTER); +#endif + } else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } + priv->f_dp = TESTWLAN_F_REGISTER_DEV; + } + if(!get_subif_cnt_via_dev_port(priv->dev_port)) { + if (priv->f_dp == 
TESTWLAN_F_REGISTER_DEV) { +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + if (dp_register_dev(priv->owner, priv->dp_subif.port_id, &priv->dp_cb, DP_F_DEREGISTER) != DP_SUCCESS) + PRINTK("dp_register_dev failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + if (dp_alloc_port_ext(0, priv->owner, g_test_wlan_dev[i], priv->dev_port, + priv->dp_subif.port_id, NULL, NULL, DP_F_DEREGISTER) != DP_SUCCESS) { + PRINTK("dp_alloc_port_ext failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + } + g_wave6x_radio_num--; + if (g_wave6x_radio_num < 0) + g_wave6x_radio_num = 0; + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (dc_dp_register_dev + (priv->owner, priv->dp_subif.port_id, g_test_wlan_dev[i], &priv->wave500_cb, + &priv->resources, &priv->devspec, DC_DP_F_DEREGISTER) != DP_SUCCESS) + PRINTK("dc_dp_register_dev failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + //dma_free_coherent(NULL, 8, priv->dccntr.dev2soc_ret_enq_base, (dma_addr_t)priv->dccntr.dev2soc_ret_enq_phys_base); + kfree(priv->dccntr.dev2soc_ret_enq_base); + if (dc_dp_alloc_port(priv->owner, priv->dev_port, g_test_wlan_dev[i], + priv->dp_subif.port_id, DC_DP_F_DEREGISTER) != DP_SUCCESS) { + PRINTK("dc_dp_alloc_port failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + } +#endif + } else if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if 
(dc_dp_register_dev + (THIS_MODULE, priv->dp_subif.port_id, g_test_wlan_dev[i], &priv->wave500_cb, + &priv->resources, &priv->devspec, DC_DP_F_DEREGISTER) != DP_SUCCESS) + PRINTK("dc_dp_register_dev failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + //dma_free_coherent(NULL, 8, priv->dccntr.dev2soc_ret_enq_base, (dma_addr_t)priv->dccntr.dev2soc_ret_enq_phys_base); + kfree(priv->dccntr.dev2soc_ret_enq_base); + if (dc_dp_alloc_port(THIS_MODULE, priv->dev_port, g_test_wlan_dev[i], + priv->dp_subif.port_id, DC_DP_F_DEREGISTER) != DP_SUCCESS) { + PRINTK("dc_dp_alloc_port failed \ + for %s with port_id/subif %d/%d\n", g_test_wlan_dev[i]->name, priv->dp_subif.port_id, priv->dp_subif.subif); + } +#endif + } else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id = -1; +#endif + } + for (k = 0; k < ARRAY_SIZE(g_test_wlan_dev); k++) { + if (!g_test_wlan_dev[k]) + continue; + priv = netdev_priv(g_test_wlan_dev[k]); + if (priv && (dev_port == priv->dev_port)) { + priv->dev_port = -1; +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + memset(&priv->dp_cb, 0, sizeof(struct dp_cb)); + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x") || + !ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + memset(&priv->wave500_cb, 0, sizeof(struct dc_dp_cb)); +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id = -1; +#endif + //priv->flag_index = -1; + priv->f_dp = TESTWLAN_F_FREE; + } + } + } + return 0; 
+} + +static int delete_testwlan_dev(int i) +{ + struct test_wlan_priv_data *priv; + + if (!g_test_wlan_dev[i]) + return -1; + + priv = netdev_priv(g_test_wlan_dev[i]); + + unregister_dev(i); + + /*unregister testwlan dev itself */ + unregister_netdev(g_test_wlan_dev[i]); + free_netdev(g_test_wlan_dev[i]); + g_test_wlan_dev[i] = NULL; + return 0; +} + +static int create_testwlan_dev(int i) +{ + char ifname[32]; + struct test_wlan_priv_data *priv; + + sprintf(ifname, "testwlan%d", i); + g_test_wlan_dev[i] = + alloc_netdev(sizeof(struct test_wlan_priv_data), ifname, + NET_NAME_ENUM, eth_setup); + if (g_test_wlan_dev[i] == NULL) { + PRINTK("alloc_netdev fail\n"); + return -1; + } + if (register_netdev(g_test_wlan_dev[i])) { + free_netdev(g_test_wlan_dev[i]); + g_test_wlan_dev[i] = NULL; + PRINTK("register device \"%s\" fail ??\n", ifname); + } else { + PRINTK("add \"%s\" successfully\n", ifname); + priv = netdev_priv(g_test_wlan_dev[i]); + priv->f_dp = TESTWLAN_F_FREE; + priv->id = i; /* Set the priv ID */ + priv->rx_tasklet.data = i; /* Set the tasklet ID */ + } + return 0; +} + +static ssize_t proc_write_dev(struct file *file, const char *buf, size_t count, + loff_t *ppos) +{ + char str[100]; + int len, rlen; + char *param_list[10]; + int num; + int i; + + len = count < sizeof(str) ? 
count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + str[rlen] = 0; + + num = ltq_split_buffer(str, param_list, ARRAY_SIZE(param_list)); + if (num < 1) + goto help; + + if (ltq_strcmpi(param_list[0], "add") == 0) { + if (param_list[1]) { + i = ltq_atoi(param_list[1]); + if ((i < 0) || (i >= ARRAY_SIZE(g_test_wlan_dev))) { + PRINTK("Wrong index value: %d\n", i); + return count; + } + if (g_test_wlan_dev[i]) { + PRINTK + ("interface index %d already exist and no need to create it\n", + i); + return count; + } + + /*create one dev */ + create_testwlan_dev(i); + } else { +#if 0 + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + /*create all dev if not created yet */ + if (g_test_wlan_dev[i]) + continue; + create_testwlan_dev(i); + } +#endif + PRINTK("Wrong arguments: %s\n", param_list[0]); + goto help; + } + } else if (ltq_strcmpi(param_list[0], "del") == 0) { + if (param_list[1]) { + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (g_test_wlan_dev[i] && + (ltq_strcmpi + (g_test_wlan_dev[i]->name, + param_list[1]) == 0)) { + delete_testwlan_dev(i); + break; + } + } + } else { + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) + delete_testwlan_dev(i); + } + } else { + PRINTK("Wrong command: %s\n", param_list[0]); + goto help; + } + return count; + + help: + PRINTK("echo add <index> /proc/net/testwlan/dev\n"); + PRINTK(" example: echo add 0 > /proc/net/testwlan/dev\n"); + PRINTK(" Note, the maximum index is %d\n", + ARRAY_SIZE(g_test_wlan_dev)); + PRINTK("echo <del> [device name] > /proc/net/testwlan/dev\n"); + PRINTK(" example: echo del testwlan1 > /proc/net/testwlan/dev\n"); + PRINTK(" example: echo del > /proc/net/testwlan/dev\n"); + return count; +} + +static int proc_read_mib(struct seq_file *s, int pos) +{ + struct test_wlan_priv_data *priv; + + if (g_test_wlan_dev[pos]) { + priv = netdev_priv(g_test_wlan_dev[pos]); + SEQ_PRINTF(s, " %s:\n", g_test_wlan_dev[pos]->name); + SEQ_PRINTF(s, " rx_packets: %lu\n", + 
priv->stats.rx_packets); + SEQ_PRINTF(s, " rx_bytes: %lu\n", priv->stats.rx_bytes); + SEQ_PRINTF(s, " rx_errors: %lu\n", priv->stats.rx_errors); + SEQ_PRINTF(s, " rx_dropped: %lu\n", + priv->stats.rx_dropped); + SEQ_PRINTF(s, " tx_packets: %lu\n", + priv->stats.tx_packets); + SEQ_PRINTF(s, " tx_bytes: %lu\n", priv->stats.tx_bytes); + SEQ_PRINTF(s, " tx_errors: %lu\n", priv->stats.tx_errors); + SEQ_PRINTF(s, " tx_dropped: %lu\n", + priv->stats.tx_dropped); + SEQ_PRINTF(s, " rx_preprocess_drop: %u\n", + priv->rx_preprocess_drop); + SEQ_PRINTF(s, " dp_pkts_to_ppe: %u\n", + priv->dp_pkts_to_ppe); + SEQ_PRINTF(s, " dp_pkts_to_ppe_fail: %u\n", + priv->dp_pkts_to_ppe_fail); + SEQ_PRINTF(s, " dp_pkts_from_ppe: %u\n", + priv->dp_pkts_from_ppe); + SEQ_PRINTF(s, " dp_pkts_tx: %u\n", + priv->dp_pkts_tx); + } + + pos++; + if (pos >= MAX_TESTWLAN_NUM) + pos = -1; + + return pos; +} + +static void clear_mib(int i) +{ + struct test_wlan_priv_data *priv; + + priv = netdev_priv(g_test_wlan_dev[i]); + if (!priv) + return; + memset(&priv->stats, 0, sizeof(priv->stats)); + priv->rx_preprocess_drop = 0; + priv->dp_pkts_to_ppe = 0; + priv->dp_pkts_to_ppe_fail = 0; + priv->dp_pkts_from_ppe = 0; + priv->dp_pkts_tx = 0; +} + +static ssize_t proc_write_mib(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + char str[100]; + int len, rlen; + int i; + int num; + char *param_list[4]; + + len = count < sizeof(str) ? 
count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + str[rlen] = 0; + num = ltq_split_buffer(str, param_list, ARRAY_SIZE(param_list)); + if (num < 2) + goto help; + + if (ltq_strcmpi(param_list[0], "clear") != 0) { + PRINTK("Wrong command:%s\n", param_list[0]); + goto help; + } + + if ((ltq_strcmpi(param_list[1], "all") != 0)) { + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (g_test_wlan_dev[i] && + (ltq_strcmpi(g_test_wlan_dev[i]->name, + param_list[1]) == 0)) { + clear_mib(i); + break; + } + } + if (i >= ARRAY_SIZE(g_test_wlan_dev)) + PRINTK("not found device %s\n", param_list[1]); + } else { + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) + clear_mib(i); + } + return count; + help: + PRINTK("echo <clear> [all/device name] > /proc/net/testwlan/mib\n"); + return count; +} + +static int proc_read_datapath(struct seq_file *s, int pos) +{ + struct test_wlan_priv_data *priv; + + if (g_test_wlan_dev[pos]) { + priv = netdev_priv(g_test_wlan_dev[pos]); +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + if (priv->dp_subif.port_id >= 0) { + SEQ_PRINTF(s, + "%s - directpath on (ifid %d subif %d)\n", + g_test_wlan_dev[pos]->name, + priv->dp_subif.port_id, + priv->dp_subif.subif); + } +#else + if (priv->dev_port >= 0) { + SEQ_PRINTF(s, + "%s - directpath on (ifid %d subif %d)\n", + g_test_wlan_dev[pos]->name, + priv->dev_port, 0); + } +#endif + else + SEQ_PRINTF(s, "%s - directpath off\n", + g_test_wlan_dev[pos]->name); + } + + pos++; + if (pos >= MAX_TESTWLAN_NUM) + pos = -1; + + return pos; +} + +#if 0 /* TODO : anath */ +void unregister_from_dp(struct net_device *dev) +{ + struct test_wlan_priv_data *priv; + int32_t dp_port_id = -1; + int32_t dev_port = -1; + int i; + + priv = netdev_priv(dev); +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + dp_port_id = priv->dp_subif.port_id; +#else + dev_port 
= priv->dev_port; +#endif + if (dp_port_id <= 0) { + PRINTK + ("Cannot undregister %s since it is not reigstered yet\n", + dev->name); + return; + } + + /*unregister all subif with same port_id first */ + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (g_test_wlan_dev[i]) { + struct test_wlan_priv_data *priv = + netdev_priv(g_test_wlan_dev[i]); +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + if (priv->dp_subif.port_id != dp_port_id) +#else + if (priv->dev_port != dev_port) +#endif + continue; + if (priv->f_dp != TESTWLAN_F_REGISTER_SUBIF) + continue; + +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x") || + !ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + if (dc_dp_register_subif + (priv->owner, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + 0) != DP_SUCCESS) { + PRINTK("dc_dp_register_subif failed for %s \ + with port_id/subif %d/%d ?\n", dev->name, priv->dp_subif.port_id, priv->dp_subif.subif); + } + } +#endif +#if (defined(CONFIG_LTQ_PPA_GRX500) && CONFIG_LTQ_PPA_GRX500) && \ + (defined(CONFIG_LTQ_PPA_API_DIRECTPATH) && CONFIG_LTQ_PPA_API_DIRECTPATH) + if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } +#endif + priv->f_dp = TESTWLAN_F_REGISTER_DEV; +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.subif = 0; +#endif + } + } + + /*unregister/deallocate devices and reset all devices with same port_id */ +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x") || + !ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + if (dc_dp_register_dev(priv->owner, dp_port_id, NULL, 
DC_DP_F_DEREGISTER) + != DP_SUCCESS) { + PRINTK("dp_unregister_dev failed for %s \ + with port_id %d ??? \n", dev->name, dp_port_id); + } + //dma_free_coherent(NULL, 8, priv->dccntr.dev2soc_ret_enq_base, (dma_addr_t)priv->dccntr.dev2soc_ret_enq_phys_base); + kfree(priv->dccntr.dev2soc_ret_enq_base); + if (dc_dp_alloc_port(priv->owner, priv->dev_port, dev, dp_port_id, + DC_DP_F_DEREGISTER) != DP_SUCCESS) { + PRINTK + ("dp_dealloc_port failed for %s with port_id %d \n", + dev->name, dp_port_id); + } + } +#endif +#if (defined(CONFIG_LTQ_PPA_GRX500) && CONFIG_LTQ_PPA_GRX500) && \ + (defined(CONFIG_LTQ_PPA_API_DIRECTPATH) && CONFIG_LTQ_PPA_API_DIRECTPATH) + if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } +#endif + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) { + if (g_test_wlan_dev[i]) { + struct test_wlan_priv_data *priv = + netdev_priv(g_test_wlan_dev[i]); +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + if (priv->dp_subif.port_id != dp_port_id) +#else + if (priv->dev_port != dev_port) +#endif + continue; + + priv->f_dp = TESTWLAN_F_FREE; + priv->dp_subif.port_id = 0; + } + } +} +#endif + +static int get_dp_port_flag_str_size(void) +{ + return ARRAY_SIZE(dp_port_flag_str); +} + +#if 0 +/*to find out the device type index via its flag name. for example eth_lan, eth_wan,.... 
+return value: +-1: not found +>=0: type index +*/ +static int get_dev_type_index(char *flag_name) +{ + int i; + + for (i = 1; i < get_dp_port_flag_str_size(); i++) { /*skip i = 0 */ + if (ltq_strcmpi(flag_name, dp_port_flag_str[i]) == 0) { + return i; + ; + } + } + return -1; +} +#endif + +static ssize_t proc_write_datapath(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + char str[300]; + int len, rlen; + char *ifname = NULL; + char *param_list[10] = { + NULL + }; + int param_list_num = 0; + struct test_wlan_priv_data *priv = NULL; + int i, k; + uint32_t dev_port = 0; + //int flag_index = 0; + char *flag_str = NULL; + char *dev_port_str = NULL; + + len = count < sizeof(str) ? count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + str[rlen] = 0; + param_list_num = + ltq_split_buffer(str, param_list, ARRAY_SIZE(param_list)); + if (param_list_num < 3) + goto help; + + ifname = param_list[2]; + i = find_testwlan_index_via_name(ifname); /*device must ready before register to Datapath */ + if (i < 0) + goto help; + + priv = netdev_priv(g_test_wlan_dev[i]); + + if (((ltq_strcmpi(param_list[0], "register") == 0) || + (ltq_strcmpi(param_list[0], "reg") == 0)) && + (ltq_strcmpi(param_list[1], "dev") == 0)) { + if (param_list_num < 5) + goto help; + if (priv->f_dp == TESTWLAN_F_REGISTER_SUBIF) { + PRINTK("Dev %s already registered as subif\n", ifname); + goto exit; + } else if (priv->f_dp == TESTWLAN_F_REGISTER_DEV) { + PRINTK("Dev %s already registered as dev\n", ifname); + goto exit; + } + dev_port_str = param_list[3]; + flag_str = param_list[4]; + PRINTK("dev_port_str=%s\n", + dev_port_str ? dev_port_str : "NULL"); + PRINTK("flag_str=%s\n", flag_str ? 
flag_str : "NULL"); + PRINTK("Try to register dev %s to datapath\n", ifname); + + dev_port = ltq_atoi(dev_port_str); + //flag_index = get_dev_type_index(flag_str); + //if (flag_index <= 0) { + // PRINTK("Not vailid device type:%s(%d)\n", + // flag_str, flag_index); + // goto help; + //} + priv->owner = &g_test_wlan_module[i]; + sprintf(priv->owner->name, "module%02d", i); + priv->dev_port = dev_port; + //priv->flag_index = flag_index; + if (flag_str) { + strncpy(priv->flag_str, flag_str, (sizeof(priv->flag_str) - 1)); + priv->flag_str[(sizeof(priv->flag_str) - 1)] = '\0'; + } + if (register_dev(i) != 0) { + PRINTK("Registration is failed for %s\n", ifname); + goto exit; + } + PRINTK("Succeed to register dev %s dev_port %d to \ + datapath with dp_port %d \n", ifname, dev_port, +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id +#else + priv->dev_port +#endif + ); + } else if (((ltq_strcmpi(param_list[0], "register") == 0) + || (ltq_strcmpi(param_list[0], "reg") == 0)) + && (ltq_strcmpi(param_list[1], "subif") == 0)) { + if (param_list_num < 4) + goto help; + PRINTK("Try to register subif %s to datapath\n", ifname); + if (priv->f_dp == TESTWLAN_F_REGISTER_DEV) { /*already alloc a port and registered dev */ +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + if (dp_register_subif + (priv->owner, g_test_wlan_dev[i], g_test_wlan_dev[i]->name, + &priv->dp_subif, + 0) != DP_SUCCESS) { + PRINTK + ("dp_register_subif failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, priv->dp_subif.port_id); + goto exit; + } + } + else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (dc_dp_register_subif + (priv->owner, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + 0) 
!= DC_DP_SUCCESS) { + PRINTK + ("dc_dp_register_subif failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, priv->dp_subif.port_id); + goto exit; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (dc_dp_register_subif + (THIS_MODULE, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + 0) != DC_DP_SUCCESS) { + PRINTK + ("dc_dp_register_subif failed for %s\n and port_id %d", + g_test_wlan_dev[i]->name, priv->dp_subif.port_id); + goto exit; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } + priv->f_dp = TESTWLAN_F_REGISTER_SUBIF; + PRINTK("Succeed to register subitf %s dev port %d: " + "dp_port %d subif %d dp_flag=%d\n", ifname, priv->dev_port, +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id, priv->dp_subif.subif, +#else + 0, 0, +#endif + priv->f_dp); + } else if (priv->f_dp == TESTWLAN_F_FREE) { + dev_port_str = param_list[3]; + PRINTK("dev_port_str=%s\n", dev_port_str); + dev_port = ltq_atoi(dev_port_str); + if (get_port_info_via_dev_port(priv, dev_port) != 0) { + PRINTK("Failed to register subif %s to dev port %s: Try to register dev first\n", ifname, dev_port_str); + goto exit; + } +#if CONFIG_WAVE600_USING_DP_API + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { + if (dp_register_subif + (priv->owner, g_test_wlan_dev[i], g_test_wlan_dev[i]->name, + &priv->dp_subif, + 0) != DP_SUCCESS) { + goto exit; + } + } else +#endif + if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE5x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (dc_dp_register_subif + (priv->owner, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + 0) != DP_SUCCESS) 
{ + goto exit; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "FAST_WAVE6x")) { +#if ((defined(CONFIG_DIRECTCONNECT_DP_API) && CONFIG_DIRECTCONNECT_DP_API) || \ + (defined(CONFIG_DIRECTCONNECT_DP_API_MODULE) && CONFIG_DIRECTCONNECT_DP_API_MODULE)) + if (dc_dp_register_subif + (THIS_MODULE, g_test_wlan_dev[i], NULL, + &priv->dp_subif, + 0) != DP_SUCCESS) { + goto exit; + } +#endif + } + else if (!ltq_strcmpi(priv->flag_str, "DIRECTPATH")) { + } + priv->f_dp = TESTWLAN_F_REGISTER_SUBIF; + PRINTK + ("Succeed to register subitf %s dev port %d:dp_port %d subif %d\n", + ifname, priv->dev_port, +#if (defined(CONFIG_LTQ_DATAPATH) && CONFIG_LTQ_DATAPATH) || (defined(CONFIG_INTEL_DATAPATH) && CONFIG_INTEL_DATAPATH) + priv->dp_subif.port_id, priv->dp_subif.subif +#else + 0, 0 +#endif + ); + } else if (priv->f_dp == TESTWLAN_F_REGISTER_SUBIF) { + PRINTK("Subif %s already registered\n", ifname); + goto exit; + } else { + PRINTK("Failed for uknown reason:%d\n", priv->f_dp); + goto exit; + } + } else if (((ltq_strcmpi(param_list[0], "unregister") == 0) || + (ltq_strcmpi(param_list[0], "unreg") == 0)) && + (ltq_strcmpi(param_list[1], "dev") == 0)) { + PRINTK("Try to unregister dev %s from datapath\n", ifname); + dev_port = priv->dev_port; + for (k = 0; k < ARRAY_SIZE(g_test_wlan_dev); k++) { + if (g_test_wlan_dev[k]) { /*unregister all devices with same port_id */ + priv = netdev_priv(g_test_wlan_dev[k]); + if (!priv || (dev_port != priv->dev_port)) + continue; + unregister_dev(k); + } + } + } else if (((ltq_strcmpi(param_list[0], "unregister") == 0) || + (ltq_strcmpi(param_list[0], "unreg") == 0)) && + (ltq_strcmpi(param_list[1], "subif") == 0)) { + PRINTK("Try to unregister subif %s from datapath\n", ifname); + if (priv->f_dp == TESTWLAN_F_REGISTER_SUBIF) { + unregister_dev(i); + } else { + PRINTK + ("subif %s cannot be unregistered since it is not a registered subif yet\n", + ifname); + } + } else { + PRINTK("Wrong command: %s %s\n", param_list[0], + param_list[1]); + 
goto help; + } + exit: + return count; + help: +/* param_list[0] [1] [2] [3] [4] [5] */ + PRINTK + ("echo register dev <dev_name> <dev_port> <type> > /proc/net/testwlan/datapath\n"); + PRINTK + ("echo unregister dev <dev_name> > /proc/net/testwlan/datapath\n"); + PRINTK + ("echo register subif <dev_name> <dev_port> > /proc/net/testwlan/datapath\n"); + PRINTK + ("echo unregister subif <dev_name> > /proc/net/testwlan/datapath\n"); + PRINTK("Device Type:\n"); + for (i = 1; i < get_dp_port_flag_str_size(); i++) /*skip i 0 */ + PRINTK("\t%s\n", dp_port_flag_str[i]); + return count; +} + +#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB +static INLINE void dump_skb(struct sk_buff *skb, u32 len, char *title, + int ch, int is_tx) +{ + int i; + if (! + (g_dbg_enable & + (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : + DBG_ENABLE_MASK_DUMP_SKB_RX))) + return; + if (skb->len < len) + len = skb->len; + if (ch >= 0) + PRINTK("%s (ch %d)\n", title, ch); + else + PRINTK("%s\n", title); + PRINTK + (" skb->data = 0x%px, skb->tail = 0x%px, skb->len = %d\n", + skb->data, skb->tail, (int)skb->len); + for (i = 1; i <= len; i++) { + if (i % 16 == 1) + PRINTK(" %4d:", i - 1); + PRINTK(" %02X", (int)(*((char *)skb->data + i - 1) & 0xFF)); + if (i % 16 == 0) + PRINTK("\n"); + } + if ((i - 1) % 16 != 0) + PRINTK("\n"); +} +#endif + +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ + +static struct sk_buff *alloc_cputx_skb(int skb_len) +{ + struct sk_buff *new_skb = dev_alloc_skb(skb_len); + if (!new_skb) { + return NULL; + ; + } + return new_skb; +} + +static int init_cputx_skb(void) +{ + struct ethhdr *eth_h; + struct iphdr *iph; + struct udphdr *udph; + uint32_t srcip, dstip, i; + char *dst_mac[] = { + "00", "01", "02", "03", "04", "05" + }; + char *src_mac[] = { + "00", "10", "E0", "00", "00", "01" + }; + char *src_ip[] = { + "192", "168", "1", "100" + }; + char *dst_ip[] = { + "192", "168", "1", "200" + }; + g_tx_skb = alloc_cputx_skb(MAX_SKB_SIZE); + if (!g_tx_skb) { + PRINTK("failed to 
alloc skb. initialization failed\n"); + return -1; + } + if (g_pkt_size > MAX_PKT_SIZE) { + g_pkt_size = MAX_PKT_SIZE; + } + skb_reserve(g_tx_skb, 8); /*reserve for flag header */ + skb_put(g_tx_skb, g_pkt_size); + eth_h = (struct ethhdr *)g_tx_skb->data; + for (i = 0; i < 6; i++) { + eth_h->h_dest[i] = ltq_atoi(dst_mac[i]); + eth_h->h_source[i] = ltq_atoi(src_mac[i]); + } + eth_h->h_proto = htons(ETH_P_IP); + iph = (struct iphdr *)(g_tx_skb->data + sizeof(struct ethhdr)); + memset(iph, 0, sizeof(struct iphdr)); + iph->version = 4; + iph->ihl = 5; + iph->tot_len = g_pkt_size - 14; /*totoal pkt size - ethernet hdr */ + iph->ttl = 64; + iph->protocol = IPPROTO_UDP; + iph->check = 0; + srcip = (ltq_atoi(src_ip[0]) << 24) | + (ltq_atoi(src_ip[1]) << 16) | + (ltq_atoi(src_ip[2]) << 8) | (ltq_atoi(src_ip[3])); + dstip = (ltq_atoi(dst_ip[0]) << 24) | + (ltq_atoi(dst_ip[1]) << 16) | + (ltq_atoi(dst_ip[2]) << 8) | (ltq_atoi(dst_ip[3])); + iph->saddr = srcip; + iph->daddr = dstip; + iph->check = ip_fast_csum(iph, iph->ihl); + udph = (struct udphdr *)(g_tx_skb->data + sizeof(struct ethhdr) + + sizeof(struct iphdr)); + udph->source = htons(1024); + udph->dest = htons(1024); + udph->check = 0; + udph->len = g_pkt_size - 14 - 20; /*totol pkt size - CRC - ethernet hdr - ip hdr */ + return 0; +} + +static void reset_pkt_size(struct sk_buff *skb, uint32_t new_size) +{ + struct iphdr *iph; + struct udphdr *udph; + int len = new_size - skb->len; + skb_put(skb, len); + iph = (struct iphdr *)(skb->data + sizeof(struct ethhdr)); + iph->tot_len = iph->tot_len + len; + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); + udph = (struct udphdr *)(skb->data + sizeof(struct ethhdr) + + sizeof(struct iphdr)); + udph->len += len; + return; +} + +static void reset_tx_dev(char *dev_name, struct sk_buff *pskb) +{ + struct net_device *pdev; + pdev = ltq_dev_get_by_name(dev_name); + if (pdev) { + dev_put(pdev); + ; + } + pskb->dev = pdev; + return; +} + +static int send_out_pkt(void *arg) 
+{ + struct sk_buff *skb_tx; + uint32_t tx_cnt = 1; + g_tx_mib = 0; + g_tx_start = TX_START; + if (!g_tx_skb->dev) { + goto __THREAD_STOP; + ; + } + while (!kthread_should_stop() && tx_cnt <= g_tx_count) { + if (tx_cnt % g_tx_pause_cnt == 0) + schedule_timeout_uninterruptible(1); + skb_tx = skb_clone(g_tx_skb, GFP_ATOMIC); + if (skb_tx) + dev_queue_xmit(skb_tx); + g_tx_mib++; + tx_cnt++; + } + __THREAD_STOP: + g_tx_start = TX_STOP; + return 0; +} + +static void send_out_manager(char *dev_name) +{ + struct net_device *dev; + if (!g_tx_count) { + PRINTK("Warning: tx count is zero, stop sending\n"); + return; + } + if (!g_tx_skb) { + PRINTK("Error: tx skb is not init\n"); + return; + } + dev = ltq_dev_get_by_name(dev_name); + if (dev) { + dev_put(dev); + ; + } else { + PRINTK("Cannot find the dev by name: %s\n", dev_name); + return; + } + if (!ltq_get_xmit_fn(dev)) { + PRINTK("The dev %s don't support xmit function", dev_name); + return; + } + if (g_tx_ts && g_tx_start == TX_START) { + kthread_stop(g_tx_ts); + ; + } + g_tx_ts = kthread_run(send_out_pkt, NULL, "sending_out"); + if (IS_ERR(g_tx_ts)) { + PRINTK("Cannot alloc a new thread to send the pkt\n"); + g_tx_ts = NULL; + g_tx_start = TX_STOP; + } + return; +} + +#if 0 // FIXME +static int print_skb(struct seq_file *s, struct sk_buff *skb) +{ + int len = 0; + struct ethhdr *eth_h = (struct ethhdr *)skb->data; + struct iphdr *iph = + (struct iphdr *)(skb->data + sizeof(struct ethhdr)); + if (!skb) + return len; + SEQ_PRINTF(s, "dst mac: %x%x:%x%x:%x%x:%x%x:%x%x:%x%x\n", + eth_h->h_dest[0] >> 4, eth_h->h_dest[0], + eth_h->h_dest[1] >> 4, eth_h->h_dest[1], + eth_h->h_dest[2] >> 4, eth_h->h_dest[2], + eth_h->h_dest[3] >> 4, eth_h->h_dest[3], + eth_h->h_dest[4] >> 4, eth_h->h_dest[4], + eth_h->h_dest[5] >> 4, eth_h->h_dest[5]); + SEQ_PRINTF(s, "src mac: %x%x:%x%x:%x%x:%x%x:%x%x:%x%x\n", + eth_h->h_source[0] >> 4, eth_h->h_source[0], + eth_h->h_source[1] >> 4, eth_h->h_source[1], + eth_h->h_source[2] >> 4, 
eth_h->h_source[2], + eth_h->h_source[3] >> 4, eth_h->h_source[3], + eth_h->h_source[4] >> 4, eth_h->h_source[4], + eth_h->h_source[5] >> 4, eth_h->h_source[5]); + SEQ_PRINTF(s, "dst ip: %d.%d.%d.%d, src ip: %d.%d.%d.%d\n", + (iph->daddr >> 24) & 0xFF, (iph->daddr >> 16) & 0xFF, + (iph->daddr >> 8) & 0xFF, iph->daddr & 0xFF, + (iph->saddr >> 24) & 0xFF, (iph->saddr >> 16) & 0xFF, + (iph->saddr >> 8) & 0xFF, iph->saddr & 0xFF); + return len; +} +#endif + +static void proc_read_cputx(struct seq_file *s) +{ + SEQ_PRINTF(s, "frame count: 0x%x\n", g_tx_count); + SEQ_PRINTF(s, "frame mib count: 0x%x\n", g_tx_mib); + SEQ_PRINTF(s, "TX dev name: %s\n", g_tx_dev); + SEQ_PRINTF(s, "Tx pause num: %d\n", g_tx_pause_cnt); + SEQ_PRINTF(s, "frame size: %d(Not including CRC)\n", g_pkt_size); +#if 0 // FIXME + len += print_skb(page + off + len, g_tx_skb); + *eof = 1; +#endif +} + +static ssize_t proc_write_cputx(struct file *file, const char *buf, + size_t count, loff_t *data) +{ + char str[300]; + int len, rlen; + int param_list_num = 0; + char *param_list[10]; + + len = count < sizeof(str) ? 
count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + str[rlen] = 0; + param_list_num = + ltq_split_buffer(str, param_list, ARRAY_SIZE(param_list)); + + if (ltq_strncmpi(param_list[0], "help", 4) == 0) { + PRINTK + ("echo count <num> > /proc/net/testwlan/cputx (0xFFFF would be continues sending)\n"); + PRINTK("echo start/stop > /proc/net/testwlan/cputx\n"); + PRINTK("echo dev <name> > /proc/net/testwlan/cputx\n"); + PRINTK + ("echo intcnt <num> /proc/net/testwlan/cputx (min: 1, max: %d)\n", + MAX_PAUSE_CNT); + PRINTK + ("echo pktsize <num> /proc/net/testwlan/cputx (min: %d, max:%d)\n", + MIN_PKT_SIZE, MAX_PKT_SIZE); + } else if (ltq_strncmpi(param_list[0], "count", 5) == 0) { + g_tx_count = ltq_atoi(param_list[1]); + } else if (ltq_strncmpi(param_list[0], "start", 5) == 0) { + if (g_tx_start == TX_START) { + PRINTK("still running, please stop first\n"); + return count; + } + send_out_manager(g_tx_dev); + } else if (ltq_strncmpi(param_list[0], "stop", 4) == 0) { + DP_LIB_LOCK(&g_tx_start_lock); + if (g_tx_start == TX_STOP) { + goto __EXIT_TX_LOCK; + ; + } + kthread_stop(g_tx_ts); + g_tx_ts = NULL; + DP_LIB_UNLOCK(&g_tx_start_lock); + } else if (ltq_strncmpi(param_list[0], "dev", 3) == 0) { + + strncpy(g_tx_dev, param_list[1], sizeof(g_tx_dev) - 1); + g_tx_dev[IFNAMSIZ - 1] = '\0'; + reset_tx_dev(g_tx_dev, g_tx_skb); + } else if (ltq_strncmpi(param_list[0], "dumpskb", 7) == 0) { + if (g_tx_skb) { + dump_skb(g_tx_skb, g_tx_skb->len, "dump tx skb", + -1, 0); + } + } else if (ltq_strncmpi(param_list[0], "intcnt", 6) == 0) { + g_tx_pause_cnt = ltq_atoi(param_list[1]); + if (g_tx_pause_cnt == 0) { + PRINTK("Cannot set intermittent number to zero!!!\n"); + g_tx_pause_cnt = DEFAULT_PAUSE_CNT; + } else if (g_tx_pause_cnt > MAX_PAUSE_CNT) { + PRINTK("Set to MAX PAUSE COUNT: %d\n", MAX_PAUSE_CNT); + g_tx_pause_cnt = MAX_PAUSE_CNT; + } + } else if (ltq_strncmpi(param_list[0], "pktsize", 7) == 0) { + DP_LIB_LOCK(&g_tx_start_lock); + if (g_tx_start == TX_START) 
{ + PRINTK + ("Cannot change the packet size when sending traffic!\n"); + goto __EXIT_TX_LOCK; + } + g_pkt_size = ltq_atoi(param_list[1]); + if (g_pkt_size < MIN_PKT_SIZE || g_pkt_size > MAX_PKT_SIZE) { + PRINTK + ("pkt size cannot be less than %d, or larger than %d\n", + MIN_PKT_SIZE, MAX_PKT_SIZE); + g_pkt_size = DEFAULT_PKT_SIZE; + } + reset_pkt_size(g_tx_skb, g_pkt_size); + DP_LIB_UNLOCK(&g_tx_start_lock); + } else if (ltq_strncmpi(param_list[0], "verifytx", 8) == 0) { +#if 0 // FIXME + struct net_device *tx_dev = NULL; +#if (CONFIG_WAVE600_USING_DP_API == 0) + struct test_wlan_priv_data *priv = NULL; +#endif + if (g_tx_dev[0] == '\0') { + PRINTK + ("tx_dev is NULL!\n"); + return 0; + } + + tx_dev = ltq_dev_get_by_name(g_tx_dev); + if(tx_dev){ + dev_put(tx_dev); + }else{ + printk("Cannot find the dev by name: %s\n", g_tx_dev); + return 0; + } + +#if (CONFIG_WAVE600_USING_DP_API == 0) + priv = netdev_priv(tx_dev); +{ + int i; + uint64_t reg; + uint32_t reg1, reg2; + uint32_t addr = (uint32_t)priv->resources.rings.soc2dev.base; + + for (i = 0; i < priv->resources.rings.soc2dev.size; i++) { + reg = *((uint64_t *)(addr)); + reg1 = (uint32_t)(reg >> 32); + reg2 = (reg & 0xFFFFFFFF); + printk("DESC0_%d_EGP_4 : addr:0x%8x, val[32-63]: 0x%8x, val[0-31]: 0x%8x\n", i, addr, reg1, reg2); + + addr += 8; + reg = *((uint64_t *)(addr)); + reg1 = (uint32_t)(reg >> 32); + reg2 = (reg & 0xFFFFFFFF); + printk("DESC1_%d_EGP_4 : addr:0x%8x, val[32-63]: 0x%8x, val[0-31]: 0x%8x\n", i, addr, reg1, reg2); + + addr += 8; + } +} +#endif +#endif + } + return count; + __EXIT_TX_LOCK: + DP_LIB_UNLOCK(&g_tx_start_lock); + return count; +} + +//#endif + +/*################################### +* Global Function +* #################################### +*/ +#define PROC_NAME "testwlan" +#define PROC_DBG "dbg" +#define PROC_READ_DEV "dev" +#define PROC_READ_MIB "mib" +#define PROC_READ_DP "datapath" +#define PROC_READ_CPUTX "cputx" +static struct ltq_proc_entry proc_entries[] = { +/* name 
single_callback_t multi_callback_t multi_callback_start write_callback_t */ +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + {PROC_DBG, proc_read_dbg, NULL, NULL, proc_write_dbg}, +#endif + {PROC_READ_DEV, NULL, proc_read_dev, NULL, proc_write_dev}, + {PROC_READ_MIB, NULL, proc_read_mib, NULL, proc_write_mib}, + { + PROC_READ_DP, NULL, proc_read_datapath, NULL, + proc_write_datapath}, +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ + {PROC_READ_CPUTX, proc_read_cputx, NULL, NULL, proc_write_cputx}, +//#endif +}; + +static struct proc_dir_entry *proc_node; + +static struct proc_dir_entry *proc_file_create(void) +{ + proc_node = proc_mkdir(PROC_NAME, init_net.proc_net); + if (proc_node != NULL) { + int i; + for (i = 0; i < ARRAY_SIZE(proc_entries); i++) + ltq_proc_entry_create(proc_node, &proc_entries[i]); + } else { + PRINTK("cannot create proc entry"); + return NULL; + } + return proc_node; +} + +/*#################################### +* Init/Cleanup API +* #################################### +*/ + +static __init int test_wlan_drv_init_module(void) +{ + PRINTK("Loading test_wlan_drv driver ...... "); + + memset(g_test_wlan_dev, 0, sizeof(g_test_wlan_dev)); + memset(g_test_wlan_module, 0, sizeof(g_test_wlan_module)); + + proc_file_create(); +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ + spin_lock_init(&g_tx_start_lock); + DP_LIB_LOCK(&g_tx_start_lock); + init_cputx_skb(); +#if CONFIG_WAVE600_USING_DP_API + { + struct dp_port_data dp_p_data = {0}; + dp_p_data.flag_ops = (DP_F_DATA_RESV_CQM_PORT | DP_F_DATA_EVEN_FIRST); + dp_p_data.resv_num_port = 2; + + g_resvd_port = dp_alloc_port_ext(0, THIS_MODULE, NULL, 0, 0, NULL, &dp_p_data, 0); + } +#endif + DP_LIB_UNLOCK(&g_tx_start_lock); +//#endif + PRINTK("Succeeded!\n"); + return 0; +} + +static __exit void test_wlan_drv_exit_module(void) +{ + int i; + + PRINTK("Unloading test_wlan_drv driver ...... 
"); + for (i = 0; i < ARRAY_SIZE(g_test_wlan_dev); i++) + if (g_test_wlan_dev[i]) { + delete_testwlan_dev(i); + } +//#if defined(ENABLE_TXDBG) && ENABLE_TXDBG /* FIXME */ + if (g_tx_ts && g_tx_start == TX_START) + kthread_stop(g_tx_ts); + if (g_tx_skb != NULL) + dev_kfree_skb_any(g_tx_skb); +//#endif + for (i = 0; i < ARRAY_SIZE(proc_entries); i++) + remove_proc_entry(proc_entries[i].name, proc_node); + remove_proc_entry(PROC_NAME, init_net.proc_net); + PRINTK("Succeeded!\n"); + +} + +module_init(test_wlan_drv_init_module); +module_exit(test_wlan_drv_exit_module); + +MODULE_AUTHOR("Anath Bandhu Garai"); +MODULE_DESCRIPTION("Intel DC test driver (Supported XRX500)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1.1"); diff --git a/test/test_wave600_proc_api.c b/test/test_wave600_proc_api.c new file mode 100644 index 0000000..1a32a16 --- /dev/null +++ b/test/test_wave600_proc_api.c @@ -0,0 +1,354 @@ +#include <linux/fs.h> +#include<linux/slab.h> +#include <linux/kernel.h> +#include "test_wlan_proc_api.h" + +static inline int lower_ch(int ch) +{ + if (ch >= 'A' && ch <= 'Z') + return ch + 'a' - 'A'; + return ch; +} + +int ltq_strcmpi(char const *s1, char const *s2) +{ + int c1, c2; + + if (!s1 || !s2) + return 1; + while (*s1 && *s2) { /*same length */ + c1 = lower_ch(*s1); + c2 = lower_ch(*s2); + s1++; + s2++; + + if (c1 != c2) + return c1 - c2; + } + return *s1 - *s2; +} +#if 0 +EXPORT_SYMBOL(ltq_strcmpi); +#endif + +int ltq_strncmpi(const char *s1, const char *s2, size_t n) +{ + int c1, c2; + + if (!s1 || !s2) + return 1; + for (; n > 0; s1++, s2++, --n) { + c1 = lower_ch(*s1); + c2 = lower_ch(*s2); + if (c1 != c2) + return c1 - c2; + else if (c1 == '\0') + return 0; + } + return 0; +} +#if 0 +EXPORT_SYMBOL(ltq_strncmpi); +#endif + +#if 0 +char *ltq_strstri(char *string, char *substring) +{ + register char *a, *b; + +/* First scan quickly through the two strings looking for a +* single-character match. When it's found, then compare the +* rest of the substring. 
+*/ + if (!string || !substring) + return (char *)0; + b = substring; + if (*b == 0) + return string; + + for (; *string != 0; string += 1) { + if (lower_ch(*string) != lower_ch(*b)) + continue; + a = string; + while (1) { + if (*b == 0) + return string; + if (lower_ch(*a++) != lower_ch(*b++)) + break; + } + b = substring; + } + return (char *)0; +} +EXPORT_SYMBOL(ltq_strstri); +#endif + +void ltq_replace_ch(char *p, int len, char orig_ch, char new_ch) +{ + int i; + + if (p) + for (i = 0; i < len; i++) { + if (p[i] == orig_ch) + p[i] = new_ch; + } +} +#if 0 +EXPORT_SYMBOL(ltq_replace_ch); +#endif + +static unsigned int btoi(char *str) +{ + unsigned int sum = 0; + signed len = 0, i = 0; + + len = strlen(str); + len = len - 1; + while (len >= 0) { + if (*(str + len) == '1') + sum = (sum | (1 << i)); + i++; + len--; + } + return sum; +} + +int ltq_atoi(unsigned char *str) +{ + unsigned int n = 0; + int i = 0; + int nega_sign = 0; + + if (!str) + return 0; + ltq_replace_ch(str, strlen(str), ' ', 0); + if (str[0] == 0) + return 0; + + if (str[0] == 'b' || str[0] == 'B') { /*binary format */ + n = btoi(str + 1); + } else if ((str[0] == '0') && ((str[1] == 'x') || (str[1] == 'X'))) { + /*hex format */ + str += 2; + + while (str[i]) { + n = n * 16; + if (('0' <= str[i] && str[i] <= '9')) { + n += str[i] - '0'; + } else if (('A' <= str[i] && str[i] <= 'F')) { + n += str[i] - 'A' + 10; + ; + } else if (('a' <= str[i] && str[i] <= 'f')) { + n += str[i] - 'a' + 10; + ; + } else + PRINTK(KERN_ERR "Wrong value:%u\n", str[i]); + + i++; + } + + } else { + if (str[i] == '-') { /*negative sign */ + nega_sign = 1; + i++; + } + while (str[i]) { + n *= 10; + n += str[i] - '0'; + i++; + } + } + if (nega_sign) + n = -(int)n; + return n; +} +#if 0 +EXPORT_SYMBOL(ltq_atoi); +#endif + +static void ltq_remove_leading_whitespace(char **p, int *len) +{ + while (*len && ((**p == ' ') || (**p == '\r') || (**p == '\r'))) { + (*p)++; + (*len)--; + } +} + +/*Split buffer to multiple segment with 
seperator space. +And put pointer to array[]. +By the way, original buffer will be overwritten with '\0' at some place. +*/ +int ltq_split_buffer(char *buffer, char *array[], int max_param_num) +{ + int i, set_copy = 0; + int res = 0; + int len; + + for (i = 0; i < max_param_num; i++) + array[i] = NULL; + if (!buffer) + return 0; + len = strlen(buffer); + for (i = 0; i < max_param_num;) { + ltq_remove_leading_whitespace(&buffer, &len); + for (; + *buffer != ' ' && *buffer != '\0' && *buffer != '\r' + && *buffer != '\n' && *buffer != '\t'; buffer++, len--) { + /*Find first valid charactor */ + set_copy = 1; + if (!array[i]) + array[i] = buffer; + } + + if (set_copy == 1) { + i++; + if (*buffer == '\0' || *buffer == '\r' + || *buffer == '\n') { + *buffer = 0; + break; + } + *buffer = 0; + buffer++; + len--; + set_copy = 0; + + } else { + if (*buffer == '\0' || *buffer == '\r' + || *buffer == '\n') + break; + buffer++; + len--; + } + } + res = i; + + return res; +} +#if 0 +EXPORT_SYMBOL(ltq_split_buffer); +#endif + +static void *ltq_seq_start(struct seq_file *s, loff_t *pos) +{ + struct ltq_proc_file_entry *p = s->private; + + if (p->pos < 0) + return NULL; + + return p; +} + +static void *ltq_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct ltq_proc_file_entry *p = s->private; + + *pos = p->pos; + + if (p->pos >= 0) + return p; + else + return NULL; +} + +static void ltq_seq_stop(struct seq_file *s, void *v) +{ +} + +static int ltq_seq_show(struct seq_file *s, void *v) +{ + struct ltq_proc_file_entry *p = s->private; + + if (p->pos >= 0) + p->pos = p->callback(s, p->pos); + + return 0; +} + +static int ltq_proc_open(struct inode *inode, struct file *file); + +static const struct seq_operations dp_seq_ops = { + .start = ltq_seq_start, + .next = ltq_seq_next, + .stop = ltq_seq_stop, + .show = ltq_seq_show +}; + +static int ltq_proc_open(struct inode *inode, struct file *file) +{ + struct seq_file *s; + struct ltq_proc_file_entry *p; + struct 
ltq_proc_entry *entry; + int ret; + + ret = seq_open(file, &dp_seq_ops); + if (ret) + return ret; + + s = file->private_data; + p = (struct ltq_proc_file_entry *)kmalloc(sizeof(*p), GFP_KERNEL); + + if (!p) { + (void)seq_release(inode, file); + return -ENOMEM; + } + + entry = PDE_DATA(inode); + + p->callback = entry->callback; + if (entry->init_callback) + p->pos = entry->init_callback(); + else + p->pos = 0; + + s->private = p; + + return 0; +} + +static int ltq_proc_release(struct inode *inode, struct file *file) +{ + struct seq_file *s; + + s = file->private_data; + kfree(s->private); + + return seq_release(inode, file); +} + +static int ltq_seq_single_show(struct seq_file *s, void *v) +{ + struct ltq_proc_entry *p = s->private; + + p->single_callback(s); + return 0; +} + +static int ltq_proc_single_open(struct inode *inode, struct file *file) +{ + return single_open(file, ltq_seq_single_show, PDE_DATA(inode)); +} + +void ltq_proc_entry_create(struct proc_dir_entry *parent_node, + struct ltq_proc_entry *proc_entry) +{ + memset(&proc_entry->ops, 0, sizeof(struct file_operations)); + proc_entry->ops.owner = THIS_MODULE; + + if (proc_entry->single_callback) { + proc_entry->ops.open = ltq_proc_single_open; + proc_entry->ops.release = single_release; + } else { + proc_entry->ops.open = ltq_proc_open; + proc_entry->ops.release = ltq_proc_release; + } + + proc_entry->ops.read = seq_read; + proc_entry->ops.llseek = seq_lseek; + proc_entry->ops.write = proc_entry->write_callback; + proc_create_data(proc_entry->name, + (S_IFREG | S_IRUGO), + parent_node, &proc_entry->ops, proc_entry); +} +#if 0 /* FIXME */ +EXPORT_SYMBOL(ltq_proc_entry_create); +#endif diff --git a/test/test_wlan_drv.h b/test/test_wlan_drv.h new file mode 100644 index 0000000..2369481 --- /dev/null +++ b/test/test_wlan_drv.h @@ -0,0 +1,21 @@ +#ifndef TEST_WLAN_DRV_H +#define TEST_WLAN_DRV_H /*! 
PMAC port informatin data */ + +#define IFNAMSIZ 16 +#define PMAC_MAX_NUM 16 + +#define DP_LIB_LOCK spin_lock_bh +#define DP_LIB_UNLOCK spin_unlock_bh + +#define MAX_SUBIF_PER_PORT 16 + +char *dp_port_flag_str[] = { + "DP_F_DEREGISTER", /*DP_F_DEREGISTER: not valid device type */ + "ETH_LAN", /*DP_F_FAST_ETH_LAN */ + "ETH_WAN", /*DP_F_FAST_ETH_WAN */ + "FAST_WLAN", /*DP_F_FAST_WLAN */ + "FAST_DSL", /* DP_F_FAST_DSL */ + "DIRECTPATH", /* DP_F_DIRECT */ + "CAPWAP" /* DP_F_CAPWAP */ +}; +#endif /*DATAPATH_H */ diff --git a/test/test_wlan_proc_api.h b/test/test_wlan_proc_api.h new file mode 100644 index 0000000..fea7cd7 --- /dev/null +++ b/test/test_wlan_proc_api.h @@ -0,0 +1,57 @@ +#ifndef TEST_WLAN_PROC_API_H +#define TEST_WLAN_PROC_API_H + +#include <linux/kernel.h> /*kmalloc */ +#include <linux/ctype.h> +#include <linux/proc_fs.h> /*file_operations */ +#include <linux/seq_file.h> /*seq_file */ +#include <linux/uaccess.h> /*copy_from_user */ + +#define PRINTK printk /*workaround for checkpatch issue */ +#define SEQ_PRINTF seq_printf + +#define set_ltq_dbg_flag(v, e, f) do {;\ + if (e > 0)\ + v |= (uint32_t)(f);\ + else\ + v &= (uint32_t) (~f); } \ + while (0) + +typedef void (*ltq_proc_single_callback_t) (struct seq_file *); +typedef int (*ltq_proc_callback_t) (struct seq_file *, int); +typedef int (*ltq_proc_init_callback_t) (void); +typedef ssize_t(*ltq_proc_write_callback_t) (struct file *file, + const char __user *input, + size_t size, loff_t *loff); + +struct ltq_proc_file_entry { + ltq_proc_callback_t callback; + int pos; +}; + +struct ltq_proc_entry { + char *name; + ltq_proc_single_callback_t single_callback; + ltq_proc_callback_t callback; + ltq_proc_init_callback_t init_callback; + ltq_proc_write_callback_t write_callback; + struct file_operations ops; +}; + +void ltq_proc_entry_create(struct proc_dir_entry *parent_node, + struct ltq_proc_entry *proc_entry); + +int ltq_atoi(unsigned char *str); +void ltq_remove_space(char **p, int *len); +char 
*ltq_strstri(char *string, char *substring); +int ltq_strncmpi(const char *s1, const char *s2, size_t n); +int ltq_strcmpi(char const *s1, char const *s2); +void ltq_replace_ch(char *p, int len, char orig_ch, char new_ch); + +/*Split buffer to multiple segments with separator space. +And put pointer to array[]. +By the way, original buffer will be overwritten with '\0' at some place. +*/ +int ltq_split_buffer(char *buffer, char *array[], int max_param_num); + +#endif /*TEST_WLAN_PROC_API_H*/ -- GitLab