diff --git a/include/linux/bcm_br_fdb.h b/include/linux/bcm_br_fdb.h new file mode 100644 index 0000000000000000000000000000000000000000..720a083b50a6df18638c464e24363f5145c4b2aa --- /dev/null +++ b/include/linux/bcm_br_fdb.h @@ -0,0 +1,50 @@ +#ifndef _BCM_BR_FDB_H +#define _BCM_BR_FDB_H +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +int bcm_br_has_fdb_expired(const struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb); + +int bcm_br_fdb_notify(struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb, int type, + bool swdev_notify); + +int bcm_br_fdb_init(struct net_bridge *br, struct net_bridge_fdb_entry *fdb); + +int bcm_br_fdb_fill_info(const struct net_bridge_fdb_entry *fdb); + +int bcm_br_fdb_update(struct net_bridge_fdb_entry *fdb, + struct net_bridge_port *source); + +int bcm_br_fdb_cleanup(struct net_bridge_fdb_entry *fdb, + unsigned long time_now, unsigned long delay); + +unsigned int bcm_br_fdb_mac_limit(struct sk_buff *skb); +#endif diff --git a/include/linux/bcm_br_mcast.h b/include/linux/bcm_br_mcast.h new file mode 100644 index 0000000000000000000000000000000000000000..5cdfc82693b7f77f0fae7ebc3173a0b38b747741 --- /dev/null +++ b/include/linux/bcm_br_mcast.h @@ -0,0 +1,47 @@ +/* +* Copyright (c) 2003-2019 Broadcom +* All Rights Reserved +* +<:label-BRCM:2019:DUAL/GPL:standard + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ +#if (defined(CONFIG_BCM_MCAST) || defined(CONFIG_BCM_MCAST_MODULE)) + +#ifndef _BCM_BR_MCAST_H +#define _BCM_BR_MCAST_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +typedef int (*br_bcm_mcast_receive_hook)(int ifindex, struct sk_buff *skb, int is_routed); +typedef int (*br_bcm_mcast_should_deliver_hook)(int ifindex, struct sk_buff *skb, struct net_device *src_dev, bool dst_mrouter); +typedef int (*br_bcm_mcast_local_in_hook)(struct sk_buff *skb); + +int br_bcm_mcast_flood_forward(struct net_device *dev, struct sk_buff *skb); +int br_bcm_mcast_bind(br_bcm_mcast_receive_hook bcm_rx_hook, + br_bcm_mcast_should_deliver_hook bcm_should_deliver_hook, + br_bcm_mcast_local_in_hook bcm_local_in_hook); + +#endif /* _BCM_BR_MCAST_H */ +#endif diff --git a/include/linux/bcm_dslcpe_wlan_info.h b/include/linux/bcm_dslcpe_wlan_info.h new file mode 100755 index 0000000000000000000000000000000000000000..f14fc33aaae8411b854c2864ed1c84a2d73c0df4 --- /dev/null +++ b/include/linux/bcm_dslcpe_wlan_info.h @@ -0,0 +1,27 @@ +#ifndef __BCM_DSLCPE_WLAN_INFO_H_ +#define __BCM_DSLCPE_WLAN_INFO_H_ +#if defined(CONFIG_BLOG) +#include <linux/blog.h> +#define WLAN_CLIENT_INFO_OK (0) +#define WLAN_CLIENT_INFO_ERR (-1) +typedef enum { + WLAN_CLIENT_TYPE_CPU, + WLAN_CLIENT_TYPE_WFD, + WLAN_CLIENT_TYPE_RUNNER, + WLAN_CLIENT_TYPE_MAX +} wlan_client_type_t; + +typedef struct { + wlan_client_type_t type; + union { + uint32_t wl; + BlogWfd_t wfd; + BlogRnr_t rnr; + }; +} wlan_client_info_t; + + +typedef int (* wlan_client_get_info_t)(struct net_device *dev,char *mac_address_p,int priority, wlan_client_info_t *info_p); + +#endif /* CONFIG_BLOG */ +#endif /* __BCM_DSLCPE_WLAN_INFO_H_ */ diff --git a/include/linux/bcm_log.h b/include/linux/bcm_log.h new file mode 100644 index 0000000000000000000000000000000000000000..91b6a6ac1e5aa99171f8a6d11a4c38c5ed8f9058 --- /dev/null +++ b/include/linux/bcm_log.h @@ -0,0 +1,316 @@ +/* +* <:copyright-BRCM:2012:DUAL/GPL:standard +* +* Copyright (c) 2012 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. 
+* +:> +*/ +#ifndef _BCM_LOG_SERVICES_ +#define _BCM_LOG_SERVICES_ + +#if !defined(__KERNEL__) +#include <stdint.h> /**< ISO C99 7.18 Integer Types */ +#include <stdio.h> +#include <string.h> +#endif + +#include <linux/bcm_log_mod.h> + +#if !defined(__KERNEL__) +#define bcm_printk printf +#define bcm_seq_printf seq_printf +#define BUG() do { } while(0) +#define EXPORT_SYMBOL(sym) +#endif + +/********* + ********* + * Private: + ********* + *********/ + + +#define IN /*Input parameters*/ +#define OUT /*Output parameters*/ +#define INOUT /*Input/Output parameters*/ + +/* + * This block of defines selects supported functionality for everything + * that includes bcm_log.h. Selection of functionality will eventually + * be moved to make menuconfig. CONFIG_BRCM_COLORIZE_PRINTS is already + * in make menuconfig, but it is locally disabled here. + */ +#ifdef CONFIG_BCM_LOG +#undef CONFIG_BRCM_COLORIZE_PRINTS +#define BCM_ASSERT_SUPPORTED +#define BCM_LOG_SUPPORTED +#define BCM_DATADUMP_SUPPORTED +#define BCM_ERROR_SUPPORTED +#undef BCM_SNAPSHOT_SUPPORTED +#endif /* CONFIG_BCM_LOG */ + +#include <linux/bcm_colors.h> + +#if defined(BCM_ASSERT_SUPPORTED) +#define BCM_ASSERTCODE(code) code +#else +#define BCM_ASSERTCODE(code) +#endif /*defined(BCM_ASSERT_SUPPORTED)*/ + +#if defined(BCM_LOG_SUPPORTED) +#define BCM_LOGCODE(code) code +#else +#define BCM_LOGCODE(code) +#endif /*defined(BCM_LOG_SUPPORTED)*/ + +#if defined(BCM_ERROR_SUPPORTED) +#define BCM_ERRORCODE(code) code +#else +#define BCM_ERRORCODE(code) +#endif /*defined(BCM_ERROR_SUPPORTED)*/ + +#if defined(BCM_DATADUMP_SUPPORTED) +#define BCM_DATADUMPCODE(code) code +#else +#define BCM_DATADUMPCODE(code) +#endif /*defined(BCM_DATADUMP_SUPPORTED)*/ + +#if defined(BCM_SNAPSHOT_SUPPORTED) +#define BCM_SNAPSHOTCODE(code) code +#else +#define BCM_SNAPSHOTCODE(code) +#endif /*defined(BCM_SNAPSHOT_SUPPORTED)*/ + +typedef enum { + BCM_LOG_DD_IMPORTANT=0, + BCM_LOG_DD_INFO, + BCM_LOG_DD_DETAIL, + BCM_LOG_DD_MAX +} bcmLogDataDumpLevel_t; + +typedef void (*bcmLogLevelChangeCallback_t)(bcmLogId_t logId, bcmLogLevel_t level, void *ctx); + +typedef struct { + bcmLogId_t logId; + char *name; + bcmLogLevel_t logLevel; + bcmLogDataDumpLevel_t ddLevel; + bcmLogLevelChangeCallback_t lcCallback; + void * lcCallbackCtx; +} bcmLogModuleInfo_t; + + +/******** + ******** + * Public: service API offered by LOGdriver to other drivers + ******** + ********/ +#if defined(__KERNEL__) +#include <linux/seq_file.h> + +/* This fucntion is same as printk, but is defined always + * and should be used in binary only modules to avoid + * dependency on CONFIG_PRINTK. It can be used in place of + * regular printk as well. + * + * All driver should use/define macro based on this function and + * should not create new clones. + */ +int bcm_printk(const char *fmt, ...); +#define bcm_pr_cont(fmt, ...) bcm_printk(KERN_CONT fmt, ##__VA_ARGS__) +#define bcm_print(fmt, ...) bcm_pr_cont(fmt, ##__VA_ARGS__) +#define bcm_pr_hex_dump_offset(r, g, b, l, a) \ + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, r, g, b, l, a) +void bcm_seq_printf(struct seq_file *s, const char *fmt, ...); +#endif + +/** + * Logging API: Activate by #defining BCM_LOG_SUPPORTED + **/ + +#if defined(BCM_LOG_SUPPORTED) +bcmLogModuleInfo_t *bcmLog_logIsEnabled(bcmLogId_t logId, bcmLogLevel_t logLevel); +#endif + +#define BCM_LOG_FUNC(logId) \ + BCM_LOG_DEBUG((logId), " ") + +#define BCM_LOG_DEBUG(logId, fmt, arg...) 
\ + BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_DEBUG); \ + if (_pModInfo) \ + bcm_printk(CLRm "[DBG " "%s" "] %-10s: " fmt CLRnl, \ + _pModInfo->name, __FUNCTION__, ##arg); } while(0) ) + +#define BCM_LOG_INFO(logId, fmt, arg...) \ + BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_INFO); \ + if (_pModInfo) \ + bcm_printk(CLRg "[INF " "%s" "] %-10s: " fmt CLRnl, \ + _pModInfo->name, __FUNCTION__, ##arg); } while(0) ) + +#define BCM_LOG_NOTICE(logId, fmt, arg...) \ + BCM_LOGCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_NOTICE); \ + if (_pModInfo) \ + bcm_printk(CLRb "[NTC " "%s" "] %-10s: " fmt CLRnl, \ + _pModInfo->name, __FUNCTION__, ##arg); } while(0) ) + + +/** + * Error Reporting API: Activate by #defining BCM_ERROR_SUPPORTED + **/ + +#define BCM_LOG_ERROR(logId, fmt, arg...) \ + BCM_ERRORCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_logIsEnabled(logId, BCM_LOG_LEVEL_ERROR); \ + if (_pModInfo) \ + bcm_printk(CLRerr "[ERROR " "%s" "] %-10s,%d: " fmt CLRnl, \ + _pModInfo->name, __FUNCTION__, __LINE__, ##arg); } while(0) ) + + +/** + * Assert API: Activate by #defining BCM_ASSERT_SUPPORTED + **/ + +#define BCM_ASSERT(cond) \ + BCM_ASSERTCODE( if ( !(cond) ) { \ + bcm_printk(CLRerr "[ASSERT " "%s" "] %-10s,%d: " #cond CLRnl, \ + __FILE__, __FUNCTION__, __LINE__); \ + BUG(); \ + } ) + + +/** + * Datadump API: Activate by #defining BCM_DATADUMP_SUPPORTED + **/ + +/* + * Prototype of datadump print functions. + * Note: parse functions must be exported (EXPORT_SYMBOL) + */ +typedef int (Bcm_DataDumpPrintFunc)(uint32_t dataDumpId, IN void* dataPtr, uint32_t numDataBytes, + OUT char* buf, uint32_t bufSize); + +#if defined(BCM_DATADUMP_SUPPORTED) +bcmLogModuleInfo_t *bcmLog_ddIsEnabled(bcmLogId_t logId, bcmLogDataDumpLevel_t ddLevel); +void bcm_dataDumpRegPrinter(uint32_t qId, uint32_t dataDumpId, Bcm_DataDumpPrintFunc *printFun); +void bcm_dataDump(uint32_t qID, uint32_t dataDumpID, const char* dataDumpName, void *ptr, uint32_t numBytes); +uint32_t bcm_dataDumpCreateQ(const char* qName); +void bcm_dataDumpDeleteQ(uint32_t qid); +#endif + +/* + * Create a DataDump queue. Different modules can share a queue. + * Returns a queue ID (uint32_t). + */ +#define BCM_DATADUMP_CREATE_Q(qName) BCM_DATADUMPCODE(bcm_dataDumpCreateQ(qName)) + +/* + * Delete a DataDump queue. 
+ */ +#define BCM_DATADUMP_DELETE_Q(qID) BCM_DATADUMPCODE(bcm_dataDumpDeleteQ(qID)) + +/* + * Dump data + */ +#define BCM_DATADUMP_IMPORTANT(logId, qID, dataDumpID, ptr, numBytes) \ + BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_IMPORTANT); \ + if (_pModInfo) \ + bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) ) +#define BCM_DATADUMP_INFO(logId, qID, dataDumpID, ptr, numBytes) \ + BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_INFO); \ + if (_pModInfo) \ + bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) ) +#define BCM_DATADUMP_DETAIL(logId, qID, dataDumpID, ptr, numBytes) \ + BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_DETAIL); \ + if (_pModInfo) \ + bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) ) +#define BCM_DATADUMP_MAX(logId, qID, dataDumpID, ptr, numBytes) \ + BCM_DATADUMPCODE( do { bcmLogModuleInfo_t *_pModInfo = bcmLog_ddIsEnabled(logId, BCM_LOG_DD_MAX); \ + if (_pModInfo) \ + bcm_dataDump(qID, dataDumpID, #dataDumpID, (void*)(ptr), numBytes); } while(0) ) + +/* + * Register a printer for a certain DataDump ID. + * Datadumps for which no printer is registered will use a default printer. + * The default printer will print the data as an array of bytes. + */ +#define BCM_DATADUMP_REG_PRINTER(qId, dataDumpId, printFun) \ + BCM_DATADUMPCODE(bcm_dataDumpRegPrinter(qId, dataDumpId, printFun)) + +/* A helper macro for datadump printers */ +#define DDPRINTF(buf, len, bufSize, arg...) \ + ({len += snprintf((buf)+(len), max_t(uint32_t, 0, (bufSize)-80-(len)), ##arg); \ + if ((len) >= (bufSize)-80) snprintf((buf)+(len), 80, "---BUFFER FULL---\n");}) + + +/** + * Snapshot API: Commit all logs to the Snapshot queue + **/ + +#define BCM_LOG_SNAPSHOT() BCM_SNAPSHOTCODE() /*TBD*/ + + +/** + * API Function Prototypes + **/ + +typedef int (bcmFun_t)(void *); + +#ifdef CONFIG_BCM_LOG + +void bcmLog_setGlobalLogLevel(bcmLogLevel_t logLevel); +bcmLogLevel_t bcmLog_getGlobalLogLevel(void); + +void bcmLog_setLogLevel(bcmLogId_t logId, bcmLogLevel_t logLevel); +bcmLogLevel_t bcmLog_getLogLevel(bcmLogId_t logId); + +char *bcmLog_getModName(bcmLogId_t logId); + +/*Register a function with the bcmLog driver*/ +void bcmFun_reg(bcmFunId_t funId, bcmFun_t *f); + +/*De-Register a function with the bcmLog driver*/ +void bcmFun_dereg(bcmFunId_t funId); + +/*Look up a function by FunId. 
Returns NULL if the function is not + *registered.*/ +bcmFun_t* bcmFun_get(bcmFunId_t funId); + +void bcmLog_registerLevelChangeCallback(bcmLogId_t logId, bcmLogLevelChangeCallback_t callback, void *ctx); + +#else + +/* BCM LOG not configured: create empty stubs for all functions */ +static inline bcmLogModuleInfo_t *bcmLog_logIsEnabled(bcmLogId_t logId, bcmLogLevel_t logLevel) { return NULL; } +static inline void bcmLog_setGlobalLogLevel(bcmLogLevel_t loglevel) {} +static inline bcmLogLevel_t bcmLog_getGlobalLogLevel(void) { return 0; } +static inline char *bcmLog_getModName(bcmLogId_t logId) { return NULL; } +static inline void bcmLog_setLogLevel(bcmLogId_t logId, bcmLogLevel_t logLevel) {} +static inline bcmLogLevel_t bcmLog_getLogLevel(bcmLogId_t logId) { return 0; } +static inline void bcmFun_reg(bcmFunId_t funId, bcmFun_t f) {} +static inline void bcmFun_dereg(bcmFunId_t funId) {} +static inline bcmFun_t* bcmFun_get(bcmFunId_t funId) { return NULL; } +static inline void bcmLog_registerLevelChangeCallback(bcmLogId_t logId, bcmLogLevelChangeCallback_t callback, void *ctx) {} + + +#endif /* CONFIG_BCM_LOG */ +#endif /*_BCM_LOG_SERVICES_*/ diff --git a/include/linux/bcm_log_mod.h b/include/linux/bcm_log_mod.h new file mode 100644 index 0000000000000000000000000000000000000000..a644cf402484aac15f04a8e050149d8b2147df17 --- /dev/null +++ b/include/linux/bcm_log_mod.h @@ -0,0 +1,373 @@ +/* +* <:copyright-BRCM:2010:DUAL/GPL:standard +* +* Copyright (c) 2010 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. 
+* +* :> + +*/ + +#ifndef _BCM_LOG_MODULES_ +#define _BCM_LOG_MODULES_ + +typedef enum { + BCM_LOG_LEVEL_ERROR=0, + BCM_LOG_LEVEL_NOTICE, + BCM_LOG_LEVEL_INFO, + BCM_LOG_LEVEL_DEBUG, + BCM_LOG_LEVEL_MAX +} bcmLogLevel_t; + +/* To support a new module, create a new log ID in bcmLogId_t, + and a new entry in BCM_LOG_MODULE_INFO */ + + +typedef enum { + BCM_LOG_ID_LOG=0, + BCM_LOG_ID_VLAN, + BCM_LOG_ID_GPON, + BCM_LOG_ID_PLOAM, + BCM_LOG_ID_PLOAM_FSM, + BCM_LOG_ID_PLOAM_HAL, + BCM_LOG_ID_PLOAM_PORT, + BCM_LOG_ID_PLOAM_ALARM, + BCM_LOG_ID_OMCI, + BCM_LOG_ID_I2C, + BCM_LOG_ID_ENET, + BCM_LOG_ID_GPON_SERDES, + BCM_LOG_ID_AE, + BCM_LOG_ID_XTM, + BCM_LOG_ID_IQ, + BCM_LOG_ID_BPM, + BCM_LOG_ID_ARL, + BCM_LOG_ID_EPON, + BCM_LOG_ID_GMAC, + BCM_LOG_ID_RDPA, + BCM_LOG_ID_RDPA_CMD_DRV, + BCM_LOG_ID_PKTRUNNER, + BCM_LOG_ID_SIM_CARD, + BCM_LOG_ID_PMD, + BCM_LOG_ID_TM, + BCM_LOG_ID_SPDSVC, + BCM_LOG_ID_MCAST, + BCM_LOG_ID_DPI, + BCM_LOG_ID_CMDLIST, + BCM_LOG_ID_ARCHER, + BCM_LOG_ID_TOD, + BCM_LOG_ID_PON_PWM, + BCM_LOG_ID_OPTICALDET, + BCM_LOG_ID_WANTYPEDET, + BCM_LOG_ID_XPORT, + BCM_LOG_ID_BCMLIBS_BIT_POOL, + BCM_LOG_ID_MPM, + BCM_LOG_ID_BP3, + BCM_LOG_ID_MAX +} bcmLogId_t; + +#define BCM_LOG_MODULE_INFO \ + { \ + {.logId = BCM_LOG_ID_LOG, .name = "bcmlog", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_VLAN, .name = "vlan", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_GPON, .name = "gpon", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PLOAM, .name = "ploam", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PLOAM_FSM, .name = "ploamFsm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PLOAM_HAL, .name = "ploamHal", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PLOAM_PORT, .name = "ploamPort", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PLOAM_ALARM, .name = "ploamAlarm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_OMCI, .name = "omci", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_I2C, .name = "i2c", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_ENET, .name = "enet", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_GPON_SERDES, .name = "ponserdes", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_AE, .name = "ae", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_XTM, .name = "xtm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_IQ, .name = "iq", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_BPM, .name = "bpm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_ARL, .name = "arl", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_EPON, .name = "eponlue", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_GMAC, .name = "gmac", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_RDPA, .name = "rdpa", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_RDPA_CMD_DRV, .name = "rdpadrv", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PKTRUNNER, .name = "pktrunner", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_SIM_CARD, .name = "sim_card", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PMD, .name = "pmd", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_TM, .name = "tm", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_SPDSVC, .name = "spdsvc", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_MCAST, .name = "mcast", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_DPI, .name = "dpi", .logLevel = 
BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_CMDLIST, .name = "cmdlist", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_ARCHER, .name = "archer", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_TOD, .name = "tod", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_PON_PWM, .name = "pon_pwm", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_OPTICALDET, .name = "opticaldet", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_WANTYPEDET, .name = "wantypedet", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_XPORT, .name = "xport", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_BCMLIBS_BIT_POOL, .name = "bitpool", .logLevel = BCM_LOG_LEVEL_NOTICE}, \ + {.logId = BCM_LOG_ID_MPM, .name = "mpm", .logLevel = BCM_LOG_LEVEL_ERROR}, \ + {.logId = BCM_LOG_ID_BP3, .name = "bp3", .logLevel = BCM_LOG_LEVEL_DEBUG}, \ + } + +/* To support a new registered function, + * create a new BCM_FUN_ID */ + +typedef enum { + BCM_FUN_ID_RESET_SWITCH=0, + BCM_FUN_ID_ENET_LINK_CHG, + BCM_FUN_ID_ENET_CHECK_SWITCH_LOCKUP, + BCM_FUN_ID_ENET_GET_PORT_BUF_USAGE, + BCM_FUN_ID_GPON_GET_GEM_PID_QUEUE, + BCM_FUN_ID_ENET_HANDLE, + BCM_FUN_ID_EPON_HANDLE, + BCM_FUN_IN_ENET_CLEAR_ARL_ENTRY, + BCM_FUN_ID_ENET_IS_WAN_PORT, /* Take Logical port number as argument */ + BCM_FUN_ID_ENET_IS_SWSWITCH_PORT, + BCM_FUN_ID_ENET_PORT_ROLE_NOTIFY, + BCM_FUN_ID_ENET_BOND_RX_PORT_MAP, + BCM_FUN_ID_ENET_SYSPORT_CONFIG, + BCM_FUN_ID_ENET_SYSPORT_QUEUE_MAP, + BCM_FUN_ID_ENET_REMAP_TX_QUEUE, + BCM_FUN_ID_ENET_PHY_SPEED_SET, + BCM_FUN_ID_ENET_TM_EN_SET, + /* The arguments of the BCM TM functions are defined by bcmTmDrv_arg_t */ + BCM_FUN_ID_TM_REGISTER, + BCM_FUN_ID_TM_PORT_CONFIG, + BCM_FUN_ID_TM_PORT_ENABLE, + BCM_FUN_ID_TM_ARBITER_CONFIG, + BCM_FUN_ID_TM_QUEUE_CONFIG, + BCM_FUN_ID_TM_APPLY, + BCM_FUN_ID_TM_ENQUEUE, + /* The arguments of the Speed Service functions are defined in spdsvc_defs.h */ + BCM_FUN_ID_SPDSVC_TRANSMIT, + BCM_FUN_ID_SPDSVC_RECEIVE, + /* Kernel Bonding Driver related function */ + BCM_FUN_ID_BOND_CLR_SLAVE_STAT, + BCM_FUN_ID_ENET_IS_BONDED_LAN_WAN_PORT, /* Expects Logical port number as argument */ + BCM_FUN_ID_ENET_IS_DEV_IN_SLAVE_PATH, + BCM_FUN_ID_BOND_RX_HANDLER, + /* DSL Runner Hooks */ + BCM_FUN_ID_RUNNER_PREPEND, + BCM_FUN_ID_TCPSPDTEST_CONNECT, + /* Archer Hooks */ + BCM_FUN_ID_ARCHER_HOST_BIND, + BCM_FUN_ID_ARCHER_XTMRT_BIND, + BCM_FUN_ID_ARCHER_WFD_BIND, + BCM_FUN_ID_ARCHER_WFD_CONFIG, + BCM_FUN_ID_ARCHER_WLAN_RX_REGISTER, + BCM_FUN_ID_ARCHER_WLAN_RX_SEND, + BCM_FUN_ID_ARCHER_WLAN_BIND, + BCM_FUN_ID_ARCHER_WLAN_UNBIND, + BCM_FUN_ID_ARCHER_DHD_BDMF_NEW_AND_SET, + BCM_FUN_ID_ARCHER_DHD_INIT_CFG_SET, + BCM_FUN_ID_ARCHER_DHD_FLUSH_SET, + BCM_FUN_ID_ARCHER_DHD_FLOW_RING_ENABLE_SET, + BCM_FUN_ID_ARCHER_DHD_RX_POST_INIT, + BCM_FUN_ID_ARCHER_DHD_RX_POST_UNINIT, + BCM_FUN_ID_ARCHER_DHD_RX_POST_REINIT, + BCM_FUN_ID_ARCHER_DHD_CPU_GET, + BCM_FUN_ID_ARCHER_DHD_CPU_NUM_QUEUES_GET, + BCM_FUN_ID_ARCHER_DHD_CPU_RXQ_CFG_SET, + BCM_FUN_ID_ARCHER_DHD_CPU_INDEX_GET, + BCM_FUN_ID_ARCHER_DHD_CPU_INT_ENABLE, + BCM_FUN_ID_ARCHER_DHD_CPU_INT_DISABLE, + BCM_FUN_ID_ARCHER_DHD_CPU_INT_CLEAR, + BCM_FUN_ID_ARCHER_DHD_WAKEUP_INFORMATION_GET, + BCM_FUN_ID_ARCHER_DHD_COMPLETE_RING_CREATE, + BCM_FUN_ID_ARCHER_DHD_COMPLETE_RING_DESTROY, + BCM_FUN_ID_ARCHER_DHD_SEND_PACKET_TO_DONGLE, + BCM_FUN_ID_ARCHER_DHD_CPU_PACKET_GET, + BCM_FUN_ID_ARCHER_DHD_COMPLETE_MESSAGE_GET, + BCM_FUN_ID_ARCHER_DHD_COMPLETE_WAKEUP, + BCM_FUN_ID_ARCHER_DHD_BDMF_SYSB_RECYCLE, + 
BCM_FUN_ID_VLAN_LOOKUP_DP, + /* WLAN Hooks */ + BCM_FUN_ID_WLAN_QUERY_BRIDGEFDB, + BCM_FUN_ID_WLAN_UPDATE_BRIDGEFDB, + BCM_FUN_ID_WLAN_PKTC_DEL_BY_MAC, + BCM_FUN_ID_SPDT_RNR_TRANSMIT, + /* Pktrunner hook for SPU Offloading */ + BCM_FUN_ID_PKTRUNNER_SPUOFFLOAD_BIND, + BCM_FUN_ID_PMD_PRBS, + BCM_FUN_ID_WAN_SERDES_CONFIG, + BCM_FUN_ID_SEND_MESSAGE_TO_PON_DRV_TASK, + BCM_FUN_ID_WAN_SERDES_TYPE_GET, + BCM_FUN_ID_NETLINK_INVOKE_SERDES_JOB_WITH_OUTPUT, + BCM_FUN_ID_NETLINK_INVOKE_SERDES_JOB, + BCM_FUN_ID_SYNCE_ETH_LINK_CHANGE, + BCM_FUN_ID_WAN_SERDES_RESET_TXFIFO, + BCM_FUN_ID_WAN_SERDES_SYNC_LOSS, + /* hook for SPU offload stats update */ + BCM_FUN_ID_SPUOFFLOAD_STATS_UPDATE, + BCM_FUN_ID_MAX +} bcmFunId_t; + +/* Structures passed in above function calls */ +typedef struct { + struct net_device *slave_dev; /* Input */ + struct net_device **bond_dev; /* Input/Output */ +}BCM_BondDevInfo; + +/* Structures passed in above function calls */ +typedef struct { + uint16_t gemPortIndex; /* Input */ + uint16_t gemPortId; /* Output */ + uint8_t usQueueIdx; /* Output */ +}BCM_GponGemPidQueueInfo; + +typedef enum { + BCM_ENET_FUN_TYPE_LEARN_CTRL = 0, + BCM_ENET_FUN_TYPE_ARL_WRITE, + BCM_ENET_FUN_TYPE_AGE_PORT, + BCM_ENET_FUN_TYPE_UNI_UNI_CTRL, + BCM_ENET_FUN_TYPE_PORT_RX_CTRL, + BCM_ENET_FUN_TYPE_GET_VPORT_CNT, + BCM_ENET_FUN_TYPE_GET_IF_NAME_OF_VPORT, + BCM_ENET_FUN_TYPE_GET_UNIPORT_MASK, + BCM_ENET_FUN_TYPE_MAX +} bcmFun_Type_t; + +typedef struct { + uint16_t vid; + uint16_t val; + uint8_t mac[6]; +} arlEntry_t; + +typedef struct { + bcmFun_Type_t type; /* Action Needed in Enet Driver */ + union { + uint8_t port; + uint8_t uniport_cnt; + uint16_t portMask; + arlEntry_t arl_entry; + }; + char name[16]; + uint8_t enable; +}BCM_EnetHandle_t; + +typedef enum { + BCM_EPON_FUN_TYPE_UNI_UNI_CTRL = 0, + BCM_EPON_FUN_TYPE_MAX +} bcmEponFun_Type_t; + +typedef struct { + bcmEponFun_Type_t type; /* Action Needed in Epon Driver */ + uint8_t enable; +}BCM_EponHandle_t; + +typedef struct { + uint8_t port; /* switch port */ + uint8_t enable; /* enable/disable the clock */ +}BCM_CmfFfeClk_t; + +#define BCM_RUNNER_PREPEND_SIZE_MAX 100 /* Increasing the size of the prepend data buffer will require + recompiling the cmdlist driver files released as binary */ + +typedef struct { + void *blog_p; /* INPUT: Pointer to the Blog_t structure that triggered the Runner flow creation */ + uint8_t data[BCM_RUNNER_PREPEND_SIZE_MAX]; /* INPUT: The data that will be be prepended to all packets + forwarded by Runner that match the given Blog/Flow. + The data must be stored in NETWWORK BYTE ORDER */ + unsigned int size; /* OUTPUT: Size of the prepend data, up to 100 bytes long. 
+ When no data is to be prepended, specify size = 0 */ + union + { + uint32_t flags_union; + struct + { + unsigned int extention_header_present : 1; + unsigned int g9111_header_present : 1; + unsigned int reserved : 30; + }; + }; +} BCM_runnerPrepend_t; + +typedef struct { + unsigned int sysport; + unsigned int switch_id; + unsigned int port; + unsigned int is_wan; +} BCM_EnetPortRole_t; + +#define BCM_ENET_SYSPORT_INTF_MAX 2 +#define BCM_ENET_SYSPORT_BLOG_CHNL_MAX 8 + +typedef enum { + BCM_ENET_SYSPORT_MODE_INVALID = 0, + BCM_ENET_SYSPORT_MODE_PORT, + BCM_ENET_SYSPORT_MODE_INTERNAL_BRCM_SW, + BCM_ENET_SYSPORT_MODE_EXTERNAL_BRCM_SW, + BCM_ENET_SYSPORT_MODE_STACKED_BRCM_SW, + BCM_ENET_SYSPORT_MODE_MAX +} bcmSysport_Mode_t; + +typedef struct { + bcmSysport_Mode_t mode; +} bcmSysport_Sysport_t; + +typedef struct { + struct net_device *dev; + int sysport; + int switch_id; + int port; + int nbr_of_queues; +} bcmSysport_BlogChnl_t; + +typedef struct { + int nbr_of_sysports; + bcmSysport_Sysport_t sysport[BCM_ENET_SYSPORT_INTF_MAX]; + int nbr_of_blog_channels; + bcmSysport_BlogChnl_t blog_chnl[BCM_ENET_SYSPORT_BLOG_CHNL_MAX]; + int switch_parent_port; /* parent port num if external switch exists, otherwise NO_EXT_SWITCH */ +#define NO_EXT_SWITCH -1 + int ls_port_q_offset; /* external switch port lightstacking port tx q start offset */ +} bcmSysport_Config_t; + +#define BCM_ENET_SYSPORT_QUEUE_MAP_PRIORITY_MAX 8 + +typedef struct { + int blog_chnl; + uint8_t priority_to_switch_queue[BCM_ENET_SYSPORT_QUEUE_MAP_PRIORITY_MAX]; +} bcmSysport_QueueMap_t; + +typedef struct { + int blog_chnl; + int blog_chnl_rx; +} bcmEnet_BondRxPortMap_t; + +/* Structure used with BCM_FUN_ID_ENET_REMAP_TX_QUEUE */ +typedef struct { + uint8_t tx_queue; + void *dev; +} bcmEnet_QueueReMap_t; + +typedef struct { + struct net_device *dev; + int kbps; +} bcmSysport_PhySpeed_t; + +typedef struct { + uint16_t enable; + uint8_t prbs_mode; +}pmd_pbrs_param; + +#endif /* _BCM_LOG_MODULES_ */ + diff --git a/include/linux/bcm_netdev_path.h b/include/linux/bcm_netdev_path.h new file mode 100644 index 0000000000000000000000000000000000000000..0657f51c118f83f9b25b29dd390aec2688da5777 --- /dev/null +++ b/include/linux/bcm_netdev_path.h @@ -0,0 +1,113 @@ +#ifndef __BCM_NETDEV_PATH_H_INCLUDED__ +#define __BCM_NETDEV_PATH_H_INCLUDED__ + + +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + +/* Forward declaration */ +struct net_device; + +#if defined(CONFIG_BCM_GPON_MODULE) +#define NETDEV_PATH_HW_SUBPORTS_MAX CONFIG_BCM_MAX_GEM_PORTS +#else +#define NETDEV_PATH_HW_SUBPORTS_MAX 0 +#endif +struct netdev_path +{ + /* this reference counter indicates the number of interfaces + referencing this interface */ + int refcount; + /* indicates the RX hardware port number associated to the + interface */ + unsigned int hw_port; + /* indicates the TX hardware port number associated to the + interface */ + unsigned int hw_tx_port; + /* hardware port type, must be set to one of the types defined in + BlogPhy_t */ + unsigned int hw_port_type; + /* some device drivers support virtual subports within a hardware + port. hw_subport_mcast is used to map a multicast hw subport + to a hw port. */ + unsigned int hw_subport_mcast_idx; +}; + +/* Returns TRUE when _dev is a member of a path, otherwise FALSE */ +#define netdev_path_is_linked(_dev) ( netdev_path_next_dev(_dev) != NULL ) + +/* Returns TRUE when _dev is the leaf in a path, otherwise FALSE */ +#define netdev_path_is_leaf(_dev) ( (_dev)->bcm_nd_ext.path.refcount == 0 ) + +/* Returns TRUE when _dev is the root of a path, otherwise FALSE. The root + device is the physical device */ +#define netdev_path_is_root(_dev) ( netdev_path_next_dev(_dev) == NULL ) + +#define netdev_path_set_hw_port(_dev, _hw_port, _hw_port_type) \ + do { \ + (_dev)->bcm_nd_ext.path.hw_port = (_hw_port); \ + (_dev)->bcm_nd_ext.path.hw_tx_port = (_hw_port); \ + (_dev)->bcm_nd_ext.path.hw_port_type = (_hw_port_type); \ + } while(0) + +#define netdev_path_set_hw_port_only(_dev, _hw_port) \ + do { \ + (_dev)->bcm_nd_ext.path.hw_port = (_hw_port); \ + } while(0) + +#define netdev_path_set_hw_tx_port_only(_dev, _hw_port) \ + do { \ + (_dev)->bcm_nd_ext.path.hw_tx_port = (_hw_port); \ + } while(0) + +#define netdev_path_get_hw_port(_dev) ( (_dev)->bcm_nd_ext.path.hw_port ) + +#define netdev_path_get_hw_tx_port(_dev) ( (_dev)->bcm_nd_ext.path.hw_tx_port ) + +#define netdev_path_get_hw_port_type(_dev) ( (_dev)->bcm_nd_ext.path.hw_port_type ) +#define netdev_path_set_hw_port_type(_dev, _hwt) do { (_dev)->bcm_nd_ext.path.hw_port_type = _hwt; } while (0) + +#define netdev_path_get_hw_subport_mcast_idx(_dev) ( (_dev)->bcm_nd_ext.path.hw_subport_mcast_idx ) + +/* Returns a pointer to the next device in a path, towards the root + (physical) device */ +struct net_device *netdev_path_next_dev(struct net_device *dev); + +struct net_device *netdev_path_get_root(struct net_device *dev); + +int netdev_path_set_hw_subport_mcast_idx(struct net_device *dev, unsigned int subport_idx); + +int netdev_path_add(struct net_device *new_dev, struct net_device *next_dev); + +int netdev_path_remove(struct net_device *dev); + +void netdev_path_dump(struct net_device *dev); + + +#endif /* __BCM_NETDEV_PATH_H_INCLUDED__ */ diff --git a/include/linux/bcm_netdevice.h b/include/linux/bcm_netdevice.h new file mode 100644 index 0000000000000000000000000000000000000000..220fb8c23d9608b43e037dacab43385c6ab22537 --- /dev/null +++ b/include/linux/bcm_netdevice.h @@ -0,0 +1,211 @@ +#ifndef __BCM_NETDEVICE_H_INCLUDED__ +#define __BCM_NETDEVICE_H_INCLUDED__ + + +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at 
http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +#if defined(CONFIG_BLOG) +#include <linux/blog.h> +#if defined(CONFIG_BCM_WLAN_MODULE) +#include <linux/bcm_dslcpe_wlan_info.h> +#endif +#endif +#include <linux/bcm_netdev_path.h> +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) +#include <linux/br_fp.h> +#endif +#include <uapi/linux/bcm_maclimit.h> + +#if defined(CONFIG_BCM_KF_NETDEV_EXT) +#if defined(CONFIG_BCM_MAP) || defined(CONFIG_BCM_MAP_MODULE) +#define NAT46_DEVICE_SIGNATURE 0x544e36dd +#endif +#endif + +#define BCM_IFF_WANDEV (1 << 0) +#define BCM_IFF_VLAN (1 << 1) +#define BCM_IFF_PPP (1 << 2) +#define BCM_IFF_HW_FDB (1 << 3) // this dev is enabled for hw MAC learning +#define BCM_IFF_HW_SWITCH (1 << 4) +#define BCM_IFF_WLANDEV (1 << 5) +#define BCM_IFF_BCM_DEV (1 << 6) +#define BCM_IFF_WLANDEV_NIC (1 << 7) +#define BCM_IFF_WLANDEV_DHD (1 << 8) +#define BCM_IFF_MCAST_ROUTER (1 << 9) +#define BCM_IFF_VFRWD (1 << 10) + +/*bits BCM_IFF_SWACCEL_GENERIC & BCM_IFF_HWACCEL_GENERIC are + *used together as 2 bit value*/ +#define BCM_IFF_ACCEL_GDX_RX (1 << 11) +#define BCM_IFF_ACCEL_GDX_TX (1 << 12) +#define BCM_IFF_ACCEL_GDX_HW (1 << 13) +#define BCM_IFF_ACCEL_GDX_DEBUG (1 << 14) +#define BCM_IFF_ACCEL_TC_EGRESS (1 << 15) + +#define BLOG_DEV_STAT_FLAG_INCLUDE_SW_UC (1<<0) /* Include SW accelerated Unicast stats */ +#define BLOG_DEV_STAT_FLAG_INCLUDE_HW_UC (1<<1) /* Include HW accelerated Unicast stats */ +#define BLOG_DEV_STAT_FLAG_INCLUDE_SW_MC (1<<2) /* Include SW accelerated Multicast stats */ +#define BLOG_DEV_STAT_FLAG_INCLUDE_HW_MC (1<<3) /* Include HW accelerated Multicast stats */ +#define BLOG_DEV_STAT_FLAG_INCLUDE_SW (BLOG_DEV_STAT_FLAG_INCLUDE_SW_UC|BLOG_DEV_STAT_FLAG_INCLUDE_SW_MC) +#define BLOG_DEV_STAT_FLAG_INCLUDE_HW (BLOG_DEV_STAT_FLAG_INCLUDE_HW_UC|BLOG_DEV_STAT_FLAG_INCLUDE_HW_MC) +#define BLOG_DEV_STAT_FLAG_INCLUDE_ALL (BLOG_DEV_STAT_FLAG_INCLUDE_SW|BLOG_DEV_STAT_FLAG_INCLUDE_HW) + +/* Info types to ask from different drivers */ +typedef enum { + BCM_NETDEV_TO_RDPA_PORT_OBJ, +} bcm_netdev_priv_info_type_t; +/* Output from driver corresponding to the info type */ +typedef union { + struct { + void *rdpa_port_obj; + } bcm_netdev_to_rdpa_port_obj; +} bcm_netdev_priv_info_out_t; + +typedef int (*bcm_netdev_priv_info_get_cb_fn_t)(struct net_device *dev, + bcm_netdev_priv_info_type_t info_type, + bcm_netdev_priv_info_out_t *info_out); + +struct bcm_netdev_ext { + unsigned int iff_flags; + struct netdev_path path; +#if defined(CONFIG_BLOG) + BlogStats_t blog_stats; /* Cummulative stats of accelerated flows */ + unsigned int blog_stats_flags; /* Blog stats collection property for the device */ +#if defined(CONFIG_BCM_WLAN_MODULE) + /* runner multicast acceleration hook */ + 
wlan_client_get_info_t wlan_client_get_info; +#endif +#endif /* CONFIG_BLOG */ +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) + struct bcm_br_ext bcm_br_ext; +#endif + struct mac_limit mac_limit; + /* return 0-success, -1:failure (not supported or other error) */ + bcm_netdev_priv_info_get_cb_fn_t bcm_netdev_cb_fn; +}; + +#if defined(CONFIG_BLOG) && defined(CONFIG_BCM_WLAN_MODULE) +#define netdev_wlan_client_get_info(dev) ((dev)->bcm_nd_ext.wlan_client_get_info) +#else +#define netdev_wlan_client_get_info(dev) NULL +#endif + +#define bcm_netdev_ext_field_get(dev, f) ((dev)->bcm_nd_ext.f) +#define bcm_netdev_ext_field_get_ptr(dev, f) (&(dev)->bcm_nd_ext.f) +#define bcm_netdev_ext_field_set(dev, f, val) ((dev)->bcm_nd_ext.f = val) + +#define netdev_bcm_dev_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_BCM_DEV +#define netdev_bcm_dev_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_BCM_DEV +#define is_netdev_bcm_dev(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_BCM_DEV) + +#define netdev_wan_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_WANDEV +#define netdev_wan_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_WANDEV +#define is_netdev_wan(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_WANDEV) + +#define netdev_vfrwd_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_VFRWD +#define is_netdev_vfrwd(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_VFRWD) + +#define netdev_vlan_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_VLAN +#define is_netdev_vlan(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_VLAN) + +#define netdev_ppp_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_PPP +#define is_netdev_ppp(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_PPP) + +#define netdev_hw_fdb_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_HW_FDB +#define netdev_hw_fdb_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_HW_FDB +#define is_netdev_hw_fdb(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_HW_FDB) + +#define netdev_hw_switch_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_HW_SWITCH +#define netdev_hw_switch_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_HW_SWITCH +#define is_netdev_hw_switch(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_HW_SWITCH) + +#define netdev_wlan_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_WLANDEV +#define netdev_wlan_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_WLANDEV +#define is_netdev_wlan(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_WLANDEV) + +#define netdev_wlan_nic_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_WLANDEV_NIC +#define netdev_wlan_nic_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_WLANDEV_NIC +#define is_netdev_wlan_nic(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_WLANDEV_NIC) + +#define netdev_wlan_dhd_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_WLANDEV_DHD +#define netdev_wlan_dhd_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_WLANDEV_DHD +#define is_netdev_wlan_dhd(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_WLANDEV_DHD) + +#define netdev_mcastrouter_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_MCAST_ROUTER +#define netdev_mcastrouter_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_MCAST_ROUTER +#define is_netdev_mcastrouter(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_MCAST_ROUTER) + +// for NETDEV_CHANGEUPPER +#define is_netdev_br_port_add(_dev, _ptr) (netif_is_bridge_port(_dev) && ((struct netdev_notifier_changeupper_info *)(ptr))->linking) +#define is_netdev_br_port_del(_dev, _ptr) (netif_is_bridge_port(_dev) && !((struct 
netdev_notifier_changeupper_info *)(ptr))->linking) +#define changeupper_get_upper(_dev, _ptr) (((struct netdev_notifier_changeupper_info *)(ptr))->upper_dev) + + +#define netdev_accel_gdx_rx_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_ACCEL_GDX_RX +#define netdev_accel_gdx_rx_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_ACCEL_GDX_RX +#define is_netdev_accel_gdx_rx(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_ACCEL_GDX_RX) + +#define netdev_accel_gdx_tx_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_ACCEL_GDX_TX +#define netdev_accel_gdx_tx_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_ACCEL_GDX_TX +#define is_netdev_accel_gdx_tx(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_ACCEL_GDX_TX) + +#define netdev_hw_accel_gdx_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_ACCEL_GDX_HW +#define netdev_hw_accel_gdx_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_ACCEL_GDX_HW +#define is_netdev_hw_accel_gdx(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_ACCEL_GDX_HW) + +extern int (*bcm_netdev_gen_hwaccel_notfier_cb)(struct net_device *dev, + int event, int group); + +static inline int bcm_netdev_gen_hwaccel_notfier(struct net_device *dev, + int event, int group) +{ + if(bcm_netdev_gen_hwaccel_notfier_cb) + return bcm_netdev_gen_hwaccel_notfier_cb(dev, event, group); + else { + printk(" Generic HW accel not supported \n"); + return -1; + } + + return 0; +} +#define netdev_accel_gdx_debug_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_ACCEL_GDX_DEBUG +#define netdev_accel_gdx_debug_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_ACCEL_GDX_DEBUG +#define is_netdev_accel_gdx_debug(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_ACCEL_GDX_DEBUG) + +#define netdev_accel_tc_egress_set(_dev) (_dev)->bcm_nd_ext.iff_flags |= BCM_IFF_ACCEL_TC_EGRESS +#define netdev_accel_tc_egress_unset(_dev) (_dev)->bcm_nd_ext.iff_flags &= ~BCM_IFF_ACCEL_TC_EGRESS +#define is_netdev_accel_tc_egress(_dev) ((_dev)->bcm_nd_ext.iff_flags & BCM_IFF_ACCEL_TC_EGRESS) + +void bcm_netdev_ext_inherit(struct net_device *parent, struct net_device * child); + +int bcm_attach_vlan_hook(struct net_device *dev); +void bcm_detach_vlan_hook(struct net_device *dev); + +#endif /* __BCM_NETDEVICE_H_INCLUDED__ */ diff --git a/include/linux/bcm_nf_conntrack.h b/include/linux/bcm_nf_conntrack.h new file mode 100644 index 0000000000000000000000000000000000000000..c1381c63df87573c74c0605b78c96622759f8d5d --- /dev/null +++ b/include/linux/bcm_nf_conntrack.h @@ -0,0 +1,304 @@ +/* +* <:copyright-BRCM:2012:DUAL/GPL:standard +* +* Copyright (c) 2012 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. 
+* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. +* +:> +*/ + +#ifndef _BCM_NF_CONNTRACK_H +#define _BCM_NF_CONNTRACK_H + +#include <linux/types.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_helper.h> + +#include <linux/blog.h> +#include <linux/dpi.h> +#include <linux/iqos.h> + + +#if defined(CONFIG_BLOG) +static inline void bcm_nf_blog_ct_init(struct nf_conn *ct) +{ + /* no blog lock needed here */ + set_bit(IPS_BLOG_BIT, &ct->status); /* Enable conntrack blogging */ + ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL] = BLOG_KEY_FC_INVALID; + ct->bcm_ext.blog_key[IP_CT_DIR_REPLY] = BLOG_KEY_FC_INVALID; + ct->bcm_ext.blog_learned = 0; +} + +static inline int bcm_nf_blog_destroy_conntrack(struct nf_conn *ct) +{ + blog_lock(); + pr_debug("%s(%px) blog keys[0x%08x,0x%08x]\n", __func__, + ct, ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL], + ct->bcm_ext.blog_key[IP_CT_DIR_REPLY]); + + + /* Conntrack going away, notify blog client */ + if ((ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_FC_INVALID) || + (ct->bcm_ext.blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_FC_INVALID)) { + + blog_notify(DESTROY_FLOWTRACK, (void*)ct, + (uint32_t)ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL], + (uint32_t)ct->bcm_ext.blog_key[IP_CT_DIR_REPLY]); + } + + clear_bit(IPS_BLOG_BIT, &ct->status); /* Disable further blogging */ + blog_unlock(); + + return 0; +} + +static inline int bcm_nf_blog_link_ct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + struct sk_buff *skb, u_int16_t l3num, u_int8_t protonum) +{ + + /* here we dont need blog lock as this blog is owned by this skb */ + + struct nf_conn_help * help = nfct_help(ct); + + + if ( help && help->helper && + strcmp(help->helper->name, "BCM-NAT")) { + pr_debug("nf_conntrack_in: skb<%px> ct<%px> helper<%s> found\n", + skb, ct, help->helper->name); + clear_bit(IPS_BLOG_BIT, &ct->status); + } + + if (test_bit(IPS_BLOG_BIT, &ct->status)) { /* OK to blog ? */ + pr_debug("nf_conntrack_in: skb<%px> blog<%px> ct<%px>\n", + skb, blog_ptr(skb), ct); + + blog_link(FLOWTRACK, blog_ptr(skb), + (void*)ct, CTINFO2DIR(ctinfo), 0); + } else { + pr_debug("nf_conntrack_in: skb<%px> ct<%px> NOT BLOGible<%px>\n", + skb, ct, blog_ptr(skb)); + blog_skip(skb, blog_skip_reason_ct_status_donot_blog); /* No blogging */ + } + + return 0; +} + +static inline int bcm_nf_blog_update_timeout(struct nf_conn *ct, unsigned long extra_jiffies) +{ + blog_lock(); + + if ((ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_FC_INVALID) || + (ct->bcm_ext.blog_key[IP_CT_DIR_REPLY] != BLOG_KEY_FC_INVALID)) { + + unsigned int blog_key = (ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL] != BLOG_KEY_FC_INVALID) ? + ct->bcm_ext.blog_key[IP_CT_DIR_ORIGINAL] : ct->bcm_ext.blog_key[IP_CT_DIR_REPLY]; + + blog_notify(UPDATE_FLOWTRACK_IDLE_TIMEOUT, (void*)ct, + blog_key, (uint32_t)(extra_jiffies/HZ)); + } + blog_unlock(); + + return 0; +} + +#endif /*CONFIG_BLOG */ + +static inline int nf_conntrack_ipv6_is_multicast(const __be32 ip6[4]) +{ + return ((ip6[0] & htonl(0xFF000000)) == htonl(0xFF000000)); +} + +static inline void bcm_nf_conn_set_iq_prio(struct nf_conn *ct, struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_BCM_INGQOS) +#if defined(CONFIG_BLOG) + if (skb != NULL && skb->blog_p != NULL ) + ct->bcm_ext.iq_prio = (blog_iq(skb) == BLOG_IQ_PRIO_HIGH) ? 
IQOS_PRIO_HIGH : IQOS_PRIO_LOW;
+    else
+#endif
+    {
+
+        switch (nf_ct_l3num(ct)) {
+        case AF_INET:
+            ct->bcm_ext.iq_prio = ipv4_is_multicast(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip) ? IQOS_PRIO_HIGH : IQOS_PRIO_LOW;
+            break;
+        case AF_INET6:
+            ct->bcm_ext.iq_prio = nf_conntrack_ipv6_is_multicast(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6) ? IQOS_PRIO_HIGH : IQOS_PRIO_LOW;
+            break;
+        default:
+            ct->bcm_ext.iq_prio = IQOS_PRIO_LOW;
+        }
+    }
+#endif
+}
+
+static inline void bcm_nf_iqos_destroy_conntrack(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_BCM_INGQOS)
+    if (test_bit(IPS_IQOS_BIT,&ct->status)) {
+        clear_bit(IPS_IQOS_BIT, &ct->status);
+        iqos_rem_L4port(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
+                ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all), IQOS_ENT_DYN);
+        iqos_rem_L4port(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum,
+                ntohs(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all), IQOS_ENT_DYN);
+    }
+#endif
+}
+
+#if defined(CONFIG_BCM_NF_DERIVED_CONN)
+static inline void bcm_nf_ct_derived_list_add(struct nf_conn *ct)
+{
+    BCM_DERIVED_CONN_LOCK_BH();
+
+    /* master ref count is incremented before calling this function */
+    /* via exp->master safe, refcnt bumped in nf_ct_find_expectation */
+    if(ct->master){
+
+        list_add(&ct->bcm_ext.derived_list,
+                &ct->master->bcm_ext.derived_connections);
+    }
+
+    BCM_DERIVED_CONN_UNLOCK_BH();
+}
+
+static inline void bcm_nf_ct_derived_list_del(struct nf_conn *ct)
+{
+    BCM_DERIVED_CONN_LOCK_BH();
+    if(ct->master){
+        list_del(&ct->bcm_ext.derived_list);
+    }
+
+    BCM_DERIVED_CONN_UNLOCK_BH();
+}
+static inline void bcm_nf_ct_derived_conn_init(struct nf_conn *ct)
+{
+    INIT_LIST_HEAD(&ct->bcm_ext.derived_connections);
+    INIT_LIST_HEAD(&ct->bcm_ext.derived_list);
+    ct->bcm_ext.derived_timeout = 0;
+}
+#else
+
+static inline void bcm_nf_ct_derived_list_add(struct nf_conn *ct){};
+static inline void bcm_nf_ct_derived_list_del(struct nf_conn *ct){};
+static inline void bcm_nf_ct_derived_conn_init(struct nf_conn *ct){};
+#endif
+
+
+static inline unsigned long bcm_nf_ct_refresh(struct nf_conn *ct,
+                unsigned long extra_jiffies)
+{
+#if defined(CONFIG_BCM_NF_DERIVED_CONN)
+    /* when the derived timeout is set, always use it */
+    if(ct->bcm_ext.derived_timeout)
+        extra_jiffies = ct->bcm_ext.derived_timeout;
+#endif
+
+#if defined(CONFIG_BLOG)
+    if(ct->bcm_ext.extra_jiffies != extra_jiffies) {
+        ct->bcm_ext.extra_jiffies = extra_jiffies;
+        /* notify accelerator */
+        bcm_nf_blog_update_timeout(ct, extra_jiffies);
+    }
+#endif
+
+    return extra_jiffies;
+}
+
+static void bcm_conntrack_init_end(void)
+{
+#if IS_ENABLED(CONFIG_BCM_DPI)
+    dpi_conntrack_init();
+#endif
+}
+
+static void bcm_conntrack_cleanup_end(void)
+{
+#if IS_ENABLED(CONFIG_BCM_DPI)
+    dpi_conntrack_cleanup();
+#endif
+}
+
+static void bcm_nf_ct_alloc(struct nf_conn *ct, struct sk_buff *skb)
+{
+#if defined(CONFIG_BLOG)
+    bcm_nf_blog_ct_init(ct);
+#endif
+    /* REGARDLESS_DROP */
+    INIT_LIST_HEAD(&ct->bcm_ext.safe_list);
+
+    bcm_nf_ct_derived_conn_init(ct);
+    bcm_nf_conn_set_iq_prio(ct, skb);
+
+#if IS_ENABLED(CONFIG_BCM_DPI)
+    memset(&ct->bcm_ext.dpi, 0, sizeof(ct->bcm_ext.dpi));
+#define EG_PRIO_NORMAL 2 /* Taken from bcmdpi.h */
+    ct->bcm_ext.dpi.eg_prio = EG_PRIO_NORMAL;
+    if (skb && skb->dev && is_netdev_wan(skb->dev))
+        set_bit(DPI_CT_INIT_FROM_WAN_BIT, &ct->bcm_ext.dpi.flags);
+#endif
+
+#if IS_ENABLED(CONFIG_BCM_SGS)
+    memset(&ct->bcm_ext.sgs, 0, sizeof(ct->bcm_ext.sgs));
+#endif
+
+#if IS_ENABLED(CONFIG_BCM_NDI)
+    memset(&ct->bcm_ext.ndi, 0, sizeof(ct->bcm_ext.ndi));
+#endif + +#if IS_ENABLED(CONFIG_NF_DYNDSCP) + /* initialize dynamic dscp inheritance fields */ + ct->bcm_ext.dyndscp.status = 0; + ct->bcm_ext.dyndscp.dscp[0] = 0; + ct->bcm_ext.dyndscp.dscp[1] = 0; +#endif + +#if IS_ENABLED(CONFIG_NF_MSCS) + ct->bcm_ext.mscs.init = 0; +#endif +} + +static void bcm_nf_ct_init(struct nf_conn *ct) +{ + bcm_nf_ct_derived_list_add(ct); +} + +static void bcm_nf_ct_delete_from_lists(struct nf_conn *ct) +{ +#if IS_ENABLED(CONFIG_BCM_DPI) + dpi_nf_ct_delete_from_lists(ct); +#endif +#if IS_ENABLED(CONFIG_BCM_SGS) + sgs_nf_ct_delete_from_lists(ct); +#endif +} + +static void bcm_nf_ct_destroy(struct nf_conn *ct) +{ +#if defined(CONFIG_BLOG) + bcm_nf_blog_destroy_conntrack(ct); +#endif + bcm_nf_iqos_destroy_conntrack(ct); + bcm_nf_ct_derived_list_del(ct); +} +#endif /* _BCM_NF_CONNTRACK_H */ diff --git a/include/linux/bcm_nf_regardlessdrop.h b/include/linux/bcm_nf_regardlessdrop.h new file mode 100644 index 0000000000000000000000000000000000000000..72c14e720c19340aca8e5bd48e0efd78e6466899 --- /dev/null +++ b/include/linux/bcm_nf_regardlessdrop.h @@ -0,0 +1,292 @@ +/* +* <:copyright-BRCM:2012:DUAL/GPL:standard +* +* Copyright (c) 2012 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. 
+*
+:>
+*/
+
+#ifndef _BCM_NF_REGARDLESSDROP_
+#define _BCM_NF_REGARDLESSDROP_
+
+#define NF_CT_SAFE_LISTS_MAX 6
+/* regardless drop of a connection when the conntrack table is full */
+struct safe_list {
+    spinlock_t lock;
+    struct list_head low;          /* low priority linux only */
+    struct list_head low_sw_accel; /* low priority sw accelerator */
+    struct list_head low_hw_accel; /* low priority hw accelerator */
+    struct list_head hi;           /* high priority linux only */
+    struct list_head hi_sw_accel;  /* high priority sw accelerator */
+    struct list_head hi_hw_accel;  /* high priority hw accelerator */
+    struct list_head *drop_list_order[NF_CT_SAFE_LISTS_MAX]; /* order in which lists are checked to drop a connection */
+};
+
+struct safe_list ct_safe_lists = {
+    .lock = __SPIN_LOCK_UNLOCKED(ct_safe_lists.lock),
+    .low = LIST_HEAD_INIT(ct_safe_lists.low),
+    .low_sw_accel = LIST_HEAD_INIT(ct_safe_lists.low_sw_accel),
+    .low_hw_accel = LIST_HEAD_INIT(ct_safe_lists.low_hw_accel),
+    .hi = LIST_HEAD_INIT(ct_safe_lists.hi),
+    .hi_sw_accel = LIST_HEAD_INIT(ct_safe_lists.hi_sw_accel),
+    .hi_hw_accel = LIST_HEAD_INIT(ct_safe_lists.hi_hw_accel),
+#if CONFIG_BCM_NETFILTER_REGARDLESS_DROP_ORDER == 1
+    /* when trying to find a drop candidate, search the safe lists in the order
+     * non-accelerated --> sw_accelerated --> hw_accelerated;
+     * this is the default policy
+     */
+    .drop_list_order = {&ct_safe_lists.low, &ct_safe_lists.hi,
+            &ct_safe_lists.low_sw_accel, &ct_safe_lists.hi_sw_accel,
+            &ct_safe_lists.low_hw_accel, &ct_safe_lists.hi_hw_accel}
+#elif CONFIG_BCM_NETFILTER_REGARDLESS_DROP_ORDER == 2
+    .drop_list_order = {&ct_safe_lists.low, &ct_safe_lists.low_sw_accel,
+            &ct_safe_lists.hi, &ct_safe_lists.hi_sw_accel,
+            &ct_safe_lists.low_hw_accel, &ct_safe_lists.hi_hw_accel}
+#elif CONFIG_BCM_NETFILTER_REGARDLESS_DROP_ORDER == 3
+    .drop_list_order = {&ct_safe_lists.low, &ct_safe_lists.low_sw_accel,
+            &ct_safe_lists.low_hw_accel, &ct_safe_lists.hi,
+            &ct_safe_lists.hi_sw_accel, &ct_safe_lists.hi_hw_accel}
+#else
+#error "Netfilter Regardless Drop Order is not set"
+#endif
+};
+
+
+static inline void ct_set_curr_safe_list(struct nf_conn *ct)
+{
+    /* first try to move to the HW list */
+    if(ct->bcm_ext.hw_accel_flows) {
+#if IS_ENABLED(CONFIG_BCM_INGQOS)
+        if (ct->bcm_ext.iq_prio == IQOS_PRIO_HIGH)
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.hi_hw_accel;
+        else
+#endif
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.low_hw_accel;
+    }else if(ct->bcm_ext.sw_accel_flows) {
+#if IS_ENABLED(CONFIG_BCM_INGQOS)
+        if (ct->bcm_ext.iq_prio == IQOS_PRIO_HIGH)
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.hi_sw_accel;
+        else
+#endif
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.low_sw_accel;
+    }else{
+        /* move to the SW-only list */
+#if IS_ENABLED(CONFIG_BCM_INGQOS)
+        if (ct->bcm_ext.iq_prio == IQOS_PRIO_HIGH)
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.hi;
+        else
+#endif
+            ct->bcm_ext.curr_safe_list = &ct_safe_lists.low;
+    }
+}
+
+static inline void ct_safe_list_add_tail(struct nf_conn *ct)
+{
+    spin_lock_bh(&ct_safe_lists.lock);
+
+    ct->bcm_ext.hw_accel_flows = 0;
+    ct->bcm_ext.sw_accel_flows = 0;
+
+    ct_set_curr_safe_list(ct);
+
+    list_add_tail(&ct->bcm_ext.safe_list, ct->bcm_ext.curr_safe_list);
+
+    spin_unlock_bh(&ct_safe_lists.lock);
+}
+
+static inline void ct_safe_list_move_tail(struct nf_conn *ct)
+{
+    spin_lock_bh(&ct_safe_lists.lock);
+
+    list_move_tail(&ct->bcm_ext.safe_list, ct->bcm_ext.curr_safe_list);
+
+    spin_unlock_bh(&ct_safe_lists.lock);
+}
+
+static inline void ct_safe_list_del(struct nf_conn *ct)
+{
spin_lock_bh(&ct_safe_lists.lock); + list_del(&ct->bcm_ext.safe_list); + spin_unlock_bh(&ct_safe_lists.lock); +} + +#if defined(CONFIG_BLOG) + +static inline void __ct_blog_flow_accel_activate_event(struct nf_conn *ct, + BlogFlowEventInfo_t *info) +{ + spin_lock_bh(&ct_safe_lists.lock); + /* ensure ct is not being deleted */ + if(likely(atomic_read(&ct->ct_general.use) >= 1)) + { + if(info->flow_event_type == FLOW_EVENT_TYPE_HW) + ct->bcm_ext.hw_accel_flows++; + else + ct->bcm_ext.sw_accel_flows++; + + ct_set_curr_safe_list(ct); + + list_move_tail(&ct->bcm_ext.safe_list, ct->bcm_ext.curr_safe_list); + } + spin_unlock_bh(&ct_safe_lists.lock); +} + +static inline void ct_blog_flow_activate_event(BlogFlowEventInfo_t *info) +{ + int idx; + + if ((info->flow_event_type == FLOW_EVENT_TYPE_FC) || + (info->flow_event_type == FLOW_EVENT_TYPE_HW)){ + + for (idx=0; idx < info->ct_count; idx++) + { + if(info->ct_p[idx] == NULL) + continue; + + __ct_blog_flow_accel_activate_event(info->ct_p[idx], info); + } + } +} + +static inline void __ct_blog_flow_accel_deactivate_event(struct nf_conn *ct, + BlogFlowEventInfo_t *info) +{ + spin_lock_bh(&ct_safe_lists.lock); + /* ensure ct is not being deleted */ + if(likely(atomic_read(&ct->ct_general.use) >= 1)){ + + if(info->flow_event_type == FLOW_EVENT_TYPE_HW) + ct->bcm_ext.hw_accel_flows--; + else + ct->bcm_ext.sw_accel_flows--; + + ct_set_curr_safe_list(ct); + + list_move_tail(&ct->bcm_ext.safe_list, ct->bcm_ext.curr_safe_list); + } + spin_unlock_bh(&ct_safe_lists.lock); +} + +static inline void ct_blog_flow_deactivate_event( BlogFlowEventInfo_t *info) +{ + int idx; + + if ((info->flow_event_type == FLOW_EVENT_TYPE_FC) || + (info->flow_event_type == FLOW_EVENT_TYPE_HW)){ + + for (idx=0; idx < info->ct_count; idx++) + { + if(info->ct_p[idx] == NULL) + continue; + + __ct_blog_flow_accel_deactivate_event(info->ct_p[idx], info); + } + } +} + +static int ct_blog_flowevent_notify(struct notifier_block * nb, + unsigned long event, void *info) +{ + switch(event){ + + case FLOW_EVENT_ACTIVATE: + ct_blog_flow_activate_event(info); + break; + + case FLOW_EVENT_DEACTIVATE: + ct_blog_flow_deactivate_event(info); + break; + + default: + break; + + } + return NOTIFY_OK; +} + +#endif /*CONFIG_BLOG */ + + +/*caller must have safe_list lock acquired */ +static inline struct nf_conn* __ct_find_drop_candidate(struct net *net, struct list_head *list ) +{ + struct list_head *tmp; + struct nf_conn *ct, *ct_candidate=NULL; + + if (!list_empty(list)) { + list_for_each(tmp, list) { + ct = container_of(tmp, struct nf_conn, bcm_ext.safe_list); + + /*TODO IPS_OFFLOAD_BIT, should we check all lists or only sw only lists*/ + + + if (likely(!test_bit(IPS_OFFLOAD_BIT, &ct->status) && nf_ct_is_confirmed(ct) + && !nf_ct_is_dying(ct) && net_eq(nf_ct_net(ct), net) + && atomic_inc_not_zero(&ct->ct_general.use))) { + + /* we dont need to check if connection is expired or not to drop it, + * or trrigger gc, as we are already using LRU + */ + ct_candidate = ct; + /* move to the tail of the list while it's being deleted*/ + list_move_tail(&ct_candidate->bcm_ext.safe_list, list); + break; + } + } + } + /* refcount of ct_candidate is incremented in this fucntion, + * it's callers responsibility to decrement it + */ + return ct_candidate; +} + +/* Choose a LRU connection based on the configured drop order policy */ +static int bcm_regardless_drop(struct net *net) +{ + struct nf_conn *ct_candidate = NULL; + int i,dropped = 0; + + spin_lock_bh(&ct_safe_lists.lock); + + for( i=0; i < NF_CT_SAFE_LISTS_MAX; 
i++ ) { + ct_candidate = __ct_find_drop_candidate(net, ct_safe_lists.drop_list_order[i]); + if(ct_candidate) + break; + } + + spin_unlock_bh(&ct_safe_lists.lock); + + if (unlikely(ct_candidate == NULL)) + return dropped; + + if(nf_ct_delete(ct_candidate, 0, 0) == true){ + dropped = 1; + NF_CT_STAT_INC_ATOMIC(net, early_drop); + } + + nf_ct_put(ct_candidate); + return dropped; +} + +#endif /* _BCM_NF_REGARDLESSDROP_ */ diff --git a/include/linux/bcm_nfconn_ext.h b/include/linux/bcm_nfconn_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..0d961cd9a729060778d931572924f69d8eea9973 --- /dev/null +++ b/include/linux/bcm_nfconn_ext.h @@ -0,0 +1,107 @@ +#ifndef _BCM_NFCONN_EXT_H +#define _BCM_NFCONN_EXT_H + +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
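The selection logic above walks ct_safe_lists.drop_list_order[] and takes the head of the first non-empty list, so the least valuable class of flows (non-accelerated before SW-accelerated before HW-accelerated, in the default policy) is sacrificed first; within a class the head is the least recently used entry, because active flows are re-queued at the tail. A minimal user-space sketch of that policy follows. It is not part of the patch and uses invented demo types in place of struct nf_conn and list_head:

#include <stdio.h>

/* Illustrative only: each "class" is a FIFO of flow ids; index 0 is the
 * least recently used flow because refreshed flows are re-queued at the tail. */
enum { CLS_LOW, CLS_HI, CLS_LOW_SW, CLS_HI_SW, CLS_LOW_HW, CLS_HI_HW, CLS_MAX };

#define PER_CLASS 8

struct flow_class {
    int ids[PER_CLASS];
    int count;
};

/* Default policy: drop non-accelerated flows before SW-accelerated ones,
 * and SW-accelerated before HW-accelerated (mirrors drop order 1 above). */
static const int drop_order[CLS_MAX] = {
    CLS_LOW, CLS_HI, CLS_LOW_SW, CLS_HI_SW, CLS_LOW_HW, CLS_HI_HW
};

/* Return the id of the victim flow, or -1 if every class is empty. */
static int pick_victim(struct flow_class cls[CLS_MAX])
{
    for (int i = 0; i < CLS_MAX; i++) {
        struct flow_class *c = &cls[drop_order[i]];
        if (c->count == 0)
            continue;
        int victim = c->ids[0];                 /* LRU entry of the class */
        for (int j = 1; j < c->count; j++)      /* dequeue it */
            c->ids[j - 1] = c->ids[j];
        c->count--;
        return victim;
    }
    return -1;
}

int main(void)
{
    struct flow_class cls[CLS_MAX] = { 0 };

    /* Two HW-accelerated flows and one plain flow are active. */
    cls[CLS_LOW_HW].ids[cls[CLS_LOW_HW].count++] = 101;
    cls[CLS_LOW_HW].ids[cls[CLS_LOW_HW].count++] = 102;
    cls[CLS_LOW].ids[cls[CLS_LOW].count++] = 7;

    /* The plain flow is chosen first even though it is newer. */
    printf("victim: %d\n", pick_victim(cls));   /* prints 7   */
    printf("victim: %d\n", pick_victim(cls));   /* prints 101 */
    return 0;
}

The kernel code achieves the same effect with intrusive list_head queues and list_move_tail(); the arrays here only keep the sketch self-contained.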
+ +:> +*/ + +#include <linux/dpi.h> +#include <linux/ndi.h> +#include <linux/sgs.h> + +struct bcm_nf_conn_ext { +#if defined(CONFIG_BLOG) + unsigned int blog_key[2]; /* Associating 2=IP_CT_DIR_MAX blogged flows */ + unsigned long extra_jiffies; /* connection timeout value */ + uint8_t blog_learned; /* indicates CT has completed blog/fc learning atleast once */ + uint8_t blog_unused[3]; +#endif + +#if defined(CONFIG_BCM_KF_NF_REGARDLESS_DROP) + uint32_t hw_accel_flows; /* Number of HW accelerated flows, need 32 bit for tunnel connection */ + uint32_t sw_accel_flows; /* Number of SW accelerated flows, need 32 bit for tunnel connection */ + struct list_head safe_list; /* regardless drop of connections */ + struct list_head *curr_safe_list; /* current safe_list for this connection*/ +#endif + +#if defined(CONFIG_BCM_NF_DERIVED_CONN) + struct list_head derived_connections; /* Used by master connection */ + struct list_head derived_list; /* Used by child connection */ + unsigned int derived_timeout; /* if non-zero override linux timeout */ +#endif + +#if IS_ENABLED(CONFIG_BCM_INGQOS) + uint8_t iq_prio; /* Ingress QoS Prio */ + uint8_t unused0; + uint16_t unused1; +#endif + +#if IS_ENABLED(CONFIG_NF_DYNDSCP) + struct nf_tos_inheritance { + u_int16_t status; + u_int8_t dscp[2]; /* IP_CT_DIR_MAX */ + } dyndscp; +#endif + +#if IS_ENABLED(CONFIG_NF_MSCS) + struct nf_mscs { + u_int8_t init; + u_int8_t priority; + u_int8_t dir; + u_int8_t unused; + } mscs; +#endif + +#if IS_ENABLED(CONFIG_BCM_NDI) + struct ndi_info ndi; +#endif + +#if IS_ENABLED(CONFIG_BCM_DPI) + struct dpi_info dpi; +#endif + +#if IS_ENABLED(CONFIG_BCM_SGS) + struct sgs_ct_info sgs; +#endif +}; + +#define bcm_nfconn_ext_field_get(ct, f) (ct->bcm_ext.f) +#define bcm_nfconn_ext_field_get_ptr(ct, f) (&ct->bcm_ext.f) +#define bcm_nfconn_ext_field_set(ct, f, val) (ct->bcm_ext.f = val) + +#if defined(CONFIG_BCM_NF_DERIVED_CONN) +extern spinlock_t bcm_derived_conn_lock; +#define BCM_DERIVED_CONN_LOCK_BH() spin_lock_bh(&bcm_derived_conn_lock) +#define BCM_DERIVED_CONN_UNLOCK_BH() spin_unlock_bh(&bcm_derived_conn_lock) +#endif + + +int ct_show_bcm_ext(struct seq_file *s, const struct nf_conn *ct); + + +#endif /*_BCM_NFCONN_EXT_H */ diff --git a/include/linux/bcm_skb_defines.h b/include/linux/bcm_skb_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..4acb80265551ba45564bb12f674bcc5aaabb0316 --- /dev/null +++ b/include/linux/bcm_skb_defines.h @@ -0,0 +1,149 @@ +/* +* <:copyright-BRCM:2014:DUAL/GPL:standard +* +* Copyright (c) 2014 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. 
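Stepping back to the conntrack extension just defined: the bcm_nfconn_ext_field_get/set macros are plain field accessors over ct->bcm_ext, so callers never hard-code the extension layout. A small stand-alone sketch of the calling pattern, with a stripped-down stand-in for struct nf_conn (the demo struct and the values are illustrative, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* Stand-in conntrack object: only the bcm_ext member matters to the macros. */
struct bcm_nf_conn_ext_demo {
    uint8_t iq_prio;             /* Ingress QoS priority (CONFIG_BCM_INGQOS) */
    unsigned long extra_jiffies; /* connection timeout value (CONFIG_BLOG)   */
};

struct nf_conn_demo {
    struct bcm_nf_conn_ext_demo bcm_ext;
};

/* Same shape as the accessors in bcm_nfconn_ext.h above. */
#define bcm_nfconn_ext_field_get(ct, f)      ((ct)->bcm_ext.f)
#define bcm_nfconn_ext_field_set(ct, f, val) ((ct)->bcm_ext.f = (val))

int main(void)
{
    struct nf_conn_demo ct = { { 0, 0 } };

    bcm_nfconn_ext_field_set(&ct, iq_prio, 1);
    bcm_nfconn_ext_field_set(&ct, extra_jiffies, 7500UL); /* e.g. ~30 s if HZ were 250 (demo value only) */

    printf("iq_prio=%u timeout=%lu jiffies\n",
           bcm_nfconn_ext_field_get(&ct, iq_prio),
           bcm_nfconn_ext_field_get(&ct, extra_jiffies));
    return 0;
}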
+* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. +* +* :> +*/ + +#ifndef _BCM_SKB_DEFINES_ +#define _BCM_SKB_DEFINES_ + +/* queue = mark[4:0] */ +#define SKBMARK_Q_S 0 +#define SKBMARK_Q_M (0x1F << SKBMARK_Q_S) +#define SKBMARK_GET_Q(MARK) ((MARK & SKBMARK_Q_M) >> SKBMARK_Q_S) +#define SKBMARK_SET_Q(MARK, Q) ((MARK & ~SKBMARK_Q_M) | (Q << SKBMARK_Q_S)) +/* traffic_class_id = mark[10:5] */ +#define SKBMARK_TC_ID_S 5 +#define SKBMARK_TC_ID_M (0x3F << SKBMARK_TC_ID_S) +#define SKBMARK_GET_TC_ID(MARK) ((MARK & SKBMARK_TC_ID_M) >> SKBMARK_TC_ID_S) +#define SKBMARK_SET_TC_ID(MARK, TC) \ + ((MARK & ~SKBMARK_TC_ID_M) | (TC << SKBMARK_TC_ID_S)) +/* flow_id = mark[18:11] */ +#define SKBMARK_FLOW_ID_S 11 +#define SKBMARK_FLOW_ID_M (0xFF << SKBMARK_FLOW_ID_S) +#define SKBMARK_GET_FLOW_ID(MARK) \ + ((MARK & SKBMARK_FLOW_ID_M) >> SKBMARK_FLOW_ID_S) +#define SKBMARK_SET_FLOW_ID(MARK, FLOW) \ + ((MARK & ~SKBMARK_FLOW_ID_M) | (FLOW << SKBMARK_FLOW_ID_S)) +/* iq_prio = mark[19]; for Ingress QoS used when TX is WLAN */ +#define SKBMARK_IQPRIO_MARK_S 19 +#define SKBMARK_IQPRIO_MARK_M (0x01 << SKBMARK_IQPRIO_MARK_S) +#define SKBMARK_GET_IQPRIO_MARK(MARK) \ + ((MARK & SKBMARK_IQPRIO_MARK_M) >> SKBMARK_IQPRIO_MARK_S) +#define SKBMARK_SET_IQPRIO_MARK(MARK, IQPRIO_MARK) \ + ((MARK & ~SKBMARK_IQPRIO_MARK_M) | (IQPRIO_MARK << SKBMARK_IQPRIO_MARK_S)) +/* port = mark[26:20]; for enet driver of gpon port, this is gem_id */ +#define SKBMARK_PORT_S 20 +#define SKBMARK_PORT_M (0x7F << SKBMARK_PORT_S) +#define SKBMARK_GET_PORT(MARK) \ + ((MARK & SKBMARK_PORT_M) >> SKBMARK_PORT_S) +#define SKBMARK_SET_PORT(MARK, PORT) \ + ((MARK & ~SKBMARK_PORT_M) | (PORT << SKBMARK_PORT_S)) + +/* iffwan_mark = mark[27] -- BRCM defined-- */ +#define SKBMARK_IFFWAN_MARK_S 27 +#define SKBMARK_IFFWAN_MARK_M (0x01 << SKBMARK_IFFWAN_MARK_S) +#define SKBMARK_GET_IFFWAN_MARK(MARK) \ + ((MARK & SKBMARK_IFFWAN_MARK_M) >> SKBMARK_IFFWAN_MARK_S) +#define SKBMARK_SET_IFFWAN_MARK(MARK, IFFWAN_MARK) \ + ((MARK & ~SKBMARK_IFFWAN_MARK_M) | (IFFWAN_MARK << SKBMARK_IFFWAN_MARK_S)) + +/* ipsec_mark = mark[28] */ +#define SKBMARK_IPSEC_MARK_S 28 +#define SKBMARK_IPSEC_MARK_M (0x01 << SKBMARK_IPSEC_MARK_S) +#define SKBMARK_GET_IPSEC_MARK(MARK) \ + ((MARK & SKBMARK_IPSEC_MARK_M) >> SKBMARK_IPSEC_MARK_S) +#define SKBMARK_SET_IPSEC_MARK(MARK, IPSEC_MARK) \ + ((MARK & ~SKBMARK_IPSEC_MARK_M) | (IPSEC_MARK << SKBMARK_IPSEC_MARK_S)) +/* policy_routing = mark[31:29] */ +#define SKBMARK_POLICY_RTNG_S 29 +#define SKBMARK_POLICY_RTNG_M (0x07 << SKBMARK_POLICY_RTNG_S) +#define SKBMARK_GET_POLICY_RTNG(MARK) \ + ((MARK & SKBMARK_POLICY_RTNG_M) >> SKBMARK_POLICY_RTNG_S) +#define SKBMARK_SET_POLICY_RTNG(MARK, POLICY) \ + ((MARK & ~SKBMARK_POLICY_RTNG_M) | (POLICY << SKBMARK_POLICY_RTNG_S)) + +/* dpi_queue = mark[31:27] */ +/* Overlaps with SKBMARK_IFFWAN, SKBMARK_IPSEC, and SKBMARK_POLICY_RTNG */ +#define SKBMARK_DPIQ_MARK_S 27 +#define SKBMARK_DPIQ_MARK_M (0x1F << SKBMARK_DPIQ_MARK_S) +#define SKBMARK_GET_DPIQ_MARK(MARK) \ + ((MARK & SKBMARK_DPIQ_MARK_M) >> SKBMARK_DPIQ_MARK_S) +#define SKBMARK_SET_DPIQ_MARK(MARK, DPIQ_MARK) \ + ((MARK & ~SKBMARK_DPIQ_MARK_M) | (DPIQ_MARK << SKBMARK_DPIQ_MARK_S)) + +/* The enet driver subdivides queue field (mark[4:0]) in the skb->mark into + priority and channel */ +/* priority = queue[2:0] (=>mark[2:0]) */ +#define SKBMARK_Q_PRIO_S (SKBMARK_Q_S) +#define 
SKBMARK_Q_PRIO_M (0x07 << SKBMARK_Q_PRIO_S) +#define SKBMARK_GET_Q_PRIO(MARK) \ + ((MARK & SKBMARK_Q_PRIO_M) >> SKBMARK_Q_PRIO_S) +#define SKBMARK_SET_Q_PRIO(MARK, Q) \ + ((MARK & ~SKBMARK_Q_PRIO_M) | (Q << SKBMARK_Q_PRIO_S)) +/* channel = queue[4:3] (=>mark[4:3]) */ +#define SKBMARK_Q_CH_S (SKBMARK_Q_S + 3) +#define SKBMARK_Q_CH_M (0x03 << SKBMARK_Q_CH_S) +#define SKBMARK_GET_Q_CHANNEL(MARK) ((MARK & SKBMARK_Q_CH_M) >> SKBMARK_Q_CH_S) +#define SKBMARK_SET_Q_CHANNEL(MARK, CH) \ + ((MARK & ~SKBMARK_Q_CH_M) | (CH << SKBMARK_Q_CH_S)) +/* service_queue_enable_mark = mark[4] -- DS BRCM defined-- */ +#define SKBMARK_SQ_MARK_S 4 +#define SKBMARK_SQ_MARK_M (0x01 << SKBMARK_SQ_MARK_S) +#define SKBMARK_GET_SQ_MARK(MARK) \ + ((MARK & SKBMARK_SQ_MARK_M) >> SKBMARK_SQ_MARK_S) +#define SKBMARK_SET_SQ_MARK(MARK, SQ_MARK) \ + ((MARK & ~SKBMARK_SQ_MARK_M) | (SQ_MARK << SKBMARK_SQ_MARK_S)) + +#define SKBMARK_ALL_GEM_PORT (0xFF) + +#define WLAN_PRIORITY_BIT_POS (1) +#define WLAN_PRIORITY_MASK (0x7 << WLAN_PRIORITY_BIT_POS) +#define GET_WLAN_PRIORITY(VAL) ((VAL & WLAN_PRIORITY_MASK) >> WLAN_PRIORITY_BIT_POS) +#define SET_WLAN_PRIORITY(ENCODEVAL, PRIO) ((ENCODEVAL & (~WLAN_PRIORITY_MASK)) | (PRIO << WLAN_PRIORITY_BIT_POS)) + +#define WLAN_IQPRIO_BIT_POS (0) +#define WLAN_IQPRIO_MASK (0x1 << WLAN_IQPRIO_BIT_POS) +#define GET_WLAN_IQPRIO(VAL) ((VAL & WLAN_IQPRIO_MASK) >> WLAN_IQPRIO_BIT_POS) +#define SET_WLAN_IQPRIO(ENCODEVAL, IQPRIO) ((ENCODEVAL & (~WLAN_IQPRIO_MASK)) | (IQPRIO << WLAN_IQPRIO_BIT_POS)) + +// LINUX_PRIORITY_BIT_POS_IN_MARK macro must be in sync with PRIO_LOC_NFMARK (=>mark[2:0]) +#define PRIO_LOC_NFMARK SKBMARK_Q_PRIO_S +#define LINUX_PRIORITY_BIT_POS_IN_MARK SKBMARK_Q_PRIO_S +#define LINUX_PRIORITY_BIT_MASK SKBMARK_Q_PRIO_M +#define LINUX_GET_PRIO_MARK(MARK) ((MARK & LINUX_PRIORITY_BIT_MASK) >> LINUX_PRIORITY_BIT_POS_IN_MARK) +#define LINUX_SET_PRIO_MARK(MARK, PRIO) ((MARK & (~LINUX_PRIORITY_BIT_MASK)) | (PRIO << LINUX_PRIORITY_BIT_POS_IN_MARK)) + +//Encode 3 bits of priority and 1 bit of IQPRIO into 4 bits as follows (3bitPrio:1bitIQPrio) +#define ENCODE_WLAN_PRIORITY_MARK(u8EncodeVal, u32Mark) \ + (u8EncodeVal = SET_WLAN_PRIORITY(u8EncodeVal, LINUX_GET_PRIO_MARK(u32Mark)) | SET_WLAN_IQPRIO(u8EncodeVal, SKBMARK_GET_IQPRIO_MARK(u32Mark))) +#define DECODE_WLAN_PRIORITY_MARK(encodedVal, u32Mark) \ + do { (u32Mark) = LINUX_SET_PRIO_MARK(u32Mark, GET_WLAN_PRIORITY(encodedVal)); \ + (u32Mark) = SKBMARK_SET_IQPRIO_MARK(u32Mark, GET_WLAN_IQPRIO(encodedVal)); \ + } while(0) + + +#endif /* _BCM_SKB_DEFINES_ */ diff --git a/include/linux/bcm_skbuff.h b/include/linux/bcm_skbuff.h new file mode 100644 index 0000000000000000000000000000000000000000..2f35fe4b2097eac79594016b222ab55787eca89b --- /dev/null +++ b/include/linux/bcm_skbuff.h @@ -0,0 +1,580 @@ +#ifndef _BCM_SKBUFF_H +#define _BCM_SKBUFF_H + +#include <linux/net.h> + +#define CONFIG_SKBSHINFO_HAS_DIRTYP 1 + +struct blog_t; /* defined(CONFIG_BLOG) */ +struct net_device; + +#ifndef NULL_STMT +#define NULL_STMT do { /* NULL BODY */ } while (0) +#endif + +typedef void (*RecycleFuncP)(void *nbuff_p, unsigned long context, uint32_t flags); + +#define SKB_DATA_RECYCLE (1 << 0) +#define SKB_DATA_NO_RECYCLE (~SKB_DATA_RECYCLE) +#define SKB_RECYCLE (1 << 1) +#define SKB_NO_RECYCLE (~SKB_RECYCLE) +#define SKB_RECYCLE_NOFREE (1 << 2) /* DO NOT USE */ +#define SKB_RECYCLE_FPM_DATA (1 << 3) /* Data buffer from Runner FPM pool */ +#define SKB_RNR_FLOOD (1 << 4) /* Data buffer flooded by Runner to flooding-capable ports */ +/* Indicates whether a sk_buf or a data 
buffer is in BPM pristine state */ +#define SKB_BPM_PRISTINE (1 << 5) +/* UDP Speed Test flags */ +#define SKB_RNR_UDPSPDT_BASIC (1 << 6) +#define SKB_RNR_UDPSPDT_IPERF3 (1 << 7) + +/* flags to support HW recycling of skb->data */ +#define SKB_DATA_HW_RECYCLE_CAPABLE (1 <<8) +#define SKB_DATA_HW_RECYCLE_DONE (1 <<9) + +#define SKB_HW_RECYCLE_CAPABLE (1 <<10) /* skb from HW pool */ + + + +#define SKB_RNR_FLAGS (SKB_RNR_FLOOD | SKB_RNR_UDPSPDT_BASIC | SKB_RNR_UDPSPDT_IPERF3) +#define SKB_RNR_UDPSPDT_FLAGS (SKB_RNR_UDPSPDT_BASIC | SKB_RNR_UDPSPDT_IPERF3) + +#define SKB_BPM_TAINTED(skb) \ +({ \ + ((struct sk_buff *)skb)->recycle_flags &= ~SKB_BPM_PRISTINE; \ + (skb_shinfo(skb))->dirty_p = NULL; \ +}) + + +#define SKB_DATA_PRISTINE(skb) \ +({ \ + (skb_shinfo(skb))->dirty_p = ((struct sk_buff *)skb)->head; \ +}) + +struct fkbuff; + +extern void skb_frag_xmit4(struct sk_buff *origskb, struct net_device *txdev, + uint32_t is_pppoe, uint32_t minMtu, void *ip_p); +extern void skb_frag_xmit6(struct sk_buff *origskb, struct net_device *txdev, + uint32_t is_pppoe, uint32_t minMtu, void *ip_p); +extern struct sk_buff *skb_xlate(struct fkbuff *fkb_p); +extern struct sk_buff *skb_xlate_dp(struct fkbuff *fkb_p, uint8_t *dirty_p); +extern int skb_avail_headroom(const struct sk_buff *skb); +extern void skb_bpm_tainted(struct sk_buff *skb); + +extern int skb_avail_headroom(const struct sk_buff *skb); + +extern void skb_cb_zero(struct sk_buff *skb); + +extern size_t skb_size(void); +extern size_t skb_aligned_size(void); +extern int skb_layout_test(int head_offset, int tail_offset, int end_offset); + +#if defined(CONFIG_BCM_SW_GSO) +extern __be16 bcm_sw_gso_skb_network_protocol(struct sk_buff *skb, int offset, __be16 type); +#else +#define bcm_sw_gso_skb_network_protocol(skb, offset, type) (type) +#endif + +/** + * skb_headerinit - initialize a socket buffer header + * @headroom: reserved headroom size + * @datalen: data buffer size, data buffer is allocated by caller + * @skb: skb allocated by caller + * @data: data buffer allocated by caller + * @recycle_hook: callback function to free data buffer and skb + * @recycle_context: context value passed to recycle_hook, param1 + * @blog_p: pass a blog to a skb for logging + * + * Initializes the socket buffer and assigns the data buffer to it. + * Both the sk_buff and the pointed data buffer are pre-allocated. + * + */ +void skb_headerinit(unsigned int headroom, unsigned int datalen, + struct sk_buff *skb, unsigned char *data, + RecycleFuncP recycle_hook, unsigned long recycle_context, + struct blog_t *blog_p); + +void skb_preinit_headerinit(unsigned int headroom, unsigned int datalen, + struct sk_buff *skb, unsigned char *data, + RecycleFuncP recycle_hook, unsigned long recycle_context, + struct blog_t *blog_p); + +/* TODO avoid this detail here, nbuff/skbuff should just define this as + * uint32_t and wl driver should cast this to appropriate structure + */ +typedef union wlFlowInf { + uint32_t u32; + union { + union { + struct { + /* Start - Shared fields between ucast and mcast */ + uint32_t is_ucast:1; + /* wl_prio is 4 bits for nic and 3 bits for dhd. 
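The SKBMARK_* layout in bcm_skb_defines.h above is easiest to verify with a worked example. The three macro groups below are copied from that header; the surrounding program is illustrative only and shows how the per-field SET macros compose into a single skb->mark value:

#include <stdint.h>
#include <stdio.h>

/* Reproduced from bcm_skb_defines.h: queue lives in mark[4:0],
 * traffic class in mark[10:5], iq_prio in mark[19]. */
#define SKBMARK_Q_S          0
#define SKBMARK_Q_M          (0x1F << SKBMARK_Q_S)
#define SKBMARK_GET_Q(MARK)            ((MARK & SKBMARK_Q_M) >> SKBMARK_Q_S)
#define SKBMARK_SET_Q(MARK, Q)         ((MARK & ~SKBMARK_Q_M) | (Q << SKBMARK_Q_S))
#define SKBMARK_TC_ID_S      5
#define SKBMARK_TC_ID_M      (0x3F << SKBMARK_TC_ID_S)
#define SKBMARK_GET_TC_ID(MARK)        ((MARK & SKBMARK_TC_ID_M) >> SKBMARK_TC_ID_S)
#define SKBMARK_SET_TC_ID(MARK, TC)    ((MARK & ~SKBMARK_TC_ID_M) | (TC << SKBMARK_TC_ID_S))
#define SKBMARK_IQPRIO_MARK_S 19
#define SKBMARK_IQPRIO_MARK_M (0x01 << SKBMARK_IQPRIO_MARK_S)
#define SKBMARK_SET_IQPRIO_MARK(MARK, P) \
    ((MARK & ~SKBMARK_IQPRIO_MARK_M) | (P << SKBMARK_IQPRIO_MARK_S))

int main(void)
{
    uint32_t mark = 0;

    /* Each SET macro returns a new mark value; it does not modify in place. */
    mark = SKBMARK_SET_Q(mark, 3);           /* egress queue 3          */
    mark = SKBMARK_SET_TC_ID(mark, 17);      /* traffic class 17        */
    mark = SKBMARK_SET_IQPRIO_MARK(mark, 1); /* high ingress-QoS prio   */

    printf("mark=0x%08x q=%u tc=%u\n",
           mark, SKBMARK_GET_Q(mark), SKBMARK_GET_TC_ID(mark));
    /* mark = (1<<19) | (17<<5) | 3 = 0x00080223 */
    return 0;
}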
Plan is + * to make NIC as 3 bits after more analysis */ + uint32_t wl_prio:4; + /* End - Shared fields between ucast and mcast */ + uint32_t nic_reserved1:11; + uint32_t wl_chainidx:16; + }; + struct { + uint32_t overlayed_field:16; + uint32_t ssid_dst:16; /* For bridged traffic we don't have chainidx (0xFE) */ + }; + } nic; + + struct { + /* Start - Shared fields between ucast and mcast */ + uint32_t is_ucast:1; + uint32_t wl_prio:4; + /* End - Shared fields between ucast and mcast */ + /* Start - Shared fields between dhd ucast and dhd mcast */ + uint32_t flowring_idx:10; + /* End - Shared fields between dhd ucast and dhd mcast */ + uint32_t dhd_reserved:13; + uint32_t ssid:4; + } dhd; + } ucast; + struct { + /* Start - Shared fields between ucast and mcast */ + /* for multicast, WFD does not need to populate this flowring_idx, it is used internally by dhd driver */ + uint32_t is_ucast:1; + uint32_t wl_prio:4; + /* End - Shared fields between ucast and mcast */ + /* Start - Shared fields between dhd ucast and dhd mcast */ + uint32_t flowring_idx:10; + /* End - Shared fields between dhd ucast and dhd mcast */ + uint32_t mcast_reserved:1; + uint32_t ssid_vector:16; + } mcast; + + struct { + /* Start - Shared fields b/w ucast, mcast & pktfwd */ + uint32_t is_ucast : 1; /* Start - Shared fields b/w ucast, mcast */ + uint32_t wl_prio : 4; /* packet priority */ + /* End - Shared fields between ucast, mcast & pktfwd */ + uint32_t pktfwd_reserved : 7; + uint32_t ssid : 4; + uint32_t pktfwd_key : 16; /* pktfwd_key_t : 2b domain, 2b incarn, 12b index */ + } pktfwd; + + struct { + /* Start - Shared fields b/w ucast, mcast, pktfwd & awl */ + uint32_t is_ucast : 1; /* is unicast packet */ + uint32_t wl_prio : 4; /* packet priority */ + /* End - Shared fields between ucast, mcast, pktfwd & awl */ + uint32_t awl_reserved : 21; /* Archer WLAN Reserved */ + uint32_t radio : 2; /* Radio Index */ + uint32_t ifidx : 4; /* Interface Index */ + } awl; +} wlFlowInf_t; + +struct wlan_ext { + + union { + __u32 wl_cb[6]; + struct { + /* pktc_cb should hold space for void* and unsigned int */ + unsigned char pktc_cb[16]; + __u16 pktc_flags; /* wl_flags */ + __u16 dma_index; /* used by HND router for NIC Bulk Tx */ + __u8 wl_flag1; /* used for blog handle, only need one bit for now */ + __u8 wl_rsvd; + __u16 wl_flowid; /* cfp flowid */ + }; + } __aligned(8); +}; + +#define SKB_VLAN_MAX_TAGS 4 + +struct vlan_ext { + union { + struct { + __u32 reserved:31; + __u32 restore_rx_vlan:1; /* Restore Rx VLAN at xmit. 
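The wlFlowInf_t union above overlays several driver-specific views (nic, dhd, mcast, pktfwd, awl) on a single 32-bit word, with is_ucast and wl_prio occupying the same leading bits in every view. A trimmed stand-alone copy showing the idea; bit-field placement is compiler-dependent, so this is a sketch of intent rather than a layout guarantee:

#include <stdint.h>
#include <stdio.h>

/* Trimmed copy of wlFlowInf_t: only the dhd and pktfwd views are kept.
 * The first two fields (is_ucast, wl_prio) occupy the same bits in every
 * view, which is what makes the overlay usable across drivers. */
typedef union {
    uint32_t u32;
    struct {
        uint32_t is_ucast:1;
        uint32_t wl_prio:4;
        uint32_t flowring_idx:10;
        uint32_t dhd_reserved:13;
        uint32_t ssid:4;
    } dhd;
    struct {
        uint32_t is_ucast:1;
        uint32_t wl_prio:4;
        uint32_t pktfwd_reserved:7;
        uint32_t ssid:4;
        uint32_t pktfwd_key:16;
    } pktfwd;
} wl_flowinf_demo_t;

int main(void)
{
    wl_flowinf_demo_t wl = { .u32 = 0 };

    /* A DHD transmit path would record the flowring and priority... */
    wl.dhd.is_ucast = 1;
    wl.dhd.wl_prio = 5;
    wl.dhd.flowring_idx = 42;
    wl.dhd.ssid = 2;

    /* ...while generic code can still read the shared leading fields. */
    printf("ucast=%u prio=%u raw=0x%08x\n",
           wl.pktfwd.is_ucast, wl.pktfwd.wl_prio, wl.u32);
    return 0;
}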
Used in ONT mode */ + }; + __u32 bcm_flags_word; + } bcm_flags; + __u16 vlan_count; + __u16 vlan_tpid; + __u32 cfi_save; + __u32 vlan_header[SKB_VLAN_MAX_TAGS]; + struct net_device *rxdev; +}; + +#define MAP_FORWARD_NONE 0 +#define MAP_FORWARD_MODE1 1 +#define MAP_FORWARD_MODE2 2 +#define MAP_FORWARD_MODE3 3 /* MAP-E Pre-Fragmentation */ + +struct map_ext { + __u8 map_forward:2; + __u8 map_mf:1; + __u32 map_offset; + __u32 map_id; +}; + +struct spdt_ext { + uint32_t so_mark; +}; + +struct bcm_skb_ext { + struct wlan_ext wlan; + struct vlan_ext vlan; + struct map_ext map; + struct spdt_ext spdt; + + void *tunl; /* used to store tunl pointer */ + union { + __u32 flags; + struct { + __u32 reserved:14; + __u32 skb_fc_accel:1;/* fcache accelerated skb */ + __u32 gdx_loopbk:1;/* loop back skb from HW accelerator */ + __u32 gdx_encap:8; /* encap type for parsing */ + __u32 gdx_l2hdrlen:8;/* l2 header len for ethernet packets */ + }; + }; + + + unsigned char *clone_wr_head; /* indicates drivers(ex:enet)about writable headroom in aggregated skb */ + unsigned char *clone_fc_head; /* indicates fcache about writable headroom in aggregated skb */ + + struct net_device *in_dev; /* Physical device where this pkt is received */ + long seq_num; /* sgs packet sequence number for a flow */ + uint64_t time_stamp; /* sgs packet reception time */ + void* sgs_conn; /* pointing to the sgs connection tracking object */ + struct nf_queue_entry *q_entry; /* sgs packet queue to reinject to the stack */ +}; + +/* accessor macro */ +#define skbuff_bcm_ext_wlan_get(_skb, _field) ((_skb)->bcm_ext.wlan._field) +#define skbuff_bcm_ext_vlan_get(_skb, _field) ((_skb)->bcm_ext.vlan._field) +#define skbuff_bcm_ext_map_get(_skb, _field) ((_skb)->bcm_ext.map._field) +#define skbuff_bcm_ext_sgs_conn_get(_skb) ((_skb)->bcm_ext.sgs_conn) +#define skbuff_bcm_ext_q_entry_get(_skb) ((_skb)->bcm_ext.q_entry) +#define skbuff_bcm_ext_indev_get(_skb) ((_skb)->bcm_ext.in_dev) +#define skbuff_bcm_ext_indev_set(_skb, _dev) ((_skb)->bcm_ext.in_dev = _dev) +#define skbuff_bcm_ext_spdt_get(_skb, _field) ((_skb)->bcm_ext.spdt._field) +#define skbuff_bcm_ext_spdt_set(_skb, _field, _val) ((_skb)->bcm_ext.spdt._field = _val) +#define skbuff_bcm_ext_sgs_conn_set(_skb, _val) ((_skb)->bcm_ext.sgs_conn = _val) +#define skbuff_bcm_ext_q_entry_set(_skb, _val) ((_skb)->bcm_ext.q_entry = _val) + +void bcm_skbuff_copy_skb_header(struct sk_buff *new, const struct sk_buff *old); +void bcm_skbuff_skb_clone(struct sk_buff *n, struct sk_buff *skb); +int bcm_skbuff_handle_netif_rx_internal(struct sk_buff *skb, int *ret); +int bcm_skbuff_handle_netif_receive_skb_core(struct sk_buff *skb, int *ret); + + +#ifdef CONFIG_BCM_SKB_FREE_THREAD +void dev_kfree_skb_thread(struct sk_buff *skb); +#else +#define dev_kfree_skb_thread(skb) dev_consume_skb_any(skb) +#endif +int bcm_skb_free_head(struct sk_buff *skb); +int bcm_kfree_skbmem(struct sk_buff *skb); + +#if defined(CONFIG_BCM_USBNET_ACCELERATION) +void skb_clone_headers_set(struct sk_buff *skb, unsigned int len); +unsigned int skb_writable_headroom(const struct sk_buff *skb); +#endif +void skb_header_free(struct sk_buff *skb); +struct sk_buff *skb_header_alloc(void); + +void skb_shinforeset(struct skb_shared_info *skb_shinfo); + +/* + * sk_buff structure is copied directly from skbuff.h and removing any #ifdef except CONFIG_BCM_KF_NBUFF. + * This is on-purpose to solve binary incompatibility issue. 
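The binary-compatibility warning above is the reason the sk_buff copy that follows keeps its field order frozen. As a loose illustration of the concern, a structure shared with prebuilt modules can have its public offsets pinned at compile time; the toy struct and offsets below are invented for the demo (the header itself declares skb_layout_test()/skb_aligned_size(), presumably for an equivalent runtime check):

#include <stddef.h>
#include <stdint.h>

/* Toy illustration only: if a structure is shared with binary-only modules,
 * the offsets of its public fields must not move.  The struct below is not
 * the real sk_buff. */
struct demo_pkt_ctx {
    void     *next;          /* must stay at offset 0                    */
    void     *prev;
    uint32_t  len;           /* must stay right after the two pointers   */
    uint32_t  mark;
    char      cb[48];        /* scratch area, analogous to skb->cb       */
};

_Static_assert(offsetof(struct demo_pkt_ctx, next) == 0,
               "next moved: binary modules would break");
_Static_assert(offsetof(struct demo_pkt_ctx, len) == 2 * sizeof(void *),
               "len moved: binary modules would break");
_Static_assert(sizeof(((struct demo_pkt_ctx *)0)->cb) == 48,
               "cb shrank: binary modules would break");

int main(void) { return 0; }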
+ */ + +#if defined(CONFIG_BCM_KF_MPTCP) +#define BCM_SKB_CB_SIZE 80 +#else +#define BCM_SKB_CB_SIZE 48 +#endif + +struct sk_buff { + union { + struct { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + union { + struct net_device *dev; + /* Some protocols might use this space to store information, + * while device pointer would be NULL. + * UDP receive path is one user. + */ + unsigned long dev_scratch; + }; + }; + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ + struct list_head list; + }; + + union { + struct sock *sk; + int ip_defrag_offset; + }; + + union { + ktime_t tstamp; + u64 skb_mstamp; + }; +#if defined(CONFIG_BCM_KF_NBUFF) + __u32 unused; + union { + /* 3 bytes unused */ + unsigned int recycle_and_rnr_flags; + unsigned int recycle_flags; + }; + /* + * Several skb fields have been regrouped together for better data locality + * cache performance, 16byte cache line proximity. + * In 32 bit architecture, we have 32 bytes of data before this comment. + * In 64 bit architecture, we have 52 bytes of data at this point. + */ + + /*--- members common to fkbuff: begin here ---*/ + struct { + union { + /* see fkb_in_skb_test() */ + void *fkbInSkb; + void *word0; + }; + + /* defined(CONFIG_BLOG), use blog_ptr() */ + struct blog_t *blog_p; + unsigned char *data; + + /* The len in fkb is only 24 bits other 8 bits are used as internal flags + * when fkbInSkb is used the max len can be only 24 bits, the bits 31-24 + * are cleared + * currently we don't have a case where len can be >24 bits. + */ + union { + unsigned int len; + /* used for fkb_in_skb test */ + __u32 len_word; + }; + + union { + __u32 mark; + __u32 dropcount; + void *queue; + /* have to declare the following variation of fkb_mark + * for the ease of handling 64 bit vs 32 bit in fcache + */ + unsigned long fkb_mark; + __u32 fc_ctxt; /* hybrid flow cache context */ + }; + + union { + __u32 priority; + wlFlowInf_t wl; + }; + + /* Recycle preallocated skb or data */ + RecycleFuncP recycle_hook; + + union { + unsigned long recycle_context; + struct sk_buff *next_free; + __u32 fpm_num; + }; +#ifdef CONFIG_64BIT + } ____cacheline_aligned; + /* + * purposedly making the above fkbuff data structure cacheline aligned + * in 64 bit architecture. + * This can ensure the offset to the content is fixed into same cacheline. + * Main reason we only declare as cacheline_aligned for 64 bit is that + * we have manually calculated to ensure that this structure is 32 byte + * aligned in 32 bit architecture. If we add ____cacheline_aligned + * also for 32 bit architecture, it will waste 64 byte memory if that + * architecture is with 64 byte cache line size (i.e., 63148). + */ +#else + }; +#endif + /*--- members common to fkbuff: end here ---*/ + + +#endif + + struct bcm_skb_ext bcm_ext; + + union { + struct { + unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); + }; + struct list_head tcp_tsorted_anchor; + }; + + struct sec_path *sp; + unsigned long _nfct; + struct nf_bridge_info *nf_bridge; +#if defined(CONFIG_BCM_KF_NBUFF) + unsigned int data_len; +#else + unsigned int len, + data_len; +#endif + __u16 mac_len, + hdr_len; + + /* Following fields are _not_ copied in __copy_skb_header() + * Note that queue_mapping is here mostly to fill a hole. 
+ */ + __u16 queue_mapping; + +/* if you move cloned around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define CLONED_MASK (1 << 7) +#else +#define CLONED_MASK 1 +#endif +#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) + + __u8 __cloned_offset[0]; + __u8 cloned:1, + nohdr:1, + fclone:2, + peeked:1, + head_frag:1, + xmit_more:1, + pfmemalloc:1; + + /* fields enclosed in headers_start/headers_end are copied + * using a single memcpy() in __copy_skb_header() + */ + /* private: */ + __u32 headers_start[0]; + /* public: */ + +/* if you move pkt_type around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 +#endif +#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) + + __u8 __pkt_type_offset[0]; + __u8 pkt_type:3; + __u8 ignore_df:1; + __u8 nf_trace:1; + __u8 ip_summed:2; + __u8 ooo_okay:1; + + __u8 l4_hash:1; + __u8 sw_hash:1; + __u8 wifi_acked_valid:1; + __u8 wifi_acked:1; + __u8 no_fcs:1; + /* Indicates the inner headers are valid in the skbuff. */ + __u8 encapsulation:1; + __u8 encap_hdr_csum:1; + __u8 csum_valid:1; + + __u8 csum_complete_sw:1; + __u8 csum_level:2; + __u8 csum_not_inet:1; + __u8 dst_pending_confirm:1; + __u8 ndisc_nodetype:2; + __u8 ipvs_property:1; + + __u8 inner_protocol_type:1; + __u8 remcsum_offload:1; + __u8 offload_fwd_mark:1; + __u8 offload_mr_fwd_mark:1; + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; + __u8 tc_redirected:1; + __u8 tc_from_ingress:1; + __u8 decrypted:1; + __u16 tc_index; /* traffic control index */ + + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; +#ifdef CONFIG_BCM_KF_NBUFF +#else + __u32 priority; +#endif + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + __u32 secmark; + +#if defined(CONFIG_BCM_KF_NBUFF) + __u32 reserved_tailroom; +#else + union { + __u32 mark; + __u32 reserved_tailroom; + }; +#endif + + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + + /* private: */ + __u32 headers_end[0]; + + /* + * This is the control buffer. It is free to use for every + * layer. Please put your private variables there. If you + * want to keep them across layers you have to do a skb_clone() + * first. This is owned by whoever has the skb queued ATM. + */ + char cb[BCM_SKB_CB_SIZE] __aligned(8); + +/* + * ------------------------------- CAUTION!!! --------------------------------- + * Do NOT add a new field or modify any existing field(except cb) before this + * line to the beginning of the struct sk_buff. Doing so will cause + * struct sk_buff to be incompatible with the compiled binaries and may cause + * the binary only modules to crash. + * --------------------------------------------------------------------------- + */ + + /* public: */ + + /* These elements must be at the end, see alloc_skb() for details. 
*/ + sk_buff_data_t tail; + sk_buff_data_t end; +#if defined(CONFIG_BCM_KF_NBUFF) + unsigned char *head; +#else + unsigned char *head, + *data; +#endif + unsigned int truesize; + refcount_t users; +}; + + +#endif /* _BCM_SKBUFF_H */ diff --git a/include/linux/blog.h b/include/linux/blog.h new file mode 100644 index 0000000000000000000000000000000000000000..3af178fe61301701ba7ea425deaf3933de5da3e0 --- /dev/null +++ b/include/linux/blog.h @@ -0,0 +1,3302 @@ +#if defined(CONFIG_BLOG) + +#ifndef __BLOG_H_INCLUDED__ +#define __BLOG_H_INCLUDED__ + +/*--------------------------------*/ +/* Blog.h and Blog.c for Linux OS */ +/*--------------------------------*/ + +/* +* <:copyright-BRCM:2003:DUAL/GPL:standard +* +* Copyright (c) 2003 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. +* +:> +*/ + +/* + ******************************************************************************* + * + * File Name : blog.h + * + * Description: + * + * A Blog is an extension of the native OS network stack's packet context. + * In Linux a Blog would be an extension of the Linux socket buffer (aka skbuff) + * or a network device driver level packet context FkBuff. The nbuff layer + * provides a transparent access SHIM to the underlying packet context, may it + * be a skbuff or a fkbuff. In a BSD network stack, a packet context is the + * BSD memory buffer (aka mbuff). + * + * Blog layer provides Blog clients a SHIM to the native OS network stack: + * Blog clients may be implemented to: + * - debug trace a packet as it passes through the network stack, + * - develop traffic generators (loop) at the network device driver level. + * - develop network driver level promiscuous mode bound applications and use + * the Blog SHIM to isolate themselves from the native OS network constructs + * or proprietery network constructs such as Ethernet bridges, VLAN network + * interfaces, IGMP, firewall and connection tracking systems. + * + * As such, Blog provides an extension of the packet context and contains the + * received and transmitted packets data and parsed information. Parsing results + * are saved to describe, the type of layer 1, 2, 3 and 4 headers seen, whether + * the packet was a unicast, broadcast or multicast, a tunnel 4in6 or 6in4 etc. + * + * Blog views a receive or transmit end-point to be any construct that can be + * described by a end point context and a handler op. 
An end-point could hence
+ * be a:
+ * - a network device (Linux net_device with hard start transmit handler),
+ * - a link or queue in the network stack (e.g. a Linux Traffic Control queue
+ *   or a netlink or raw socket queue),
+ * - a file system logging interface and its logging handler,
+ * - a virtual interface to some hardware block that provides some hardware
+ *   assisted functionality (e.g. an IPSEC acceleration, checksum offload or
+ *   GSO block),
+ * - a raw interface to an external hardware test traffic generator using, say,
+ *   DMA mapped packet reception or transmission.
+ *
+ * Blog clients are hence applications that provide value-added capability by
+ * binding at such end-points.
+ *
+ * A simple Blog client application is a loop traffic generator that simply
+ * acts as a sink of packets belonging to a specific "l3 flow" and mirrors
+ * them to another interface, or loops them back into the stack by serving as a
+ * source to a receive network device, while measuring the packet processing
+ * datapath performance in the native OS network stack/proprietary constructs.
+ * Such a loop traffic generator could be used to inject N cells/packets
+ * that cycle through the system endlessly, serving as background traffic while
+ * a few flows are studied from, say, a QoS perspective.
+ *
+ * Another example of a Blog client is a proxy accelerator (hardware/software)
+ * that is capable of snooping on specific flows and accelerating them while
+ * bypassing the native OS network stack and/or proprietary constructs. It is
+ * however required that the native OS constructs can co-exist. E.g. it may be
+ * necessary to refresh a network bridge's ARL table, or a connection/session
+ * tracker, or update statistics, when individual packets bypass such network
+ * constructs. A proxy accelerator may also reside between an Rx network device,
+ * a hardware IPSEC accelerator block and a Tx network device.
+ *
+ * The Blog layer provides a logical composite SHIM to the network constructs,
+ * Linux or proprietary, allowing 3rd-party network constructs to be seamlessly
+ * supported in the native OS. E.g. a network stack that uses a proprietary
+ * session tracker with firewalling capability would need to be transparently
+ * accessed, so that a Blog client may refresh the session tracking object when
+ * packets bypass the network stack.
+ *
+ * For each OS (eCOS, Linux, BSD) a blog.c implementation file is provided that
+ * implements the OS specific SHIM. Support for 3rd-party network constructs
+ * would need to be defined in blog.c. E.g. for Linux, if a proprietary
+ * session tracker replaces the Linux netfilter connection tracking framework,
+ * then the void * ct_p and the corresponding query/set operations would need to
+ * be implemented. The Blog clients SHOULD NOT rely on any function other than
+ * those specifically defined, allowing a coexistence of the Blog client and the
+ * native construct. In the example of a ct_p, for all practical purposes,
+ * the void * could have been a key or a handle to a connection tracking object.
+ *
+ * Likewise, the Blog client may need to save a client key with the
+ * network construct. Again, a client key may be a pointer to a client object or
+ * simply a hash key or some handle semantics.
+ *
+ * The logical SHIM is defined as follows:
+ *
+ * __doc_include_if_linux__
+ *
+ * 1. 
Extension of a packet context with a logging context: + * ======================================================== + * Explicit APIs to allocate/Free a Blog structure, and bind to the packet + * context, may it be a skbuff or a fkbuff. Support for transferring a + * Blog_t structure from one packet context to another during the course of + * a packet in the network stack involving a packet context clone/copy is + * also included. The release and recycling of Blog_t structures when a + * packet context is freed are also providied. + * Binding is bi-directional: packet context <-- --> Blog_t + * + * + * 2. Associating native OS or 3rd-party network constructs: blog_link() + * ========================================================================== + * Examples of network constructs + * "dev" - Network device + * "ct" - Connection or session tracker + * "fdb" - Network bridge forwarding database entity + * + * Association is pseudo bi-directional, using "void *" binding in a Blog_t to + * a network construct. In the reverse, a network construct will link to a + * Blog client entity using a Key concept. Two types of keys are currently + * employed, a BlogFlowKey and a BlogGroupKey. + * + * A BlogFlowKey would typically refer to a single unidirectional packet + * stream defined by say all packets belonging to a unidirectional IPv4 flow, + * whereas a BlogGroupKey could be used to represent a single downstream + * multicast stream (IP multicast group) that results in replicated streams + * pertaining to multiple clients joining a the IPv4 multicast group. + * + * Likewise, one may represent a single unidirectional IPv4 UDP flow using + * BlogFlowKey, and the reverse direction IPv4 UDP reply flow + * using another BlogFlowKey, and represent the mated pair using a + * BlogGroupKey. + * + * In a Blog traffic generator client, where in several IPv4 UDP flows, each + * represented independently using a BlogFlowKey, allows for a set of them + * (background downstream stress traffic) to be managed as a group using a + * BlogGroupKey. + * + * Designer Note: + * A network construct may be required to save a BlogFlowKey and/or + * BlogGroupKey to complete the reverse binding between a network construct + * and the Blog client application. An alternate approach would be to save + * a pointer to the Blog_t in the network construct with an additional + * dereference through the keys saved within the Blog_t object. + * + * A BlogFlowKey and a BlogGroupKey is a 32bt sized unit and can serve either + * as a pointer (32bit processor) or a index or a hash key or ... + * + * + * 3. Network construct and Blog client co-existence call backs: + * ============================================================= + * + * blog_notify(): + * ============== + * A network construct may notify a Blog client of a change of status and may + * be viewed as a "downcall" from specialized network construct to a Blog client + * E.g. if a connection/session tracking system deems that a flow needs to be + * deleted or say it itself is being destroyed, then it needs to notify the Blog + * client. This would allow the Blog client to cleanup any association with the + * network construct. + * Ability for a Blog client to receive general system wide notifications of + * changes, to include, network interfaces or link state changes, protocol stack + * service access point changes, etc. + * Designer Note: Linux notification list? 
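The reverse binding described above usually boils down to the network construct storing one opaque 32-bit client key per direction, exactly as the blog_key[2] member of bcm_nf_conn_ext does earlier in this patch. A toy version of that pattern; the demo_* names are invented, and the FLOWTRACK_KEY_SET/GET requests referenced in the comments are the real hooks defined later in this header:

#include <stdint.h>
#include <stdio.h>

#define DEMO_KEY_NONE   0u      /* mirrors BLOG_KEY_NONE          */
#define DEMO_DIR_ORIG   0       /* mirrors BLOG_PARAM1_DIR_ORIG   */
#define DEMO_DIR_REPLY  1       /* mirrors BLOG_PARAM1_DIR_REPLY  */

/* A network construct (here: a fake connection tracker) keeps one opaque
 * 32-bit client key per direction, so the Blog client can be found again
 * later without the construct knowing anything about the client's tables. */
struct demo_conntrack {
    uint32_t blog_key[2];
};

static void demo_key_set(struct demo_conntrack *ct, int dir, uint32_t key)
{
    ct->blog_key[dir] = key;        /* e.g. a FLOWTRACK_KEY_SET request */
}

static uint32_t demo_key_get(const struct demo_conntrack *ct, int dir)
{
    return ct->blog_key[dir];       /* e.g. a FLOWTRACK_KEY_GET request */
}

int main(void)
{
    struct demo_conntrack ct = { { DEMO_KEY_NONE, DEMO_KEY_NONE } };

    /* The Blog client learned the ORIG direction flow and hands back its
     * key (an index into its own flow table in this toy example). */
    demo_key_set(&ct, DEMO_DIR_ORIG, 0x1234);

    if (demo_key_get(&ct, DEMO_DIR_REPLY) == DEMO_KEY_NONE)
        printf("reply direction not accelerated yet\n");
    return 0;
}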
+ * + * blog_request(): + * =============== + * A Blog client may request a change in state in the network construct and may + * be viewed as a "upcall" from the Blog client into the network construct. A + * timer refresh of the bridge fdb or connection tracking object, or a query + * whether the session tracker has successfully established (e.g. a TCP 3-way + * handshake has completed, or a IGMP client was permitted to join a group, or a + * RTSP session was successful) a uni-driectional or bi-directional flow. + * + * + * 4. Network end-point binding of Blog client + * =========================================== + * + * blog_init(), blog_sinit(), blog_finit(): + * ======================================== + * __comment_if_linux__ : This function is invoked by a Linux network device on + * packet reception to pass the packet to a Blog client application. + * + * Pass a packet context to a Blog client at a "RX" network device either using + * a skbuff or a fkbuff packet context. Blog client MAY ONLY ACCESS fkbuff + * fields. As per the nbuff specification, a FkBuff may be considered as a + * base class and a skbuff is a derived class, inheriting the base class members + * of the base class, fkbuff. The basic fields of a packet context are a pointer + * to the received packet's data, data length, a set of reserved fields to carry + * layer 1 information, queue priority, etc, and packet context and or packet + * recycling. The layer 1 information is described in terms of channels and + * and link layer phy preambles. A channel could be an ATM VCI, a DSL queue, a + * PON Gem Port. A Phy could describe the LINK layer type and or a preamble for + * instance a RFC2684 header in the DSL world. + * + * blog_[s|f]init() will setup the L1 coarse key<channel,phy> and invokes a Blog + * client's receive hook. A Blog client may consume the packet bypassing the + * native OS network stack, may suggest that the packet context be extended by + * a Blog_t structure or may deem that the packet is of not interest. As such + * the Blog client will return PKT_DONE, PKT_BLOG or PKT_NORM, respectively. In + * case no Blog client has been registered for receiving packets (promiscuous) + * driectly from RX network devices, then the packet will follow a normal data + * path within the network stack (PKT_NORM). + * + * Designer Note: Blog clients MAY NOT use fields not defined in FkBuff. + * + * + * blog_emit(): + * ============ + * __comment_if_linux__ : This function is invoked by a Linux network device + * prior to packet transmission to pass the packet to a Blog client application. + * + * Pass a packet context to a Blog client at a "TX" network device either using + * a skbuff or a fkbuff packet context. The same restrictions on a Blog client + * pertaining to packet field context access as defined in the blog_init() + * variant of APIs is applicable to blog_emit(). A Blog client may also return + * PKT_NORM or PKT_DONE, to indicate normal processing, or packet consumption. + * + * Designer Note: blog_emit() will ONLY pass those packets to Blog clients that + * have a packet context extended with a Blog_t structure. Hence skbuffs or + * fkbuffs that do not have a Blog_t extension will not be handed to the Blog + * client. Do we need blog_semit/blog_femit variants. + * + * + * 5. 
Binding Blog client applications: blog_bind() + * ================================================ + * blog_bind() enables a "single" client to bind into the network stack by + * specifying a network device packet reception handler, a network device packet + * transmission handler, network stack to blog client notify hook. + * + * + * 6. Miscellanous + * =============== + * - Blog_t management. + * - Data-filling a Blog_t. + * - Protocol Header specifications independent of OS. + * - Debug printing. + * + * + * __end_include_if_linux__ + * + * Version 1.0 SKB based blogging + * Version 2.0 NBuff/FKB based blogging (mbuf) + * Version 2.1 IPv6 Support + * Version 3.0 Restructuring Blog SHIM to support eCOS, Linux and proprietery + * network constructs + * + ******************************************************************************* + */ + +#define BLOG_VERSION "v3.0" + +#if defined(__KERNEL__) /* Kernel space compilation */ +#include <linux/types.h> /* LINUX ISO C99 7.18 Integer types */ +#else /* User space compilation */ +#include <stdint.h> /* C-Lib ISO C99 7.18 Integer types */ +#endif +#include <linux/blog_net.h> /* IEEE and RFC standard definitions */ +#include <linux/nbuff_types.h> /* for IS_SKBUFF_PTR */ +#include <linux/brcm_dll.h> + +#ifndef NULL_STMT +#define NULL_STMT do { /* NULL BODY */ } while (0) +#endif + +#undef BLOG_DECL +#define BLOG_DECL(x) x, + +#ifndef BLOG_OFFSETOF +#define BLOG_OFFSETOF(stype, member) ((size_t) &((struct stype *)0)->member) +#endif + +/* Forward declarations */ +struct blog_t; +typedef struct blog_t Blog_t; +#define BLOG_NULL ((Blog_t*)NULL) +#define BLOG_KEY_NONE 0 + +/* __bgn_include_if_linux__ */ + +struct sk_buff; /* linux/skbuff.h */ +struct fkbuff; /* linux/nbuff.h */ + +/* See RFC 4008 */ + + +typedef struct blogCtTimeFlags { + uint32_t unused: 31; + uint32_t valid: 1; /* BlogCtTime has valid values */ +} BlogCtTimeFlags_t; + +/* used to pass timer info between the stack and blog layer */ +typedef struct blogCtTime { + BlogCtTimeFlags_t flags; /* Flags */ + uint8_t unknown; /* unknown proto */ + uint8_t proto; /* known proto TCP, UDP */ + uint8_t intv; /* intv in sec */ + uint8_t idle; /* idle time in sec */ +} BlogCtTime_t; + +/* used to exchange info between fcache and drivers */ +typedef struct { + uint32_t h_proto; /* protocol */ + uint32_t key_match; /* key */ + void *txdev_p; + uint8_t tx_l3_offset; + uint8_t tx_l4_offset; + uint8_t mcast_fwd_exception; /* Runner mcast forwarding exception */ + uint8_t esp_ivsize; + uint8_t esp_icvsize; + uint8_t esp_inner_pkt; +} BlogFcArgs_t; + +typedef struct { + uint64_t rx_packets; + uint64_t rx_bytes; + int32_t rx_rtp_packets_lost; /*TODO chekc why this is defined as signed int */ + uint32_t pollTS_ms; // Poll timestamp in ms +}BlogFcStats_t; + +typedef struct { + u64 packet_count; + u64 byte_count; +} blog_fast_stats_t; + +/* + * Linux Netfilter Conntrack registers it's conntrack refresh function which + * will be invoked to refresh a conntrack when packets belonging to a flow + * managed by Linux conntrack are bypassed by a Blog client. 
+ */ +typedef void (*blog_cttime_upd_t)(void * ct_p, BlogCtTime_t *ct_time_p); +extern blog_cttime_upd_t blog_cttime_update_fn; + + +extern int blog_ct_get_stats(const void *ct, uint32_t blog_key, uint32_t dir, + BlogFcStats_t *stats); +extern int blog_ct_push_stats(void); + +#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) +typedef int (*blog_gre_rcv_check_t)(void *dev, BlogIpv4Hdr_t *iph, uint16_t len, + void **tunl_pp, uint32_t *pkt_seqno_p); +extern blog_gre_rcv_check_t blog_gre_rcv_check_fn; + +typedef int (*blog_gre_xmit_upd_t)(void * tunl_p, BlogIpv4Hdr_t *iph, uint16_t len); +extern blog_gre_xmit_upd_t blog_gre_xmit_update_fn; +typedef int (*blog_gre6_rcv_check_t)(void *dev, BlogIpv6Hdr_t *ipv6h, uint16_t len, + void **tunl_pp, uint32_t *pkt_seqno_p); +extern blog_gre6_rcv_check_t blog_gre6_rcv_check_fn; + +typedef int (*blog_gre6_xmit_upd_t)(void * tunl_p, BlogIpv6Hdr_t *ipv6h, uint16_t len); +extern blog_gre6_xmit_upd_t blog_gre6_xmit_update_fn; +#endif + + +#define PPTP_NOT_ACK 0 +#define PPTP_WITH_ACK 1 +#define PPTP_GRE_VER_0 0 +#define PPTP_GRE_VER_1 1 +#define PPTP_GRE_NONE 2 + +typedef int (*blog_pptp_xmit_upd_t)(uint16_t call_id, uint32_t *seqNum, + uint32_t *ackNum, uint32_t daddr); +extern blog_pptp_xmit_upd_t blog_pptp_xmit_update_fn; + +typedef int (*blog_pptp_rcv_check_t)(uint16_t call_id, uint32_t *rcv_pktSeq, + uint32_t rcv_pktAck, uint32_t saddr); +extern blog_pptp_rcv_check_t blog_pptp_rcv_check_fn; + +typedef int (*blog_l2tp_rcv_check_t)(void *dev, uint16_t tunnel_id, + uint16_t session_id); +extern blog_l2tp_rcv_check_t blog_l2tp_rcv_check_fn; + +#if defined(CONFIG_BCM_OVS) +typedef int (* blog_is_ovs_internal_dev_t)(void *dev); +typedef unsigned long (* blog_mega_get_key_t)(void *mega); +typedef void (* blog_mega_set_key_t)(void *mega, unsigned long blog_key); +typedef void (* blog_mega_put_fast_stats_t)(void *net_p, + const blog_fast_stats_t *stats); + +typedef struct { + blog_is_ovs_internal_dev_t is_ovs_internal_dev; + blog_mega_get_key_t mega_get_key; + blog_mega_set_key_t mega_set_key; + blog_mega_put_fast_stats_t mega_put_fast_stats; +} blog_ovs_hooks_t; +void blog_bind_ovs(blog_ovs_hooks_t *blog_ovs_hooks_p); +#endif + +/* __end_include_if_linux__ */ + + + +/* + *------------------------------------------------------------------------------ + * Denotes a Blog client, + *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(BlogClient_fcache) + BLOG_DECL(BlogClient_mcast) + BLOG_DECL(BlogClient_MAX) +} BlogClient_t; + +/* + *------------------------------------------------------------------------------ + * Denotes whether a packet is consumed and freed by a Blog client application, + * whether a packet needs to be processed normally within the network stack or + * whether a packet context is extended with a Blog_t object. 
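To make the refresh hook concrete, the sketch below mirrors the blog_cttime_upd_t shape with stand-alone demo_* types; the refresh policy and the direct function-pointer assignment are assumptions made for the illustration, not the actual conntrack registration path:

#include <stdint.h>
#include <stdio.h>

/* Standalone mirror of the hook above.  All demo_* names are invented; only
 * the callback shape follows blog_cttime_upd_t and the BlogCtTime_t fields. */
typedef struct {
    uint8_t proto;  /* known proto (e.g. TCP=6, UDP=17) */
    uint8_t intv;   /* refresh interval, seconds        */
    uint8_t idle;   /* observed idle time, seconds      */
} demo_ct_time_t;

struct demo_ct {
    unsigned long timeout;      /* absolute expiry, arbitrary ticks */
};

typedef void (*demo_cttime_upd_t)(void *ct_p, demo_ct_time_t *t);
static demo_cttime_upd_t demo_cttime_update_fn; /* stands in for blog_cttime_update_fn */

static unsigned long demo_now = 1000;           /* fake clock */

/* Toy refresh policy: push the expiry out by one refresh interval. */
static void demo_ct_refresh(void *ct_p, demo_ct_time_t *t)
{
    struct demo_ct *ct = ct_p;
    ct->timeout = demo_now + t->intv;
}

int main(void)
{
    struct demo_ct ct = { .timeout = 0 };
    demo_ct_time_t t = { .proto = 6, .intv = 30, .idle = 0 };

    demo_cttime_update_fn = demo_ct_refresh;    /* conntrack side installs it      */
    demo_cttime_update_fn(&ct, &t);             /* invoked while packets bypass    */
    printf("refreshed timeout = %lu\n", ct.timeout);  /* 1030 */
    return 0;
}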
+ *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(PKT_DONE) /* Packet consumed and freed */ + BLOG_DECL(PKT_NORM) /* Continue normal stack processing */ + BLOG_DECL(PKT_BLOG) /* Continue stack with blogging */ + BLOG_DECL(PKT_DROP) /* Drop Packet */ + BLOG_DECL(PKT_TCP4_LOCAL) /* ipv4 tcp packet terminating locally*/ + BLOG_DECL(PKT_TCP6_LOCAL) /* ipv6 tcp packet terminating locally*/ + BLOG_DECL(BLOG_ACTION_MAX) +} BlogAction_t; + +/* + *------------------------------------------------------------------------------ + * Denotes the direction in the network stack when a packet is processed by a + * virtual network interface/network device. + *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(DIR_RX) /* Receive path in network stack */ + BLOG_DECL(DIR_TX) /* Transmit path in network stack */ + BLOG_DECL(BLOG_DIR_MAX) +} BlogDir_t; + +/* + *------------------------------------------------------------------------------ + * Denotes the type of Network entity associated with a Blog_t. + * + * BlogNetEntity_t may be linked to a blog using blog_link to make the Blog_t + * point to the BlogNetEntity_t. A reverse linking from the BlogNetEntity_t to + * Blog_t is only possible via a key (if necessary when a one to one association + * between the BlogNetEntity_t and a Blog exists. For instance, there is a + * one to one association between a Flow Connection Tracker and a Blog. In fact + * a Linux Netfilter Connection Tracking object manages a bi-directional flow + * and thus may have 2 keys to reference the corresponding Blog_t. However, a + * network device (physical end device or a virtual device) may have multiple + * Flows passing through it and hence no one-to-one association exists. In this + * can a Blog may have a link to a network device, but the reverse link (via a + * key) is not saved in the network device. + * + * Linking a BlogNetEntity_t to a blog is done via blog_link() whereas saving + * a reference key into a BlogNetEntity_t is done via blog_request() by the + * Blog client application, if needed. + * + *------------------------------------------------------------------------------ + */ + +/* FLOWTRACK: param1 is ORIG=0 or REPLY=1 direction */ +#define BLOG_PARAM1_DIR_ORIG 0U +#define BLOG_PARAM1_DIR_REPLY 1U +#define BLOG_PARAM1_DIR_MAX 2U + +/* BRIDGEFDB: param1 is src|dst */ +#define BLOG_PARAM1_SRCFDB 0U +#define BLOG_PARAM1_DSTFDB 1U + +/* LLID/GEM index for the mcast data received at WAN side. 
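The BlogAction_t values above are what a receive driver switches on after handing a packet to the Blog client (blog_finit() and friends, described earlier). The stub below stands in for that call; its name, signature and packet type are invented for the illustration:

#include <stdio.h>

/* Reduced copy of the BlogAction_t outcomes a receive driver must handle.
 * demo_blog_finit() is a stub standing in for the real blog_finit(). */
typedef enum { DEMO_PKT_DONE, DEMO_PKT_NORM, DEMO_PKT_BLOG, DEMO_PKT_DROP } demo_action_t;

struct demo_pkt { int interesting; };

static demo_action_t demo_blog_finit(struct demo_pkt *pkt)
{
    /* A Blog client would inspect the packet here; this stub just asks for
     * blogging on "interesting" packets and normal processing otherwise. */
    return pkt->interesting ? DEMO_PKT_BLOG : DEMO_PKT_NORM;
}

/* Shape of the decision a network device driver makes on receive. */
static void demo_rx(struct demo_pkt *pkt)
{
    switch (demo_blog_finit(pkt)) {
    case DEMO_PKT_DONE:   /* consumed (e.g. accelerated): stop here       */
        break;
    case DEMO_PKT_DROP:   /* client asked for the packet to be dropped    */
        break;
    case DEMO_PKT_BLOG:   /* attach a Blog_t, then continue up the stack  */
    case DEMO_PKT_NORM:   /* hand the packet to the normal network stack  */
        printf("pass to stack (blogged=%d)\n", pkt->interesting);
        break;
    }
}

int main(void)
{
    struct demo_pkt a = { .interesting = 1 }, b = { .interesting = 0 };
    demo_rx(&a);
    demo_rx(&b);
    return 0;
}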
0xFE means any value is acceptable */ +#define BLOG_CHAN_XPON_MCAST_ANY 0xFE + +/* IF_DEVICE: param1 is direction RX or TX, param 2 is minMtu */ + +typedef enum { + BLOG_DECL(FLOWTRACK) /* Flow (connection|session) tracker */ + BLOG_DECL(BRIDGEFDB) /* Bridge Forwarding Database entity */ + BLOG_DECL(MCAST_FDB) /* Multicast Client FDB entity */ + BLOG_DECL(IF_DEVICE) /* Virtual Interface (network device) */ + BLOG_DECL(IF_DEVICE_MCAST) /* Virtual Interface (network device) */ + BLOG_DECL(GRE_TUNL) /* GRE Tunnel */ + BLOG_DECL(TOS_MODE) /* TOS_MODE */ + BLOG_DECL(MEGA) /* Megaflow tracker */ + BLOG_DECL(BLOG_NET_ENTITY_MAX) +} BlogNetEntity_t; + +/* + *------------------------------------------------------------------------------ + * Denotes a type of notification sent from the network stack to the Blog client + * See blog_notify(BlogNotify_t, void *, unsigned long param1, uint32_t param2); + *------------------------------------------------------------------------------ + */ + +/* MCAST_CONTROL_EVT: param1 is add|del, and param2 is IPv4|IPv6 */ +#define BLOG_PARAM1_MCAST_ADD 0U +#define BLOG_PARAM1_MCAST_DEL 1U +#define BLOG_PARAM2_MCAST_IPV4 0U +#define BLOG_PARAM2_MCAST_IPV6 1U + +/* LINK_STATE_CHANGE: param1 */ +#define BLOG_PARAM1_LINK_STATE_UP 0U +#define BLOG_PARAM1_LINK_STATE_DOWN 1U + +typedef enum { + BLOG_DECL(DESTROY_FLOWTRACK) /* Session/connection is deleted */ + BLOG_DECL(DESTROY_BRIDGEFDB) /* Bridge FDB has aged */ + BLOG_DECL(MCAST_CONTROL_EVT) /* Mcast client joins a group event */ + BLOG_DECL(DESTROY_NETDEVICE) /* Network device going down */ + BLOG_DECL(FETCH_NETIF_STATS) /* Fetch accumulated stats */ + BLOG_DECL(CLEAR_NETIF_STATS) /* Clear accumulated stats */ + BLOG_DECL(UPDATE_NETDEVICE) /* Netdevice has been modified (MTU, etc) */ + BLOG_DECL(ARP_BIND_CHG) /* ARP IP/MAC binding change event */ + BLOG_DECL(CONFIG_CHANGE) /* Certain configuration change event */ + BLOG_DECL(UP_NETDEVICE) /* network device up */ + BLOG_DECL(DN_NETDEVICE) /* network device down */ + BLOG_DECL(CHANGE_ADDR) /* network device change MAC addr */ + BLOG_DECL(SET_DPI_PARAM) /* Set the DPI parameters */ + BLOG_DECL(FLUSH) /* Flush flows based on parameters */ + BLOG_DECL(DESTROY_MEGA) /* Megaflow connection is deleted */ + BLOG_DECL(FETCH_MEGA_STATS) /* Fetch megaflow fast stats */ + BLOG_DECL(CLEAR_MEGA_STATS) /* Clear megaflow fast stats */ + BLOG_DECL(UPDATE_FLOWTRACK_IDLE_TIMEOUT)/* update idle timeout */ + BLOG_DECL(CREATE_BRIDGEFDB) /* Bridge FDB is created */ + BLOG_DECL(BLOG_NOTIFY_MAX) +} BlogNotify_t; + +typedef enum { + BLOG_DECL(QUERY_FLOWTRACK) /* Session/connection time is queried */ + BLOG_DECL(QUERY_BRIDGEFDB) /* Bridge FDB time is queried */ + BLOG_DECL(QUERY_FLOWTRACK_STATS)/* get stats of flows associated with NPE */ + BLOG_DECL(QUERY_GET_HW_ACCEL) + BLOG_DECL(BLOG_QUERY_MAX) +} BlogQuery_t; + +typedef struct{ + int orig_queue; /* Originating queue index */ + int reply_queue; /* Reply queue index */ + int priority; /* Traffic priority */ +}BlogDpiParams_t; + + +/* Blog Notify FLUSH strucutre */ +typedef int (* BlogFlushMetadataFunc_t)(void *metadata_p, const Blog_t *const blog_p); + +typedef struct { + uint32_t flush_all :1; + uint32_t flush_flow :1; + uint32_t flush_dev :1; + uint32_t flush_dstmac :1; + uint32_t flush_srcmac :1; + uint32_t flush_meta :1; /* Not available through Userspace/CLI */ + uint32_t flush_hw :1; /* Flush all flows from HW */ + uint32_t flush_unused :25; + uint8_t mac[6]; + int devid; + int flowid; + void *metadata_p; /* flush_meta = 1 ; Must set 
metadata_p, devid & flush_dev */ + BlogFlushMetadataFunc_t func_p; /* flush_meta = 1 ; Must provide callback func */ +}BlogFlushParams_t; + +/* + *------------------------------------------------------------------------------ + * Denotes a type of request from a Blog client to a network stack entity. + *------------------------------------------------------------------------------ + */ + +typedef enum { + BLOG_DECL(FLOWTRACK_KEY_SET) /* Set Client key into Flowtracker */ + BLOG_DECL(FLOWTRACK_KEY_GET) /* Get Client key into Flowtracker */ + BLOG_DECL(FLOWTRACK_CONFIRMED) /* Test whether session is confirmed */ + BLOG_DECL(FLOWTRACK_ALG_HELPER) /* Test whether flow has an ALG */ + BLOG_DECL(FLOWTRACK_EXCLUDE) /* Clear flow candidacy by Client */ + BLOG_DECL(FLOWTRACK_TIME_SET) /* Set time in a flow tracker */ + BLOG_DECL(FLOWTRACK_IDLE_TIMEOUT_GET) /* get idle timeout in a flow tracker */ + BLOG_DECL(FLOWTRACK_PUT_STATS) /* Push accumulated stats to conntrack*/ + BLOG_DECL(FLOWTRACK_L4PROTO_GET)/* Get L4 proto in Flowtracker */ + BLOG_DECL(NETIF_PUT_STATS) /* Push accumulated stats to devices */ + BLOG_DECL(LINK_XMIT_FN) /* Fetch device link transmit function*/ + BLOG_DECL(LINK_NOCARRIER) /* Fetch device link carrier */ + BLOG_DECL(NETDEV_NAME) /* Network device name */ + BLOG_DECL(MCAST_DFLT_MIPS) /* Delete action in blogRule chain */ + BLOG_DECL(IQPRIO_SKBMARK_SET) /* Set IQOS Prio in skb->mark */ + BLOG_DECL(DPIQ_SKBMARK_SET) /* Set DPIQ in skb->mark */ + BLOG_DECL(BRIDGEFDB_KEY_SET) /* Set Client key into bridge FDB */ + BLOG_DECL(BRIDGEFDB_KEY_GET) /* Get Client key into bridge FDB */ + BLOG_DECL(BRIDGEFDB_TIME_SET) /* Refresh bridge FDB time */ + BLOG_DECL(SYS_TIME_GET) /* Get the system time in jiffies */ + BLOG_DECL(GRE_TUNL_XMIT) /* GRE Tunnel tx */ + BLOG_DECL(GRE6_TUNL_XMIT) /* GRE6 Tunnel tx */ + BLOG_DECL(SKB_DST_ENTRY_SET) /* get dst_entry from skb */ + BLOG_DECL(SKB_DST_ENTRY_RELEASE)/* release dst_entry from blog */ + BLOG_DECL(NETDEV_ADDR) /* Device MAC addr */ + BLOG_DECL(FLOW_EVENT_ACTIVATE) /* Flow Activation event */ + BLOG_DECL(FLOW_EVENT_DEACTIVATE)/* Flow Deactivation event */ + BLOG_DECL(CHK_HOST_DEV_MAC) /* Check Dev HostMAC for addition */ + BLOG_DECL(MAP_TUPLE_KEY_SET) /* Set Client key into MAPT Tuple */ + BLOG_DECL(MAP_TUPLE_KEY_GET) /* Get Client key into MAPT Tuple */ + BLOG_DECL(MEGA_KEY_SET) /* Set Client key into megaflow */ + BLOG_DECL(MEGA_KEY_GET) /* Get Client key into megaflow */ + BLOG_DECL(MEGA_PUT_STATS) /* Put the stats in a megaflow */ + BLOG_DECL(BLOG_REQUEST_MAX) +} BlogRequest_t; + +/* + *------------------------------------------------------------------------------ + * Denotes a type of update to an existing Blog flow. + *------------------------------------------------------------------------------ + */ + +typedef enum { + BLOG_DECL(BLOG_UPDATE_DPI_QUEUE) /* DPI Queue assignment has changed */ + BLOG_DECL(BLOG_UPDATE_BITMAP) /* Multicast client bitmap has changed */ + BLOG_DECL(BLOG_UPDATE_FWD_AND_TRAP) /* fwd_and_trap bit has changed */ + BLOG_DECL(BLOG_UPDATE_BITMAP_FWD_AND_TRAP) /* Mcast client bitmap and + fwd_and_trap bit have changed */ + BLOG_DECL(BLOG_UPDATE_MAX) +} BlogUpdate_t; + +/* + *------------------------------------------------------------------------------ + * Clean this up. 
+ *------------------------------------------------------------------------------ + */ + +#define BLOG_ENCAP_MAX 6 /* Maximum number of L2 encaps */ +#define BLOG_HDRSZ_MAX 38 /* Maximum size of L2 encaps */ + +typedef enum { + BLOG_DECL(GRE_ETH) /* e.g. BLOG_XTMPHY, BLOG_GPONPHY */ + BLOG_DECL(BCM_XPHY) /* e.g. BLOG_XTMPHY, BLOG_GPONPHY */ + BLOG_DECL(BCM_SWC) /* BRCM LAN Switch Tag/Header */ + BLOG_DECL(ETH_802x) /* Ethernet */ + BLOG_DECL(VLAN_8021Q) /* Vlan 8021Q (incld stacked) */ + BLOG_DECL(PPPoE_2516) /* PPPoE RFC 2516 */ + BLOG_DECL(PPP_1661) /* PPP RFC 1661 */ + BLOG_DECL(PLD_IPv4) /* Payload IPv4 */ + BLOG_DECL(PLD_IPv6) /* Payload IPv6 */ + BLOG_DECL(PPTP) /* PPTP Header */ + BLOG_DECL(L2TP) /* L2TP Header */ + BLOG_DECL(GRE) /* GRE Header */ + BLOG_DECL(ESP) /* ESP Header */ + BLOG_DECL(DEL_IPv4) /* Outer IPv4 */ + BLOG_DECL(DEL_IPv6) /* Outer IPv6 */ + BLOG_DECL(DEL_L2) /* L2 DEL */ + BLOG_DECL(PLD_L2) /* L2 PLD */ + BLOG_DECL(HDR0_IPv4) /* IPv4 Inner Header 0 */ + BLOG_DECL(HDR0_IPv6) /* IPv6 Inner Header 0 */ + BLOG_DECL(HDR0_L2) /* L2 Inner Header 0 */ + BLOG_DECL(GREoESP_type) /* GRE over ESP type */ + BLOG_DECL(GREoESP_type_resvd) /* GRE over ESP type */ + BLOG_DECL(GREoESP) /* GRE over ESP */ + BLOG_DECL(NPT6) /* NPT6 */ + BLOG_DECL(PASS_THRU) /* pass-through */ + BLOG_DECL(DEL_DST_OPTS) /* Delivery IPv6 Dest options */ + BLOG_DECL(PLD_DST_OPTS) /* Payload IPv6 Dest options */ + BLOG_DECL(LLC_SNAP) /* LLC_SNAP */ + BLOG_DECL(VXLAN) /* VXLAN Header */ + BLOG_DECL(GRE_ETH_IPv4) /* L2 GRE inner header type IPv4 */ + BLOG_DECL(GRE_ETH_IPv6) /* L2 GRE inner header type IPv6 */ + BLOG_DECL(unused) /* unused */ + BLOG_DECL(PROTO_MAX) +} BlogEncap_t; + + +/* + *------------------------------------------------------------------------------ + * Logging of a maximum 4 "virtual" network devices that a flow can traverse. + * Virtual devices are interfaces that do not perform the actual DMA transfer. + * E.g. an ATM interface would be referred to as a physical interface whereas + * a ppp interface would be referred to as a Virtual interface. + *------------------------------------------------------------------------------ + */ +#define MAX_VIRT_DEV 7 + +#define DEV_DIR_MASK 0x3ul +#define DEV_PTR_MASK (~DEV_DIR_MASK) +#define DEV_DIR(ptr) ((uintptr_t)(ptr) & DEV_DIR_MASK) + +#define IS_RX_DIR(ptr) ( DEV_DIR(ptr) == DIR_RX ) +#define IS_TX_DIR(ptr) ( DEV_DIR(ptr) == DIR_TX ) + +/* + *------------------------------------------------------------------------------ + * Device pointer conversion between with and without embeded direction info + *------------------------------------------------------------------------------ + */ +#define DEVP_APPEND_DIR(ptr,dir) ((void *)((uintptr_t)(ptr) | (uintptr_t)(dir))) +#define DEVP_DETACH_DIR(ptr) ((void *)((uintptr_t)(ptr) & (uintptr_t) \ + DEV_PTR_MASK)) +/* + *------------------------------------------------------------------------------ + * Denotes the tos mode. 
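+ * (Presumed semantics, inferred from the "fixed, inherit" tos_mode_us/ds flag
+ * comments later in this header: BLOG_TOS_FIXED keeps a configured ToS value,
+ * while BLOG_TOS_INHERIT propagates the ToS of the received packet.)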
+ *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(BLOG_TOS_FIXED) + BLOG_DECL(BLOG_TOS_INHERIT) + BLOG_DECL(BLOG_TOS_MAX) +} BlogTos_t; + +/* + *------------------------------------------------------------------------------ + * Blog statistics structure + *------------------------------------------------------------------------------ + */ +typedef struct{ + /* NOTE : All these structure variables should be of same type/size */ + + uint64_t rx_packets; /* total blog packets received */ + uint64_t tx_packets; /* total blog packets transmitted */ + uint64_t rx_bytes; /* total blog bytes received */ + uint64_t tx_bytes; /* total blog bytes transmitted */ + uint64_t multicast; /* total blog multicast packets */ + uint64_t tx_multicast_packets; /* multicast packets transmitted */ + uint64_t rx_multicast_bytes; /* multicast bytes recieved */ + uint64_t tx_multicast_bytes; /* multicast bytes transmitted */ +} BlogStats_t; + +typedef enum { + BLOG_DECL(blog_skip_reason_unknown = 0) /* unknown or customer defined */ + BLOG_DECL(blog_skip_reason_br_flood) + BLOG_DECL(blog_skip_reason_ct_tcp_state_not_est) + BLOG_DECL(blog_skip_reason_ct_tcp_state_ignore) + BLOG_DECL(blog_skip_reason_ct_status_donot_blog) + BLOG_DECL(blog_skip_reason_nf_xt_skiplog) + BLOG_DECL(blog_skip_reason_nf_ebt_skiplog) + BLOG_DECL(blog_skip_reason_scrub_pkt) + BLOG_DECL(blog_skip_reason_unknown_proto) + BLOG_DECL(blog_skip_reason_unknown_proto_ah4) + BLOG_DECL(blog_skip_reason_unknown_proto_ah6) + BLOG_DECL(blog_skip_reason_unknown_proto_esp6) + BLOG_DECL(blog_skip_reason_esp4_crypto_algo) + BLOG_DECL(blog_skip_reason_esp4_spu_disabled) + BLOG_DECL(blog_skip_reason_spudd_check_failure) + BLOG_DECL(blog_skip_reason_dpi) + BLOG_DECL(blog_skip_reason_sgs) + BLOG_DECL(blog_skip_reason_bond) + BLOG_DECL(blog_skip_reason_map_tcp) + BLOG_DECL(blog_skip_reason_blog) + BLOG_DECL(blog_skip_reason_l2_local_termination) + BLOG_DECL(blog_skip_reason_local_tcp_termination) + BLOG_DECL(blog_skip_reason_blog_xfer) + BLOG_DECL(blog_skip_reason_skb_morph) + BLOG_DECL(blog_skip_reason_mega_multi_output_ports) + BLOG_DECL(blog_skip_reason_mega_attr_mismatch) + BLOG_DECL(blog_skip_reason_mega_field_mismatch) + BLOG_DECL(blog_skip_reason_max) +} blog_skip_reason_t; + +typedef enum { + BLOG_DECL(blog_free_reason_unknown = 0) /* unknown or customer defined */ + BLOG_DECL(blog_free_reason_blog_emit) + BLOG_DECL(blog_free_reason_blog_iq_prio) + BLOG_DECL(blog_free_reason_kfree) + BLOG_DECL(blog_free_reason_ipmr_local) + BLOG_DECL(blog_free_reason_max) +} blog_free_reason_t; + +typedef struct { + uint32_t blog_get; + uint32_t blog_put; + uint32_t blog_skip; + uint32_t blog_free; + uint32_t blog_xfer; + uint32_t blog_clone; + uint32_t blog_copy; + uint32_t blog_min_avail; +} blog_info_stats_t; + +#define BLOG_DUMP_DISABLE 0 +#define BLOG_DUMP_RXBLOG 1 +#define BLOG_DUMP_TXBLOG 2 +#define BLOG_DUMP_RXTXBLOG 3 + +/* + * ----------------------------------------------------------------------------- + * Support accleration of L2, L3 packets. + * + * When acceleration support is enabled system wide, the default to be used may + * be set in CC_BLOG_SUPPORT_ACCEL_MODE which gets saved in blog_support_accel_mode_g. + * One may change the default (at runtime) by invoking blog_support_accel_mode(). 
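+ *
+ * Illustrative usage sketch (not part of the original comment): a platform
+ * that supports L2+L3 acceleration could select it during bring-up with
+ *     blog_support_accel_mode(BLOG_ACCEL_MODE_L23);
+ * and later query the active mode via blog_support_get_accel_mode().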
+ * ----------------------------------------------------------------------------- + */ + + +/* + * ----------------------------------------------------------------------------- + * Acceleration support: + * All the platforms support L3 tuple based acceleatiion. + * When the acceleation mode is configured as L23, the accelerators decides + * on per packet basis whether to use L2 or L3 tuple based acceleration. + * ----------------------------------------------------------------------------- + */ +#define BLOG_ACCEL_MODE_L3 0 /* Legacy. All platforms support*/ +#define BLOG_ACCEL_MODE_L23 1 /* Platforms supporting both */ + +#define CC_BLOG_SUPPORT_ACCEL_MODE (BLOG_ACCEL_MODE_L3) + +extern int blog_support_accel_mode_g; + +typedef int (*blog_accel_mode_set_t)(uint32_t accel_mode); +extern blog_accel_mode_set_t blog_accel_mode_set_fn; + +extern void blog_support_accel_mode(int accel_mode); +extern int blog_support_get_accel_mode(void); + +/* Support for TCP ACK multi flows */ +extern int blog_support_tcp_ack_mflows_g; + +typedef int (*blog_tcp_ack_mflows_set_t)(int enable); +extern blog_tcp_ack_mflows_set_t blog_tcp_ack_mflows_set_fn; + +extern void blog_support_set_tcp_ack_mflows(int enable); +extern int blog_support_get_tcp_ack_mflows(void); + +/* Support for ToS multi flows */ +extern int blog_support_tos_mflows_g; +typedef int (*blog_tos_mflows_set_t)(int enable); +extern blog_tos_mflows_set_t blog_tos_mflows_set_fn; + +extern void blog_support_set_tos_mflows(int enable); +extern int blog_support_get_tos_mflows(void); +extern int (*blog_generic_hw_accel_loopbk)(struct sk_buff *skb, bool l3_packet); + +/* Support for unknown ucast flows */ +extern int blog_support_unknown_ucast_g; +#if defined(CONFIG_BCM_UNKNOWN_UCAST) +typedef int (*blog_unknown_ucast_set_t)(int enable); +extern blog_unknown_ucast_set_t blog_unknown_ucast_set_fn; + +extern void blog_support_set_unknown_ucast(int enable); +extern int blog_support_get_unknown_ucast(void); +#endif + +/* + * ----------------------------------------------------------------------------- + * Support blogging of multicast packets. + * + * When Multicast support is enabled system wide, the default to be used may + * be set in CC_BLOG_SUPPORT_MCAST which gets saved in blog_support_mcast_g. + * One may change the default (at runtime) by invoking blog_support_mcast(). + * ----------------------------------------------------------------------------- + */ + +/* Multicast Support for IPv4 and IPv6 Control */ +#define BLOG_MCAST_DISABLE 0 +#define BLOG_MCAST_IPV4 1 +#define BLOG_MCAST_IPV6 2 + +#ifdef CONFIG_BLOG_MCAST +#define CC_BLOG_SUPPORT_MCAST BLOG_MCAST_IPV4 + BLOG_MCAST_IPV6 +#else +#define CC_BLOG_SUPPORT_MCAST BLOG_MCAST_DISABLE +#endif + +extern int blog_support_mcast_g; +extern void blog_support_mcast(int enable); + +#define BCM_WLAN_PER_CLIENT_FLOW_LEARNING 1 + +/* + * ----------------------------------------------------------------------------- + * Support blogging of IPv6 traffic + * + * When IPv6 support is enabled system wide, the default to be used may + * be set in CC_BLOG_SUPPORT_IPV6 which gets saved in blog_support_ipv6_g. + * One may change the default (at runtime) by invoking blog_support_ipv6(). 
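+ *
+ * Illustrative usage sketch (not part of the original comment): IPv6 blogging
+ * can be toggled at runtime with
+ *     blog_support_ipv6(BLOG_IPV6_ENABLE);
+ * readers observe the current setting through blog_support_ipv6_g.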
+ * ----------------------------------------------------------------------------- + */ + +/* IPv6 Support Control: see blog_support_ipv6_g and blog_support_ipv6() */ +#define BLOG_IPV6_DISABLE 0 +#define BLOG_IPV6_ENABLE 1 + +#ifdef CONFIG_BLOG_IPV6 +#define CC_BLOG_SUPPORT_IPV6 BLOG_IPV6_ENABLE +#else +#define CC_BLOG_SUPPORT_IPV6 BLOG_IPV6_DISABLE +#endif + +extern int blog_support_ipv6_g; +extern void blog_support_ipv6(int enable); + +/* + * ----------------------------------------------------------------------------- + * Support blogging of 6rd tos + * + * When 6rd is configured, the default to be used may be set in + * CC_BLOG_DEFAULT_TUNL_TOS which gets saved in blog_tunl_tos_g. + * One may change the default (at runtime) by invoking blog_tunl_tos(). + * ----------------------------------------------------------------------------- + */ + +/* GRE Support: enable/disable */ +#define BLOG_GRE_DISABLE 0 +#define BLOG_GRE_ENABLE 1 + +#ifdef CONFIG_BLOG_GRE +#define CC_BLOG_SUPPORT_GRE BLOG_GRE_ENABLE +#else +#define CC_BLOG_SUPPORT_GRE BLOG_GRE_DISABLE +#endif + +extern int blog_gre_tunnel_accelerated_g; +extern int blog_support_gre_g; +extern void blog_support_gre(int enable); + +/* L2TP Support */ +#define BLOG_L2TP_DISABLE 0 +#define BLOG_L2TP_ENABLE 1 + +#ifdef CONFIG_BLOG_L2TP +#define CC_BLOG_SUPPORT_L2TP BLOG_L2TP_ENABLE +#else +#define CC_BLOG_SUPPORT_L2TP BLOG_L2TP_DISABLE +#endif + +extern int blog_l2tp_tunnel_accelerated_g; +extern int blog_support_l2tp_g; +extern void blog_support_l2tp(int enable); + +/* ESP Support: tunnel and pass-thru modes */ +#define BLOG_ESP_DISABLE 0 +#define BLOG_ESP_TUNNEL 1 +#define BLOG_ESP_PASS_THRU 2 + +#ifdef CONFIG_BLOG_ESP +#define CC_BLOG_SUPPORT_ESP BLOG_ESP_TUNNEL +#else +#define CC_BLOG_SUPPORT_ESP BLOG_ESP_DISABLE +#endif + +/* + * ----------------------------------------------------------------------------- + * Support 4o6 fragmentation enable/disable + * ----------------------------------------------------------------------------- + */ +#define BLOG_4O6_FRAG_DISABLE 0 +#define BLOG_4O6_FRAG_ENABLE 1 + +extern int blog_support_4o6_frag_g; +extern void blog_support_4o6_frag(int enable); + +/* blog_ct_max_g: defines the max #of CTs that can be associated with a flow */ +extern int blog_ct_max_g; + +/* blog_nxe_max_g: defines the max #of NXEs that can be associated with a flow */ +extern int blog_nxe_max_g; + +/* blog notify processing mode */ +typedef enum { + BLOG_DECL(BLOG_NOTIFY_PROC_MODE_NOW) /* processing mode: now/sync */ + BLOG_DECL(BLOG_NOTIFY_PROC_MODE_HYBRID)/* mode: now+deferred */ + BLOG_DECL(BLOG_NOTIFY_PROC_MODE_DFRD)/* processing mode: deferred */ + BLOG_DECL(BLOG_NOTIFY_PROC_MODE_MAX) +} blog_notify_proc_mode_t; + +extern int blog_notify_proc_mode_g; +extern void blog_set_notify_proc_mode(int mode); + +typedef enum { + BLOG_DECL(BLOG_NOTIFY_API_SYNC) /* blog_notify() */ + BLOG_DECL(BLOG_NOTIFY_API_ASYNC)/* blog_notify_async() */ + BLOG_DECL(BLOG_NOTIFY_API_MAX) +} blog_notify_api_t; + +typedef enum +{ + BLOG_DECL(BLOG_NOTIFY_EVT_NONE) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_FDB) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_NPE) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_ARP) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_HW) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_DEV) + BLOG_DECL(BLOG_NOTIFY_EVT_FLUSH_PARAMS) + BLOG_DECL(BLOG_NOTIFY_EVT_FETCH_NETIF_STATS) + BLOG_DECL(BLOG_NOTIFY_EVT_CLEAR_NETIF_STATS) + BLOG_DECL(BLOG_NOTIFY_EVT_MAX) +} blog_notify_evt_type_t; + +/* Traffic type */ +typedef enum { + BLOG_DECL(BlogTraffic_IPV4_UCAST) 
+ BLOG_DECL(BlogTraffic_IPV6_UCAST) + BLOG_DECL(BlogTraffic_IPV4_MCAST) + BLOG_DECL(BlogTraffic_IPV6_MCAST) + BLOG_DECL(BlogTraffic_Layer2_Flow) + BLOG_DECL(BlogTraffic_MAX) +} BlogTraffic_t; + +typedef union { + uint32_t word; + struct { + BE_DECL( + uint32_t incarn : 3; /* Allocation instance identification */ + uint32_t self : 29; /* Index into static allocation table */ + ) + LE_DECL( + uint32_t self : 29; /* Index into static allocation table */ + uint32_t incarn : 3; /* Allocation instance identification */ + ) + }; +} BlogKeyFc_t; + +typedef union { + uint32_t word; + struct { + BE_DECL( + + uint32_t resvd : 23; + uint32_t client_type: 1; + uint32_t client : 8; + ) + LE_DECL( + uint32_t client : 8; + uint32_t client_type: 1; + uint32_t resvd : 23; + ) + }; +} BlogKeyMc_t; + +#define BLOG_FDB_KEY_INVALID BLOG_KEY_NONE +#define BLOG_KEY_FC_INVALID BLOG_KEY_NONE + +#define BLOG_KEY_INVALID BLOG_KEY_NONE +#define BLOG_KEY_MCAST_INVALID BLOG_KEY_INVALID + +/* mcast client type: TX device only, TX device & wlinfo */ +#define BLOG_MCAST_CLIENT_TYPE_TXDEV 0 +#define BLOG_MCAST_CLIENT_TYPE_TXDEV_WLINFO 1 + +typedef struct { + BE_DECL( + BlogKeyFc_t fc; + BlogKeyMc_t mc; + ) + LE_DECL( + BlogKeyMc_t mc; + BlogKeyFc_t fc; + ) +} BlogActivateKey_t; + +typedef enum +{ + BLOG_DECL(BLOG_L2_KEYMAP_MACSA) + BLOG_DECL(BLOG_L2_KEYMAP_MACDA) + BLOG_DECL(BLOG_L2_KEYMAP_ETHTYPE) + BLOG_DECL(BLOG_L2_KEYMAP_VLAN0) + BLOG_DECL(BLOG_L2_KEYMAP_VLAN1) + BLOG_DECL(BLOG_L2_KEYMAP_TOS) + BLOG_DECL(BLOG_L2_KEYMAP_TCPACK) +} blog_l2_keymap_field_t; + +typedef enum +{ + BLOG_DECL(BLOG_L3_KEYMAP_IPV4SA) + BLOG_DECL(BLOG_L3_KEYMAP_IPV4DA) + BLOG_DECL(BLOG_L3_KEYMAP_IPV6SA) + BLOG_DECL(BLOG_L3_KEYMAP_IPV6DA) + BLOG_DECL(BLOG_L3_KEYMAP_PROTO) + BLOG_DECL(BLOG_L3_KEYMAP_SPORT) + BLOG_DECL(BLOG_L3_KEYMAP_DPORT) + BLOG_DECL(BLOG_L3_KEYMAP_TOS) + BLOG_DECL(BLOG_L3_KEYMAP_TCPACK) +} blog_l3_keymap_field_t; + + +#define BLOG_SET_L2KEYMAP(b, v0, v1, t) \ + b->l2_keymap = (1<<BLOG_L2_KEYMAP_MACSA \ + | 1<<BLOG_L2_KEYMAP_MACDA \ + | 1<<BLOG_L2_KEYMAP_ETHTYPE \ + | v0<<BLOG_L2_KEYMAP_VLAN0 \ + | v1<<BLOG_L2_KEYMAP_VLAN1 \ + | t<<BLOG_L2_KEYMAP_TOS) + +#define BLOG_SET_IPV4_PT_L3KEYMAP(b) \ + b->l3_keymap = (1<<BLOG_L3_KEYMAP_IPV4SA \ + | 1<<BLOG_L3_KEYMAP_IPV4DA \ + | 1<<BLOG_L3_KEYMAP_PROTO \ + | 1<<BLOG_L3_KEYMAP_TOS) + +#define BLOG_SET_IPV4_L3KEYMAP(b) \ + b->l3_keymap = (1<<BLOG_L3_KEYMAP_IPV4SA \ + | 1<<BLOG_L3_KEYMAP_IPV4DA \ + | 1<<BLOG_L3_KEYMAP_PROTO \ + | 1<<BLOG_L3_KEYMAP_SPORT \ + | 1<<BLOG_L3_KEYMAP_DPORT \ + | 1<<BLOG_L3_KEYMAP_TOS) + +#define BLOG_SET_IPV6_PT_L3KEYMAP(b) \ + b->l3_keymap = (1<<BLOG_L3_KEYMAP_IPV6SA \ + | 1<<BLOG_L3_KEYMAP_IPV6DA \ + | 1<<BLOG_L3_KEYMAP_PROTO \ + | 1<<BLOG_L3_KEYMAP_TOS) + +#define BLOG_SET_IPV6_L3KEYMAP(b) \ + b->l3_keymap = (1<<BLOG_L3_KEYMAP_IPV6SA \ + | 1<<BLOG_L3_KEYMAP_IPV6DA \ + | 1<<BLOG_L3_KEYMAP_PROTO \ + | 1<<BLOG_L3_KEYMAP_SPORT \ + | 1<<BLOG_L3_KEYMAP_DPORT \ + | 1<<BLOG_L3_KEYMAP_TOS) + +/* + *------------------------------------------------------------------------------ + * Flow Cache network proxiy entity (npe) Entry: + *------------------------------------------------------------------------------ + */ +struct blog_npe { + struct dll_t node; /* npe Entity list */ + BlogKeyFc_t key; /* linking Linux nwe and npe entity */ + struct blog_npe *chain_p; /* npe Entity Hash list node */ + uint32_t hashix; /* npe Entity hash index */ + + uint8_t type; /* npe Entity type */ + union { + void *nwe_p; + void *ct_p; + void *mega_p; + }; + Dll_t flow_list[BLOG_PARAM1_DIR_MAX]; /* 
flow lists */ + uint32_t flow_count[BLOG_PARAM1_DIR_MAX]; /* # of flows */ +} ____cacheline_aligned; +typedef struct blog_npe blog_npe_t; + +/* status of blog_nxe_t entry: + * BLOG_NXE_STS_CT: Initial status is invalid/unused + * BLOG_NXE_STS_CT/MAP/MEGA: after blog_link() + * BLOG_NXE_STS_NPE: after flow learning + */ +#define BLOG_NXE_STS_INV 0 +#define BLOG_NXE_STS_CT 1 +#define BLOG_NXE_STS_MEGA BLOG_NXE_STS_CT +#define BLOG_NXE_STS_NPE 2 + +typedef struct { + uint32_t unused: 29; + uint32_t dir : 1; + uint32_t status: 2; +} blog_nxe_flags_t; + +/* union of NPE and NWE entities */ +typedef struct { + union { + blog_npe_t *npe_p; /* valid afer flow learning */ + union { + void *nwe_p; + void *ct_p; + void *mega_p; + }; /* These fields are valid before flow learning */ + }; + blog_nxe_flags_t flags; +} blog_nxe_t; + +/* Max # of nf_conn linked per flow */ +/* Change this value to increase/decrease the #of nf_conn */ +#define BLOG_CT_MAX 4U + +#define BLOG_NXE_MEGA 0U +#define BLOG_NXE_CT 1U +#define BLOG_NXE_MAX (BLOG_NXE_CT + BLOG_CT_MAX) + +#define BLOG_FDB_NPE_SRCFDB 0U +#define BLOG_FDB_NPE_DSTFDB 1U +#define BLOG_FDB_NPE_MAX 2U + +/* Limit on Max # of nf_conn and NPE linked per flow */ +/* CAUTION: DO NOT change these values */ +#define BLOG_CT_MAX_LIMIT 8U +#define BLOG_NXE_MAX_LIMIT (BLOG_NXE_CT + BLOG_CT_MAX_LIMIT) + +#if (BLOG_CT_MAX > BLOG_NXE_MAX) +#error "BLOG_CT_MAX > BLOG_NXE_MAX)" +#endif +#if (BLOG_CT_MAX > BLOG_CT_MAX_LIMIT) +#error "BLOG_CT_MAX > BLOG_CT_MAX_LIMIT)" +#endif + +#define BLOG_NPE_NULL ((blog_npe_t*)NULL) +#define BLOG_NXE_CT_IDX(idx) (BLOG_NXE_CT+idx) + + +/* Is the megaflow valid ? */ +#define IS_BLOG_MEGA(b) ((b->nxe[BLOG_NXE_MEGA].flags.status == BLOG_NXE_STS_MEGA) && \ + (b->nxe[BLOG_NXE_MEGA].mega_p != NULL)) + +/* Before the flow is learnt, use CT macros */ +#define IS_BLOG_CT(b) ((b)->ct_count) +#define IS_BLOG_CT_IDX(b, idx) ((b->nxe[BLOG_NXE_CT_IDX(idx)].flags.status == BLOG_NXE_STS_CT) && \ + (b->nxe[BLOG_NXE_CT_IDX(idx)].ct_p != NULL)) + +/* After the flow is learnt, use NPE macros instead of CT macros */ +#define IS_BLOG_NPE(b, idx) ((b->nxe[idx].flags.status == BLOG_NXE_STS_NPE) && \ + (b->nxe[idx].npe_p != NULL)) +#define BLOG_NPE_GET(b, idx) (IS_BLOG_NPE(b, idx) ? b->nxe[idx].npe_p : NULL) + +#define IS_BLOG_NPE_CT(b) ((b)->ct_count) +#define IS_BLOG_NPE_CT_IDX(b, idx) (IS_BLOG_NPE(b, BLOG_NXE_CT_IDX(idx))) +#define IS_BLOG_NPE_MEGA(b) (IS_BLOG_NPE(b, BLOG_NXE_MEGA)) + +/* + *------------------------------------------------------------------------------ + * Flow event parameters + *------------------------------------------------------------------------------ + */ + +typedef enum { + BLOG_DECL(FLOW_EVENT_TYPE_FC) /* FCache Flow */ + BLOG_DECL(FLOW_EVENT_TYPE_HW) /* Hardware Flow */ + BLOG_DECL(FLOW_EVENT_TYPE_MAX) +} BlogFlowEventType_t; + + +typedef struct { + void *ct_p[BLOG_CT_MAX]; + int ct_count; + union { + struct { + uint32_t is_downstream :1; + uint32_t flow_event_type :2; + uint32_t is_upstream :1; + uint32_t reserved :20; + uint32_t skb_mark_flow_id :8; + }; + uint32_t u32; + }; +} BlogFlowEventInfo_t; + + +/* + * ============================================================================= + * CAUTION: OS and network stack may be built without CONFIG_BLOG defined. + * ============================================================================= + */ + +#if defined(CONFIG_BLOG) + +/* + *------------------------------------------------------------------------------ + * + * Section: Blog Conditional Compiles CC_BLOG_SUPPORT_... 
+ * + * These conditional compiles are not controlled by a system wide build process. + * E.g. CONFIG_BLOG_MCAST is a system wide build configuration + * CC_BLOG_SUPPORT_MCAST is a blog defined build configuration + * + * Do not use any CONFIG_ or CC_BLOG_SUPPORT_ in Blog_t structure definitions. + * + *------------------------------------------------------------------------------ + */ + +/* LAB ONLY: Design development, uncomment to enable */ +/* #define CC_BLOG_SUPPORT_COLOR */ +/* #define CC_BLOG_SUPPORT_DEBUG */ + + + + +/* To enable user filtering, see blog_filter(), invoked in blog_finit() */ +/* #define CC_BLOG_SUPPORT_USER_FILTER */ + + + +/* + * ----------------------------------------------------------------------------- + * Section: Definition of a Blog_t + * ----------------------------------------------------------------------------- + */ + +#define HDR_BMAP_IPV4 1 +#define HDR_BMAP_IPV6 2 +#define HDR_BMAP_L2 3 + +/* GREoESP flag indicates whether it is GRE over ESP, or ESP over GRE */ +#define BLOG_ESPoGRE 0 +#define BLOG_GREoESP 1 + +#define BLOG_GREoESP_44 0 +#define BLOG_GREoESP_64 1 +#define BLOG_GREoESP_46 2 +#define BLOG_GREoESP_66 3 + +#define BLOG_ESPoGRE_44 0 +#define BLOG_ESPoGRE_64 1 +#define BLOG_ESPoGRE_46 2 +#define BLOG_ESPoGRE_66 3 + +typedef struct { + + uint8_t channel; /* e.g. port number, txchannel, ... */ + + union { + struct { + uint8_t phyHdrLen : 4; + uint8_t phyHdrType : 4; + }; + uint8_t phyHdr; + }; + + uint16_t unused; + + union { + struct { + BE_DECL( + uint32_t unused : 1; + uint32_t GRE_ETH_IPv6: 1; /* GRE inner header type IPv6 */ + uint32_t GRE_ETH_IPv4: 1; /* GRE inner header type IPv4 */ + uint32_t VXLAN : 1; + uint32_t LLC_SNAP : 1; + uint32_t PLD_DST_OPTS: 1; /* Payload IPv6 Ext Hdr Dest Options */ + uint32_t DEL_DST_OPTS: 1; /* Delivery IPv6 Ext Hdr Dest Options */ + uint32_t PASS_THRU : 1; + + uint32_t NPT6 : 1; + uint32_t GREoESP : 1; + uint32_t GREoESP_type: 2; + uint32_t HDR0_L2 : 1; + uint32_t HDR0_IPv6 : 1; + uint32_t HDR0_IPv4 : 1; + uint32_t PLD_L2 : 1; + + uint32_t DEL_L2 : 1; + uint32_t DEL_IPv6 : 1; + uint32_t DEL_IPv4 : 1; + uint32_t ESP : 1; + uint32_t GRE : 1; + uint32_t L2TP : 1; + uint32_t PPTP : 1; + uint32_t PLD_IPv6 : 1; + + uint32_t PLD_IPv4 : 1; + uint32_t PPP_1661 : 1; + uint32_t PPPoE_2516 : 1; + uint32_t VLAN_8021Q : 1; + uint32_t ETH_802x : 1; + uint32_t BCM_SWC : 1; + uint32_t BCM_XPHY : 1; /* e.g. BCM_XTM */ + uint32_t GRE_ETH : 1; /* Ethernet over GRE */ + ) + LE_DECL( + uint32_t GRE_ETH : 1; /* Ethernet over GRE */ + uint32_t BCM_XPHY : 1; /* e.g. 
BCM_XTM */ + uint32_t BCM_SWC : 1; + uint32_t ETH_802x : 1; + uint32_t VLAN_8021Q : 1; + uint32_t PPPoE_2516 : 1; + uint32_t PPP_1661 : 1; + uint32_t PLD_IPv4 : 1; + + uint32_t PLD_IPv6 : 1; + uint32_t PPTP : 1; + uint32_t L2TP : 1; + uint32_t GRE : 1; + uint32_t ESP : 1; + uint32_t DEL_IPv4 : 1; + uint32_t DEL_IPv6 : 1; + uint32_t DEL_L2 : 1; + + uint32_t PLD_L2 : 1; + uint32_t HDR0_IPv4 : 1; + uint32_t HDR0_IPv6 : 1; + uint32_t HDR0_L2 : 1; + uint32_t GREoESP_type: 2; + uint32_t GREoESP : 1; + uint32_t NPT6 : 1; + + uint32_t PASS_THRU : 1; + uint32_t DEL_DST_OPTS: 1; /* Delivery IPv6 Ext Hdr Dest Options */ + uint32_t PLD_DST_OPTS: 1; /* Payload IPv6 Ext Hdr Dest Options */ + uint32_t LLC_SNAP : 1; + uint32_t VXLAN : 1; + uint32_t GRE_ETH_IPv4: 1; /* GRE inner header type IPv4 */ + uint32_t GRE_ETH_IPv6: 1; /* GRE inner header type IPv6 */ + uint32_t unused : 1; + ) + } bmap;/* as per order of BlogEncap_t enums declaration */ + uint32_t hdrs; + }; +} BlogInfo_t; + +/* + *------------------------------------------------------------------------------ + * Buffer to log IP Tuple. + * Packed: 1 16byte cacheline. + *------------------------------------------------------------------------------ + */ +struct blogTuple_t { + uint32_t saddr; /* IP header saddr */ + uint32_t daddr; /* IP header daddr */ + + union { + struct { + uint16_t source; /* L4 source port */ + uint16_t dest; /* L4 dest port */ + } port; + struct { + uint16_t unused; + uint16_t gre_callid; + }; + uint32_t ports; + uint32_t esp_spi; + }; + + uint8_t ttl; /* IP header ttl */ + uint8_t tos; /* IP header tos */ + uint16_t check; /* checksum: rx tuple=l3, tx tuple=l4 */ + +}; +typedef struct blogTuple_t BlogTuple_t; + +#define BLOG_RX_MCAST(b) (b->rx.multicast) +#define BLOG_RX_UCAST(b) (!b->rx.multicast && !b->rx.unknown_ucast) +#define BLOG_RX_UNKNOWN_UCAST(b) (!b->rx.multicast && b->rx.unknown_ucast) +#define BLOG_IS_MASTER(b) ((BLOG_RX_UNKNOWN_UCAST(b) && b->client_id == BLOG_MCAST_MASTER_CLIENT_ID) || (b->rx.multicast && b->mcast_client_id == BLOG_MCAST_MASTER_CLIENT_ID)) + +#define NEXTHDR_IPV4 IPPROTO_IPIP +#define GRE_MAX_HDR_LEN (sizeof(struct ipv6hdr) + sizeof(BLOG_HDRSZ_MAX) + BLOG_GRE_HDR_LEN) + +#define HDRS_IPinIP ((1<<GREoESP) | (3<<GREoESP_type) | (1<<GRE) | (1<<ESP) | \ + (1<<PLD_IPv4) | (1<<PLD_IPv6) | (1<<PLD_L2) | \ + (1<<HDR0_IPv4) | (1<<HDR0_IPv6) | (1<<HDR0_L2) | \ + (1<<DEL_IPv4) | (1<<DEL_IPv6) | (1<<DEL_L2) | (1<<VXLAN)) +#define HDRS_IP4in4 ((1<<PLD_IPv4) | (1<<DEL_IPv4)) +#define HDRS_IP6in4 ((1<<PLD_IPv6) | (1<<DEL_IPv4)) +#define HDRS_IP4in6 ((1<<PLD_IPv4) | (1<<DEL_IPv6)) +#define HDRS_IP6in6 ((1<<PLD_IPv6) | (1<<DEL_IPv6)) +#define HDRS_GIP4 ((1<<PLD_IPv4) | (1<<GRE)) +#define HDRS_GIP6 ((1<<PLD_IPv6) | (1<<GRE)) +#define HDRS_GL2 ((1<<PLD_L2) | (1<<GRE)) +#define HDRS_EIP4 ((1<<PLD_IPv4) | (1<<ESP)) +#define HDRS_IP2in4 ((1<<PLD_L2) | (1<<DEL_IPv4)) +#define HDRS_IP2in6 ((1<<PLD_L2) | (1<<DEL_IPv6)) +#define HDRS_EIP4in6 ((1<<PLD_IPv4) | (1<<DEL_IPv6) | (1<<ESP)) + +#define RX_IP4in6(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_IP4in6) +#define RX_IP6in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_IP6in4) +#define TX_IP4in6(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_IP4in6) +#define TX_IP6in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_IP6in4) + +#define RX_IPV4(b) ((b)->rx.info.bmap.PLD_IPv4) +#define TX_IPV4(b) ((b)->tx.info.bmap.PLD_IPv4) +#define RX_IPV6(b) ((b)->rx.info.bmap.PLD_IPv6) +#define TX_IPV6(b) ((b)->tx.info.bmap.PLD_IPv6) +#define RX_IPV4_DEL(b) ((b)->rx.info.bmap.DEL_IPv4) +#define 
TX_IPV4_DEL(b) ((b)->tx.info.bmap.DEL_IPv4) +#define RX_IPV6_DEL(b) ((b)->rx.info.bmap.DEL_IPv6) +#define TX_IPV6_DEL(b) ((b)->tx.info.bmap.DEL_IPv6) +#define PT(b) ((b)->tx.info.bmap.PASS_THRU) + +#define RX_GRE(b) ((b)->rx.info.bmap.GRE) +#define TX_GRE(b) ((b)->tx.info.bmap.GRE) +#define RX_ESP(b) ((b)->rx.info.bmap.ESP) +#define TX_ESP(b) ((b)->tx.info.bmap.ESP) +#define RX_GRE_ETH(b) ((b)->rx.info.bmap.GRE_ETH) +#define TX_GRE_ETH(b) ((b)->tx.info.bmap.GRE_ETH) +#define TX_VXLAN(b) ((b)->tx.info.bmap.VXLAN) +#define RX_VXLAN(b) ((b)->rx.info.bmap.VXLAN) +#define VXLAN(b) (TX_VXLAN(b) || RX_VXLAN(b)) + +#define RX_IPV4ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv4)) +#define TX_IPV4ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv4)) +#define RX_IPV6ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv6)) +#define TX_IPV6ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==(1 << PLD_IPv6)) +#define RX_L2ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==(1 << PLD_L2)) +#define TX_L2ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==(1 << PLD_L2)) + +#define CHK_RX_GIPV4(b) (((b)->rx.info.hdrs & ((1 << DEL_IPv4) | (1 << PLD_IPv4))) && RX_GRE(b)) +#define CHK_RX_GIPV6(b) (((b)->rx.info.hdrs & ((1 << DEL_IPv6) | (1 << PLD_IPv6))) && RX_GRE(b)) + + +#define RX_IPV4_OUTER(b) (RX_IPV4ONLY(b) || RX_IPV4_DEL(b)) +#define TX_IPV4_OUTER(b) (TX_IPV4ONLY(b) || TX_IPV4_DEL(b)) +#define PT4(b) (RX_IPV4ONLY(b) && TX_IPV4ONLY(b) && PT(b)) + +#define RX_IPV6_OUTER(b) (RX_IPV6ONLY(b) || RX_IPV6_DEL(b)) +#define TX_IPV6_OUTER(b) (TX_IPV6ONLY(b) || TX_IPV6_DEL(b)) +#define PT6(b) (RX_IPV6ONLY(b) && TX_IPV6ONLY(b) && PT(b)) + +#define HDRS_IPV4 ((1 << PLD_IPv4) | (1 << DEL_IPv4)) +#define HDRS_IPV6 ((1 << PLD_IPv6) | (1 << DEL_IPv6)) + +#define T4in6UP(b) (TX_IP4in6(b) && (RX_IPV4ONLY(b) || RX_GIP4in4(b) || RX_GIP4in6(b))) +#define T4in6DN(b) (RX_IP4in6(b) && (TX_IPV4ONLY(b) || TX_GIP4in4(b) || TX_GIP4in6(b))) +#define T4in6DN_NoRxVx(b) (T4in6DN(b) && !RX_VXLAN(b)) + +#define T6in4UP(b) (TX_IP6in4(b) && (RX_IPV6ONLY(b) || RX_GIP6in4(b) || RX_GIP6in6(b))) +#define T6in4DN(b) (RX_IP6in4(b) && (TX_IPV6ONLY(b) || TX_GIP6in4(b) || TX_GIP6in6(b))) + +#define CHK4in6(b) (T4in6UP(b) || T4in6DN(b)) +#define CHK6in4(b) (T6in4UP(b) || T6in4DN(b)) +#define CHK4to4(b) (RX_IPV4ONLY(b) && TX_IPV4ONLY(b)) +#define CHK6to6(b) (RX_IPV6ONLY(b) && TX_IPV6ONLY(b)) + +/* RX/TX is ESPv4 */ +#define RX_E4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==((1 << PLD_IPv4)|(1 << ESP))) +#define TX_E4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==((1 << PLD_IPv4)|(1 << ESP))) + +/* RX/TX ESPv4 over DSLite tunnel WAN side */ +#define RX_E4in6(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_EIP4in6) +#define TX_E4in6(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_EIP4in6) + +/* ESPv4 pass-thru over DSLite tunnel */ +#define EoT4in6UP(b) (RX_E4(b) && TX_E4in6(b)) +#define EoT4in6DN(b) (RX_E4in6(b) && TX_E4(b)) + +#define HDRS_GIP4in4 ((1<<GRE) | HDRS_IP4in4) +#define HDRS_GIP6in4 ((1<<GRE) | HDRS_IP6in4) +#define HDRS_GIP2in4 ((1<<GRE) | HDRS_IP2in4) + +#define HDRS_GIP4in6 ((1<<GRE) | HDRS_IP4in6) +#define HDRS_GIP6in6 ((1<<GRE) | HDRS_IP6in6) +#define HDRS_GIP2in6 ((1<<GRE) | HDRS_IP2in6) + +#define RX_GIPV4ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)== HDRS_GIP4) +#define TX_GIPV4ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)== HDRS_GIP4) +#define RX_GIPV6ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)== HDRS_GIP6) +#define TX_GIPV6ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)== HDRS_GIP6) +#define RX_GL2ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)== HDRS_GL2) +#define 
TX_GL2ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)== HDRS_GL2) + +#define RX_GIP4in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in4) +#define TX_GIP4in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in4) +#define RX_GIP6in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in4) +#define TX_GIP6in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in4) +#define RX_GIP2in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP2in4) +#define TX_GIP2in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP2in4) +#define RX_GIP46in4(b) (RX_GIP4in4(b) || RX_GIP6in4(b)) +#define TX_GIP46in4(b) (TX_GIP4in4(b) || TX_GIP6in4(b)) + +#define RX_GIP4in6(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in6) +#define TX_GIP4in6(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP4in6) +#define RX_GIP6in6(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in6) +#define TX_GIP6in6(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP6in6) +#define RX_GIP2in6(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GIP2in6) +#define TX_GIP2in6(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GIP2in6) +#define RX_GIP46in6(b) (RX_GIP4in6(b) || RX_GIP6in6(b)) +#define TX_GIP46in6(b) (TX_GIP4in6(b) || TX_GIP6in6(b)) + +#define TG4in4UP(b) (TX_GIP4in4(b) && (RX_IPV4ONLY(b) || RX_IPV6ONLY(b) || RX_IP4in6(b))) +#define TG4in4DN(b) (RX_GIP4in4(b) && (TX_IPV4ONLY(b) || TX_IPV6ONLY(b) || TX_IP4in6(b))) +#define TG6in4UP(b) (TX_GIP6in4(b) && (RX_IPV4ONLY(b) || RX_IPV6ONLY(b) || RX_IP6in4(b))) +#define TG6in4DN(b) (RX_GIP6in4(b) && (TX_IPV4ONLY(b) || TX_IPV6ONLY(b) || TX_IP6in4(b))) +#define TG2in4UP(b) (RX_L2ONLY(b) && TX_GIP2in4(b)) +#define TG2in4DN(b) (RX_GIP2in4(b) && TX_L2ONLY(b)) + +#define TGL2_4in6UP(b) (RX_L2ONLY(b) && TX_GRE_ETH(b) && TX_GIP2in6(b) && !((b)->tx.info.bmap.GRE_ETH_IPv6) && ((b)->tx.info.bmap.GRE_ETH_IPv4)) +#define TGL2_4in6DN(b) (RX_GRE_ETH(b) && RX_GIP2in6(b) && TX_L2ONLY(b) && !((b)->rx.info.bmap.GRE_ETH_IPv6) && ((b)->rx.info.bmap.GRE_ETH_IPv4)) +#define TGL2_6in6UP(b) (RX_L2ONLY(b) && TX_GRE_ETH(b) && TX_GIP2in6(b) && ((b)->tx.info.bmap.GRE_ETH_IPv6) && !((b)->tx.info.bmap.GRE_ETH_IPv4)) +#define TGL2_6in6DN(b) (RX_GRE_ETH(b) && RX_GIP2in6(b) && TX_L2ONLY(b) && ((b)->rx.info.bmap.GRE_ETH_IPv6) && !((b)->rx.info.bmap.GRE_ETH_IPv4)) +#define TGL2_6in4UP(b) (RX_L2ONLY(b) && TX_GRE_ETH(b) && TX_GIP2in4(b) && ((b)->tx.info.bmap.GRE_ETH_IPv6) && !((b)->tx.info.bmap.GRE_ETH_IPv4)) +#define TGL2_6in4DN(b) (RX_GRE_ETH(b) && RX_GIP2in4(b) && TX_L2ONLY(b) && ((b)->rx.info.bmap.GRE_ETH_IPv6) && !((b)->rx.info.bmap.GRE_ETH_IPv4)) + +#define TG4in6UP(b) (TX_GIP4in6(b) && (RX_IPV4ONLY(b) || RX_IPV6ONLY(b) || RX_IP4in6(b))) +#define TG4in6DN(b) (RX_GIP4in6(b) && (TX_IPV4ONLY(b) || TX_IPV6ONLY(b) || TX_IP4in6(b))) +#define TG6in6UP(b) (TX_GIP6in6(b) && (RX_IPV4ONLY(b) || RX_IPV6ONLY(b) || RX_IP6in4(b))) +#define TG6in6DN(b) (RX_GIP6in6(b) && (TX_IPV4ONLY(b) || TX_IPV6ONLY(b) || TX_IP6in4(b))) +#define TG2in6UP(b) (RX_L2ONLY(b) && TX_GIP2in6(b)) +#define TG2in6DN(b) (RX_GIP2in6(b) && TX_L2ONLY(b)) + +#define TG24in4UP(b) (TG4in4UP(b) || TG2in4UP(b)) +#define TG24in4DN(b) (TG4in4DN(b) || TG2in4DN(b)) + +#define TG24in6UP(b) (TG4in6UP(b) || TG2in6UP(b)) +#define TG24in6DN(b) (TG4in6DN(b) || TG2in6DN(b)) + +#define CHKG4in4(b) (TG4in4UP(b) || TG4in4DN(b)) +#define CHKG6in4(b) (TG6in4UP(b) || TG6in4DN(b)) +#define CHKG2in4(b) (TG2in4UP(b) || TG2in4DN(b)) +#define CHKG46in4UP(b) (TG4in4UP(b) || TG6in4UP(b)) +#define CHKG46in4DN(b) (TG4in4DN(b) || TG6in4DN(b)) +#define CHKG46in4(b) (CHKG4in4(b) || CHKG6in4(b)) +#define CHKG246in4UP(b) (TG4in4UP(b) || 
TG6in4UP(b) || TG2in4UP(b)) +#define CHKG246in4DN(b) (TG4in4DN(b) || TG6in4DN(b) || TG2in4DN(b)) +#define CHKG246in4(b) (CHKG4in4(b) || CHKG6in4(b) || CHKG2in4(b)) + +#define CHKG4in6(b) (TG4in6UP(b) || TG4in6DN(b)) +#define CHKG6in6(b) (TG6in6UP(b) || TG6in6DN(b)) +#define CHKG2in6(b) (TG2in6UP(b) || TG2in6DN(b)) +#define CHKG46in6UP(b) (TG4in6UP(b) || TG6in6UP(b)) +#define CHKG46in6DN(b) (TG4in6DN(b) || TG6in6DN(b)) +#define CHKG46in6(b) (CHKG4in6(b) || CHKG6in6(b)) +#define CHKG246in6UP(b) (TG4in6UP(b) || TG6in6UP(b) || TG2in6UP(b)) +#define CHKG246in6DN(b) (TG4in6DN(b) || TG6in6DN(b) || TG2in6DN(b)) +#define CHKG246in6(b) (CHKG4in6(b) || CHKG6in6(b) || CHKG2in6(b)) + +#define PTG4(b) (RX_GIPV4ONLY(b) && TX_GIPV4ONLY(b) && PT(b)) +#define PTG6(b) (RX_GIPV6ONLY(b) && TX_GIPV6ONLY(b) && PT(b)) +#define TOTG4(b) (!PT(b) && ((RX_GIP4in4(b) && TX_GIP4in4(b)) || \ + (RX_GIP6in4(b) && TX_GIP6in4(b)))) +#define TOTG6(b) (!PT(b) && ((RX_GIP4in6(b) && TX_GIP4in6(b)) || \ + (RX_GIP6in6(b) && TX_GIP6in6(b)))) + +#define L2ACCEL_PTG(b) (RX_GL2ONLY(b) && TX_GL2ONLY(b)) + +#define HDRS_EIP4in4 ((1<<ESP) | HDRS_IP4in4) +#define HDRS_EIP6in4 ((1<<ESP) | HDRS_IP6in4) + +#define RX_EIPV4ONLY(b) (((b)->rx.info.hdrs & HDRS_IPinIP)== HDRS_EIP4) +#define TX_EIPV4ONLY(b) (((b)->tx.info.hdrs & HDRS_IPinIP)== HDRS_EIP4) + +#define RX_EIP4in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_EIP4in4) +#define TX_EIP4in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_EIP4in4) +#define RX_EIP6in4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_EIP6in4) +#define TX_EIP6in4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_EIP6in4) + +#define TE4in4UP(b) (RX_IPV4ONLY(b) && TX_EIP4in4(b)) +#define TE4in4DN(b) (RX_EIP4in4(b) && TX_IPV4ONLY(b)) +#define TE6in4UP(b) (RX_IPV6ONLY(b) && TX_EIP6in4(b)) +#define TE6in4DN(b) (RX_EIP6in4(b) && TX_IPV6ONLY(b)) + +#define CHKE4in4(b) (TE4in4UP(b) || TE4in4DN(b)) +#define CHKE6in4(b) (TE6in4UP(b) || TE6in4DN(b)) +#define CHKE46in4(b) (CHKE4in4(b) || CHKE6in4(b)) + +#define PTE4(b) (RX_EIPV4ONLY(b) && TX_EIPV4ONLY(b)) + +#define HDRS_404 (1<<DEL_IPv4|1<<PLD_IPv4) +#define HDRS_464 (1<<DEL_IPv4|1<<HDR0_IPv6|1<<PLD_IPv4) +#define HDRS_606 (1<<DEL_IPv6|1<<PLD_IPv6) +#define HDRS_646 (1<<DEL_IPv6|1<<HDR0_IPv4|1<<PLD_IPv4) + +#define RX_404(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_404) +#define RX_464(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_464) +#define RX_606(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_606) +#define RX_646(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_646) + + +#define HDRS_444 (1<<DEL_IPv4|1<<HDR0_IPv4|1<<PLD_IPv4) +#define HDRS_644 (1<<DEL_IPv4|1<<HDR0_IPv4|1<<PLD_IPv6) +#define HDRS_244 (1<<DEL_IPv4|1<<HDR0_IPv4|1<<PLD_L2) + + +/* GRE over ESP */ +#define HDRS_4oG4oE4 (BLOG_GREoESP_44<<GREoESP_type|1<<GREoESP|1<<ESP|1<<GRE|HDRS_444) +#define HDRS_6oG4oE4 (BLOG_GREoESP_44<<GREoESP_type|1<<GREoESP|1<<ESP|1<<GRE|HDRS_644) +#define HDRS_2oG4oE4 (BLOG_GREoESP_44<<GREoESP_type|1<<GREoESP|1<<ESP|1<<GRE|HDRS_244) +/* HDRS_GE2 excludes IP Hdrs */ +#define HDRS_GE2 (BLOG_GREoESP_44<<GREoESP_type|1<<GREoESP|1<<ESP|1<<GRE|1<<PLD_L2) + +#define RX_4oG4oE4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_4oG4oE4) +#define TX_4oG4oE4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_4oG4oE4) +#define RX_6oG4oE4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_6oG4oE4) +#define TX_6oG4oE4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_6oG4oE4) +#define RX_2oG4oE4(b) (((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_2oG4oE4) +#define TX_2oG4oE4(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_2oG4oE4) +#define RX_GoEo2(b) 
(((b)->rx.info.hdrs & HDRS_IPinIP)==HDRS_GE2) +#define TX_GoEo2(b) (((b)->tx.info.hdrs & HDRS_IPinIP)==HDRS_GE2) + +#define T4oG4oE4UP(b) (RX_IPV4ONLY(b) && TX_4oG4oE4(b)) +#define T4oG4oE4DN(b) (RX_4oG4oE4(b) && TX_IPV4ONLY(b)) +#define T6oG4oE4UP(b) (RX_IPV6ONLY(b) && TX_6oG4oE4(b)) +#define T6oG4oE4DN(b) (RX_6oG4oE4(b) && TX_IPV6ONLY(b)) +#define T2oG4oE4UP(b) (RX_L2ONLY(b) && (TX_2oG4oE4(b) || TX_GoEo2(b))) +#define T2oG4oE4DN(b) ((RX_2oG4oE4(b) || RX_GoEo2(b)) && TX_L2ONLY(b)) + +#define CHK4oG4oE4UP(b) (T4oG4oE4UP(b)) +#define CHK4oG4oE4DN(b) (T4oG4oE4DN(b)) +#define CHK4oG4oE4(b) (CHK4oG4oE4UP(b) || CHK4oG4oE4DN(b)) + +#define CHK6oG4oE4UP(b) (T6oG4oE4UP(b)) +#define CHK6oG4oE4DN(b) (T6oG4oE4DN(b)) +#define CHK6oG4oE4(b) (CHK6oG4oE4UP(b) || CHK6oG4oE4DN(b)) + +#define CHK46oG4oE4DN(b) (CHK4oG4oE4DN(b) || CHK6oG4oE4DN(b)) +#define CHK46oG4oE4UP(b) (CHK4oG4oE4UP(b) || CHK6oG4oE4UP(b)) +#define CHK46oG4oE4(b) (CHK4oG4oE4(b) || CHK6oG4oE4(b)) + +#define CHK2oG4oE4UP(b) (T2oG4oE4UP(b)) +#define CHK2oG4oE4DN(b) (T2oG4oE4DN(b)) +#define CHK2oG4oE4(b) (CHK2oG4oE4UP(b) || CHK2oG4oE4DN(b)) + + +#define CHK246oG4oE4UP(b) (CHK4oG4oE4UP(b) || CHK6oG4oE4UP(b) || CHK2oG4oE4UP(b)) +#define CHK246oG4oE4DN(b) (CHK4oG4oE4DN(b) || CHK6oG4oE4DN(b) || CHK2oG4oE4DN(b)) +#define CHK246oG4oE4(b) (CHK4oG4oE4(b) || CHK6oG4oE4(b) || CHK2oG4oE4(b)) + + +#define RX_PPTP(b) ((b)->rx.info.bmap.PPTP) +#define TX_PPTP(b) ((b)->tx.info.bmap.PPTP) + +#define RX_L2TP(b) ((b)->rx.info.bmap.L2TP) +#define TX_L2TP(b) ((b)->tx.info.bmap.L2TP) + +#define CHK_RX_L2TP(b) (((b)->rx.info.hdrs & ((1 << DEL_IPv4) | (1 << PLD_IPv4))) && RX_L2TP(b)) +#define CHK_TX_L2TP(b) (((b)->rx.info.hdrs & ((1 << DEL_IPv4) | (1 << PLD_IPv4))) && TX_L2TP(b)) + +#define RX_PPPOE(b) ((b)->rx.info.bmap.PPPoE_2516) +#define TX_PPPOE(b) ((b)->tx.info.bmap.PPPoE_2516) +#define PT_PPPOE(b) (RX_PPPOE(b) && TX_PPPOE(b)) + +#define MAPT_UP(b) ((RX_IPV4ONLY(b) || RX_GIP4in4(b)) && TX_IPV6ONLY(b)) +#define MAPT_DN(b) (RX_IPV6ONLY(b) && (TX_IPV4ONLY(b) || TX_GIP4in4(b))) +#define MAPT(b) (MAPT_DN(b) || MAPT_UP(b)) + +#define PKT_IPV6_GET_TOS_WORD(word) \ + ((ntohl(word) & 0x0FF00000) >> 20) + +#define PKT_IPV6_SET_TOS_WORD(word, tos) \ + (word = htonl((ntohl(word) & 0xF00FFFFF) | ((tos << 20) & 0x0FF00000))) + +/* BLOG_LOCK Definitions */ +extern spinlock_t blog_lock_g; +#define BLOG_LOCK_BH() spin_lock_bh( &blog_lock_g ) +#define BLOG_UNLOCK_BH() spin_unlock_bh( &blog_lock_g ) + +typedef struct ip6_addr { + union { + uint8_t p8[16]; + uint16_t p16[8]; + uint32_t p32[4]; + }; +} ip6_addr_t; + +/* + *------------------------------------------------------------------------------ + * Buffer to log IPv6 Tuple. 
+ * Packed: 3 16byte cachelines + *------------------------------------------------------------------------------ + */ +struct blogTupleV6_t { + union { + uint32_t word0; + }; + + union { + uint32_t word1; + struct { + uint16_t length; + uint8_t next_hdr; + uint8_t rx_hop_limit; + }; + }; + + ip6_addr_t saddr; + ip6_addr_t daddr; + + union { + struct { + uint16_t source; /* L4 source port */ + uint16_t dest; /* L4 dest port */ + } port; + uint32_t ports; + }; + + union { + struct { + uint8_t exthdrs:6; /* Bit field of IPv6 extension headers */ + uint8_t fragflag:1; /* 6in4 Upstream IPv4 fragmentation flag */ + uint8_t tunnel:1; /* Indication of IPv6 tunnel */ + uint8_t tx_hop_limit; + uint16_t ipid; /* 6in4 Upstream IPv4 identification */ + }; + uint32_t word2; + }; + + union { + struct { + uint8_t nextHdr; uint8_t hdrLen; uint16_t data16; + uint32_t data32; + }; + uint64_t ip6_ExtHdr; + }; + + ip6_addr_t addr_npt6; +} ____cacheline_aligned; +typedef struct blogTupleV6_t BlogTupleV6_t; + +#define BLOG_GRE_FLAGS_SEQ_ENABLE 0x1000 +#define BLOG_GRE_FLAGS_KEY_ENABLE 0x2000 + +typedef union blogGreFlags { + uint16_t u16; + struct { + BE_DECL( + uint16_t csumIe : 1; + uint16_t rtgIe : 1; + uint16_t keyIe : 1; + uint16_t seqIe : 1; + uint16_t srcRtIe: 1; + uint16_t recurIe: 3; + uint16_t ackIe : 1; + + uint16_t flags : 4; + uint16_t ver : 3; + ) + LE_DECL( + uint16_t ver : 3; + uint16_t flags : 4; + + uint16_t ackIe : 1; + uint16_t recurIe: 3; + uint16_t srcRtIe: 1; + uint16_t seqIe : 1; + uint16_t keyIe : 1; + uint16_t rtgIe : 1; + uint16_t csumIe : 1; + ) + }; +} BlogGreFlags_t; + +struct blogGre_t { + uint8_t l2hdr[ BLOG_HDRSZ_MAX ]; /* Data of all L2 headers */ + BlogGreFlags_t gre_flags; + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t reserved : 10; + uint16_t fragflag : 1; + uint16_t hlen : 5; + ) + LE_DECL( + uint16_t hlen : 5; + uint16_t fragflag : 1; + uint16_t reserved : 10; + ) + }; + }; + uint16_t ipid; + uint16_t l2_hlen; + + union { //pptp + struct { + uint16_t keyLen; + uint16_t keyId; + }; + uint32_t key; + }; + uint32_t seqNum; + uint32_t ackNum; + uint16_t pppInfo; + uint16_t pppProto; +}; + +typedef struct blogGre_t BlogGre_t; + +typedef union blogL2tpFlags { + uint16_t u16; + struct { + BE_DECL( + uint16_t type : 1; + uint16_t lenBit : 3; + uint16_t seqBit : 2; + uint16_t offsetBit : 1; + uint16_t priority : 1; + uint16_t reserved : 4; + uint16_t version : 4; + ) + LE_DECL( + uint16_t version : 4; + uint16_t reserved : 4; + uint16_t priority : 1; + int16_t offsetBit : 1; + uint16_t seqBit : 2; + uint16_t lenBit : 3; + uint16_t type : 1; + ) + }; +} BlogL2tpFlags_t; + +struct blogL2tp_t { + BlogL2tpFlags_t l2tp_flags; + uint16_t length; + uint16_t tunnelId; + uint16_t sessionId; + uint16_t seqNum; + uint16_t expSeqNum; + uint16_t offsetSize; + uint16_t offsetPad; + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t reserved : 10; + uint16_t fragflag : 1; + uint16_t hlen : 5; + ) + LE_DECL( + uint16_t hlen : 5; + uint16_t fragflag : 1; + uint16_t reserved : 10; + ) + }; + }; + uint16_t ipid; + uint16_t unused; + uint16_t udpLen; + uint16_t udpCheck; + uint16_t pppInfo; + uint16_t pppProto; + +}; +typedef struct blogL2tp_t BlogL2tp_t; + +#define BLOG_PPP_ADDR_CTL 0xFF03 +#define BLOG_L2TP_PPP_LEN 4 /* used when PPP address and control is 0xFF03 */ +#define BLOG_L2TP_PPP_LEN2 2 /* used when PPP address and control is NOT 0xFF03 */ +#define BLOG_L2TP_PORT 1701 + +#define BLOG_PPTP_PPP_LEN 4 +#define BLOG_PPTP_NOAC_PPPINFO 0X2145 /* pptp packet without ppp address 
control field 0xff03 */ + +#define BLOG_ESP_SPI_LEN 4 +#define BLOG_ESP_SEQNUM_LEN 4 +#define BLOG_ESP_PADLEN_LEN 1 +#define BLOG_ESP_NEXT_PROTO_LEN 1 + +#define BLOG_ESP_ICV_LEN_64 8 +#define BLOG_ESP_ICV_LEN_96 12 +#define BLOG_ESP_ICV_LEN_128 16 +#define BLOG_ESP_ICV_LEN_192 24 +#define BLOG_ESP_ICV_LEN_224 28 +#define BLOG_ESP_ICV_LEN_256 32 +#define BLOG_ESP_ICV_LEN_384 48 +#define BLOG_ESP_ICV_LEN_512 64 + +struct blogEsp_t { + uint32_t u32; + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t icvsize : 7; + uint16_t pmtudiscen : 1; + uint16_t ipv6 : 1; + uint16_t ipv4 : 1; + uint16_t fragflag : 1; + uint16_t ivsize : 5; + ) + LE_DECL( + uint16_t ivsize : 5; + uint16_t fragflag : 1; + uint16_t ipv4 : 1; + uint16_t ipv6 : 1; + uint16_t pmtudiscen : 1; + uint16_t icvsize : 7; + ) + }; + }; + uint16_t ipid; + void *dst_p; + void *secPath_p; +}; +typedef struct blogEsp_t BlogEsp_t; + +#define BLOG_VXLAN_PORT 4789 +#define BLOG_VXLAN_TUNNEL_MAX_LEN (BLOG_HDRSZ_MAX + BLOG_IPV6_HDR_LEN + BLOG_UDP_HDR_LEN + BLOG_VXLAN_HDR_LEN) + +struct blogVxlan_t { + uint32_t vni; + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t reserved : 6; + uint16_t ipv6 : 1; + uint16_t ipv4 : 1; + uint16_t length : 8; + ) + LE_DECL( + uint16_t length : 8; + uint16_t ipv4 : 1; + uint16_t ipv6 : 1; + uint16_t reserved : 6; + ) + }; + }; + uint8_t l2len; + uint8_t outer_tos; + uint8_t tunnel_data[BLOG_VXLAN_TUNNEL_MAX_LEN]; +}; +typedef struct blogVxlan_t BlogVxlan_t; + + +/* + *------------------------------------------------------------------------------ + * Buffer to log Layer 2 and IP Tuple headers. + * Packed: 4 16byte cachelines + *------------------------------------------------------------------------------ + */ +struct blogHeader_t { + + BlogTuple_t tuple; /* L3+L4 IP Tuple log */ + + union { + BlogInfo_t info; + union { + struct { + uint32_t word1; /* channel, count, rfc2684, bmap */ + uint32_t word; /* channel, count, rfc2684, bmap */ + }; + uint32_t pktlen; /* stats info */ + }; + }; + + struct { + uint8_t vlan_8021ad :1; /* 8021AD stacked */ + uint8_t unknown_ucast:1; /* device type */ + uint8_t multicast :1; /* multicast flag */ + uint8_t fkbInSkb :1; /* fkb from skb */ + uint8_t count :4; /* # of L2 encapsulations */ + }; + uint8_t length; /* L2 header total length */ + uint8_t /*BlogEncap_t*/ encap[ BLOG_ENCAP_MAX ];/* All L2 header types */ + + uint8_t l2hdr[ BLOG_HDRSZ_MAX ]; /* Data of all L2 headers */ + struct { + uint8_t unused; + uint8_t len_offset; + union { + uint16_t frame_len; + int16_t len_delta; + }; + } llc_snap; +} ____cacheline_aligned; + +typedef struct blogHeader_t BlogHeader_t; /* L2 and L3+4 tuple */ + +/* Coarse hash key: L1, L3, L4 hash */ +union blogHash_t { + uint32_t match; + struct { + union { + struct { + uint8_t tcp_pure_ack : 1; + uint8_t llc_snap : 1; + uint8_t unused : 6; + }; + uint8_t ext_match; + }; + uint8_t protocol; /* IP protocol */ + + union { + uint16_t u16; + struct { + uint8_t channel; + union { + struct { + uint8_t phyLen : 4; + uint8_t phyType : 4; + }; + uint8_t phy; + }; + }; + } l1_tuple; + }; +}; + +typedef union blogHash_t BlogHash_t; + + +/* flow priority - used for packet buffer reseravtion */ +typedef enum { + BLOG_DECL(flow_prio_normal) + BLOG_DECL(flow_prio_high) + BLOG_DECL(flow_prio_exclusive) +} BlogFlowPrio_t; + + +/* TBD : Rearrange following bit positions for optimization. 
*/ +union blogWfd_t { + uint32_t u32; + struct { + BE_DECL( + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_wmf_enabled : 1;/* =0 unused */ + uint32_t chain_idx :16;/* Tx chain index */ + uint32_t is_chain : 1;/* is_chain=1 */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t priority : 4;/* Tx Priority */ + uint32_t reserved0 : 4;/* unused */ + ) + LE_DECL( + uint32_t reserved0 : 4;/* unused */ + uint32_t priority : 4;/* Tx Priority */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t is_chain : 1;/* is_chain=1 */ + uint32_t chain_idx :16;/* Tx chain index */ + uint32_t is_wmf_enabled : 1;/* =0 unused */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + ) + } nic_ucast; + + struct { + BE_DECL( + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_wmf_enabled : 1;/* =0 unused */ + uint32_t flowring_idx :16;/* Tx flowring index */ + uint32_t is_chain : 1;/* is_chain=0 */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t ssid : 4;/* SSID for WLAN, keep it the same location as mcast */ + uint32_t priority : 3;/* Tx Priority */ + uint32_t reserved0 : 1;/* unused */ + ) + LE_DECL( + uint32_t reserved0 : 1;/* unused */ + uint32_t priority : 3;/* Tx Priority */ + uint32_t ssid : 4;/* SSID for WLAN, keep it the same location as mcast */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t is_chain : 1;/* is_chain=0 */ + uint32_t flowring_idx :16;/* Tx flowring index */ + uint32_t is_wmf_enabled : 1;/* =0 unused */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + ) + } dhd_ucast; + + struct { + BE_DECL( + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_wmf_enabled : 1;/* =1 if wmf enabled */ + uint32_t sta_id :16;/* uniq wifi identifier, NIC max 2048*/ + uint32_t is_chain : 1;/* is_chain=0 */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t ssid : 4;/* SSID, keep it the same location as dhd_ucast */ + uint32_t reserved : 4;/* unused */ + ) + LE_DECL( + uint32_t reserved : 4;/* unused */ + uint32_t ssid : 4;/* SSID, keep it the same location as dhd_ucast */ + uint32_t wfd_prio : 1;/* 0=high, 1=low */ + uint32_t wfd_idx : 2;/* WFD idx */ + uint32_t is_chain : 1;/* is_chain=0 */ + uint32_t sta_id :16;/* uniq wifi sta identifier, NIC max 2048*/ + uint32_t is_wmf_enabled : 1;/* =1 if wmf enabled */ + uint32_t is_wfd : 1;/* is_wfd=1 */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + ) + } mcast; +}; +typedef union blogWfd_t BlogWfd_t; + +struct 
blogRnr_t { + BE_DECL( + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_wfd : 1;/* rnr (is_wfd=0) */ + uint32_t is_wmf_enabled : 1;/* =1 if wmf enabled */ + uint32_t flowring_idx :16;/* Tx flowring index */ + uint32_t radio_idx : 2;/* Radio index */ + uint32_t llcsnap_flag : 1;/* llcsnap_flag */ + uint32_t priority : 3;/* Tx Priority */ + uint32_t ssid : 4;/* SSID */ + uint32_t flow_prio : 2;/* flow priority (normal,high, exclusive) - used for packet buffer reservation */ + ) + LE_DECL( + uint32_t flow_prio : 2;/* flow priority (normal,high, exclusive) - used for packet buffer reservation */ + uint32_t ssid : 4;/* SSID */ + uint32_t priority : 3;/* Tx Priority */ + uint32_t llcsnap_flag : 1;/* llcsnap_flag */ + uint32_t radio_idx : 2;/* Radio index */ + uint32_t flowring_idx :16;/* Tx flowring index */ + uint32_t is_wmf_enabled : 1;/* =1 if wmf enabled */ + uint32_t is_wfd : 1;/* rnr (is_wfd=0) */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + ) +}; + +typedef struct blogRnr_t BlogRnr_t; + +/* GDX device blog info */ +struct blog_gdx { + union { + uint32_t u32; + struct { + BE_DECL( + uint32_t unused : 12; + uint32_t gdx_idx : 2;/* GDX device index */ + uint32_t is_gdx_tx : 1;/* GDX device */ + uint32_t gdx_prio : 1;/* 0=high, 1=low */ + uint32_t gdx_ifid : 16;/* interface id */ + ) + LE_DECL( + uint32_t gdx_ifid : 16;/* interface id */ + uint32_t gdx_prio : 1;/* 0=high, 1=low */ + uint32_t is_gdx_tx : 1;/* GDX device */ + uint32_t gdx_idx : 2;/* GDX device index */ + uint32_t unused : 12; + ) + }; + }; +}; + +typedef struct blog_gdx blog_gdx_t; + + +#define MAX_NUM_VLAN_TAG 2 + +/* Blog ingress priority derived from IQOS */ +typedef enum { + BLOG_DECL(BLOG_IQ_PRIO_LOW) + BLOG_DECL(BLOG_IQ_PRIO_HIGH) +} BlogIqPrio_t; + +#define BLOG_GET_MASK(b) (1ULL<<(b)) + +#define BLOG_BITS_PER_WORD 32 +#if (BLOG_BITS_PER_WORD != 32) +#error "BLOG_BITS_PER_WORD should be 32" +#endif + +#define BLOG_GROUP_MASTER_CLIENT_ID 0 +#define BLOG_GROUP_FIRST_CLIENT_ID 1 + +/* DSL RDP: first few mcast/group client ids reserved for non-WLAN clients (Enet, XTM (DPU case, etc.) 
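+   Worked sizing example (illustrative; assumes CONFIG_BCM_MAX_MCAST_CLIENTS_PER_GROUP
+   is 64): BLOG_MCAST_CLIENT_BITMAP_MAX_WORDS below evaluates to (64>>5)+1 = 3
+   32-bit words, giving a 96-bit per-group client bitmap.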
*/ +#define BLOG_GROUP_ENET_CLIENT_RESERVED_IDS 7 + +#define BLOG_MCAST_CLIENT_BITMAP_MAX_WORDS ((CONFIG_BCM_MAX_MCAST_CLIENTS_PER_GROUP>>5)+1) +#define BLOG_MCAST_CLIENT_BITMAP_SIZE (BLOG_MCAST_CLIENT_BITMAP_MAX_WORDS * BLOG_BITS_PER_WORD) +#define BLOG_MCAST_MASTER_CLIENT_ID BLOG_GROUP_MASTER_CLIENT_ID +#define BLOG_MCAST_FIRST_CLIENT_ID BLOG_GROUP_FIRST_CLIENT_ID + +#define BLOG_MCAST_ENET_CLIENT_RESERVED_IDS BLOG_GROUP_ENET_CLIENT_RESERVED_IDS +#define BLOG_MCAST_LAST_CLIENT_ID (CONFIG_BCM_MAX_MCAST_CLIENTS_PER_GROUP) + +#if defined(CONFIG_BCM_UNKNOWN_UCAST) +#define BLOG_UNKNOWN_UCAST_CLIENT_BITMAP_MAX_WORDS ((CONFIG_BCM_MAX_UNKNOWN_UCAST_CLIENTS_PER_GROUP>>5)+1) +#define BLOG_UNKNOWN_UCAST_CLIENT_BITMAP_SIZE (BLOG_UNKNOWN_UCAST_CLIENT_BITMAP_MAX_WORDS * BLOG_BITS_PER_WORD) +#define BLOG_UNKNOWN_UCAST_MASTER_CLIENT_ID BLOG_GROUP_MASTER_CLIENT_ID +#define BLOG_UNKNOWN_UCAST_FIRST_CLIENT_ID BLOG_GROUP_FIRST_CLIENT_ID + +#define BLOG_UNKNOWN_UCAST_ENET_CLIENT_RESERVED_IDS BLOG_GROUP_ENET_CLIENT_RESERVED_IDS +#define BLOG_UNKNOWN_UCAST_LAST_CLIENT_ID (CONFIG_BCM_MAX_UNKNOWN_UCAST_CLIENTS_PER_GROUP) +#endif + +#define BLOG_GROUP_DEV_REALLOC_COUNT 16 + +typedef union { + uint8_t u8; + struct { + uint8_t unused: 6; + uint8_t bridge_stats_updated: 1; /* bridge device stats have been updated */ + uint8_t bridge_dev: 1; /* virtual device is of type bridge_dev, e.g. bridge */ + }; +} blog_virt_dev_flags_t; + +typedef struct { + uint8_t unused; + uint8_t bridge_info_idx; /* index into bridge_base_stats_tbl[] */ + blog_virt_dev_flags_t flags; + int8_t delta; /* virtual dev delta */ + void *dev_p; /* pointer to virtual dev */ +} blog_virt_dev_info_t; + +/* + *------------------------------------------------------------------------------ + * These are the stats of master flow at the time of first client of bridge dev + * JOINing the mcast group. These stats will be used as base stats for calculating + * bridge dev stats for the mcast flow. + *------------------------------------------------------------------------------ + */ +typedef struct { + void *dev_p; + uint16_t master_dev_idx; + uint64_t sw_tx_packets; + uint64_t sw_tx_bytes; + uint64_t hw_tx_packets; + uint64_t hw_tx_bytes; +} blog_master_bridge_base_stats_t; + +/* + *------------------------------------------------------------------------------ + * This is the virtual dev info maintained for each of the virtual device in a + * master flow. 
+ *------------------------------------------------------------------------------ + */ +typedef struct { + uint8_t bridge_info_idx; /* index into bridge_base_stats_tbl[] */ + blog_virt_dev_flags_t flags; + uint8_t ref_cnt; /* ref count to this virtual dev */ + int8_t delta; /* virtual dev delta */ + void *dev_p; /* pointer to virtual dev */ +} blog_master_virt_dev_info_t; + +/* Max 4 different bridge Tx dev in a mcast flow */ +#define BLOG_MCAST_MAX_BRIDGE_STATS 4 +#define BLOG_MCAST_INVALID_BRIDGE_STATS BLOG_MCAST_MAX_BRIDGE_STATS + +typedef struct { + uint16_t unused; + uint16_t max_dev; /* max number of allocated devices in the table */ + uint16_t num_dev; /* number of used devices in the table */ + uint16_t last_dev_idx; /* index of last used device in the table */ + blog_master_bridge_base_stats_t bridge_base_stats_tbl[BLOG_MCAST_MAX_BRIDGE_STATS]; + blog_master_virt_dev_info_t *virt_dev_info_tbl_p; /* pointer to table/list of virtual devices */ +} group_dev_info_t; + +typedef enum group_type { + group_type_mcast, + group_type_unknown_ucast, + group_type_max +} blog_group_type_t; + +typedef struct { + uint8_t is_tcp:1; + uint8_t is_hw:1; + uint8_t is_dir_upload:1; + uint8_t stream_idx:2; + uint8_t unused:3; +} spdtst_bits_t; + +typedef struct { + uint8_t inner_esp : 1; + uint8_t is_offload : 1; + uint8_t unused : 6; +} spu_bits_t; + +/* + *------------------------------------------------------------------------------ + * Buffer log structure. + * ARM 32: 704 bytes + * ARM 64: 896 bytes + * MIPS : 704 bytes + * Marked the cacheline boundaries in the below structure. + * Be cautious when adding new members to this structure. + *------------------------------------------------------------------------------ + */ +struct blog_t { + + union { + void * void_p; + struct blog_t * blog_p; /* Free list of Blog_t */ + struct sk_buff * skb_p; /* Associated sk_buff */ + }; + BlogHash_t key; /* Coarse hash search key */ + uint32_t hash; /* hash */ + union { + uint32_t wl; + struct { + BE_DECL( + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t reserved : 30; + ) + LE_DECL( + uint32_t reserved : 30; + uint32_t is_tx_hw_acc_en : 1;/* =1 if WLAN Transmit is capable of HW Acceleartion */ + uint32_t is_rx_hw_acc_en : 1;/* =1 if WLAN Receive is capable of HW Acceleration */ + ) + } wl_hw_support; + BlogWfd_t wfd; + BlogRnr_t rnr; + }; + blog_gdx_t gdx; + uint32_t fc_context; + void * mc_fdb; /* physical rx network device */ + + /* --- [ARM32]32 byte cacheline boundary --- */ + BlogEthAddr_t src_mac; /* Flow src MAC */ + BlogEthAddr_t dst_mac; /* Flow dst MAC */ + /* --- [ARM64]64 byte cacheline boundary --- */ + + void * fdb[2]; /* fdb_src and fdb_dst */ + uint32_t ifidx[2]; /* fdb src and fdb dst bridge ifidx */ + int8_t tx_dev_delta; /* octet delta of TX dev */ + uint8_t l2_dirty_offset; + uint8_t outer_vtag_num: 4; /* used for outer header */ + uint8_t vtag_num: 4; /* used for tuple header */ + uint8_t hdr_count: 4; + uint8_t vtag_tx_num: 4; + uint16_t eth_type; + /* --- [ARM32]32 byte cacheline boundary --- */ + + union { + uint32_t flags; + struct { + BE_DECL( + uint32_t fwd_and_trap:1; + uint32_t mcast_fwd_exception: 1; + uint32_t is_routed: 1; + uint32_t fc_hybrid: 1; /* hybrid flow accelarate in HW and SW */ + uint32_t l2_mode: 1; + uint32_t is_ssm: 1; + uint32_t pkt_drop: 1; /* Driver indicates this packet will be dropped */ + uint32_t host_client_add: 1; + + 
uint32_t l2_pppoe: 1; /* L2 packet is PPPoE */ + uint32_t l2_ipv6: 1; /* L2 packet is IPv6 */ + uint32_t l2_ipv4: 1; /* L2 packet is IPv4 */ + uint32_t is_mapt_us: 1; /* MAP-T Upstream flow */ + uint32_t is_df: 1; /* IPv4 DF flag set */ + uint32_t ptm_us_bond: 1; /* PTM US Bonding Mode */ + uint32_t lag_port: 2; /* LAG port when trunking is done by internal switch/runner */ + + uint32_t tos_mode_us: 1; /* ToS mode for US: fixed, inherit */ + uint32_t tos_mode_ds: 1; /* ToS mode for DS: fixed, inherit */ + uint32_t has_pppoe: 1; + uint32_t ack_done: 1; /* TCP ACK prio decision made */ + uint32_t ack_cnt: 4; /* back to back TCP ACKs for prio */ + + uint32_t nf_dir_pld: 1; + uint32_t nf_dir_del: 1; + uint32_t nf_ct_skip_ref_dec: 1; /* when set don't decrement ct refcnt */ + uint32_t pop_pppoa: 1; + uint32_t insert_eth: 1; + uint32_t iq_prio: 1; + uint32_t mc_sync: 1; + uint32_t rtp_seq_chk: 1; /* RTP sequence check enable */ + ) + LE_DECL( + uint32_t rtp_seq_chk: 1; /* RTP sequence check enable */ + uint32_t mc_sync: 1; + uint32_t iq_prio: 1; + uint32_t insert_eth: 1; + uint32_t pop_pppoa: 1; + uint32_t nf_ct_skip_ref_dec: 1; /* when set don't decrement ct refcnt */ + uint32_t nf_dir_del: 1; + uint32_t nf_dir_pld: 1; + + uint32_t ack_cnt: 4; /* back to back TCP ACKs for prio */ + uint32_t ack_done: 1; /* TCP ACK prio decision made */ + uint32_t has_pppoe: 1; + uint32_t tos_mode_ds: 1; /* ToS mode for DS: fixed, inherit */ + uint32_t tos_mode_us: 1; /* ToS mode for US: fixed, inherit */ + + uint32_t lag_port: 2; /* LAG port when trunking is done by internal switch/runner */ + uint32_t ptm_us_bond: 1; /* PTM US Bonding Mode */ + uint32_t is_df: 1; /* IPv4 DF flag set */ + uint32_t is_mapt_us: 1; /* MAP-T Upstream flow */ + uint32_t l2_ipv4: 1; /* L2 packet is IPv4 */ + uint32_t l2_ipv6: 1; /* L2 packet is IPv6 */ + uint32_t l2_pppoe: 1; /* L2 packet is PPPoE */ + + uint32_t host_client_add: 1; + uint32_t pkt_drop: 1; /* Driver indicates this packet will be dropped */ + uint32_t is_ssm: 1; + uint32_t l2_mode: 1; + uint32_t fc_hybrid: 1; /* hybrid flow accelarate in HW and SW */ + uint32_t is_routed: 1; + uint32_t mcast_fwd_exception: 1; + uint32_t fwd_and_trap:1; + ) + }; + }; + union { + uint32_t flags2; + struct { + BE_DECL( + uint32_t unused: 31; + uint32_t group_dev_added: 1; + ) + LE_DECL( + uint32_t group_dev_added: 1; + uint32_t unused: 31; + ) + }; + }; + union { + /* only the lower 32 bit in mark is used in 64 bit system + * but we declare it as unsigned long for the ease of blog + * to handle it in different architecture, since it part + * of union with a dst_entry pointer */ + unsigned long mark; /* NF mark value on tx */ + void *dst_entry; /* skb dst_entry for local_in */ + }; + + union { + uint32_t priority; /* Tx priority */ + uint32_t flowid; /* used only for local in */ + }; + + void * blogRule_p; /* List of Blog Rules */ + + union { + struct { + uint32_t dosAttack : 16; + uint32_t lenPrior : 1; + uint32_t vlanPrior : 1; + uint32_t dscpMangl : 1; + uint32_t tosMangl : 1; + uint32_t preMod : 1; + uint32_t postMod : 1; + uint32_t dscp2pbit : 1; + uint32_t dscp2q : 1; + uint32_t reserved : 8; + }; + uint32_t feature; /* Feature set for per-packet modification */ + }; + union { + struct { + uint8_t vlanout_offset; /* Outer VLAN header offset */ + uint8_t vlanin_offset; /* Inner VLAN header offset */ + uint8_t vpass_tx_offset; /* Passthru VLAN headers tx offset */ + uint8_t vpass_len; /* Passthru VLAN headers length */ + uint8_t pppoe_offset; /* PPPoE header offset */ + uint8_t 
ip_offset; /* IPv4 header offset */ + uint8_t ip6_offset; /* IPv6 header offset */ + uint8_t l4_offset; /* Layer 4 header offset */ + uint8_t isWan; /* Receiving by WAN interface */ + uint8_t reserved8_3[3]; + }; + uint32_t offsets[3]; + }; + /* --- [ARM32][ARM64] cacheline boundary --- */ + int (*preHook)(Blog_t *blog_p, void *nbuff_p); /* Pre-modify hook */ + int (*postHook)(Blog_t *blog_p, void *nbuff_p); /* Post-modify hook */ + /* vtag[] stored in network order to improve fcache performance */ + uint32_t vtag[MAX_NUM_VLAN_TAG]; + uint32_t outer_vtag[MAX_NUM_VLAN_TAG]; + /* pointers to the devices which the flow goes thru */ + blog_virt_dev_info_t virt_dev_info[MAX_VIRT_DEV]; + + /* --- [ARM32][ARM64]cacheline boundary --- */ + void *fdb_npe_p[BLOG_FDB_NPE_MAX]; /* FDB NPEs */ + /* --- [ARM32][ARM64]cacheline boundary --- */ + + //BlogTupleV6_t and BlogHeader_t is cacheline_aligned structure, be + //be cautious when adding new variables around these memeber as it + //will create unwanted holes in the structure. + BlogTupleV6_t tupleV6; /* L3+L4 IP Tuple log */ + BlogTupleV6_t del_tupleV6; /* Del GRE L3+L4 IPV6 Tuple log */ + + BlogHeader_t tx; /* Transmit path headers */ + BlogHeader_t rx; /* Receive path headers */ + /* --- [ARM32][ARM64]cacheline boundary --- */ + + void *rx_dev_p; /* RX physical network device */ + void *tx_dev_p; /* TX physical network device */ + void *local_rx_dev_p; /* RX device for terminated traffic */ + + unsigned long dev_xmit; + + /* Flow connection/session tracker */ + blog_nxe_t nxe[BLOG_NXE_MAX]; + uint8_t ct_count; + /* --- [ARM32]32 byte cacheline boundary --- */ + void *rx_tunl_p; + /* --- [ARM64]64 byte cacheline boundary --- */ + void *tx_tunl_p; + BlogActivateKey_t activate_key; + uint8_t tx_l4_offset; /*offset to inner most L4 header*/ + uint8_t tx_l3_offset; /*offset to inner most L3 header*/ + uint16_t mcast_port_map; + uint16_t mcast_excl_udp_port; + + uint16_t minMtu; + uint8_t dpi_queue; + uint8_t tuple_offset; /* offset of flow tuple header */ + uint8_t hw_pathstat_idx; /* HWACC Pathstat index */ + uint8_t host_mac_hashix; + union { + uint8_t spdtst; + spdtst_bits_t spdtst_bits; + }; + + union { + uint8_t spu_bits; + spu_bits_t spu; + }; + + union { + uint16_t wlinfo; + uint16_t wlsta_id; + }; + + /* --- [ARM32]32 byte cacheline boundary --- */ + BlogTuple_t *grerx_tuple_p; /* gre proto RX Tuple pointer */ + BlogTuple_t *gretx_tuple_p; /* gre proto TX Tuple pointer */ + union { + struct { + BlogGre_t grerx; + /* --- [ARM32][ARM64]cacheline boundary --- */ + BlogGre_t gretx; + }; + struct { + BlogL2tp_t l2tptx; + }; + BlogVxlan_t vxlan; + }; + /* --- [ARM32]32 byte cacheline boundary (was 24 bytes ago)--- */ + BlogTuple_t *esprx_tuple_p; /* ESP proto RX Tuple pointer */ + BlogTuple_t *esptx_tuple_p; /* ESP proto TX Tuple pointer */ + /* --- [ARM32]32 byte cacheline boundary --- */ + struct { + BlogEsp_t esprx; + /* --- [ARM64]64 byte cacheline boundary --- */ + BlogEsp_t esptx; + }; + /* --- [ARM32]32 byte cacheline boundary --- */ + BlogTuple_t delrx_tuple; /* Del proto RX L3+L4 IP Tuple log */ + BlogTuple_t deltx_tuple; /* Del proto TX L3+L4 IP Tuple log */ + /* --- [ARM32][ARM64]cacheline boundary --- */ + BlogTuple_t rx_tuple[1]; /* RX L3+L4 IP Tuple log */ + BlogTuple_t tx_tuple[1]; /* TX L3+L4 IP Tuple log */ + void *br_dev_p; + void *br_rx_dev_p; + + uint32_t l2_keymap; + uint32_t l3_keymap; + + /* + * CAUTION!!!: The unions have been defined to minimize the changes to other modules. 
+ * The mcast_xxxx fields may be discontinued in future. + */ + union { + uint8_t mcast_client_type; + uint8_t client_type; + }; + union { + uint8_t mcast_client_id; + uint8_t client_id; + }; + union { + int16_t mcast_bitmap_idx; /* index into mcast bitmap pool */ + int16_t group_bitmap_idx; /* index into group bitmap pool */ + }; + blog_group_type_t group_type; + group_dev_info_t *group_dev_info_p; /* master group dev info */ + + uint32_t spdt_so_mark; +} ____cacheline_aligned; + +/* + * ----------------------------------------------------------------------------- + * Engineering constants: Pre-allocated pool size 400 blogs Ucast+Mcast + * + * Extensions done in #blogs carved from a 2x4K page (external fragmentation) + * Blog size = 240, 8192/240 = 34 extension 32bytes internal fragmentation + * + * Number of extensions engineered to permit approximately max # of flows + * (assuming one blog per flow). + * ----------------------------------------------------------------------------- + */ +#define CC_BLOG_SUPPORT_EXTEND /* Conditional compile */ +#define BLOG_POOL_SIZE_ENGG 400 /* Pre-allocated pool size */ +/* Number of Blog_t per extension */ +#define BLOG_EXTEND_SIZE_ENGG (8192/sizeof(Blog_t)) + +/* There is one additional mcast flow entry for each mcast group because of a master flow */ +#define BLOG_CONFIG_MAX_MCAST_FLOWS (CONFIG_BCM_MAX_MCAST_GROUPS + CONFIG_BCM_MAX_MCAST_CLIENTS) +#if defined(CONFIG_BCM_UNKNOWN_UCAST) +#define BLOG_CONFIG_MAX_UNKNOWN_UCAST_FLOWS (CONFIG_BCM_MAX_UNKNOWN_UCAST_GROUPS + CONFIG_BCM_MAX_UNKNOWN_UCAST_CLIENTS) +#define BLOG_CONFIG_MAX_FLOWS (CONFIG_BCM_MAX_UCAST_FLOWS + BLOG_CONFIG_MAX_MCAST_FLOWS + BLOG_CONFIG_MAX_UNKNOWN_UCAST_FLOWS) +#else +#define BLOG_CONFIG_MAX_FLOWS (CONFIG_BCM_MAX_UCAST_FLOWS + BLOG_CONFIG_MAX_MCAST_FLOWS) +#endif + +/* Maximum extensions allowed */ +#define BLOG_EXTEND_MAX_ENGG ((BLOG_CONFIG_MAX_FLOWS/BLOG_EXTEND_SIZE_ENGG) + 1) + + + +extern const char * strBlogAction[]; +extern const char * strBlogEncap[]; +extern const char * strRfc2684[]; +extern const uint8_t rfc2684HdrLength[]; +extern const uint8_t rfc2684HdrData[][16]; + + +#else +struct blog_t {void * blogRule_p;}; +#define BLOG_LOCK_BH() +#define BLOG_UNLOCK_BH() +#endif /* defined(CONFIG_BLOG) */ + +/* + * ----------------------------------------------------------------------------- + * Blog functional interface + * ----------------------------------------------------------------------------- + */ + + +/* + * ----------------------------------------------------------------------------- + * Section 1. 
Extension of a packet context with a logging context + * ----------------------------------------------------------------------------- + */ + +#if defined(CONFIG_BLOG) +#define blog_ptr(skb_p) skb_p->blog_p +#else +#define blog_ptr(skb_p) BLOG_NULL +#endif + +/* Allocate or deallocate a Blog_t */ +Blog_t * blog_get(void); +void blog_put(Blog_t * blog_p); + +/* Allocate a Blog_t and associate with sk_buff or fkbuff */ +extern Blog_t * blog_skb(struct sk_buff * skb_p); +extern Blog_t * blog_fkb(struct fkbuff * fkb_p); + +/* Clear association of Blog_t with sk_buff */ +extern Blog_t * blog_snull(struct sk_buff * skb_p); +extern Blog_t * blog_fnull(struct fkbuff * fkb_p); + +/* increment refcount of ct's associated with blog */ +extern void blog_ct_get(Blog_t * blog_p); +/* decrement refcount of ct's associated with blog */ +extern void blog_ct_put(Blog_t * blog_p); + +/* increment refcount for devices in virt_dev_p array */ +extern void blog_dev_hold(const Blog_t * blog_p); +/* decrement refcount for devices in virt_dev_p array */ +extern void blog_dev_put(Blog_t * blog_p); + +/* Clear association of Blog_t with sk_buff and free Blog_t object */ +extern void blog_free( struct sk_buff * skb_p, blog_skip_reason_t reason ); + +/* Disable further logging. Dis-associate with skb and free Blog object */ +extern void blog_skip(struct sk_buff * skb_p, blog_skip_reason_t reason); + +/* Transfer association of a Blog_t object between two sk_buffs. */ +extern void blog_xfer(struct sk_buff * skb_p, const struct sk_buff * prev_p); + +/* Duplicate a Blog_t object for another skb. */ +extern void blog_clone(struct sk_buff * skb_p, const struct blog_t * prev_p); + +/* Copy a Blog_t object another blog object. */ +extern void blog_copy(struct blog_t * new_p, const struct blog_t * prev_p); + +/* get the Ingress QoS Prio from the blog */ +extern int blog_iq(const struct sk_buff * skb_p); + +/* get the flow cache status */ +extern int blog_fc_enabled(void); + +/* get the GRE tunnel accelerated status */ +extern int blog_gre_tunnel_accelerated(void); + +#define BLOG_PTM_US_BONDING_DISABLED 0 +#define BLOG_PTM_US_BONDING_ENABLED 1 + +extern void blog_ptm_us_bonding( struct sk_buff *skb_p, int mode ); + +typedef int (*blog_dhd_flow_update_t)(void*, char*, char*, int); +extern blog_dhd_flow_update_t blog_dhd_flow_update_fn; +extern int blog_is_config_netdev_mac(void *dev_p, unsigned long incl_vmacs); +extern int blog_preemptible_task(void); + +#if defined(CONFIG_BLOG) +/* gets an NWE from blog based on the NPE type */ +static inline void *_blog_get_ct_nwe(Blog_t *blog_p, uint32_t ct_idx) +{ + blog_npe_t *npe_p = (blog_npe_t *) + ((blog_p->nxe[BLOG_NXE_CT_IDX(ct_idx)].flags.status == BLOG_NXE_STS_NPE) ? + blog_p->nxe[BLOG_NXE_CT_IDX(ct_idx)].npe_p : NULL); + + return (npe_p ? npe_p->nwe_p : (void *) NULL); +} +#endif + +/* + *------------------------------------------------------------------------------ + * Section 2. Associating native OS or 3rd-party network constructs + *------------------------------------------------------------------------------ + */ + +extern void blog_link(BlogNetEntity_t entity_type, Blog_t * blog_p, + void * net_p, uint32_t param1, uint32_t param2); + +/* + *------------------------------------------------------------------------------ + * Section 3. 
Network construct and Blog client co-existence callbacks + *------------------------------------------------------------------------------ + */ + +extern unsigned long blog_request(BlogRequest_t event, void * net_p, + unsigned long param1, unsigned long param2); + +extern int blog_query(BlogQuery_t query, void * net_p, + uint32_t param1, uint32_t param2, unsigned long param3); + +/* + * blog_notify(): + * blog_notify() is a synchronous notification from an entity to blog/flow_cache + * and will return once the notification/event has been completed. + * This API should be called only when it is known that the event processing time + * is going to be short (process or interrupt context). + * + * Interrupt Context: It is NOT recommended to call blog_notify() from an + * interrupt context (like softirq/timer) because it will block all other + * processes/threads/softirqs from running until the notification processing + * has been completed. Instead use blog_notify_async() from an interrupt + * context. + */ +extern void blog_notify(BlogNotify_t event, void *net_p, + unsigned long param1, unsigned long param2); + +/* + * fc_evt task will invoke this callback function asynchronously once the + * event processing has been completed. It is up to each entity to decide what + * it wants to do within this callback, but it should not hold the fc_evt task + * for too long. + */ +typedef void (*blog_notify_async_cb_fn_t)(void *notify_cb_data_p); + +/* + * blog_notify_async(): + * It is the same as blog_notify() except that two new parameters (notify_cb_fn and + * notify_cb_data_p) have been added, and it is an asynchronous call. The notify_cb_fn + * function will be called with the notify_cb_data_p parameter on completion of + * the event processing. If a caller does not want to wait for the completion + * of the event it can pass NULL values for notify_cb_fn and notify_cb_data_p. + * + * blog_notify_async() can be called from process or interrupt context; + * it will not block/sleep in this call. If blocking is needed it should be + * done outside this API. + * + * CAUTION: Responsibility of the calling entity: + * - Serialization and/or locking, reference count of the entry + * - ensuring the entry (e.g. flowring, FDB, etc.) for which flows are being flushed + * is NOT freed or reallocated before the callback function is invoked. + * + * return: Caller MUST check the return value. + * 1 : caller's notify callback function will be called + * 0 : caller's notify callback function will NOT be called + * (e.g. in situations where the fc_evt task is not running). + */ +extern int blog_notify_async(BlogNotify_t event, void *net_p, + unsigned long param1, unsigned long param2, + blog_notify_async_cb_fn_t notify_cb_fn, void *notify_cb_data_p); + +/* + *------------------------------------------------------------------------------ + * blog_notify_async_wait + * Calls blog_notify_async() and then waits for completion of the event. No callback + * function is needed from the caller; this function uses its own callback function. + * Note : If called from a NOT preempt-safe context, this function will change + * blog_notify_async() to blog_notify(), which means the event is + * processed synchronously. + * Caller should not call blog_lock()/blog_unlock() as this function + * internally calls blog_lock before calling the blog_notify/_async() APIs, + * and blog_unlock after calling.
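+ * + * For reference, a caller-side sketch of the underlying blog_notify_async() pattern (illustrative only: the event value, callback and data names are hypothetical placeholders, and the locking simply mirrors what this wrapper is documented to do internally): + * + *   static void my_done_cb(void *cb_data_p) { ... finish the flush, drop the reference taken before the call ... } + * + *   blog_lock(); + *   rc = blog_notify_async(event, net_p, param1, param2, my_done_cb, my_data_p); + *   blog_unlock(); + *   if (rc == 0) + *       my_done_cb(my_data_p);   (fc_evt task is not running, so no callback will arrive) + *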
+ *------------------------------------------------------------------------------ + */ +extern void blog_notify_async_wait(BlogNotify_t event, void *net_p, + unsigned long param1, unsigned long param2); + +/* blog notify event enqueue function type */ +typedef int (*blog_notify_evt_enqueue_hook_t)(blog_notify_evt_type_t evt_type, + void *net_p, unsigned long param1, unsigned long param2, + blog_notify_async_cb_fn_t notify_cb_fn, void *notify_cb_data_p); + +void blog_bind_notify_evt_enqueue( + blog_notify_evt_enqueue_hook_t blog_notify_evt_enqueue_fn ); +/* + *------------------------------------------------------------------------------ + * Section 4. Network end-point binding of Blog client + * + * If rx hook is defined, + * blog_sinit(): initialize a fkb from skb, and pass to hook + * if packet is consumed, skb is released. + * if packet is blogged, the blog is associated with skb. + * blog_finit(): pass to hook + * if packet is to be blogged, the blog is associated with fkb. + * + * If tx hook is defined, invoke tx hook, dis-associate and free Blog_t + *------------------------------------------------------------------------------ + */ +extern int blog_sinit_generic(struct sk_buff *skb, int *ret); +extern BlogAction_t _blog_sinit( struct sk_buff * skb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args ); + +static inline BlogAction_t blog_sinit( struct sk_buff * skb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr ) +{ + /*TODO move this allocation to drivers calling this function */ + BlogFcArgs_t fc_args; + memset(&fc_args, 0, sizeof(BlogFcArgs_t)); + return _blog_sinit(skb_p, dev_p, encap, channel, phyHdr, &fc_args); +} + +static inline BlogAction_t blog_sinit_args( struct sk_buff * skb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args ) +{ + return _blog_sinit(skb_p, dev_p, encap, channel, phyHdr, fc_args); +} + +extern BlogAction_t _blog_finit( struct fkbuff * fkb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args ); + +static inline BlogAction_t blog_finit( struct fkbuff * fkb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr ) +{ + /*TODO move this allocation to drivers calling this function */ + BlogFcArgs_t fc_args; + memset(&fc_args, 0, sizeof(BlogFcArgs_t)); + return _blog_finit(fkb_p, dev_p, encap, channel, phyHdr, &fc_args); +} + +static inline BlogAction_t blog_finit_args( struct fkbuff * fkb_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args ) +{ + return _blog_finit(fkb_p, dev_p, encap, channel, phyHdr, fc_args); +} + +static inline void blog_set_pkt_drop(Blog_t *blog_p, int pkt_drop) +{ + blog_p->pkt_drop = pkt_drop; +} + +static inline int blog_get_pkt_drop(Blog_t *blog_p) +{ + return (blog_p->pkt_drop); +} + +#if defined(CONFIG_BLOG) +extern BlogAction_t blog_emit_generic(void * nbuff_p, void * dev_p, uint32_t phyHdr); +extern BlogAction_t _blog_emit(void * nbuff_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args); + +static inline BlogAction_t blog_emit_args(void * nbuff_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr, BlogFcArgs_t *fc_args) +{ + if ( nbuff_p == NULL ) return PKT_NORM; + if ( !IS_SKBUFF_PTR(nbuff_p) ) return PKT_NORM; + // OK, this is something worth looking at, call real function + return ( _blog_emit(nbuff_p, dev_p, encap, channel, phyHdr, fc_args) ); +} + +static inline 
BlogAction_t blog_emit(void * nbuff_p, void * dev_p, + uint32_t encap, uint32_t channel, + uint32_t phyHdr) +{ + BlogFcArgs_t fc_args; + memset(&fc_args, 0, sizeof(BlogFcArgs_t)); + return ( blog_emit_args(nbuff_p, dev_p, encap, channel, phyHdr, &fc_args) ); +} + +#else +BlogAction_t blog_emit( void * nbuff_p, void * dev_p, + uint32_t encap, uint32_t channel, uint32_t phyHdr ); + +BlogAction_t blog_emit_args( void * nbuff_p, void * dev_p, + uint32_t encap, uint32_t channel, uint32_t phyHdr, + BlogFcArgs_t *fc_args); +#endif + +/* + * blog_iq_prio determines the Ingress QoS priority of the packet + */ +extern int blog_iq_prio(struct sk_buff * skb_p, void * dev_p, + uint32_t encap, uint32_t channel, uint32_t phyHdr); +/* + *------------------------------------------------------------------------------ + * blog_activate(): static configuration function of blog application + * pass a filled blog to the hook for configuration + *------------------------------------------------------------------------------ + */ +#if defined(CONFIG_BLOG) +extern BlogActivateKey_t *blog_activate( Blog_t * blog_p, BlogTraffic_t traffic ); +#else +extern uint32_t blog_activate( Blog_t * blog_p, BlogTraffic_t traffic ); +#endif + +/* + *------------------------------------------------------------------------------ + * blog_deactivate(): static deconfiguration function of blog application + *------------------------------------------------------------------------------ + */ +extern Blog_t * blog_deactivate( BlogActivateKey_t key, BlogTraffic_t traffic ); + +extern int blog_host_client_config(Blog_t *blog_p, BlogTraffic_t traffic); + +/* + * ----------------------------------------------------------------------------- + * User defined filter invoked in the rx hook. A user may override the + * Blog action defined by the client. To enable the invocation of this API + * in blog_finit, ensure that CC_BLOG_SUPPORT_USER_FILTER is enabled. Also, a + * network device driver may directly invoke blog_filter() to override PKT_BLOG + * and return PKT_NORM (by releasing the associated Blog_t). + * ----------------------------------------------------------------------------- + */ +extern BlogAction_t blog_filter(Blog_t * blog_p); + +/* + * ----------------------------------------------------------------------------- + * Section 5. Binding Blog client applications: + * + * Blog defines the following hooks: + * + * RX Hook: If this hook is defined then blog_init() will pass the packet to + * the Rx Hook using the FkBuff_t context. L1 and encap information + * are passed to the receive hook. The private network device context + * may be extracted using the passed net_device object, if needed. + * + * TX Hook: If this hook is defined then blog_emit() will check to see whether + * the NBuff has a Blog_t, and if so pass the NBuff and Blog to the + * bound Tx hook. + * + * NotifHook: When blog_notify is invoked, the bound hook is invoked. Based on + * the event type, the bound Blog client may perform a custom action. + * + * SC Hook: If this hook is defined, blog_activate() will pass a blog with + * the necessary information for static configuration. + * + * SD Hook: If this hook is defined, blog_deactivate() will pass a pointer + * to a network object with BlogActivateKey information. The + * respective flow entry will be deleted. + * + * QueryHook: When blog_query is invoked, the bound hook is invoked. Based on + * the query type, the bound Blog client will return the result of the query.
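+ * + * A minimal client registration sketch (illustrative only; the handler names are hypothetical, while the hook typedefs and the blog_bind()/blog_bind_config() prototypes are declared below): + * + *   BlogBind_t bind; + *   bind.hook_info = 0; + *   bind.bmap.RX_HOOK = 1; + *   bind.bmap.TX_HOOK = 1; + *   bind.bmap.XX_HOOK = 1; + *   blog_bind(my_rx_hook, my_tx_hook, my_notify_hook, NULL, NULL, bind); + * + * Static configuration handlers (SC_HOOK, SD_HOOK, HC_HOOK) are registered separately through blog_bind_config() with the corresponding bits set in the same BlogBind_t bitmap. + *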
+ * ----------------------------------------------------------------------------- + */ +typedef union { + struct { + uint16_t QR_HOOK : 1; + uint16_t RX_HOOK : 1; + uint16_t TX_HOOK : 1; + uint16_t XX_HOOK : 1; + uint16_t SC_HOOK : 1; + uint16_t SD_HOOK : 1; + uint16_t FA_HOOK : 1; + uint16_t FD_HOOK : 1; + uint16_t PA_HOOK : 1; + uint16_t BM_HOOK : 1; + uint16_t HC_HOOK : 1; + uint16_t reserved : 5; + } bmap; + uint16_t hook_info; +} BlogBind_t; + +typedef BlogAction_t (* BlogDevRxHook_t)(struct fkbuff *fkb_p, void * dev_p, + BlogFcArgs_t * args); + +typedef BlogAction_t (* BlogDevTxHook_t)(struct sk_buff *skb_p, void * dev_p, + uint32_t encap, uint32_t blogHash, BlogFcArgs_t * args); + +typedef int (* BlogNotifyHook_t)(blog_notify_api_t blog_notify_api, + BlogNotify_t notification, + void * net_p, unsigned long param1, unsigned long param2, + blog_notify_async_cb_fn_t notify_cb_fn, void *notify_cb_data_p); + +typedef int (* BlogQueryHook_t)(BlogQuery_t query, void * net_p, + uint32_t param1, uint32_t param2, unsigned long param3); + +typedef BlogActivateKey_t * (* BlogScHook_t)(Blog_t * blog_p, BlogTraffic_t traffic); + +typedef Blog_t * (* BlogSdHook_t)(BlogActivateKey_t key, BlogTraffic_t traffic); +typedef int (* BlogHostClientHook_t)(Blog_t *blog_p, BlogTraffic_t traffic); + +typedef void (* BlogFaHook_t)(void *ct_p, BlogFlowEventInfo_t info, BlogFlowEventType_t type); + +typedef void (* BlogFdHook_t)(void *ct_p, BlogFlowEventInfo_t info, BlogFlowEventType_t type); + +typedef BlogAction_t (* BlogPaHook_t)(struct fkbuff * fkb_p, void * dev_p, + uint32_t encap, uint32_t channel, uint32_t phyHdr); + +typedef int (* BlogBitMapHook_t)(blog_group_type_t group_type, uint32_t bitmap_idx, uint32_t *dst_p, uint32_t dst_size_words); + +extern int blog_get_hw_accel(void); +extern void blog_bind(BlogDevRxHook_t rx_hook, /* Client Rx netdevice handler*/ + BlogDevTxHook_t tx_hook, /* Client Tx netdevice handler*/ + BlogNotifyHook_t xx_hook, /* Client notification handler*/ + BlogQueryHook_t qr_hook, /* Client query handler */ + BlogBitMapHook_t blog_bm, /* group BitMap copy handler */ + BlogBind_t bind + ); + +extern void blog_bind_config(BlogScHook_t sc_hook, /* Client static config handler*/ + BlogSdHook_t sd_hook, /* Client static deconf handler*/ + BlogHostClientHook_t hc_hook, + BlogBind_t bind + ); + +void blog_bind_packet_accelerator( BlogPaHook_t blog_pa, BlogBind_t bind ); +int blog_flowevent_register_notifier(struct notifier_block *nb); +int blog_flowevent_unregister_notifier(struct notifier_block *nb); + +/* + *------------------------------------------------------------------------------ + * blog notify event + *------------------------------------------------------------------------------ + */ +typedef struct { + struct dll_t node; /* First element implements dll */ + blog_notify_evt_type_t evt_type; + void *net_p; + unsigned long param1; + unsigned long param2; + blog_notify_async_cb_fn_t notify_cb_fn; + void *notify_cb_data_p; +} ____cacheline_aligned blog_notify_evt_t; + +/* + * ----------------------------------------------------------------------------- + * Section 6. 
Miscellanous + * ----------------------------------------------------------------------------- + */ + +/* Dump a Blog_t object */ +extern void blog_hw_formatted_dump(Blog_t *blog_p); + +/* Logging of L2|L3 headers */ +extern void blog(struct sk_buff * skb_p, BlogDir_t dir, BlogEncap_t encap, + size_t len, void * data_p); + +/* Dump a Blog_t object */ +extern void blog_dump(Blog_t * blog_p); + +/* Get the minimum Tx MTU for a blog */ +uint16_t blog_getTxMtu(Blog_t * blog_p); + +/* + * Lock and unlock the blog layer. This is used to reduce the number of + * times the blog lock must be acquired and released during bulk rx processing. + * See also blog_finit_locked. + */ +extern void blog_lock(void); +extern void blog_unlock(void); + +/* + * Per packet basis modification feature + */ +#define BLOG_MAX_FEATURES 8 + +#define BLOG_LEN_PARAM_INDEX 0 +#define BLOG_DSCP_PARAM_INDEX 1 +#define BLOG_TOS_PARAM_INDEX 2 + +#define BLOG_MAX_LEN_TBLSZ 8 +#define BLOG_MAX_DSCP_TBLSZ 64 +#define BLOG_MAX_TOS_TBLSZ 256 + +#define BLOG_LEN_PARAM_NUM 4 +#define BLOG_MAX_PARAM_NUM 4 + +#define BLOG_MIN_LEN_INDEX 0 +#define BLOG_MAX_LEN_INDEX 1 +#define BLOG_ORIGINAL_MARK_INDEX 2 +#define BLOG_TARGET_MARK_INDEX 3 + +#define BLOG_MATCH_DSCP_INDEX 0 +#define BLOG_TARGET_DSCP_INDEX 1 + +#define BLOG_MATCH_TOS_INDEX 0 +#define BLOG_TARGET_TOS_INDEX 1 + +#define BLOG_INVALID_UINT8 ((uint8_t)(-1)) +#define BLOG_INVALID_UINT16 ((uint16_t)(-1)) +#define BLOG_INVALID_UINT32 ((uint32_t)(-1)) + +extern int blog_set_ack_tbl(uint32_t val[]); +extern int blog_clr_ack_tbl(void); +extern int blog_set_len_tbl(uint32_t val[]); +extern int blog_clr_len_tbl(void); +extern int blog_set_dscp_tbl(uint8_t idx, uint8_t val); +extern int blog_clr_dscp_tbl(void); +extern int blog_set_tos_tbl(uint8_t idx, uint8_t val); +extern int blog_clr_tos_tbl(void); +extern int blog_pre_mod_hook(Blog_t *blog_p, void *nbuff_p); +extern int blog_post_mod_hook(Blog_t *blog_p, void *nbuff_p); + +#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) +#define BLOG_GRE_RCV_NOT_GRE 2 +#define BLOG_GRE_RCV_NO_SEQNO 1 +#define BLOG_GRE_RCV_IN_SEQ 0 +#define BLOG_GRE_RCV_NO_TUNNEL -1 +#define BLOG_GRE_RCV_FLAGS_MISSMATCH -2 +#define BLOG_GRE_RCV_CHKSUM_ERR -3 +#define BLOG_GRE_RCV_OOS_LT -4 +#define BLOG_GRE_RCV_OOS_GT -5 + +extern int blog_gre_rcv( struct fkbuff *fkb_p, void * dev_p, uint32_t h_proto, + void **tunl_pp, uint32_t *pkt_seqno_p); +extern void blog_gre_xmit( struct sk_buff *skb_p, uint32_t h_proto ); +#endif + +#if defined(CONFIG_ACCEL_PPTP) +#define BLOG_PPTP_ENCRYPTED 3 +#define BLOG_PPTP_RCV_NOT_PPTP 2 +#define BLOG_PPTP_RCV_NO_SEQNO 1 +#define BLOG_PPTP_RCV_IN_SEQ 0 +#define BLOG_PPTP_RCV_NO_TUNNEL -1 +#define BLOG_PPTP_RCV_FLAGS_MISSMATCH -2 +#define BLOG_PPTP_RCV_CHKSUM_ERR -3 +#define BLOG_PPTP_RCV_OOS_LT -4 +#define BLOG_PPTP_RCV_OOS_GT -5 +extern int blog_pptp_rcv( struct fkbuff *fkb_p, uint32_t h_proto, + uint32_t *rcv_pktSeq); +extern void blog_pptp_xmit( struct sk_buff *skb_p, uint32_t h_proto ); +#endif + +#define BLOG_L2TP_RCV_TUNNEL_FOUND 1 +#define BLOG_L2TP_RCV_NO_TUNNEL 0 + +#define BLOG_INCLUDE_VIRTUAL_DEVS 1 + +void blog_get_dev_stats(void *dev_p, void *bStats_p); +void blog_clr_dev_stats(void *dev_p); +void blog_get_dev_running_stats(void *dev_p, void * const bStats_p); +void blog_get_dev_running_stats_wlan(void *dev_p, void * const bStats_p); /* Remove once WLAN falls in place */ +void blog_add_dev_accelerated_stats(void *dev_p, void *stats64_p); + +typedef struct { + wait_queue_head_t wqh; + unsigned long 
work_avail; +#define BLOG_WORK_AVAIL (1<<0) + spinlock_t wakeup_lock; + bool wakeup_done; +} wq_info_t; + +#define BLOG_WAKEUP_WORKER_THREAD(x, mask) \ +do { \ + if ( !((x)->work_avail & mask) ) { \ + (x)->work_avail |= mask; \ + wake_up_interruptible(&((x)->wqh)); \ + } \ +} while (0) + +/*wake up with spinlock to avoid preemption/bh processing between + *setting work_avail & wakeup + */ +#define BLOG_WAKEUP_WORKER_THREAD_NO_PREEMPT(x, mask) \ +do { \ + spin_lock_bh(&((x)->wakeup_lock)); \ + BLOG_WAKEUP_WORKER_THREAD(x, mask); \ + (x)->wakeup_done = true; \ + spin_unlock_bh(&((x)->wakeup_lock)); \ +} while (0) + + +void blog_fold_stats(BlogStats_t * const d, + const BlogStats_t * const s); +int blog_copy_group_client_bitmap(blog_group_type_t group_type, uint16_t bitmap_idx, uint32_t *dst_p, uint32_t dst_size_words); +#define blog_copy_mcast_client_bitmap(bmidx, dst_p, dst_size_words) blog_copy_group_client_bitmap(group_type_mcast, bmidx, dst_p, dst_size_words) + +#if defined(CONFIG_BCM_UNKNOWN_UCAST) +#define blog_copy_unknown_ucast_client_bitmap(bmidx, dst_p, dst_size_words) blog_copy_group_client_bitmap(group_type_unknown_ucast, bmidx, dst_p, dst_size_words) +#endif + +int blog_set_bridge_tx_dev(Blog_t *cblog_p); +void *blog_group_dev_realloc(Blog_t *mblog_p, uint8_t new); +void blog_group_dev_free(Blog_t *mblog_p); +int blog_group_find_matching_master_dev(Blog_t *mblog_p, void *dev_p, int8_t delta); +int blog_group_add_rx_dev(Blog_t *mblog_p, Blog_t *cblog_p); +void blog_group_del_rx_dev(Blog_t *mblog_p, Blog_t *cblog_p); +int blog_group_add_tx_dev(Blog_t *mblog_p, Blog_t *cblog_p); +void blog_group_del_tx_dev(Blog_t *mblog_p, Blog_t *cblog_p); +void blog_group_del_all_devs(Blog_t *mblog_p); +int blog_group_dev_get_bridge_dev_ref_cnt(Blog_t *mblog_p, void *dev_p); +int blog_group_add_bridge_dev_base_stats(Blog_t *mblog_p, void *dev_p, int master_dev_idx); +int blog_group_del_bridge_dev_base_stats(Blog_t *mblog_p, uint32_t idx); +int blog_group_update_bridge_dev_base_stats(Blog_t *mblog_p, uint32_t idx, + uint64_t sw_tx_packets, uint64_t sw_tx_bytes, + uint64_t hw_tx_packets, uint64_t hw_tx_bytes); +int blog_group_get_bridge_dev_base_stats(Blog_t *mblog_p, uint32_t idx, + uint64_t *sw_tx_packets_p, uint64_t *sw_tx_bytes_p, + uint64_t *hw_tx_packets_p, uint64_t *hw_tx_bytes_p); + +typedef struct blog_ctx { + uint32_t blog_total; + uint32_t blog_avail; + uint32_t blog_mem_fails; + uint32_t blog_extends; + uint32_t blog_extend_fails; + blog_info_stats_t info_stats; + blog_skip_reason_t blog_skip_stats_table[blog_skip_reason_max]; + blog_free_reason_t blog_free_stats_table[blog_free_reason_max]; + uint32_t blog_dump; +} blog_ctx_t; + +#endif /* defined(__BLOG_H_INCLUDED__) */ + +#endif /* CONFIG_BCM_KF_BLOG */ diff --git a/include/linux/blog_net.h b/include/linux/blog_net.h new file mode 100644 index 0000000000000000000000000000000000000000..7cb3b50f7da3b3f874fc747306b99286f9d5e21e --- /dev/null +++ b/include/linux/blog_net.h @@ -0,0 +1,845 @@ +#ifndef __BLOG_NET_H_INCLUDED__ +#define __BLOG_NET_H_INCLUDED__ + +/* +<:copyright-BRCM:2003:DUAL/GPL:standard + + Copyright (c) 2003 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software 
give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +/* + ******************************************************************************* + * + * File Name : blog_net.h + * + * Description: + * + * Global definitions and declaration of Protocol Headers independent of OS as + * per IEEE and RFC standards. Inlined utilities for header access. + * + * CAUTION: All protocol header structures are declared for Big Endian access + * and are not compatible for a Little Endian machine. + * + * CAUTION: It is also assumed that the Headers are AT LEAST 16bit aligned. + * + ******************************************************************************* + */ + +#if defined(CONFIG_CPU_BIG_ENDIAN) +#define BE_DECL(declarations) declarations +#define BE_CODE(statements) do { statements } while (0) +#define LE_DECL(declarations) +#define LE_CODE(statements) NULL_STMT +#elif defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#define BE_DECL(declarations) +#define BE_CODE(statements) NULL_STMT +#define LE_DECL(declarations) declarations +#define LE_CODE(statements) do { statements } while (0) +#else +#error "Compile: fix endianess in platform.h" +#endif + + +/*TODO rename these PHY types to BCM_XXXPHY */ +#undef BLOG_DECL +#define BLOG_DECL(x) x, +/* + *------------------------------------------------------------------------------ + * Denotes the type of physical interface and the presence of a preamble. + *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(BLOG_NOPHY) /* index 0 placeholder for non-bloggable interface */ + BLOG_DECL(BLOG_XTMPHY) + BLOG_DECL(BLOG_ENETPHY) + BLOG_DECL(BLOG_GPONPHY) + BLOG_DECL(BLOG_EPONPHY) + BLOG_DECL(BLOG_USBPHY) + BLOG_DECL(BLOG_WLANPHY) + BLOG_DECL(BLOG_GENPHY) + BLOG_DECL(BLOG_TCP_LOCALPHY) + BLOG_DECL(BLOG_SPU_DS) + BLOG_DECL(BLOG_SPU_US) + BLOG_DECL(BLOG_SPDTST) + BLOG_DECL(BLOG_MAXPHY) +} BlogPhy_t; + +/* + *------------------------------------------------------------------------------ + * RFC 2684 header logging. + * CAUTION: 0'th enum corresponds to either header was stripped or zero length + * header. VC_MUX_PPPOA and VC_MUX_IPOA have 0 length RFC2684 header. + * PTM does not have an rfc2684 header. 
+ *------------------------------------------------------------------------------ + */ +typedef enum { + BLOG_DECL(RFC2684_NONE) /* */ + BLOG_DECL(LLC_SNAP_ETHERNET) /* AA AA 03 00 80 C2 00 07 00 00 */ + BLOG_DECL(LLC_SNAP_ROUTE_IP) /* AA AA 03 00 00 00 08 00 */ + BLOG_DECL(LLC_ENCAPS_PPP) /* FE FE 03 CF */ + BLOG_DECL(VC_MUX_ETHERNET) /* 00 00 */ + BLOG_DECL(VC_MUX_IPOA) /* */ + BLOG_DECL(VC_MUX_PPPOA) /* */ + BLOG_DECL(PTM) /* */ + BLOG_DECL(RFC2684_MAX) +} Rfc2684_t; + +/*----- LinkType: First header type ------------------------------------------*/ +/* Used by network drivers to determine the Layer 1 encapsulation or LinkType */ +typedef enum { + BLOG_DECL(TYPE_ETH) /* LAN: ETH, WAN: EoA, MER, PPPoE */ + BLOG_DECL(TYPE_PPP) /* WAN: PPPoA */ + BLOG_DECL(TYPE_IP) /* WAN: IPoA */ + BLOG_DECL(TYPE_VLAN) /* VLAN from generic hook */ +} BlogLinkType_t; + +#define BLOG_SET_PHYHDR(a, b) ( (((a) & 0xf) << 4) | ((b) & 0xf) ) +#define BLOG_GET_PHYTYPE(a) ( (a) & 0xf ) +#define BLOG_GET_PHYLEN(a) ( (a) >> 4 ) + +#define BLOG_PHYHDR_MASK 0xff +#define BLOG_SET_HW_ACT(a) ( ((a) & 0xf) << 8 ) +#define BLOG_GET_HW_ACT(a) ( (a) >> 8 ) + +/*----- ETH_TYPE: Standard well-defined Ethernet Encapsulations --------------*/ +#define BLOG_ETH_P_ETH_BRIDGING 0x6558 /* Transparent Ethernet bridging */ +#define BLOG_ETH_P_IPV4 0x0800 /* IPv4 in Ethernet */ +#define BLOG_ETH_P_ARP 0x0806 /* Address Resolution packet */ +#define BLOG_ETH_P_RARP 0x8035 /* Reverse ARP */ +#define BLOG_ETH_P_APPLTK_AARP 0x80F3 /* AppleTalk AARP */ +#define BLOG_ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ +#define BLOG_ETH_P_8021AD 0x88A8 /* VLAN Stacking 802.1ad */ +#define BLOG_ETH_P_NOVELL 0x8137 /* Novell, Inc. */ +#define BLOG_ETH_P_IPV6 0x86DD /* Internet Protocol Version 6 */ +#define BLOG_ETH_P_MPLS_UC 0x8847 /* MPLS - Unicast */ +#define BLOG_ETH_P_MPLS_MC 0x8848 /* MPLS - Multicast */ +#define BLOG_ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */ +#define BLOG_ETH_P_PPP_DIS 0x8863 /* PPPoE Discovery */ +#define BLOG_ETH_P_PPP_SES 0x8864 /* PPPoE Session */ +#define BLOG_ETH_JUMBO_FRAME 0x8870 /* Jumbo frame indicator */ +#define BLOG_ETH_P_BRCM6TAG 0x8874 /* BRCM Switch Hdr : 6 byte */ +#define BLOG_ETH_P_BRCM4TAG 0x888A /* BRCM Switch Hdr : 4 byte */ +#define BLOG_ETH_P_PAUSE 0x8808 /* IEEE Pause frames. 802.3 31B */ +#define BLOG_ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ +#define BLOG_ETH_P_8021AG 0x8902 /* 802.1ag Connectivity FaultMgmt */ + /* ITU-T recomm Y.1731 (OAM) */ +#define BLOG_ETH_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define BLOG_ETH_FCOE_INIT 0x8914 /* FCoE Initialization Protocol */ +#define BLOG_ETH_QINQ1 0x9100 /* 802.1Q in Q, alternate 1 */ +#define BLOG_ETH_QINQ2 0x9200 /* 802.1Q in Q, alternate 2 */ + +/*----- PPP_TYPE: Standard well-defined PPP Encapsulations -------------------*/ +#define BLOG_PPP_IPV4 0x0021 /* IPv4 in PPP */ +#define BLOG_PPP_IPCP 0x8021 /* IP Control Protocol */ +#define BLOG_PPP_LCP 0xC021 /* Link Control Protocol */ +#define BLOG_PPP_MP 0x003D /* Multilink protocol */ +#define BLOG_PPP_IPV6 0x0057 /* IPv6 in PPP */ +#define BLOG_PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#define BLOG_PPP_MPLSCP 0x80FD /* MPLS Control Protocol??? 
*/ +#define BLOG_PPP_MPLS_UC 0x0281 /* MPLS - Unicast */ +#define BLOG_PPP_MPLS_MC 0x0283 /* MPLS - Multicast */ + +#define BLOG_GRE_PPP 0x880B /* PPTP: PPP in GRE Tunnel */ + +/*----- IPPROTO: Standard well-defined IP Encapsulations ---------------------*/ +#define BLOG_IPPROTO_HOPOPTV6 0 /* IPv6 ext: Hop-by-Hop Option Header */ +#define BLOG_IPPROTO_ICMP 1 /* Internet Control Message Protocol */ +#define BLOG_IPPROTO_IGMP 2 /* Internet Group Management Protocol */ +#define BLOG_IPPROTO_IPIP 4 /* IPIP tunnels e.g. 4in6 */ +#define BLOG_IPPROTO_TCP 6 /* Transmission Control Protocol */ +#define BLOG_IPPROTO_EGP 8 /* Exterior Gateway Protocol */ +#define BLOG_IPPROTO_UDP 17 /* User Datagram Protocol */ +#define BLOG_IPPROTO_IPV6 41 /* IPv6-in-IPv4 tunnelling */ +#define BLOG_IPPROTO_ROUTING 43 /* IPv6 ext: Routing Header */ +#define BLOG_IPPROTO_FRAGMENT 44 /* IPv6 ext: Fragmentation Header */ +#define BLOG_IPPROTO_RSVP 46 /* RSVP Protocol */ +#define BLOG_IPPROTO_GRE 47 /* Cisco GRE tunnels (rfc 1701,1702) */ +#define BLOG_IPPROTO_ESP 50 /* Encapsulation Security Payload */ +#define BLOG_IPPROTO_AH 51 /* Authentication Header Protocol */ +#define BLOG_IPPROTO_ICMPV6 58 /* IPv6 ext: ICMPv6 Header */ +#define BLOG_IPPROTO_NONE 59 /* IPv6 ext: NONE */ +#define BLOG_IPPROTO_DSTOPTS 60 /* IPv6 ext: Destination Options Hdr */ +#define BLOG_IPPROTO_ANY_HOST_INTERNAL_PROTO 61 /* Any host internel proto */ +#define BLOG_IPPROTO_MTP 92 /* IPv6 ext: Mcast Transport Protocol */ +#define BLOG_IPPROTO_ENCAP 98 /* IPv6 ext: Encapsulation Header */ +#define BLOG_IPPROTO_PIM 103 /* Protocol Independent Multicast */ +#define BLOG_IPPROTO_COMP 108 /* Compression Header Protocol */ +#define BLOG_IPPROTO_ANY_0HOP 114 /* Any Zero HOP */ +#define BLOG_IPPROTO_SCTP 132 /* Stream Control Transport Protocol */ +#define BLOG_IPPROTO_UDPLITE 136 /* UDP-Lite (RFC 3828) */ + +#define BLOG_IPPROTO_UNASSIGN_B 141 /* Begin of unassigned range */ +#define BLOG_IPPROTO_UNASSIGN_E 252 /* End of unassigned range */ +#define BLOG_IPPROTO_RSVD_EXPT1 253 /* Reserved for experimentation */ +#define BLOG_IPPROTO_RSVD_EXPT2 254 /* Reserved for experimentation */ +#define BLOG_IPPROTO_RAW 255 /* Raw IP Packets */ + + +/* IGRS/UPnP using Simple Service Discovery Protocol SSDP over HTTPMU */ +#define BLOG_HTTP_MCAST_UDP_DSTPORT 1900 + +/* Known L4 Ports */ +#define BLOG_DNS_SERVER_PORT 53 +#define BLOG_DHCP_SERVER_PORT 67 +#define BLOG_DHCP_CLIENT_PORT 68 + +/*----- Ethernet IEEE 802.3 definitions ------------------------------------- */ +#define BLOG_LLC_SAP_SNAP (0xAA) +#define BLOG_LLC_SNAP_8023_DSAP (BLOG_LLC_SAP_SNAP) +#define BLOG_LLC_SNAP_8023_SSAP (BLOG_LLC_SAP_SNAP) +#define BLOG_LLC_SNAP_8023_Ctrl (0x3) +#define BLOG_LLC_SNAP_8023_LEN 8 + +#define BLOG_ETH_ADDR_LEN 6 +#define BLOG_ETH_TYPE_LEN sizeof(uint16_t) +#define BLOG_ETH_HDR_LEN ((BLOG_ETH_ADDR_LEN * 2) + BLOG_ETH_TYPE_LEN) +#define BLOG_ETH_TYPE_MIN 1536 + +#define BLOG_ETH_MIN_LEN 60 +#define BLOG_ETH_FCS_LEN 4 +#define BLOG_ETH_MTU_LEN 0xFFFF /* Initial minMtu value */ + +#define BLOG_ETH_ADDR_FMT "[%02X:%02X:%02X:%02X:%02X:%02X]" +#define BLOG_ETH_ADDR(e) e.u8[0],e.u8[1],e.u8[2],e.u8[3],e.u8[4],e.u8[5] + +typedef union BlogEthAddr { + uint8_t u8[BLOG_ETH_ADDR_LEN]; + uint16_t u16[BLOG_ETH_ADDR_LEN/sizeof(uint16_t)]; +} BlogEthAddr_t; + +typedef struct BlogEthHdr { + union { + uint8_t u8[BLOG_ETH_HDR_LEN]; + uint16_t u16[BLOG_ETH_HDR_LEN/sizeof(uint16_t)]; + struct { + BlogEthAddr_t macDa; + BlogEthAddr_t macSa; + /* + * CAUTION: Position of ethType field of an 
Ethernet header depends on + * the presence and the number of VLAN Tags + * E.g. A single tagged Ethernet frame will have the ethType at offset 16. + */ + uint16_t ethType; /* or length */ + }; + }; +} BlogEthHdr_t; + +/* 16bit aligned access MAC Address functgions */ +static inline int blog_is_zero_eth_addr(uint8_t * addr_p) +{ + uint16_t * u16_p = (uint16_t *)addr_p; /* assert u16_p is 16bit aligned */ + return ( (u16_p[0] & u16_p[1] & u16_p[2]) == 0x0000 ); +} + +static inline int blog_is_bcast_eth_addr(uint8_t * addr_p) +{ + uint16_t * u16_p = (uint16_t *)addr_p; /* assert u16_p is 16bit aligned */ + return ( (u16_p[0] & u16_p[1] & u16_p[2]) == 0xFFFF ); +} + +/* Caution an IP mcast over PPPoE need not have a mcast MacDA */ +static inline int blog_is_mcast_eth_addr(uint8_t * addr_p) +{ +#if 1 + return *(addr_p+0) & 0x01; +#else /* Multicast (e.g. over PPPoE) may use unicast MacDA */ + uint16_t * u16_p = (uint16_t *)addr_p; /* assert u16_p is 16bit aligned */ + if ( ((u16_p[0] == 0x0100) /* IPv4: 01:00:5E:`1b0 */ + && (*(addr_p+2) == 0x5e) && ((*(addr_p+3) & 0x80) == 0) ) + || ( u16_p[0] == 0x3333) /* IPv6: 33:33 */ + ) + return 1; + else + return 0; +#endif +} + +static inline int blog_cmp_eth_addr(uint8_t * addr1_p, uint8_t * addr2_p) +{ + uint16_t *a1 = (uint16_t *)addr1_p; + uint16_t *a2 = (uint16_t *)addr2_p; + return ( ((a1[0] ^ a2[0]) | (a1[1] ^ a2[1]) | (a1[2] ^ a2[2])) != 0 ); +} + + +/*----- 6Byte Brcm6Hdr layout for 5397/98 Switch Management Port Tag ---------*/ +#define BLOG_BRCM6_HDR_LEN 6 + +typedef struct BlogBrcm6Hdr { + union { + uint8_t u8[BLOG_BRCM6_HDR_LEN]; + uint16_t u16[BLOG_BRCM6_HDR_LEN/sizeof(uint16_t)]; + /* + * egress: opcode:3, fbcount:14, rsvd:11, srcPortId:4 + * ingress_port opcode:3, rsvd:25, dstPortId:4 + * ingress_map opcode:3, rsvd:20, fwdMap:9 + */ + }; +} BlogBrcm6Hdr_t; + + +/*----- 4Byte Brcm4Hdr layout for 53115 Switch Management Port Tag -----------*/ +#define BLOG_BRCM4_HDR_LEN 4 + +typedef struct BlogBrcm4Hdr { + union { + uint8_t u8[BLOG_BRCM4_HDR_LEN]; + uint16_t u16[BLOG_BRCM4_HDR_LEN/sizeof(uint16_t)]; + /* + * egress opcode:3, rsvd:13, rsvd2:2, + * flooding:1, snooping:1, protocol:1, switching:1 + * learning:1, mirroring:1, tclass:3, srcpid:5 + * ingress opcode:3, tclass:3, + * tagenforce:2, rsvd:1, dstmap:23 + */ + }; +} BlogBrcm4Hdr_t; + +/*----- Composite Ethernet with BRCM Tag -------------------------------------*/ + +#define BLOG_ETHBRCM6_HDR_LEN (BLOG_ETH_HDR_LEN + BLOG_BRCM6_HDR_LEN) +#define BLOG_ETHBRCM4_HDR_LEN (BLOG_ETH_HDR_LEN + BLOG_BRCM4_HDR_LEN) + +typedef struct BlogEthBrcm6Hdr { + union { + uint8_t u8[BLOG_ETHBRCM6_HDR_LEN]; + uint16_t u16[BLOG_ETHBRCM6_HDR_LEN/sizeof(uint16_t)]; + struct { + BlogEthAddr_t macDa; + BlogEthAddr_t macSa; + BlogBrcm6Hdr_t brcm6; + uint16_t ethType; + }; + }; +} BlogEthBrcm6Hdr_t; + +typedef struct BlogEthBrcm4Hdr { + union { + uint8_t u8[BLOG_ETHBRCM4_HDR_LEN]; + uint16_t u16[BLOG_ETHBRCM4_HDR_LEN/sizeof(uint16_t)]; + struct { + BlogEthAddr_t macDa; + BlogEthAddr_t macSa; + BlogBrcm4Hdr_t brcm4; + uint16_t ethType; + }; + }; +} BlogEthBrcm4Hdr_t; + + +/*----- Vlan IEEE 802.1Q definitions -----------------------------------------*/ +#define BLOG_VLAN_HDR_LEN 4 +#define BLOG_VLAN_HDR_FMT "[0x%08X] tpid<0x%04X> tci<0x%04X> "\ + "pbit<%u> dei<%u> vid<0x%03X>" +#define BLOG_VLAN_HDR(v) v.u32[0], v.tpid, v.tci.u16[0], \ + v.tci.pbits, v.tci.dei, v.tci.vid + +typedef struct BlogVlanTci { + union { + uint8_t u8[sizeof(uint16_t)]; + uint16_t u16[1]; + struct { + BE_DECL( uint16_t pbits:3; uint16_t 
dei:1; uint16_t vid:12; ) + LE_DECL( uint16_t vid:12; uint16_t dei:1; uint16_t pbits:3; ) + }; + }; +} BlogVlanTci_t; + +typedef struct BlogVlanHdr { + union { + uint8_t u8[BLOG_VLAN_HDR_LEN]; + uint16_t u16[BLOG_VLAN_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_VLAN_HDR_LEN/sizeof(uint32_t)]; + struct { + uint16_t tpid; BlogVlanTci_t tci; /* u8[ 88, A8, EA, AA ] */ + }; + }; +} BlogVlanHdr_t; + + +/*----- PPPoE + PPP Header layout. PPPoE RFC 2516, PPP RFC 1661 --------------*/ +#define BLOG_PPPOE_HDR_LEN 8 /* Including PPP Header "PPP Type" */ +#define BLOG_PPP_HDR_LEN sizeof(uint16_t) +#define BLOG_PPPOE_HDR_FMT "[0x%08X 0x%08X] ver<%u> type<%u> code<0x%02X>"\ + " sId<0x%04X> len<%u> pppType<0x%04X>" +#define BLOG_PPPOE_HDR(p) p.u32[0], p.u32[1], p.ver, p.type, p.code,\ + p.sId, p.len, p.pppType + +typedef uint16_t BlogPppHdr_t; + +typedef struct BlogPppoeHdr { /* includes 2 byte PPP Type */ + union { + uint8_t u8[BLOG_PPPOE_HDR_LEN]; + uint16_t u16[BLOG_PPPOE_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_PPPOE_HDR_LEN/sizeof(uint32_t)]; + struct { + BE_DECL( uint16_t ver:4; uint16_t type:4; uint16_t code:8; ) + LE_DECL( uint16_t code:8; uint16_t type:4; uint16_t ver:4; ) + uint16_t sId; uint16_t len; BlogPppHdr_t pppType; + }; + }; +} BlogPppoeHdr_t; + + +/*----- Multi Protocol Label Switiching Architecture: RFC 3031 ----------------- + * + * 20b-label, 3b-tos, 1b-Stack, 8b-TTL + * StackBit==1? if label==0 then next is IPV4, if label==1 then next is IPV6 + *------------------------------------------------------------------------------ + */ +#define BLOG_MPLS_HDR_LEN 4 + +typedef struct BlogMplsHdr { + union { + uint8_t u8[BLOG_MPLS_HDR_LEN]; + uint16_t u16[BLOG_MPLS_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_MPLS_HDR_LEN/sizeof(uint32_t)]; + struct { + BE_DECL( uint32_t label:20; uint32_t cos:3; uint32_t sbit:1; uint32_t ttl:8; ) + LE_DECL( uint32_t ttl:8; uint32_t sbit:1; uint32_t cos:3; uint32_t label:20; ) + }; + }; +} BlogMplsHdr_t; + + +/*----- IPv4: RFC 791 definitions --------------------------------------------*/ +#define BLOG_IPV4_HDR_LEN 20 /* Not including IP Options */ +#define BLOG_IPV4_ADDR_LEN 4 +#define BLOG_IPV4_HDR_FMT "[0x%08X] ver<%u> ihl<%u> tos<0x%02X> len<%u> "\ + "[0x%08X] id<%u> df<%u> mf<%u> "\ + "fragOffset<0x%04X> [0x%08X] "\ + "ttl<%u> proto<%u> chkSum<0x%04X> "\ +#define BLOG_IPV4_HDR(i) i.u32[0], i.ver, i.ihl, i.tos, i.len, \ + i.u32[1], i.id, i.df, i.mf, i.fragOffset,\ + i.u32[2], i.ttl, i.proto, i.chkSum, +#define BLOG_IPTOS2DSCP(tos) ((tos) >> 2) +#define BLOG_IPDSCP2TOS(dscp) ((dscp) << 2) + +#define BLOG_IPV4_ADDR_FMT "<%03u.%03u.%03u.%03u>" +#define BLOG_IPV4_ADDR_PORT_FMT "<%03u.%03u.%03u.%03u:%u>" +#define BLOG_IPV4_ADDR(ip) ((uint8_t*)&ip)[0], ((uint8_t*)&ip)[1], \ + ((uint8_t*)&ip)[2], ((uint8_t*)&ip)[3] + +#if defined(CONFIG_CPU_BIG_ENDIAN) +#define BLOG_IPV4_ADDR_HOST(ip) ((uint8_t*)&ip)[0], ((uint8_t*)&ip)[1], \ + ((uint8_t*)&ip)[2], ((uint8_t*)&ip)[3] +#elif defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#define BLOG_IPV4_ADDR_HOST(ip) ((uint8_t*)&ip)[3], ((uint8_t*)&ip)[2], \ + ((uint8_t*)&ip)[1], ((uint8_t*)&ip)[0] +#endif + +typedef union BlogIpv4Addr { + uint8_t u8[BLOG_IPV4_ADDR_LEN]; + uint16_t u16[BLOG_IPV4_ADDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_IPV4_ADDR_LEN/sizeof(uint32_t)]; +} BlogIpv4Addr_t; + +#define BLOG_IP_FLAG_CE 0x8000 /* Congestion */ +#define BLOG_IP_FLAG_DF 0x4000 /* Do Not Fragment */ +#define BLOG_IP_FLAG_MF 0x2000 /* More Fragment */ +#define BLOG_IP_FRAG_OFFSET 
0x1FFF + +typedef struct BlogIpv4Hdr { + union { + uint8_t u8[BLOG_IPV4_HDR_LEN]; + uint16_t u16[BLOG_IPV4_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_IPV4_HDR_LEN/sizeof(uint32_t)]; + struct { + union { + struct { + BE_DECL( uint8_t ver:4; uint8_t ihl:4; ) + LE_DECL( uint8_t ihl:4; uint8_t ver:4; ) + }; + uint8_t ver_ihl; + }; + uint8_t tos; uint16_t len; + uint16_t id; + union { + uint16_t flagsFrag; + struct { + BE_DECL( uint16_t cong:1; uint16_t df:1; + uint16_t moreFrag:1; uint16_t fragOffset:13; ) + LE_DECL( uint16_t fragOffset:13; uint16_t moreFrag:1; + uint16_t df:1; uint16_t cong:1; ) + }; + }; + uint8_t ttl; uint8_t proto; uint16_t chkSum; + BlogIpv4Addr_t sAddr; + BlogIpv4Addr_t dAddr; + }; + }; +} BlogIpv4Hdr_t; + + +/*----- IPv6: RFC 2460 RFC 3513 definitions ----------------------------------*/ +/* + * Well know IPv6 Address prefixes + * Multicast: FFXX:: + * Site local: FEC0:: + * Link Local: FE80:: + * Ucast 6to4: 2002:: + */ +#define BLOG_IPV6_HDR_LEN 40 +#define BLOG_IPV6_ADDR_LEN 16 + +#define BLOG_IPV6_ADDR_FMT "<%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x>" +#define BLOG_IPV6_ADDR_PORT_FMT "<%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u>" +#define BLOG_IPV6_ADDR(ip) \ + ntohs(((uint16_t*)&ip)[0]), ntohs(((uint16_t*)&ip)[1]), \ + ntohs(((uint16_t*)&ip)[2]), ntohs(((uint16_t*)&ip)[3]), \ + ntohs(((uint16_t*)&ip)[4]), ntohs(((uint16_t*)&ip)[5]), \ + ntohs(((uint16_t*)&ip)[6]), ntohs(((uint16_t*)&ip)[7]) + +typedef union BlogIpv6Addr { + uint8_t u8[BLOG_IPV6_ADDR_LEN]; + uint16_t u16[BLOG_IPV6_ADDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_IPV6_ADDR_LEN/sizeof(uint32_t)]; +} BlogIpv6Addr_t; + +typedef struct BlogIpv6Hdr { + union { + uint8_t u8[BLOG_IPV6_HDR_LEN]; + uint16_t u16[BLOG_IPV6_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_IPV6_HDR_LEN/sizeof(uint32_t)]; + struct { + /* ver_tos bits -> ver 4: tos 8: flowlblHi 4 + using bit field results in unaligned access */ + uint16_t ver_tos; uint16_t flowLblLo; + uint16_t len; uint8_t nextHdr; uint8_t hopLmt; + BlogIpv6Addr_t sAddr; + BlogIpv6Addr_t dAddr; + }; + }; +} BlogIpv6Hdr_t; + +#define BLOG_IPV6EXT_HDR_LEN 8 /* multiple of 8 octets */ +typedef struct BlogIpv6ExtHdr { + union { + uint8_t u8[BLOG_IPV6EXT_HDR_LEN]; + uint16_t u16[BLOG_IPV6EXT_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_IPV6EXT_HDR_LEN/sizeof(uint32_t)]; + struct { + uint8_t nextHdr; uint8_t hdrLen; uint16_t data16; + uint32_t data32; + }; + }; +} BlogIpv6ExtHdr_t; + + +/*----- Transmission Control Protocol: RFC 793 definitions -------------------*/ + +#define BLOG_TCP_HDR_LEN 20 + +#define TCPH_DOFF(t) (((htons(t->offFlags.u16)) >> 12) & 0xF) +#define TCPH_CWR(t) (((htons(t->offFlags.u16)) >> 7) & 0x1) +#define TCPH_ECE(t) (((htons(t->offFlags.u16)) >> 6) & 0x1) +#define TCPH_URG(t) (((htons(t->offFlags.u16)) >> 5) & 0x1) +#define TCPH_ACK(t) (((htons(t->offFlags.u16)) >> 4) & 0x1) +#define TCPH_PSH(t) (((htons(t->offFlags.u16)) >> 3) & 0x1) +#define TCPH_RST(t) (((htons(t->offFlags.u16)) >> 2) & 0x1) +#define TCPH_SYN(t) (((htons(t->offFlags.u16)) >> 1) & 0x1) +#define TCPH_FIN(t) (((htons(t->offFlags.u16)) >> 0) & 0x1) + +typedef struct BlogTcpOffFlags { + union { + uint16_t u16; + struct { uint8_t off; uint8_t flags; }; + struct { + BE_DECL( + uint16_t dOff: 4; + uint16_t res1: 4; + uint16_t cwr : 1; + uint16_t ece : 1; + uint16_t urg : 1; + uint16_t ack : 1; + uint16_t psh : 1; + uint16_t rst : 1; + uint16_t syn : 1; + uint16_t fin : 1; + ) + LE_DECL( + uint16_t fin : 1; + uint16_t syn : 1; + uint16_t rst : 1; + uint16_t psh : 1; + uint16_t ack : 
1; + uint16_t urg : 1; + uint16_t ece : 1; + uint16_t cwr : 1; + uint16_t res1: 4; + uint16_t dOff: 4; + ) + }; + }; +} BlogTcpOffFlags_t; + +typedef struct BlogTcpHdr { + union { + uint8_t u8[BLOG_TCP_HDR_LEN]; + uint16_t u16[BLOG_TCP_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_TCP_HDR_LEN/sizeof(uint32_t)]; + struct { + uint16_t sPort; uint16_t dPort; + uint32_t seq; + uint32_t ackSeq; + BlogTcpOffFlags_t offFlags; uint16_t window; + uint16_t chkSum; uint16_t urgPtr; + }; + }; +} BlogTcpHdr_t; + + +/*----- User Datagram Protocol: RFC 768 definitions --------------------------*/ +#define BLOG_UDP_HDR_LEN 8 + +typedef struct BlogUdpHdr { + union { + uint8_t u8[BLOG_UDP_HDR_LEN]; + uint16_t u16[BLOG_UDP_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_UDP_HDR_LEN/sizeof(uint32_t)]; + struct { + uint16_t sPort; uint16_t dPort; + uint16_t len; uint16_t chkSum; + }; + }; +} BlogUdpHdr_t; + + +/*----- L2TP: RFC 2661 definitions -------------------------------------------*/ +#define BLOG_L2TP_HDR_LEN 8 + +typedef struct BlogL2tpIeFlagsVer { + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t type : 1; + uint16_t lenIe : 1; + uint16_t rsvd2 : 2; + uint16_t seqIe : 1; + uint16_t rsvd1 : 1; + uint16_t offIe : 1; + uint16_t prio : 1; + uint16_t rsvd4 : 4; + uint16_t ver : 4; + ) + LE_DECL( + uint16_t ver : 4; + uint16_t rsvd4 : 4; + uint16_t prio : 1; + uint16_t offIe : 1; + uint16_t rsvd1 : 1; + uint16_t seqIe : 1; + uint16_t rsvd2 : 2; + uint16_t lenIe : 1; + uint16_t type : 1; + ) + }; + }; +} BlogL2tpIeFlagsVer_t; + +typedef struct BlogL2tpHdr { + union { + uint8_t u8[BLOG_L2TP_HDR_LEN]; + uint16_t u16[BLOG_L2TP_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_L2TP_HDR_LEN/sizeof(uint32_t)]; + struct { + BlogL2tpIeFlagsVer_t ieFlagsVer; uint16_t len; + uint16_t tId; uint16_t sId; + /* uint16_t ns; uint16_t nr; + uint16_t offSz; uint16_t offPad; */ + }; + }; +} BlogL2tpHdr_t; + + +/*----- Generic Routing Encapsulation: RFC 2637, PPTP session, RFC 2784 ------*/ +#define BLOG_GRE_HDR_LEN 8 + +typedef struct BlogGreIeFlagsVer { + union { + uint16_t u16; + struct { + BE_DECL( + uint16_t csumIe : 1; + uint16_t rtgIe : 1; + uint16_t keyIe : 1; + uint16_t seqIe : 1; + uint16_t srcRtIe: 1; + uint16_t recurIe: 3; + uint16_t ackIe : 1; + uint16_t flags : 4; + uint16_t ver : 3; + ) + LE_DECL( + uint16_t ver : 3; + uint16_t flags : 4; + uint16_t ackIe : 1; + uint16_t recurIe: 3; + uint16_t srcRtIe: 1; + uint16_t seqIe : 1; + uint16_t keyIe : 1; + uint16_t rtgIe : 1; + uint16_t csumIe : 1; + ) + }; + }; +} BlogGreIeFlagsVer_t; + +typedef struct BlogGreHdr { + union { + uint8_t u8[BLOG_GRE_HDR_LEN]; + uint16_t u16[BLOG_GRE_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_GRE_HDR_LEN/sizeof(uint32_t)]; + struct { + BlogGreIeFlagsVer_t ieFlagsVer; uint16_t proto; + /* RFC2784 specifies csum instead of len, for GRE ver = 0 */ + /* RFC2637 specifies len, for GRE ver=1 used with PPTP */ + uint16_t len; uint16_t callId; + /* uint32_t seqNum; present if seqIe = 1 */ + /* uint32_t ackNum; present if ackIe = 1 */ + }; + }; +} BlogGreHdr_t; + +/*----- VXLAN: RFC 7348 definitions --------------------------------------------------*/ +#define BLOG_VXLAN_PORT 4789 +#define BLOG_VXLAN_HDR_LEN 8 +#define BLOG_VXLAN_HF_VNI (1UL << 27) +#define BLOG_VXLAN_VNI_OFFSET 8 +typedef struct BlogVxlanHdr { + union { + uint8_t u8[BLOG_VXLAN_HDR_LEN]; + uint16_t u16[BLOG_VXLAN_HDR_LEN/sizeof(uint16_t)]; + uint32_t u32[BLOG_VXLAN_HDR_LEN/sizeof(uint32_t)]; + struct { + uint32_t flags; + uint32_t vni; + }; + }; +} BlogVxlanHdr_t; + +/* 
+ *------------------------------------------------------------------------------ + * Assert that headers are properly packed (without using attribute packed) + * + * #include <stdio.h> + * #include <stdint.h> + * #include "blog_net.h" + * int main() { + * printf("blog_net_audit_hdrs %d\n", blog_net_audit_hdrs() ); + * return blog_net_audit_hdrs(); + * } + *------------------------------------------------------------------------------ + */ +static inline int blog_net_audit_hdrs(void) +{ +#define BLOG_NET_AUDIT(hdrlen,hdrtype) \ + if (hdrlen != sizeof(hdrtype)) \ + return (-1) + + BLOG_NET_AUDIT( BLOG_ETH_ADDR_LEN, BlogEthAddr_t ); + BLOG_NET_AUDIT( BLOG_ETH_HDR_LEN, BlogEthHdr_t ); + BLOG_NET_AUDIT( BLOG_BRCM6_HDR_LEN, BlogBrcm6Hdr_t ); + BLOG_NET_AUDIT( BLOG_BRCM4_HDR_LEN, BlogBrcm4Hdr_t ); + BLOG_NET_AUDIT( BLOG_ETHBRCM6_HDR_LEN, BlogEthBrcm6Hdr_t ); + BLOG_NET_AUDIT( BLOG_ETHBRCM4_HDR_LEN, BlogEthBrcm4Hdr_t ); + BLOG_NET_AUDIT( BLOG_VLAN_HDR_LEN, BlogVlanHdr_t ); + BLOG_NET_AUDIT( BLOG_PPPOE_HDR_LEN, BlogPppoeHdr_t ); + BLOG_NET_AUDIT( BLOG_MPLS_HDR_LEN, BlogMplsHdr_t ); + BLOG_NET_AUDIT( BLOG_IPV4_ADDR_LEN, BlogIpv4Addr_t ); + BLOG_NET_AUDIT( BLOG_IPV4_HDR_LEN, BlogIpv4Hdr_t ); + BLOG_NET_AUDIT( BLOG_IPV6_ADDR_LEN, BlogIpv6Addr_t ); + BLOG_NET_AUDIT( BLOG_IPV6_HDR_LEN, BlogIpv6Hdr_t ); + BLOG_NET_AUDIT( BLOG_TCP_HDR_LEN, BlogTcpHdr_t ); + BLOG_NET_AUDIT( BLOG_UDP_HDR_LEN, BlogUdpHdr_t ); + BLOG_NET_AUDIT( BLOG_L2TP_HDR_LEN, BlogL2tpHdr_t ); + BLOG_NET_AUDIT( BLOG_GRE_HDR_LEN, BlogGreHdr_t ); + BLOG_NET_AUDIT( BLOG_VXLAN_HDR_LEN, BlogVxlanHdr_t); + + return 0; +} + + + + + +/* + *------------------------------------------------------------------------------ + * Network Utilities : 16bit aligned + *------------------------------------------------------------------------------ + */ +#if defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) +/* + *------------------------------------------------------------------------------ + * Function : blog_read32_align16 + * Description : Read a 32bit value from a 16 byte aligned data stream + *------------------------------------------------------------------------------ + */ +static inline uint32_t blog_read32_align16( uint16_t * from ) +{ + return (uint32_t)( (from[1] << 16) | from[0] ); +} + +/* + *------------------------------------------------------------------------------ + * Function : blog_write32_align16 + * Description : Write a 32bit value to a 16bit aligned data stream + *------------------------------------------------------------------------------ + */ +static inline void blog_write32_align16( uint16_t * to, uint32_t from ) +{ + to[1] = (uint16_t)htons(from >> 16); + to[0] = (uint16_t)htons(from >> 0); +} + +#elif defined(CONFIG_CPU_BIG_ENDIAN) + +/* + *------------------------------------------------------------------------------ + * Function : blog_read32_align16 + * Description : Read a 32bit value from a 16 byte aligned data stream + *------------------------------------------------------------------------------ + */ +static inline uint32_t blog_read32_align16( uint16_t * from ) +{ + return (uint32_t)( (from[0] << 16) | (from[1]) ); +} + +/* + *------------------------------------------------------------------------------ + * Function : blog_write32_align16 + * Description : Write a 32bit value to a 16bit aligned data stream + *------------------------------------------------------------------------------ + */ +static inline void blog_write32_align16( uint16_t * to, uint32_t from ) +{ + to[0] = 
(uint16_t)(from >> 16); + to[1] = (uint16_t)(from >> 0); +} +#endif /* defined(CONFIG_CPU_BIG_ENDIAN) */ + +#endif /* defined(__BLOG_NET_H_INCLUDED__) */ diff --git a/include/linux/blog_rule.h b/include/linux/blog_rule.h new file mode 100644 index 0000000000000000000000000000000000000000..b1b4cedfee5b555133698fd993d18b714bc5456c --- /dev/null +++ b/include/linux/blog_rule.h @@ -0,0 +1,264 @@ +#if defined(CONFIG_BLOG) +#ifndef __BLOG_RULE_H_INCLUDED__ +#define __BLOG_RULE_H_INCLUDED__ + +/* +* <:copyright-BRCM:2010:DUAL/GPL:standard +* +* Copyright (c) 2010 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. +* +:> +*/ + +/* + ******************************************************************************* + * + * File Name : blog_rule.h + * + * Description: Blog rules are extensions to a Blog structure that can be used + * to specify additional fiters and modifications. 
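+ *              A minimal usage sketch (illustrative only, error handling
+ *              omitted; it relies on the blogRule_t type and the user API
+ *              declared later in this header):
+ *
+ *                  blogRule_t *rule_p = blog_rule_alloc();
+ *                  blogRuleAction_t action = { .cmd = BLOG_RULE_CMD_SET_PBITS,
+ *                                              .pbits = 5 };
+ *                  blog_rule_init(rule_p);
+ *                  rule_p->filter.nbrOfVlanTags = 1;
+ *                  blog_rule_add_action(rule_p, &action);
+ *                  blog_rule_dump(rule_p);
+ *                  blog_rule_free(rule_p);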
+ * + ******************************************************************************* + */ + +#define CC_CONFIG_BLOG_RULE_DEBUG + +#define BLOG_RULE_VERSION "v1.0" + +#define BLOG_RULE_VLAN_TAG_MAX 3 + +#define BLOG_RULE_ACTION_MAX 16 + +#define BLOG_RULE_PBITS_MASK 0xE000 +#define BLOG_RULE_PBITS_SHIFT 13 +#define BLOG_RULE_DEI_MASK 0x1000 +#define BLOG_RULE_DEI_SHIFT 12 +#define BLOG_RULE_VID_MASK 0x0FFF +#define BLOG_RULE_VID_SHIFT 0 + +#define BLOG_RULE_GET_TCI_PBITS(_tci) \ + ( ((_tci) & BLOG_RULE_PBITS_MASK) >> BLOG_RULE_PBITS_SHIFT ) + +#define BLOG_RULE_GET_TCI_DEI(_tci) \ + ( ((_tci) & BLOG_RULE_DEI_MASK) >> BLOG_RULE_DEI_SHIFT ) + +#define BLOG_RULE_GET_TCI_VID(_tci) \ + ( (_tci) & BLOG_RULE_VID_MASK ) + +#define BLOG_RULE_DSCP_IN_TOS_MASK 0xFC +#define BLOG_RULE_DSCP_IN_TOS_SHIFT 2 + +#define BLOG_RULE_IP_PROTO_MASK 0xFF +#define BLOG_RULE_IP_PROTO_SHIFT 0 +#define BLOG_RULE_IP6_NXT_HDR_MASK 0xFF +#define BLOG_RULE_IP6_NXT_HDR_SHIFT 0 + +#define blog_rule_filterInUse(_filter) \ + ({ \ + char *_filter_p = (char *)(&_filter); \ + int _i, _val; \ + for(_i=0; _i<sizeof(_filter); ++_i) { \ + if((_val = _filter_p[_i]) != 0) break; \ + } \ + _val; \ + }) + +typedef struct { + struct ethhdr mask; + struct ethhdr value; +} blogRuleFilterEth_t; + +typedef struct { + union { + struct vlan_hdr mask; + uint32_t mask32; + }; + union { + struct vlan_hdr value; + uint32_t value32; + }; +} blogRuleFilterVlan_t; + +typedef struct { + /* only contains the fields we are interested */ + uint8_t tos; + uint8_t ip_proto; +} blogRuleIpv4Header_t; + +typedef struct { + blogRuleIpv4Header_t mask; + blogRuleIpv4Header_t value; +} blogRuleFilterIpv4_t; + +typedef struct { + /* only contains the fields we are interested */ + uint8_t tclass; + uint8_t nxtHdr; +} blogRuleIpv6Header_t; + +typedef struct { + blogRuleIpv6Header_t mask; + blogRuleIpv6Header_t value; +} blogRuleFilterIpv6_t; + +typedef struct { + uint32_t priority; /* skb priority filter value is offset by 1 because + * 0 is reserved to indicate filter not in use. + * Therefore the supported skb priority range is + * [0 to 0xfffffffe]. + */ + uint16_t markFlowId; + uint16_t markPort; /* port mark filter value is offset by 1 because + * 0 is reserved to indicate filter not in use. + * Therefore use 16-bit to cover the supported + * port range [0 to 255]. 
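+ * For example, under this offset-by-one convention a stored
+ * markPort of 1 selects skb port mark 0, while 0 leaves the
+ * filter unused; the priority filter above follows the same
+ * convention.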
+ */ +} blogRuleFilterSkb_t; + +typedef struct { + blogRuleFilterEth_t eth; + uint32_t nbrOfVlanTags; + blogRuleFilterVlan_t vlan[BLOG_RULE_VLAN_TAG_MAX]; + uint32_t hasPppoeHeader; + blogRuleFilterIpv4_t ipv4; + blogRuleFilterIpv6_t ipv6; + blogRuleFilterSkb_t skb; + uint32_t flags; +#define BLOG_RULE_FILTER_FLAGS_IS_UNICAST 0x0001 +#define BLOG_RULE_FILTER_FLAGS_IS_MULTICAST 0x0002 +#define BLOG_RULE_FILTER_FLAGS_IS_BROADCAST 0x0004 +} blogRuleFilter_t; + +#define BLOG_RULE_FILTER_FLAGS_ALL \ + ( BLOG_RULE_FILTER_FLAGS_IS_UNICAST | \ + BLOG_RULE_FILTER_FLAGS_IS_MULTICAST | \ + BLOG_RULE_FILTER_FLAGS_IS_BROADCAST ) + +#undef BLOG_RULE_DECL +#define BLOG_RULE_DECL(x) x + +typedef enum { + BLOG_RULE_DECL(BLOG_RULE_CMD_NOP=0), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_DA), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_MAC_SA), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_ETHERTYPE), + BLOG_RULE_DECL(BLOG_RULE_CMD_PUSH_VLAN_HDR), + BLOG_RULE_DECL(BLOG_RULE_CMD_POP_VLAN_HDR), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_PBITS), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DEI), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VID), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_VLAN_PROTO), + BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_PBITS), + BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_DEI), + BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VID), + BLOG_RULE_DECL(BLOG_RULE_CMD_COPY_VLAN_PROTO), +// BLOG_RULE_DECL(BLOG_RULE_CMD_XLATE_DSCP_TO_PBITS), + BLOG_RULE_DECL(BLOG_RULE_CMD_POP_PPPOE_HDR), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_DSCP), + BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_TTL), + BLOG_RULE_DECL(BLOG_RULE_CMD_DECR_HOP_LIMIT), + BLOG_RULE_DECL(BLOG_RULE_CMD_DROP), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_PORT), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_SKB_MARK_QUEUE), + BLOG_RULE_DECL(BLOG_RULE_CMD_OVRD_LEARNING_VID), + BLOG_RULE_DECL(BLOG_RULE_CMD_SET_STA_MAC_ADDRESS), + BLOG_RULE_DECL(BLOG_RULE_CMD_MAX) +} blogRuleCommand_t; + +typedef struct { + uint8_t cmd; // blogRuleCommand_t + uint8_t toTag; + union { + uint16_t etherType; + uint16_t tpid; + uint16_t pbits; + uint16_t dei; + uint16_t vid; + uint16_t vlanProto; + uint16_t dscp; + uint16_t fromTag; + uint16_t skbMarkQueue; + uint16_t skbMarkPort; + uint16_t arg; + uint8_t macAddr[ETH_ALEN]; + }; +} blogRuleAction_t; + +typedef struct blogRule { + blogRuleFilter_t filter; + uint32_t actionCount; + blogRuleAction_t action[BLOG_RULE_ACTION_MAX]; + struct blogRule *next_p; +} blogRule_t; + +typedef enum { + BLOG_RULE_VLAN_NOTIFY_DIR_RX, + BLOG_RULE_VLAN_NOTIFY_DIR_TX, + BLOG_RULE_VLAN_NOTIFY_DIR_MAX +} blogRuleVlanNotifyDirection_t; + +/* + * blogRuleVlanHook_t: The Linux VLAN manager must use this hook to register + * the handler that creates Blog Rules based on the configured VLAN Rules. + */ +typedef int (* blogRuleVlanHook_t)(void *arg_p, + struct net_device *rxVlanDev, + struct net_device *txVlanDev); + +/* + * blogRuleVlanNotifyHook_t: The Linux VLAN manager uses this hook to notify + * the registered handler whenever VLAN Rules are added or removed. + * The device (dev) can be either a VLAN interface or a Real interface. 
+ */ +typedef void (* blogRuleVlanNotifyHook_t)(struct net_device *dev, + blogRuleVlanNotifyDirection_t direction, + uint32_t nbrOfTags); + +extern blogRuleVlanHook_t blogRuleVlanHook; +extern blogRuleVlanNotifyHook_t blogRuleVlanNotifyHook; + +#ifdef CONFIG_BCM_ENET_TC_OFFLOAD +struct blog_t; +typedef int (*blogRuleTcHook_t)(struct blog_t *bl, + struct net_device *rxVlanDev, + struct net_device *txVlanDev); +extern blogRuleTcHook_t blogRuleTcHook; +#endif /* CONFIG_BCM_ENET_TC_OFFLOAD */ + +typedef int (* blogArlHook_t)(void *e); + +extern blogArlHook_t bcm_arl_process_hook_g; + +/* -------------- User API -------------- */ + +blogRule_t *blog_rule_alloc(void); +void blog_rule_free(blogRule_t *blogRule_p); +int blog_rule_free_list(void *blogRule_p); +void blog_rule_init(blogRule_t *blogRule_p); +void blog_rule_dump(blogRule_t *blogRule_p); +int blog_rule_add_action(blogRule_t *blogRule_p, blogRuleAction_t *action_p); +int blog_rule_delete_action(void *rule_p); + +#endif /* defined(__BLOG_RULE_H_INCLUDED__) */ +#endif /* defined(CONFIG_BLOG) */ diff --git a/include/linux/br_fp.h b/include/linux/br_fp.h new file mode 100644 index 0000000000000000000000000000000000000000..88a574548b2cc8196097b4694025d7be3ee92080 --- /dev/null +++ b/include/linux/br_fp.h @@ -0,0 +1,74 @@ +/* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef BR_FP_H +#define BR_FP_H + +#include <linux/device.h> +#include <linux/module.h> + +#define BR_FP_FDB_ADD 1 +#define BR_FP_FDB_REMOVE 2 +#define BR_FP_FDB_MODIFY 3 +#define BR_FP_FDB_CHECK_AGE 4 +#define BR_FP_PORT_ADD 5 +#define BR_FP_PORT_REMOVE 6 +#define BR_FP_LOCAL_SWITCHING_DISABLE 7 +#define BR_FP_BRIDGE_TYPE_SET 8 +#define BR_FP_BRIDGE_TYPE_GET 9 + +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) +struct br_fp_data +{ + int (*rdpa_hook)(int cmd, void *in, void *out); + void *rdpa_obj; +}; +#endif /* CONFIG_BCM_RDPA_BRIDGE || CONFIG_BCM_RDPA_BRIDGE_MODULE */ + +struct bcm_br_ext +{ +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) + struct br_fp_data br_fp_data; +#define bridge_fp_data_hook_get(_br_dev) (bcm_netdev_ext_field_get((_br_dev), bcm_br_ext.br_fp_data.rdpa_hook)) +#define bridge_fp_data_hook_set(_br_dev, _hook) bcm_netdev_ext_field_set((_br_dev), bcm_br_ext.br_fp_data.rdpa_hook, (_hook)) +#define bridge_fp_data_obj_get(_br_dev) (bcm_netdev_ext_field_get((_br_dev), bcm_br_ext.br_fp_data.rdpa_obj)) +#define bridge_fp_data_obj_set(_br_dev, _obj) bcm_netdev_ext_field_set((_br_dev), bcm_br_ext.br_fp_data.rdpa_obj, (_obj)) + +#define br_fp_hook(_br_dev, _cmd, _arg1, _arg2) (bridge_fp_data_hook_get((_br_dev)) ? 
bridge_fp_data_hook_get((_br_dev))((_cmd), (_arg1), (_arg2)) : 0) +#endif + +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) + int local_switching_disable; +#define bridge_local_switching_disable_get(_br_dev) (bcm_netdev_ext_field_get((_br_dev), bcm_br_ext.local_switching_disable)) +#define bridge_local_switching_disable_set(_br_dev, _val) bcm_netdev_ext_field_set((_br_dev), bcm_br_ext.local_switching_disable, (_val)) +#else +#define bridge_local_switching_disable_get(_br_dev) ((void)(_br_dev), 0) +#define bridge_local_switching_disable_set(_br_dev, _val) ((void)(_br_dev), (void)(_val), 0) +#endif + +#if defined(CONFIG_BCM_RDPA_BRIDGE) || defined(CONFIG_BCM_RDPA_BRIDGE_MODULE) + u32 mac_entry_discard_counter; +#define bridge_mac_entry_discard_counter_get(_br_dev) (bcm_netdev_ext_field_get((_br_dev), bcm_br_ext.mac_entry_discard_counter)) +#define bridge_mac_entry_discard_counter_set(_br_dev, _val) bcm_netdev_ext_field_set((_br_dev), bcm_br_ext.mac_entry_discard_counter, (_val)) +#define bridge_mac_entry_discard_counter_inc(_br_dev) ((bcm_netdev_ext_field_get((_br_dev), bcm_br_ext.mac_entry_discard_counter)++)) +#else +#define bridge_mac_entry_discard_counter_get(_br_dev) ((void)(br_dev), 0) +#define bridge_mac_entry_discard_counter_set(_br_dev, _val) do {} while(0) +#define bridge_mac_entry_discard_counter_inc(_br_dev) do {} while(0) +#endif +}; + +#endif /* BR_FP_H */ diff --git a/include/linux/brcm_dll.h b/include/linux/brcm_dll.h new file mode 100644 index 0000000000000000000000000000000000000000..3358c2de7d252635b585744f795016f377d68bc0 --- /dev/null +++ b/include/linux/brcm_dll.h @@ -0,0 +1,123 @@ +#ifndef _dll_t_ +#define _dll_t_ +/* +<:copyright-BRCM:2014:DUAL/GPL:standard + + Copyright (c) 2014 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + +#if !defined(_envelope_of) +/* derived from container_of, without "const", for gcc -Wcast-qual compile */ +#define _envelope_of(ptr, type, member) \ +({ \ + typeof(((type *)0)->member) *__mptr = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); \ +}) +#endif /* _envelope_of */ + + +typedef struct dll_t dll_t; /* common to wlan bcmutils.h, pktHdr.h */ +typedef struct dll_t { + dll_t * next_p; + dll_t * prev_p; +} Dll_t, * PDll_t; + +#define dll dll_t + +#define DLL_STRUCT_INITIALIZER(struct_name, dll_name) \ + { .next_p = &(struct_name).dll_name, .prev_p = &(struct_name).dll_name } + +#define dll_init(node_p) ((node_p)->next_p = (node_p)->prev_p = (node_p)) + +/* dll macros returing a "dll_t *" */ +#define dll_head_p(list_p) ((list_p)->next_p) +#define dll_tail_p(list_p) ((list_p)->prev_p) + +#define dll_next_p(node_p) ((node_p)->next_p) +#define dll_prev_p(node_p) ((node_p)->prev_p) + +#define dll_empty(list_p) ((list_p)->next_p == (list_p)) +#define dll_end(list_p, node_p) ((list_p) == (node_p)) + +/* inserts the node new_p "after" the node at_p */ +#define dll_insert(new_p, at_p) \ +({ \ + (new_p)->next_p = (at_p)->next_p; \ + (new_p)->prev_p = (at_p); \ + (at_p)->next_p = (new_p); \ + (new_p)->next_p->prev_p = (new_p); \ +}) + +#define dll_append(list_p, node_p) dll_insert((node_p), dll_tail_p(list_p)) +#define dll_prepend(list_p, node_p) dll_insert((node_p), (list_p)) + +/* deletes a node from any list that it "may" be in, if at all. */ +#define dll_delete(node_p) \ +({ \ + (node_p)->prev_p->next_p = (node_p)->next_p; \ + (node_p)->next_p->prev_p = (node_p)->prev_p; \ +}) + +/** + * dll_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Iterator "pos" may not be moved. + * + * If you need to delete the iterator, then use the below sample. + * dll_t * iter_p, * next_p; + * for (iter_p = dll_head_p(&someList); ! dll_end(&someList, iter_p); + * iter_p = next_p) + * { + * next_p = dll_next_p(iter_p); + * ... use iter_p at will, including removing it from list ... + * } + * + */ +#define dll_for_each(pos, head) \ + for (pos = (head)->next_p; pos != (head); pos = pos->next_p) + +/** + * Take all elements of list A and join them to the tail of list B. + * List A must not be empty and list A will be returned as an empty list. + */ +#define dll_join(listA_p, listB_p) \ +({ \ + dll_t *_listB_p = (listB_p); \ + dll_t *headA_p = dll_head_p(listA_p); \ + dll_t *tailA_p = dll_tail_p(listA_p); \ + dll_t *tailB_p = dll_tail_p(listB_p); \ + /* Link up list B's tail to list A's head */ \ + headA_p->prev_p = tailB_p; \ + tailB_p->next_p = headA_p; \ + /* Make list A's tail to be list B's new tail */ \ + tailA_p->next_p = (listB_p); \ + _listB_p->prev_p = tailA_p; \ + dll_init(listA_p); \ +}) + +#endif /* ! 
defined(_dll_t_) */ diff --git a/include/linux/buzzz_kevt.h b/include/linux/buzzz_kevt.h new file mode 100644 index 0000000000000000000000000000000000000000..1530b1655d0a77870cfde9a3557c7690fe78b62c --- /dev/null +++ b/include/linux/buzzz_kevt.h @@ -0,0 +1,81 @@ +#ifndef __buzzz_kevt_h_included__ +#define __buzzz_kevt_h_included__ + +#if defined(CONFIG_BUZZZ_KEVT) || defined(CONFIG_BUZZZ_FUNC) +/* + * +---------------------------------------------------------------------------- + * + * BCM BUZZZ ARM Cortex A9 Router Kernel events + * + * $Copyright Open Broadcom Corporation$ + * $Id$ + * + * vim: set ts=4 noet sw=4 tw=80: + * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- + * + * +---------------------------------------------------------------------------- + */ + +#include <uapi/linux/buzzz.h> + +#undef BUZZZ_KEVT +#define BUZZZ_KEVT(event) BUZZZ_KEVT__ ## event, + +#define BUZZZ_KEVT_REG(event, format) \ + buzzz_event_reg(BUZZZ_KEVT__## event, "\t\t" format); + +/** + * DO NOT SUBMIT USER EVENTS + * + * For private debug (not for submission), a user event may be added by: + * 1. Add an enum entry to buzzz_rtr_dpid, + * e.g. BUZZZ_KEVT(HELLO_WORLD) + * + * 2. Add an entry to buzzz_dp_init(), + * e.g. BUZZZ_KEVT_REG(HELLO_WORLD, "hello world at %pS from %pS") + * + * 3. In source code base, insert instrumentation: + * e.g. BUZZZ_DPL3(HELLO_WORLD, 2, + * BUZZZ32(BUZZZ_CUR_IP_), BUZZZ32(BUZZZ_RET_IP_)); + * + * See uapi/linux/buzzz.h, where BUZZZ_DPL tracing level is set to 3, thereby + * enabling all instrumentations BUZZZ_DPL1(), BUZZZ_DPL2() and BUZZZ_DPL3() + * Instrumentation with BUZZZ_DPL4() and BUZZZ_DPL5() are compiled out. + * + * Second parameter to BUZZZ_DPL#() specifies the number of arguments to be + * logged, in the above example, it is 2 arguments (maximum 3 arguments). + * - First argument in example is current instruction address, and + * - Second argument is return address. + * Arguments are 32bit values. [Gets messy on 64b aarch] + * + * Do not forget to invoke, buzzz_dp_init() once ... say in a module init. + */ +typedef +enum buzzz_rtr_dpid +{ + BUZZZ_KEVT__DATAPATH_START = 100, + + BUZZZ_KEVT(SAMPLE) + /* Define user events here */ + +} buzzz_rtr_dpid_t; + + +/* Invoke this once in a datapath module's init */ +static inline int +buzzz_dp_init(void) +{ + BUZZZ_KEVT_REG(SAMPLE, "sample pkt<%p>") + /* Add user event logs here */ + + return 0; +} +#else /* ! CONFIG_BUZZZ */ +#define BUZZZ_DPL1(ID, N, ARG...) do {} while (0) +#define BUZZZ_DPL2(ID, N, ARG...) do {} while (0) +#define BUZZZ_DPL3(ID, N, ARG...) do {} while (0) +#define BUZZZ_DPL4(ID, N, ARG...) do {} while (0) +#define BUZZZ_DPL5(ID, N, ARG...) do {} while (0) +#endif /* ! CONFIG_BUZZZ */ + +#endif /* __buzzz_kevt_h_included__ */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index e8839d3a75591960cf169b20f32fe951e6ac314f..8d5f97b03906958ac1af8d7c18c520b2ed754496 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -117,6 +117,10 @@ */ #define CRYPTO_NOLOAD 0x00008000 +#if defined(CONFIG_BLOG) && defined(CONFIG_BCM_KF_BLOG) +#define CRYPTO_ALG_BLOG 0x80000000 +#endif + /* * Transform masks and values (for crt_flags). 
*/ @@ -128,6 +132,9 @@ #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#if defined(CONFIG_BLOG) && defined(CONFIG_BCM_KF_BLOG) +#define CRYPTO_TFM_REQ_MAY_BLOG 0x00080000 +#endif #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index cde6708644aed5a68f7d8047a16e99d10b2a60f4..fa8cc7c408dec1adc8cf6df6b31f7341d8931003 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -12,6 +12,9 @@ #include <linux/blkdev.h> #include <linux/math64.h> #include <linux/ratelimit.h> +#if defined(CONFIG_BCM_KF_DM_CREATE_BACKPORT) +#include <linux/dm-ioctl.h> +#endif //defined(CONFIG_BCM_KF_DM_CREATE_BACKPORT) struct dm_dev; struct dm_target; @@ -431,6 +434,16 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start); union map_info *dm_get_rq_mapinfo(struct request *rq); +#if defined(CONFIG_BCM_KF_DM_CREATE_BACKPORT) +/* + * Device mapper functions to parse and create devices specified by the + * parameter "dm-mod.create=" + */ +int __init dm_early_create(struct dm_ioctl *dmi, + struct dm_target_spec **spec_array, + char **target_params_array); +#endif //defined(CONFIG_BCM_KF_DM_CREATE_BACKPORT) + struct queue_limits *dm_get_queue_limits(struct mapped_device *md); /* diff --git a/include/linux/dpi.h b/include/linux/dpi.h new file mode 100644 index 0000000000000000000000000000000000000000..19665916d36cb4ce81894884a73543897c970e32 --- /dev/null +++ b/include/linux/dpi.h @@ -0,0 +1,139 @@ +#if defined(CONFIG_BCM_KF_DPI) +#ifndef _LINUX_DPI_H +#define _LINUX_DPI_H + +#include <linux/if_ether.h> +#include <linux/list.h> + +#define DPI_APPID_ONGOING_BIT 0 +#define DPI_APPID_IDENTIFIED_BIT 1 +#define DPI_APPID_FINAL_BIT 2 +#define DPI_APPID_STOP_CLASSIFY_BIT 3 +#define DPI_APPID_RESYNC_BIT 4 +#define DPI_DEVID_ONGOING_BIT 5 +#define DPI_DEVID_IDENTIFIED_BIT 6 +#define DPI_DEVID_FINAL_BIT 7 +#define DPI_DEVID_STOP_CLASSIFY_BIT 8 +#define DPI_URL_STOP_CLASSIFY_BIT 9 +#define DPI_PRIORITY_TO_DSCP_BIT 10 +#define DPI_CLASSIFICATION_STOP_BIT 14 +#define DPI_CT_INIT_FROM_WAN_BIT 15 +#define DPI_CT_DS_BYPASS_BIT 29 +#define DPI_CT_US_BYPASS_BIT 30 +#define DPI_CT_BLOCK_BIT 31 + +#define DPI_NL_CHANGE_MASK (1 << DPI_CT_BLOCK_BIT) + +#define DPI_URLINFO_MAX_HOST_LEN 64 +/* 256 was chosen as the max length of a hostname in a DHCP packet is 255. 
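+ * The extra byte presumably leaves room for a terminating NUL.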
*/ +#define DPI_HOSTNAME_MAX_LEN 256 + +#define dpi_ct_init_from_wan(ct) \ + test_bit(DPI_CT_INIT_FROM_WAN_BIT, &(ct)->bcm_ext.dpi.flags) + +struct dpi_ct_stats { + u64 pkts; + u64 bytes; +}; + +struct dpi_app { + u32 app_id; + atomic_t refcount; + struct hlist_node node; +}; + +struct dpi_dev { + u8 mac[ETH_ALEN]; + u8 ignore; + + u32 dev_id; + u16 category; + u16 family; + u16 vendor; + u16 os; + u16 os_class; + u16 prio; + char hostname[DPI_HOSTNAME_MAX_LEN]; + + struct dpi_ct_stats us; + struct dpi_ct_stats ds; + + u32 ndi_id; + atomic_t refcount; + struct hlist_node node; +}; + +struct dpi_appinst { + struct dpi_app *app; + struct dpi_dev *dev; + struct dpi_ct_stats us; + struct dpi_ct_stats ds; + atomic_t refcount; + struct hlist_node node; +}; + +struct dpi_url { + u32 len; + char hostname[DPI_URLINFO_MAX_HOST_LEN]; + atomic_t refcount; + struct hlist_node node; +}; + +struct dpi_info { + struct dpi_dev *dev; + struct dpi_app *app; + struct dpi_appinst *appinst; + struct dpi_url *url; + unsigned long flags; + u8 eg_prio; + u8 dscp; + struct net_device *net_dev; +}; + +struct nf_conn; + +struct dpi_core_hooks { + void (*delete)(struct nf_conn *ct); +}; + +struct dpi_ct_hooks { + int (*event_report)(int eventmask, struct nf_conn *ct, u32 portid, + int report); +}; + +/* ----- dpi functions ----- */ +struct dpi_info *dpi_info_get(struct nf_conn *conn); +u32 dpi_app_id(struct dpi_app *app); +u32 dpi_dev_id(struct dpi_dev *dev); +u8 *dpi_mac(struct dpi_dev *dev); +int dpi_url_len(struct dpi_url *url); +char *dpi_url(struct dpi_url *url); +struct dpi_ct_stats *dpi_appinst_stats(struct nf_conn *ct, int dir); +struct dpi_ct_stats *dpi_dev_stats(struct nf_conn *ct, int dir); +void dpi_block(struct nf_conn *conn); +void dpi_nf_ct_delete_from_lists(struct nf_conn *ct); +int dpi_core_hooks_register(struct dpi_core_hooks *h); +void dpi_core_hooks_unregister(void); +int dpi_nf_ct_event_report(struct nf_conn *ct, u32 portid); +void dpi_conntrack_init(void); +void dpi_conntrack_cleanup(void); + +/* dpi notification chain */ +struct notifier_block; +enum { + DPI_NOTIFY_DEVICE, +}; +int dpi_register_notifier(struct notifier_block *nb); +int dpi_unregister_notifier(struct notifier_block *nb); +int dpi_notify(long event, void *data); + +extern int (*dpi_tdts_shell_dpi_l3_skb)(struct sk_buff *skb, void *p); +extern int (*dpi_tdts_shell_system_setting_tcp_conn_max_get)(unsigned *p); +extern int (*dpi_tdts_shell_system_setting_tcp_conn_timeout_get)(unsigned *p); +extern int (*dpi_tdts_shell_system_setting_udp_conn_max_get)(unsigned *p); +extern int (*dpi_tdts_shell_tcp_conn_remove)(uint8_t ip_ver, uint8_t *sip, + uint8_t *dip, uint16_t sport, uint16_t dport); +extern int (*dpi_tdts_shell_udp_conn_remove)(uint8_t ip_ver, uint8_t *sip, + uint8_t *dip, uint16_t sport, uint16_t dport); +#endif /* _LINUX_DPI_H */ +#endif /* defined(CONFIG_BCM_KF_DPI) */ diff --git a/include/linux/gbpm.h b/include/linux/gbpm.h new file mode 100644 index 0000000000000000000000000000000000000000..4aaff7e6ac9421ad2dc6e60cbcd1a5dc6107b3f2 --- /dev/null +++ b/include/linux/gbpm.h @@ -0,0 +1,437 @@ +/* + * +<:copyright-BRCM:2007:DUAL/GPL:standard + + Copyright (c) 2007 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special 
exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +#ifndef __GBPM_H_INCLUDED__ +#define __GBPM_H_INCLUDED__ + +/* + ******************************************************************************* + * File Name : gbpm.h + * + ******************************************************************************* + */ +#define GBPM_VERSION "v0.1" +#define GBPM_VER_STR GBPM_VERSION +#define GBPM_MODNAME "Broadcom GBPM" + +#define GBPM_ERROR (-1) +#define GBPM_SUCCESS 0 + +#define GBPM_HIGH_PRIO_ALLOC 1 +#define GBPM_LOW_PRIO_ALLOC 0 + +#if defined(CONFIG_BCM_XTMCFG) || defined(CONFIG_BCM_XTMCFG_MODULE) +#define GBPM_XTM_SUPPORT +#endif + +typedef enum { + GBPM_PORT_ETH, + GBPM_PORT_XTM, + GBPM_PORT_FWD, + GBPM_PORT_WLAN, + GBPM_PORT_USB, + GBPM_PORT_MAX +} gbpm_port_t; + +struct gbpm_hw_buffer_mngr { + const char *name; + int (*pool_init)(void); + void (*pool_exit)(void); +}; + +#if defined(CONFIG_BCM_BPM_BUF_TRACKING) +typedef enum { + GBPM_REF_BUFF, + GBPM_REF_FKB, + GBPM_REF_SKB +} gbpm_reftype_t; + +typedef enum { + GBPM_DRV_BPM, + GBPM_DRV_ETH, + GBPM_DRV_XTM, + GBPM_DRV_KERN, + GBPM_DRV_BDMF, + GBPM_DRV_ARCHER, + GBPM_DRV_MAX +} gbpm_driver_t; + +typedef enum { + GBPM_VAL_UNMARKED, + GBPM_VAL_ALLOC, + GBPM_VAL_CLONE, + GBPM_VAL_RECYCLE, + GBPM_VAL_FREE, + GBPM_VAL_RX, + GBPM_VAL_TX, + GBPM_VAL_ENTER, + GBPM_VAL_EXIT, + GBPM_VAL_INFO, + GBPM_VAL_INIT, + GBPM_VAL_COPY_SRC, + GBPM_VAL_COPY_DST, + GBPM_VAL_XLATE, + GBPM_VAL_MAX +} gbpm_value_t; + +typedef struct { + size_t addr; + union { + uint16_t word; + struct { + uint16_t driver:4; + uint16_t info:4; + uint16_t reftype:2; + uint16_t value:6; + }; + }; +} gbpm_mark_t; + +typedef struct { + atomic_t ref_cnt; + atomic_t idle_cnt; + uint32_t write; + gbpm_mark_t *mbuf_p; +} gbpm_trail_t; + +typedef void (*gbpm_mark_buf_hook_t) (void *, void *, int, int, int, int); +typedef void (*gbpm_add_ref_hook_t) (void *, int); +#endif /* bpm tracking */ + +/* + *----------------------------------------------------------------------------- + * GBPM callbacks are managed in a single global instantiation of gbpm_t gbpm_g + * GBPM Hooks may be viewed as "BPM" callbacks and "User" callbacks. + * - GBPM_BIND() lists all BPM callbacks + * - GBPM_USER() lists all USER callbacks (bind per user driver module). 
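+ *
+ * Illustrative expansion of this template (based on the GBPM_DECL
+ * definitions that appear later in this header): with
+ *
+ *     #define GBPM_DECL(HOOKNAME) gbpm_ ## HOOKNAME ## _hook_t HOOKNAME;
+ *
+ * GBPM_BIND() inside struct gbpm expands to one member per hook, e.g.
+ *
+ *     gbpm_alloc_buf_hook_t alloc_buf;
+ *
+ * and the inline wrappers below simply dispatch through the global
+ * instance, e.g. gbpm_alloc_buf() returns gbpm_g.alloc_buf().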
+ *----------------------------------------------------------------------------- + */ + +/* GBPM_DECL may be undef/define, for GBPM_BIND and GBPM_USER template usage */ +#define GBPM_BIND() \ + /* --- BPM BUF POOL --- */ \ + GBPM_DECL(alloc_mult_buf) \ + GBPM_DECL(free_mult_buf) \ + GBPM_DECL(alloc_buf) \ + GBPM_DECL(free_buf) \ + /* --- BPM SKB POOL --- */ \ + GBPM_DECL(total_skb) \ + GBPM_DECL(avail_skb) \ + GBPM_DECL(attach_skb) \ + GBPM_DECL(alloc_skb) \ + GBPM_DECL(alloc_buf_skb_attach) \ + GBPM_DECL(alloc_mult_skb) \ + GBPM_DECL(free_skb) \ + GBPM_DECL(free_skblist) \ + GBPM_DECL(invalidate_dirtyp) \ + GBPM_DECL(recycle_skb) \ + /* --- BPM pNBuff --- */ \ + GBPM_DECL(recycle_pNBuff) \ + /* --- BPM Get Accessors --- */ \ + GBPM_DECL(get_dyn_buf_lvl) \ + GBPM_DECL(get_total_bufs) \ + GBPM_DECL(get_avail_bufs) \ + GBPM_DECL(get_max_dyn_bufs) \ + /* --- BPM Runtime --- */ \ + GBPM_DECL(resv_rx_buf) \ + GBPM_DECL(unresv_rx_buf) \ + GBPM_DECL(is_buf_hw_recycle_capable) \ + GBPM_DECL(recycle_hw_buf) \ + GBPM_DECL(register_hw_pool_api) + + + /* --- BPM Users --- */ +#define GBPM_ENET() GBPM_DECL(enet_status) + +#if defined(GBPM_XTM_SUPPORT) +#define GBPM_XTM() \ + GBPM_DECL(xtm_status) \ + GBPM_DECL(xtm_thresh) +#else +#define GBPM_XTM() +#endif /* GBPM_XTM_SUPPORT */ + +#define GBPM_USER() \ + GBPM_ENET() \ + GBPM_XTM() + +/* + * typedefs for callbacks managed by GBPM. + */ + +/* --- BPM BUF POOL --- */ +/* buffer allocation/free */ +typedef int (*gbpm_alloc_mult_buf_hook_t)(uint32_t, void **, uint32_t); +typedef void (*gbpm_free_mult_buf_hook_t)(uint32_t, void **); +typedef void *(*gbpm_alloc_buf_hook_t)(void); +typedef void (*gbpm_free_buf_hook_t)(void *); +typedef void (*gbpm_recycle_pNBuff_hook_t)(void *, unsigned long, uint32_t); + +/* --- BPM SKB POOL --- */ +typedef uint32_t (*gbpm_total_skb_hook_t)(void); +typedef uint32_t (*gbpm_avail_skb_hook_t)(void); +typedef void (*gbpm_attach_skb_hook_t)(void *, void *, uint32_t); +typedef void *(*gbpm_alloc_skb_hook_t)(void); +typedef void *(*gbpm_alloc_buf_skb_attach_hook_t)(uint32_t); +typedef void *(*gbpm_alloc_mult_skb_hook_t)(uint32_t); +typedef void (*gbpm_free_skb_hook_t)(void *); +typedef void (*gbpm_free_skblist_hook_t)(void *, void *, uint32_t, void **); +typedef void *(*gbpm_invalidate_dirtyp_hook_t)(void *); +typedef void (*gbpm_recycle_skb_hook_t)(void *, unsigned long, uint32_t); + +/* --- BPM Get Accessors --- */ +typedef int (*gbpm_get_dyn_buf_lvl_hook_t)(void); +typedef uint32_t (*gbpm_get_total_bufs_hook_t)(void); +typedef uint32_t (*gbpm_get_avail_bufs_hook_t)(void); +typedef uint32_t (*gbpm_get_max_dyn_bufs_hook_t)(void); + +/* --- BPM Set Accessors --- */ +typedef void (*gbpm_upd_buf_lvl_hook_t)(int); + +/* --- BPM Runtime --- */ +typedef int (*gbpm_resv_rx_buf_hook_t)(gbpm_port_t, uint32_t, uint32_t, + uint32_t); +typedef int (*gbpm_unresv_rx_buf_hook_t)(gbpm_port_t, uint32_t); + +/* --- BPM HW Buffer Manager ---*/ +typedef bool (*gbpm_is_buf_hw_recycle_capable_hook_t)(void *); +typedef void (*gbpm_recycle_hw_buf_hook_t)(void *, uint32_t); +typedef int (*gbpm_register_hw_pool_api_hook_t)(void *, void *); + +/* --- BPM User --- */ +typedef void (*gbpm_thresh_hook_t)(void); +typedef void (*gbpm_status_hook_t)(void); + + +/* --- BPM User instantiations --- */ +typedef gbpm_status_hook_t gbpm_enet_status_hook_t; +typedef gbpm_thresh_hook_t gbpm_enet_thresh_hook_t; + +typedef gbpm_status_hook_t gbpm_xtm_status_hook_t; +typedef gbpm_thresh_hook_t gbpm_xtm_thresh_hook_t; + + +/* Typedef of the Global BPM hook manager */ +#undef 
GBPM_DECL +#define GBPM_DECL(HOOKNAME) gbpm_ ## HOOKNAME ## _hook_t HOOKNAME; + +typedef struct gbpm { + GBPM_BIND() /* List of BPM "BIND" hooks */ + GBPM_USER() /* List of DRV "USER" hooks */ + uint32_t debug; + bool is_hw_buffer_mngr_registered; + struct gbpm_hw_buffer_mngr hw_buffer_mngr; +} gbpm_t; + +extern gbpm_t gbpm_g; /* exported global */ + +int gbpm_register_hw_buffer_manager(struct gbpm_hw_buffer_mngr *new_mngr); + +/* BPM registering callbacks into GBPM */ +#undef GBPM_DECL +#define GBPM_DECL(HOOKNAME) gbpm_ ## HOOKNAME ## _hook_t HOOKNAME, + +void gbpm_bind(GBPM_BIND() uint32_t debug); +void gbpm_unbind(void); + +void gbpm_queue_work(void); + +/* + * Wrappers for GBPM callbacks + */ + +/* --- BPM BUF POOL --- */ +/* combine gbpm_g.alloc_mult_buf and gbpm_g.alloc_mult_buf_ex, they + * are really the same functions + */ +static inline int gbpm_alloc_mult_buf(uint32_t num, void **buf_p) +{ + return gbpm_g.alloc_mult_buf(num, buf_p, GBPM_LOW_PRIO_ALLOC); +} + +static inline int gbpm_alloc_mult_buf_ex(uint32_t num, void **buf_p, + uint32_t prio) +{ + return gbpm_g.alloc_mult_buf(num, buf_p, prio); +} + +static inline void gbpm_free_mult_buf(uint32_t num, void **buf_p) +{ + gbpm_g.free_mult_buf(num, buf_p); +} + +static inline void *gbpm_alloc_buf(void) +{ + return gbpm_g.alloc_buf(); +} + +static inline void gbpm_free_buf(void *buf_p) +{ + return gbpm_g.free_buf(buf_p); +} + +/* --- BPM SKB --- */ +static inline uint32_t gbpm_total_skb(void) +{ + return gbpm_g.total_skb(); +} + +static inline uint32_t gbpm_avail_skb(void) +{ + return gbpm_g.avail_skb(); +} + +static inline void gbpm_attach_skb(void *skbp, void *data, uint32_t datalen) +{ + gbpm_g.attach_skb(skbp, data, datalen); +} + +static inline void *gbpm_alloc_skb(void) +{ + return gbpm_g.alloc_skb(); +} + +static inline void *gbpm_alloc_buf_skb_attach(uint32_t datalen) +{ + return gbpm_g.alloc_buf_skb_attach(datalen); +} + +static inline void *gbpm_alloc_mult_skb(uint32_t num) +{ + return gbpm_g.alloc_mult_skb(num); +} + +static inline void gbpm_free_skb(void *skbp) +{ + gbpm_g.free_skb(skbp); +} + +static inline void gbpm_free_skblist(void *head, void *tail, uint32_t num, + void **bufp_arr) +{ + gbpm_g.free_skblist(head, tail, num, bufp_arr); +} + +static inline void *gbpm_invalidate_dirtyp(void *skb) +{ + return gbpm_g.invalidate_dirtyp(skb); +} + +static inline void gbpm_recycle_skb(void *skbp, unsigned long context, + uint32_t recycle_action) +{ + gbpm_g.recycle_skb(skbp, context, recycle_action); +} + +/* --- BPM pNBuff --- */ +static inline void gbpm_recycle_pNBuff(void *pNBuff, unsigned long context, + uint32_t recycle_action) +{ + gbpm_g.recycle_pNBuff(pNBuff, context, recycle_action); +} + + +/* --- BPM Get Accessors --- */ +static inline int gbpm_get_dyn_buf_lvl(void) +{ + return gbpm_g.get_dyn_buf_lvl(); +} + +static inline uint32_t gbpm_get_total_bufs(void) +{ + return gbpm_g.get_total_bufs(); +} + +static inline uint32_t gbpm_get_avail_bufs(void) +{ + return gbpm_g.get_avail_bufs(); +} + +static inline uint32_t gbpm_get_max_dyn_bufs(void) +{ + return gbpm_g.get_max_dyn_bufs(); +} + +/* --- BPM Runtime --- */ +static inline int gbpm_resv_rx_buf(gbpm_port_t port, uint32_t chnl, + uint32_t num_rx_buf, uint32_t bulk_alloc_cnt) +{ + return gbpm_g.resv_rx_buf(port, chnl, num_rx_buf, bulk_alloc_cnt); +} + +static inline int gbpm_unresv_rx_buf(gbpm_port_t port, uint32_t chnl) +{ + return gbpm_g.unresv_rx_buf(port, chnl); +} + +/* --- BPM HW Buffer Manager ---*/ +static inline bool gbpm_is_buf_hw_recycle_capable(void 
*buf_p) +{ + return gbpm_g.is_buf_hw_recycle_capable(buf_p); +} + +static inline void gbpm_recycle_hw_buf(void *buf_p, uint32_t recycle_context) +{ + return gbpm_g.recycle_hw_buf(buf_p, recycle_context); +} + +static inline int gbpm_register_hw_pool_api(void *ops_p, void *id_p) +{ + return gbpm_g.register_hw_pool_api(ops_p, id_p); +} + +#if defined(CONFIG_BCM_BPM_BUF_TRACKING) +void gbpm_mark_buf(void *buf_p, void *addr, int reftype, int driver, int value, + int info); +void gbpm_add_ref(void *buf_p, int i); + +#define GBPM_TRACK_BUF(buf, drv, value, info) \ + gbpm_mark_buf((void *)(buf), (void *)0, GBPM_REF_BUFF, (drv), (value), \ + (info)) + +#define GBPM_TRACK_SKB(skb, drv, value, info) \ + gbpm_mark_buf((void *)((skb)->data), (void *)(skb), GBPM_REF_SKB, \ + (drv), (value), (info)) + +#define GBPM_TRACK_FKB(fkb, drv, value, info) \ + gbpm_mark_buf((void *)((fkb)->data), (void *)(fkb), GBPM_REF_FKB, \ + (drv), (value), (info)) \ + +#define GBPM_INC_REF(buf) gbpm_add_ref((buf), 1) +#define GBPM_DEC_REF(buf) gbpm_add_ref((buf), -1) +#else +#define GBPM_TRACK_BUF(buf, drv, value, info) do {} while (0) +#define GBPM_TRACK_SKB(skb, drv, value, info) do {} while (0) +#define GBPM_TRACK_FKB(fkb, drv, value, info) do {} while (0) +#define GBPM_INC_REF(buf) do {} while (0) +#define GBPM_DEC_REF(buf) do {} while (0) +#endif + +#endif /* defined(__GBPM_H_INCLUDED__) */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 778d3ef939d89f6ffc18dae35be5cc758cbc5dd2..e6529b2984b374977b9b2f1242586b2ac22924cd 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2923,6 +2923,26 @@ struct ieee80211_tspec_ie { __le16 medium_time; } __packed; +#ifdef CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT +struct ieee80211_he_6ghz_capa { + /* uses IEEE80211_HE_6GHZ_CAP_* below */ + __le16 capa; +} __packed; + +/* HE 6 GHz band capabilities */ +/* uses enum ieee80211_min_mpdu_spacing values */ +#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007 +/* uses enum ieee80211_vht_max_ampdu_length_exp values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038 +/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0 +/* WLAN_HT_CAP_SM_PS_* values */ +#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600 +#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800 +#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000 +#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000 +#endif /* CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT */ + /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 60afff19d7fcd23336fee68e48f0fb5c4b988500..6dac2140945443e49fc4c3ff92e7fd51920defb5 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -81,7 +81,11 @@ static inline bool is_vlan_dev(const struct net_device *dev) } #define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#if defined(CONFIG_BCM_KF_VLAN_DEI) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE)) +#define skb_vlan_tag_get(__skb) (((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) | skbuff_bcm_ext_vlan_get(__skb, cfi_save)) +#else #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#endif #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) @@ -477,7 +481,14 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb = vlan_insert_tag_set_proto(skb, 
skb->vlan_proto, skb_vlan_tag_get(skb)); if (likely(skb)) +#if defined(CONFIG_BCM_KF_VLAN_DEI) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE)) + { + skbuff_bcm_ext_vlan_get(skb, cfi_save) = 0; skb->vlan_tci = 0; + } +#else + skb->vlan_tci = 0; +#endif return skb; } @@ -494,6 +505,9 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, { skb->vlan_proto = vlan_proto; skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; +#if defined(CONFIG_BCM_KF_VLAN_DEI) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE)) + skbuff_bcm_ext_vlan_get(skb, cfi_save) = vlan_tci & VLAN_CFI_MASK; +#endif } /** diff --git a/include/linux/iqos.h b/include/linux/iqos.h new file mode 100644 index 0000000000000000000000000000000000000000..57192e3ec3b6c483646818b2e0f91a9eae8ebe5f --- /dev/null +++ b/include/linux/iqos.h @@ -0,0 +1,268 @@ +#ifndef __IQOS_H_INCLUDED__ +#define __IQOS_H_INCLUDED__ + +/* +<:copyright-BRCM:2009:DUAL/GPL:standard + + Copyright (c) 2009 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + + +/* + ******************************************************************************* + * File Name : ingqos.h + * + ******************************************************************************* + */ +#define IQOS_VERSION "v1.0" +#define IQOS_VER_STR IQOS_VERSION +#define IQOS_MODNAME "Broadcom IQoS " + +#include <linux/if_ether.h> + +typedef enum { + IQOS_PARAM_TYPE_KEYMASK, + IQOS_PARAM_TYPE_KEY, + IQOS_PARAM_TYPE_MAX +} iqos_param_type_t; + +typedef enum { + IQOS_FIELD_INGRESS_DEVICE, + IQOS_FIELD_SRC_MAC, + IQOS_FIELD_DST_MAC, + IQOS_FIELD_ETHER_TYPE, + IQOS_FIELD_OUTER_VID, + IQOS_FIELD_OUTER_PBIT, + IQOS_FIELD_INNER_VID, + IQOS_FIELD_INNER_PBIT, + IQOS_FIELD_L2_PROTO, + IQOS_FIELD_L3_PROTO, + IQOS_FIELD_IP_PROTO, + IQOS_FIELD_SRC_IP, + IQOS_FIELD_DST_IP, + IQOS_FIELD_DSCP, + IQOS_FIELD_IPV6_FLOW_LABEL, + IQOS_FIELD_SRC_PORT, + IQOS_FIELD_DST_PORT, + IQOS_FIELD_OFFSET_0, + IQOS_FIELD_OFFSET_START = IQOS_FIELD_OFFSET_0, + IQOS_FIELD_OFFSET_0_TYPE, + IQOS_FIELD_OFFSET_0_START, + IQOS_FIELD_OFFSET_0_SIZE, + IQOS_FIELD_OFFSET_0_MASK, + IQOS_FIELD_OFFSET_1, + IQOS_FIELD_OFFSET_1_TYPE, + IQOS_FIELD_OFFSET_1_START, + IQOS_FIELD_OFFSET_1_SIZE, + IQOS_FIELD_OFFSET_1_MASK, + IQOS_FIELD_OFFSET_END = IQOS_FIELD_OFFSET_1_MASK, + IQOS_FIELD_MAX +} iqos_field_t; + +typedef enum { + IQOS_ACTION_NOP, + IQOS_ACTION_PRIO, + IQOS_ACTION_DROP, + IQOS_ACTION_DST_Q, + IQOS_ACTION_TRAP, + IQOS_ACTION_MAX +} iqos_action_t; + +typedef enum { + IQOS_OFFSET_TYPE_L2, + IQOS_OFFSET_TYPE_L3, + IQOS_OFFSET_TYPE_L4, + IQOS_OFFSET_TYPE_MAX +} iqos_offset_type_t; + +typedef struct { + uint32_t type; + uint32_t start; + uint32_t size; + uint32_t mask; +} iqos_offset_data_t; + +#define IQOS_PACKET_CACHE_MAX_SIZE 128 +typedef struct { + uint32_t ingress_device; + uint8_t src_mac[ETH_HLEN]; + uint8_t dst_mac[ETH_HLEN]; + uint16_t eth_type; + uint16_t outer_vid; + uint8_t outer_pbit; + uint16_t inner_vid; + uint8_t inner_pbit; + uint16_t l2_proto; + uint16_t l3_proto; + uint8_t ip_proto; + uint8_t is_ipv6; + uint32_t src_ip[4]; + uint32_t dst_ip[4]; + uint8_t dscp; + uint32_t flow_label; + uint16_t l4_src_port; + uint16_t l4_dst_port; + uint16_t l2_offset; + uint16_t l3_offset; + uint16_t l4_offset; + /* used for storing part of packet buffer for offset check */ + uint8_t packet_cache[IQOS_PACKET_CACHE_MAX_SIZE]; +} iqos_data_t; + +typedef struct { + uint32_t param_type; + uint8_t prio; + uint8_t type; + uint32_t field_mask; + uint32_t action; + uint32_t action_value; + iqos_data_t data; + iqos_offset_data_t offset0; + iqos_offset_data_t offset1; +} iqos_param_t; + +typedef enum { + IQOS_ENT_DYN, + IQOS_ENT_STAT, + IQOS_ENT_MAX +} iqos_ent_t; + +typedef enum { + IQOS_PRIO_LOW, + IQOS_PRIO_HIGH, + IQOS_PRIO_MAX +} iqos_prio_t; + +typedef enum { + IQOS_CONG_STATUS_LO, + IQOS_CONG_STATUS_HI, + IQOS_CONG_STATUS_MAX +} iqos_cong_status_t; + +typedef enum { + IQOS_STATUS_DISABLE, + IQOS_STATUS_ENABLE, + IQOS_STATUS_MAX +} iqos_status_t; + +typedef struct { + uint8_t ipProto; + uint16_t destPort; + iqos_ent_t ent; + iqos_prio_t prio; +} iqos_config_t; + +typedef int (*iqos_common_hook_t)(iqos_param_t *param); +typedef void (*iqos_void_hook_t)(void); +typedef int (*iqos_int_hook_t)(uint32_t val); + +/* the original APIs that are backward supported by the new driver/module */ +int iqos_add_L4port(uint8_t ipProto, uint16_t destPort, iqos_ent_t ent, + iqos_prio_t prio); +int iqos_rem_L4port(uint8_t ipProto, uint16_t destPort, iqos_ent_t ent); +int iqos_prio_L4port(uint8_t ipProto, uint16_t destPort); + +/* the new APIs 
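+ * follow a start, field-set, action-set, commit pattern. An illustrative
+ * sketch (not a verified call sequence; it assumes the matching keymask
+ * has already been added and that the commit 'type' argument takes an
+ * iqos_ent_t value) for prioritizing traffic to one destination port:
+ *
+ *     iqos_param_t param;
+ *     uint16_t dport = 5060;
+ *     iqos_key_param_start(&param);
+ *     iqos_key_param_field_set(&param, IQOS_FIELD_DST_PORT,
+ *                              &dport, sizeof(dport));
+ *     iqos_key_param_action_set(&param, IQOS_ACTION_PRIO, IQOS_PRIO_HIGH);
+ *     iqos_key_commit_and_add(&param, IQOS_ENT_STAT);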
*/ + +/* APIs for setting up keymask: + * WARNING!! one will have to perform iqos_flush() to delete all the dynamic + * entry before making any change of the keymasks. + * Deleting a keymask that has key refer to it will fail and return error */ +int iqos_keymask_param_start(iqos_param_t *param); +int iqos_keymask_param_field_set(iqos_param_t *param, uint32_t field, uint32_t *val_ptr); +int iqos_keymask_commit_and_add(iqos_param_t *param, uint8_t prio); +int iqos_keymask_commit_and_delete(iqos_param_t *param); + +/* APIs for setting up key, + * example can be found in iqos_[add/rem/prio]_L4port */ +int iqos_key_param_start(iqos_param_t *param); +int iqos_key_param_field_set(iqos_param_t *param, uint32_t field, + void *val_ptr, uint32_t val_size); +int iqos_key_param_action_set(iqos_param_t *param, uint32_t action, + uint32_t value); +int iqos_key_commit_and_add(iqos_param_t *param, uint8_t type); +int iqos_key_commit_and_delete(iqos_param_t *param, uint8_t type); +int iqos_key_commit_and_get(iqos_param_t *param); + +/* API to flush all the dynamic entries, and + * delete keymask if no key refers to it */ +void iqos_flush(void); + +/* API to set the status for IQOS to enabled(1) or disabled(0) */ +int iqos_set_status(uint32_t status); + +void iqos_bind(iqos_common_hook_t iqos_add_keymask, + iqos_common_hook_t iqos_rem_keymask, + iqos_common_hook_t iqos_add_key, + iqos_common_hook_t iqos_rem_key, + iqos_common_hook_t iqos_get_key, + iqos_int_hook_t iqos_set_status, + iqos_void_hook_t iqos_flush); + + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) +#define IQOS_LOCK_IRQSAVE() spin_lock_irqsave( &iqos_cong_lock_g, flags ) +#define IQOS_UNLOCK_IRQRESTORE() spin_unlock_irqrestore( &iqos_cong_lock_g, flags ) +#define IQOS_LOCK_BH() spin_lock_bh( &iqos_lock_g ) +#define IQOS_UNLOCK_BH() spin_unlock_bh( &iqos_lock_g ) +#else +#define IQOS_LOCK_IRQSAVE() local_irq_save(flags) +#define IQOS_UNLOCK_IRQRESTORE() local_irq_restore(flags) +#define IQOS_LOCK_BH() NULL_STMT +#define IQOS_UNLOCK_BH() NULL_STMT +#endif + +#if IS_ENABLED(CONFIG_BCM_INGQOS) +#define IQOS_RXCHNL_MAX 4 +#define IQOS_RXCHNL_DISABLED 0 +#define IQOS_RXCHNL_ENABLED 1 +#define IQOS_MAX_RX_RING_SIZE 4096 + +typedef enum { + IQOS_IF_ENET, + IQOS_IF_ENET_RXCHNL0 = IQOS_IF_ENET, + IQOS_IF_ENET_RXCHNL1, + IQOS_IF_ENET_RXCHNL2, + IQOS_IF_ENET_RXCHNL3, + IQOS_IF_XTM, + IQOS_IF_XTM_RXCHNL0 = IQOS_IF_XTM, + IQOS_IF_XTM_RXCHNL1, + IQOS_IF_XTM_RXCHNL2, + IQOS_IF_XTM_RXCHNL3, + IQOS_IF_FWD, + IQOS_IF_FWD_RXCHNL0 = IQOS_IF_FWD, + IQOS_IF_FWD_RXCHNL1, + IQOS_IF_WL, + IQOS_IF_USB, + IQOS_IF_MAX, +} iqos_if_t; + +iqos_cong_status_t iqos_get_sys_cong_status(void); +iqos_cong_status_t iqos_get_cong_status(iqos_if_t iface, uint32_t chnl); +uint32_t iqos_set_cong_status(iqos_if_t iface, uint32_t chnl, + iqos_cong_status_t status); +#endif +#endif /* defined(__IQOS_H_INCLUDED__) */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 1f96ce2b47df1f21548ed892ba58af4bb5622c2a..5f18dedc0f346770ba9f04a64ae2501172196335 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -168,7 +168,14 @@ static inline bool kallsyms_show_value(const struct cred *cred) static inline void print_ip_sym(unsigned long ip) { +#if defined(CONFIG_BCM_KF_EXTRA_DEBUG) + if (( is_kernel_text(ip) || is_kernel_inittext(ip)) || (ip >= MODULES_VADDR && ip < MODULES_END)) + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); + else + printk("[<%p>] (suspected corrupt symbol)\n", (void *) ip); +#else printk("[<%px>] %pS\n", (void *) ip, (void *) ip); 
+#endif } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 3f8e84a80b4add29878e2fa938bc767c8e03e9ff..b2ee86eeee39ecb9c69259b0e5cf1337a5698e83 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -322,4 +322,8 @@ bool mmc_card_is_blockaddr(struct mmc_card *card); #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) +#if defined(CONFIG_BCM_KF_MMC_OOPS) && defined(CONFIG_MMC_OOPS) +int mmc_oops_card_set(struct mmc_card *card); +#endif /* CONFIG_MMC_OOPS */ + #endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 134a6483347a1e565d91c812c1e1212e989463bb..ce705d97fa16ca769941c4ec5c43cc11efc3a517 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -173,6 +173,9 @@ struct mmc_request { struct mmc_card; void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); +#if defined(CONFIG_BCM_KF_MMC_OOPS) && defined(CONFIG_MMC_OOPS) +extern void mmc_wait_for_oops_req(struct mmc_host *, struct mmc_request *); +#endif /* CONFIG_MMC_OOPS */ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 31013c2effd3d5c7b148d601dff665e8ed10e7b1..8262dd45b75b69f32d80ace6afbd988208c42ecb 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -29,6 +29,18 @@ void *module_alloc(unsigned long size); /* Free memory returned from module_alloc. */ void module_memfree(void *module_region); +#ifdef CONFIG_BCM_KF_MISC_BACKPORTS +/* Determines if the section name is an init section (that is only used during + * module loading). + */ +bool module_init_section(const char *name); + +/* Determines if the section name is an exit section (that is only used during + * module unloading) + */ +bool module_exit_section(const char *name); +#endif /* #ifdef CONFIG_BCM_KF_MISC_BACKPORTS */ + /* * Apply the given relocation to the (simplified) ELF. Return -error * or 0. diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a4be6b2bcc35a45a541516be2dc6c5389ba3a840..c3a91e517148f89b38e49c9e0e545842bcc90c95 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -198,6 +198,15 @@ enum nand_ecc_algo { */ #define NAND_USE_BOUNCE_BUFFER 0x00100000 +#if defined(CONFIG_BCM_KF_MTD_BCMNAND) +/* For Hynix MLC flashes, the BI are written to last and (last-2) pages. */ +#define NAND_SCAN_BI_3RD_PAGE 0x10000000 + +/* NOP=1 NAND SLC device */ +#define NAND_PAGE_NOP1 0x20000000 +#endif + + /* * In case your controller is implementing ->cmd_ctrl() and is relying on the * default ->cmdfunc() implementation, you may want to let the core handle the @@ -1331,6 +1340,13 @@ struct nand_chip { uint16_t ecc_strength_ds; uint16_t ecc_step_ds; int onfi_timing_mode_default; +#if defined(CONFIG_BCM_KF_MTD_BCMNAND) + /* Before ONFI auto timing adjustment in kernel, add Broadcom specific timing + * parameters for better performnace. + */ + uint32_t timing_1; + uint32_t timing_2; +#endif int badblockpos; int badblockbits; @@ -1518,6 +1534,13 @@ struct nand_flash_dev { uint16_t step_ds; } ecc; int onfi_timing_mode_default; +#if defined(CONFIG_BCM_KF_MTD_BCMNAND) + /* before ONFI auto timing adjustment in kernel, add Broadcom specific timing + * parameters for better performnace. 
+ */ + uint32_t timing_1; + uint32_t timing_2; +#endif }; /** diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 088ff96c3eb6d4964df57fad76cc4f7a76089267..8605186650cf427fb0f14a50c48b60902439ca4d 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -197,6 +197,9 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; +#ifdef CONFIG_BCM_KF_MTD_BCMNAND +extern const struct spinand_manufacturer broadcom_spinand_manufacturer; +#endif /** * struct spinand_op_variants - SPI NAND operation variants diff --git a/include/linux/nbuff.h b/include/linux/nbuff.h new file mode 100755 index 0000000000000000000000000000000000000000..31a31922b0cf9d16a2de7fcd64238c975a43d7ba --- /dev/null +++ b/include/linux/nbuff.h @@ -0,0 +1,1783 @@ +#ifndef __NBUFF_H_INCLUDED__ +#define __NBUFF_H_INCLUDED__ + + +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +/* + ******************************************************************************* + * + * File Name : nbuff.h + * Description: Definition of a network buffer to support various forms of + * network buffer, to include Linux socket buff (SKB), lightweight + * fast kernel buff (FKB), BRCM Free Pool buffer (FPB), and traffic + * generator support buffer (TGB) + * + * nbuff.h may also be used to provide an interface to common APIs + * available on other OS (in particular BSD style mbuf). + * + * Common APIs provided: pushing, pulling, reading, writing, cloning, freeing + * + * Implementation Note: + * + * One may view NBuff as a base class from which other buff types are derived. + * Examples of derived network buffer types are sk_buff, fkbuff, fpbuff, tgbuff + * + * A pointer to a buffer is converted to a pointer to a special (derived) + * network buffer type by encoding the type into the least significant 2 bits + * of a word aligned buffer pointer. pBuf points to the real network + * buffer and pNBuff refers to pBuf ANDed with the Network Buffer Type. + * C++ this pointer to a virtual class (vtable based virtual function thunks). + * + * Thunk functions to redirect the calls to the appropriate buffer type, e.g. + * SKB or FKB uses the Network Buffer Pointer type information. + * + * This file also implements the Fast Kernel Buffer API. 
The fast kernel buffer + * carries a minimal context of the received buffer and associated buffer + * recycling information. + * + ******************************************************************************* */ + +#include <linux/version.h> +#include <generated/autoconf.h> +#include <linux/types.h> /* include ISO C99 inttypes.h */ +#include <linux/skbuff.h> /* include corresponding BSD style mbuf */ +#include <bcm_pkt_lengths.h> +#include <linux/netdevice.h> +#ifdef CONFIG_BLOG +#include <linux/blog.h> +#endif +#include <linux/blog_net.h> /*TODO rename this file as bcm_net.h as it's not specific to blog */ + +#define NBUFF_VERSION "v1.0" + + +/* Engineering Constants for Fast Kernel Buffer Global Pool (used for clones) */ +#define SUPPORT_FKB_EXTEND +#define FKBC_POOL_SIZE_ENGG (2080) /*1280 more to be allocated for wireless*/ +#define FKBC_EXTEND_SIZE_ENGG 32 /* Number of FkBuf_t per extension*/ +#define FKBC_EXTEND_MAX_ENGG 16 /* Maximum extensions allowed */ + +#define FKBM_POOL_SIZE_ENGG 128 +#define FKBM_EXTEND_SIZE_ENGG 32 +#define FKBM_EXTEND_MAX_ENGG \ + (((CONFIG_BCM_MAX_MCAST_GROUPS * (CONFIG_BCM_MAX_MCAST_CLIENTS+1))/FKBM_EXTEND_SIZE_ENGG) + 1) + +/* + * Network device drivers ported to NBUFF must ensure that the headroom is at + * least 186 bytes in size. Remove this dependancy (TBD). + */ +// #define CC_FKB_HEADROOM_AUDIT + +/* Conditional compile of FKB functional APIs as inlined or non-inlined */ +#define CC_CONFIG_FKB_FN_INLINE +#ifdef CC_CONFIG_FKB_FN_INLINE +#define FKB_FN(fn_name, fn_signature, body) \ +static inline fn_signature { body; } /* APIs inlined in header file */ +#else +#ifdef FKB_IMPLEMENTATION_FILE +#define FKB_FN(fn_name, fn_signature, body) \ +fn_signature { body; } \ +EXPORT_SYMBOL(fn_name); /* APIs declared in implementation */ +#else +#define FKB_FN(fn_name, fn_signature, body) \ +extern fn_signature; +#endif /* !defined(FKB_IMPLEMENTATION_FILE) */ +#endif /* !defined(FKB_FN) */ + +/* LAB ONLY: Design development */ +//#define CC_CONFIG_FKB_STATS +//#define CC_CONFIG_FKB_COLOR +//#define CC_CONFIG_FKB_DEBUG +//#define CC_CONFIG_FKB_AUDIT +//#define CC_CONFIG_FKB_STACK + +// #include <linux/smp.h> /* smp_processor_id() CC_CONFIG_FKB_AUDIT */ + +#if defined(CC_CONFIG_FKB_STATS) +#define FKB_STATS(stats_code) do { stats_code } while(0) +#else +#define FKB_STATS(stats_code) NULL_STMT +#endif + +#if defined(CC_CONFIG_FKB_STACK) +extern void dump_stack(void); +#define DUMP_STACK() dump_stack() +#else +#define DUMP_STACK() NULL_STMT +#endif + +#if defined(CC_CONFIG_FKB_AUDIT) +#define FKB_AUDIT(audit_code) do { audit_code } while(0) +#else +#define FKB_AUDIT(audit_code) NULL_STMT +#endif + +extern int nbuff_dbg; +#if defined(CC_CONFIG_FKB_DEBUG) +#define fkb_dbg(lvl, fmt, arg...) \ + if (nbuff_dbg >= lvl) printk( "FKB %s :" fmt "[<%pS>]\n", \ + __FUNCTION__, ##arg, __builtin_return_address(0) ) +#define FKB_DBG(debug_code) do { debug_code } while(0) +#else +#define fkb_dbg(lvl, fmt, arg...) do {} while(0) +#define FKB_DBG(debug_code) NULL_STMT +#endif + + +#define CC_NBUFF_FLUSH_OPTIMIZATION + +/* CACHE OPERATIONS */ +#define FKB_CACHE_FLUSH 0 +#define FKB_CACHE_INV 1 + +/* OS Specific Section Begin */ +#if defined(__KERNEL__) /* Linux Cache Specific */ +/* + *------------------------------------------------------------------------------ + * common cache operations: + * + * - addr is rounded down to the cache line + * - end is rounded up to cache line. + * + * - if ((addr == end) and (addr was cache aligned before rounding)) + * no operation is performed. 
+ * else + * flush data cache line UPTO but NOT INCLUDING rounded up end. + * + * Note: + * if before rounding, (addr == end) AND addr was not cache aligned, + * we would flush at least one line. + * + * Uses: L1_CACHE_BYTES + *------------------------------------------------------------------------------ + */ +#include <asm/cache.h> + +extern void cache_flush_data_len(void *addr, int len); + +/* + * Macros to round down and up, an address to a cachealigned address + */ +#define ADDR_ALIGN_DN(addr, align) ( (addr) & ~((align) - 1) ) +#define ADDR_ALIGN_UP(addr, align) ( ((addr) + (align) - 1) & ~((align) - 1) ) + +#if defined(CONFIG_ARM) + +#include <asm/cacheflush.h> + +#ifdef CONFIG_CPU_CACHE_V7 +#define __cpuc_flush_line(_addr) \ + __asm__ __volatile__("mcr p15, 0, %0, c7, c14, 1" : : "r" (_addr)) +#define __cpuc_clean_line(_addr) \ + __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 1" : : "r" (_addr)) +#define __cpuc_inv_line(_addr) \ + __asm__ __volatile__("mcr p15, 0, %0, c7, c6, 1" : : "r" (_addr)) +#else +#define __cpuc_flush_line(_addr) do {} while(0) +#define __cpuc_clean_line(_addr) do {} while(0) +#define __cpuc_inv_line(_addr) do {} while(0) +#endif + +#if defined(CONFIG_ARM_L1_CACHE_SHIFT) +#define L1_CACHE_LINE_SIZE (0x1 << CONFIG_ARM_L1_CACHE_SHIFT) +#else +#warning There is no L1 cache line size defined! +#endif + +#if defined(CONFIG_OUTER_CACHE) + +#if defined(CONFIG_CACHE_L2X0) +#define L2_CACHE_LINE_SIZE 32 +#endif + +#if defined(L2_CACHE_LINE_SIZE) && (L1_CACHE_LINE_SIZE != L2_CACHE_LINE_SIZE) +#warning L1 Cache line size is different from L2 cache line size! +#endif + +#define CONFIG_OPTIMIZED_CACHE_FLUSH 1 +#endif + +static inline void cache_flush_len(void *addr, int len); +static inline void _cache_flush_len(void *addr, int len); + +#ifdef CONFIG_BCM_GLB_COHERENCY +#define cache_invalidate_len_outer_first(virt_addr, len) +#define cache_invalidate_region_outer_first(virt_addr, end) +#define cache_invalidate_len(virt_addr, len) +#define cache_invalidate_region(virt_addr, end) +#define cache_flush_region(addr, end) +#else + +/* the following functions are optimized that it does NOT support + * HIGHMEM in 32-bit system, please make sure buffer allocated + * are in memory zone 'Normal' or before */ +static inline void cache_invalidate_len_outer_first(void *virt_addr, int len) +{ + uintptr_t start_vaddr = (uintptr_t)virt_addr; + uintptr_t end_vaddr = start_vaddr + len; +#if defined(CONFIG_OUTER_CACHE) + uintptr_t start_paddr = virt_to_phys(virt_addr); + uintptr_t end_paddr = start_paddr + len; +#endif + +#if defined(CONFIG_OUTER_CACHE) + outer_spin_lock_irqsave(); +#endif + /* 1st, flush & invalidate if start addr and / or end addr are not + * cache line aligned */ + if (start_vaddr & (L1_CACHE_LINE_SIZE - 1)) { + start_vaddr &= ~(L1_CACHE_LINE_SIZE - 1); + __cpuc_flush_line(start_vaddr); +#if defined(CONFIG_OUTER_CACHE) + dsb(); +#endif + start_vaddr += L1_CACHE_LINE_SIZE; + } + +#if defined(CONFIG_OUTER_CACHE) + if (start_paddr & (L2_CACHE_LINE_SIZE - 1)) { + start_paddr &= ~(L2_CACHE_LINE_SIZE - 1); + outer_flush_line_no_lock(start_paddr); + outer_sync_no_lock(); + start_paddr += L2_CACHE_LINE_SIZE; + } +#endif + + if (end_vaddr & (L1_CACHE_LINE_SIZE - 1)) { + end_vaddr &= ~(L1_CACHE_LINE_SIZE - 1); + __cpuc_flush_line(end_vaddr); +#if defined(CONFIG_OUTER_CACHE) + dsb(); +#endif + } + +#if defined(CONFIG_OUTER_CACHE) + if (end_paddr & (L2_CACHE_LINE_SIZE - 1)) { + end_paddr &= ~(L2_CACHE_LINE_SIZE - 1); + outer_flush_line_no_lock(end_paddr); + outer_sync_no_lock(); + } 
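+	/* The partially covered cache lines around the unaligned start and end
+	 * addresses have now been cleaned and written back at L1 and L2; the
+	 * loops below can invalidate the fully covered lines outright. */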
+#endif + +#if defined(CONFIG_OUTER_CACHE) + /* now do the real invalidation jobs */ + while (start_paddr < end_paddr) { + outer_inv_line_no_lock(start_paddr); + start_paddr += L2_CACHE_LINE_SIZE; + } + outer_sync_no_lock(); +#endif + + /* now do the real invalidation jobs */ + while (start_vaddr < end_vaddr) { + __cpuc_inv_line(start_vaddr); + start_vaddr += L1_CACHE_LINE_SIZE; + } + + dsb(); +#if defined(CONFIG_OUTER_CACHE) + outer_spin_unlock_irqrestore(); +#endif + + if ((len >= PAGE_SIZE) && (((uintptr_t)virt_addr & ~PAGE_MASK) == 0)) + set_bit(PG_dcache_clean, &phys_to_page(virt_to_phys(virt_addr))->flags); +} + +static inline void cache_invalidate_region_outer_first(void *virt_addr, void *end) +{ + cache_invalidate_len_outer_first(virt_addr, + (uintptr_t)end - (uintptr_t)virt_addr); +} + +static inline void cache_invalidate_len(void *virt_addr, int len) +{ + uintptr_t start_vaddr = (uintptr_t)virt_addr; + uintptr_t end_vaddr = start_vaddr + len; +#if defined(CONFIG_OUTER_CACHE) + uintptr_t start_paddr = virt_to_phys(virt_addr); + uintptr_t end_paddr = start_paddr + len; +#endif + +#if defined(CONFIG_OUTER_CACHE) + outer_spin_lock_irqsave(); +#endif + /* 1st, flush & invalidate if start addr and / or end addr are not + * cache line aligned */ + if (start_vaddr & (L1_CACHE_LINE_SIZE - 1)) { + start_vaddr &= ~(L1_CACHE_LINE_SIZE - 1); + __cpuc_flush_line(start_vaddr); +#if defined(CONFIG_OUTER_CACHE) + dsb(); +#endif + start_vaddr += L1_CACHE_LINE_SIZE; + } + +#if defined(CONFIG_OUTER_CACHE) + if (start_paddr & (L2_CACHE_LINE_SIZE - 1)) { + start_paddr &= ~(L2_CACHE_LINE_SIZE - 1); + outer_flush_line_no_lock(start_paddr); + start_paddr += L2_CACHE_LINE_SIZE; + } +#endif + + if (end_vaddr & (L1_CACHE_LINE_SIZE - 1)) { + end_vaddr &= ~(L1_CACHE_LINE_SIZE - 1); + __cpuc_flush_line(end_vaddr); +#if defined(CONFIG_OUTER_CACHE) + dsb(); +#endif + } + +#if defined(CONFIG_OUTER_CACHE) + if (end_paddr & (L2_CACHE_LINE_SIZE - 1)) { + end_paddr &= ~(L2_CACHE_LINE_SIZE - 1); + outer_flush_line_no_lock(end_paddr); + } +#endif + + /* now do the real invalidation jobs */ + while (start_vaddr < end_vaddr) { + __cpuc_inv_line(start_vaddr); +#if defined(CONFIG_OUTER_CACHE) + dsb(); + outer_inv_line_no_lock(start_paddr); + start_paddr += L2_CACHE_LINE_SIZE; +#endif + start_vaddr += L1_CACHE_LINE_SIZE; + } +#if defined(CONFIG_OUTER_CACHE) + outer_sync_no_lock(); + outer_spin_unlock_irqrestore(); +#else + dsb(); +#endif + + if ((len >= PAGE_SIZE) && (((uintptr_t)virt_addr & ~PAGE_MASK) == 0)) + set_bit(PG_dcache_clean, &phys_to_page(virt_to_phys(virt_addr))->flags); +} + +static inline void cache_invalidate_region(void *virt_addr, void *end) +{ + cache_invalidate_len(virt_addr, + (uintptr_t)end - (uintptr_t)virt_addr); +} + +static inline void cache_flush_region(void *addr, void *end) +{ + cache_flush_len(addr, (uintptr_t)end - (uintptr_t)addr); +} + +#endif /* CONFIG_BCM_GLB_COHERENCY */ + +static inline void cache_flush_len(void *addr, int len) +{ +#ifndef CONFIG_BCM_GLB_COHERENCY + _cache_flush_len(addr, len); +#endif +} +static inline void fpm_cache_flush_len(void *addr, int len) +{ +#if !defined(CONFIG_BCM_GLB_COHERENCY) || defined(CONFIG_BCM_FPM_COHERENCY_EXCLUDE) + _cache_flush_len(addr, len); +#endif +} + +static inline void _cache_flush_len(void *addr, int len) +{ + uintptr_t start_vaddr = (uintptr_t)addr & ~(L1_CACHE_LINE_SIZE - 1); + uintptr_t end_vaddr = (uintptr_t)addr + len; +#if defined(CONFIG_OUTER_CACHE) + uintptr_t start_paddr = (uintptr_t)virt_to_phys((void *)start_vaddr); +#endif + +#if 
defined(CONFIG_OUTER_CACHE) + outer_spin_lock_irqsave(); +#endif +#if defined(CONFIG_OPTIMIZED_CACHE_FLUSH) + /* this function has been optimized in a non-recommended way, if any + * type of packet error occurs, please try undefine + * CONFIG_OPTIMIZED_CACHE_FLUSH to use the recommended algorithm + * provided by ARM cache document. + * Usually, when we have multiple levels of cache, in a cache_flush + * case, we do L1_clean -> L2_clean -> L2_invalidate -> L1_clean + * -> L1_invalidate, we can optimize this sequence to L1_clean -> + * L2_flush -> L1_flush. This is our original approach. However, + * this will introduce 3 loops of cache operation. + * This optimized method will do L1_flush -> L2_flush. This will only + * introduce 2 loops of cache operation, but it also puts us into + * danger that L2 cache might update L1 cache on the cache line + * that should have been invalidated. */ + + while (start_vaddr < end_vaddr) { + __cpuc_flush_line(start_vaddr); + start_vaddr += L1_CACHE_LINE_SIZE; +#if defined(CONFIG_OUTER_CACHE) + dsb(); + outer_flush_line_no_lock(start_paddr); + start_paddr += L2_CACHE_LINE_SIZE; +#endif + } +#if defined(CONFIG_OUTER_CACHE) + outer_sync_no_lock(); +#else + wmb(); +#endif +#else /* the non-optimized cache_flush */ + while (start_vaddr < end_vaddr) { +#if defined(CONFIG_OUTER_CACHE) + __cpuc_clean_line(start_vaddr); + dsb(); + outer_flush_line_no_lock(start_paddr); + start_paddr += L2_CACHE_LINE_SIZE; + outer_sync_no_lock(); +#endif + __cpuc_flush_line(start_vaddr); + start_vaddr += L1_CACHE_LINE_SIZE; + } + wmb(); +#endif +#if defined(CONFIG_OUTER_CACHE) + outer_spin_unlock_irqrestore(); +#endif +} + + +static inline uint32_t _is_kptr_(const void * vptr) +{ + return ( (uintptr_t)vptr > 0x0FFFFFFF ); +} + +#elif defined(CONFIG_ARM64) + +#define nbuff_flush_dcache_area(addr, len) \ + __asm__ __volatile__ ( \ + "mov x0, %0 \n" \ + "mov x1, %1 \n" \ + "mrs x3, ctr_el0 \n" \ + "ubfm x3, x3, #16, #19 \n" \ + "mov x2, #4 \n" \ + "lsl x2, x2, x3 \n" \ + "add x1, x0, x1 \n" \ + "sub x3, x2, #1 \n" \ + "bic x0, x0, x3 \n" \ + "1: dc civac, x0 \n" \ + "add x0, x0, x2 \n" \ + "cmp x0, x1 \n" \ + "b.lo 1b \n" \ + "dsb sy \n" \ + : : "r" ((uintptr_t)addr), "r" ((uintptr_t)len) \ + : "x0", "x1", "x2", "x3", "cc") + +#define nbuff_inval_dcache_range(start, end) \ + __asm__ __volatile__ ( \ + "mov x0, %0 \n" \ + "mov x1, %1 \n" \ + "mrs x3, ctr_el0 \n" \ + "ubfm x3, x3, #16, #19 \n" \ + "mov x2, #4 \n" \ + "lsl x2, x2, x3 \n" \ + "sub x3, x2, #1 \n" \ + "tst x1, x3 \n" \ + "bic x1, x1, x3 \n" \ + "b.eq 1f \n" \ + "dc civac, x1 \n" \ + "1: tst x0, x3 \n" \ + "bic x0, x0, x3 \n" \ + "b.eq 2f \n" \ + "dc civac, x0 \n" \ + "b 3f \n" \ + "2: dc ivac, x0 \n" \ + "3: add x0, x0, x2 \n" \ + "cmp x0, x1 \n" \ + "b.lo 2b \n" \ + "dsb sy \n" \ + : : "r" ((uintptr_t)start), "r" ((uintptr_t)end)\ + : "x0", "x1", "x2", "x3", "cc") + +static inline void cache_flush_region(void *addr, void *end) +{ +#ifndef CONFIG_BCM_GLB_COHERENCY + nbuff_flush_dcache_area(addr, (uintptr_t)end - (uintptr_t)addr); +#endif +} + +static inline void cache_flush_len(void *addr, int len) +{ +#ifndef CONFIG_BCM_GLB_COHERENCY + nbuff_flush_dcache_area(addr, len); +#endif +} + +static inline void fpm_cache_flush_len(void *addr, int len) +{ +#if !defined(CONFIG_BCM_GLB_COHERENCY) || defined(CONFIG_BCM_FPM_COHERENCY_EXCLUDE) + nbuff_flush_dcache_area(addr, len); +#endif +} + +static inline void cache_invalidate_region(void *addr, void *end) +{ +#ifndef CONFIG_BCM_GLB_COHERENCY + nbuff_inval_dcache_range(addr, end); 
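+	/* When CONFIG_BCM_GLB_COHERENCY is enabled the platform keeps caches
+	 * coherent in hardware, so this maintenance call is compiled out. */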
+#endif +} + +static inline void cache_invalidate_len(void *addr, int len) +{ +#ifndef CONFIG_BCM_GLB_COHERENCY + nbuff_inval_dcache_range(addr, (void*)((uintptr_t)addr+len)); +#endif +} + +#define cache_invalidate_region_outer_first(a, b) cache_invalidate_region(a, b) +#define cache_invalidate_len_outer_first(a, b) cache_invalidate_len(a, b) + +static inline uint32_t _is_kptr_(const void * vptr) +{ + return ( (uintptr_t)vptr > 0xFFFFFF8000000000 ); +} +#endif + +#endif /* defined(__KERNEL__) Linux MIPS Cache Specific */ +/* OS Specific Section End */ + + +/* + * For BSD style mbuf with FKB : + * generate nbuff.h by replacing "SKBUFF" to "BCMMBUF", and, + * use custom arg1 and arg2 instead of mark and priority, respectively. + */ + +#ifdef TRACE_COMPILE +#pragma message "got here 4" +#endif + +struct sk_buff; +#if defined(CONFIG_BLOG) +struct blog_t; +#endif +struct net_device; +typedef int (*HardStartXmitFuncP) (struct sk_buff *skb, + struct net_device *dev); + +struct fkbuff; +typedef struct fkbuff FkBuff_t; + +#define FKB_NULL ((FkBuff_t *)NULL) + +#include <linux/nbuff_types.h> + +/* + *------------------------------------------------------------------------------ + * + * Pointer conversion between pBuf and pNBuff encoded buffer pointers + * uint8_t * pBuf; + * pNBuff_t pNBuff; + * ... + * // overlays FKBUFF_PTR into pointer to build a virtual pNBuff_t + * pNBuff = PBUF_2_PNBUFF(pBuf,FKBUFF_PTR); + * ... + * // extracts a real uint8_t * from a virtual pNBuff_t + * pBuf = PNBUFF_2_PBUF(pNBuff); + * + *------------------------------------------------------------------------------ + */ +#define PBUF_2_PNBUFF(pBuf,realType) \ + ( (pNBuff_t) ((uintptr_t)(pBuf) | (uintptr_t)(realType)) ) +#define PNBUFF_2_PBUF(pNBuff) \ + ( (uint8_t*) ((uintptr_t)(pNBuff) & (uintptr_t)NBUFF_PTR_MASK) ) + +#if (MUST_BE_ZERO != 0) +#error "Design assumption SKBUFF_PTR == 0" +#endif +#define PNBUFF_2_SKBUFF(pNBuff) ((struct sk_buff *)(pNBuff)) + +#define SKBUFF_2_PNBUFF(skb) ((pNBuff_t)(skb)) /* see MUST_BE_ZERO */ +#define FKBUFF_2_PNBUFF(fkb) PBUF_2_PNBUFF(fkb,FKBUFF_PTR) + +/* + *------------------------------------------------------------------------------ + * + * Cast from/to virtual "pNBuff_t" to/from real typed pointers + * + * pNBuff_t pNBuff2Skb, pNBuff2Fkb; // "void *" with NBuffPtrType_t + * struct sk_buff * skb_p; + * struct fkbuff * fkb_p; + * ... + * pNBuff2Skb = CAST_REAL_TO_VIRT_PNBUFF(skb_p,SKBUFF_PTR); + * pNBuff2Fkb = CAST_REAL_TO_VIRT_PNBUFF(fkb_p,FKBUFF_PTR); + * ... 
+ * skb_p = CAST_VIRT_TO_REAL_PNBUFF(pNBuff2Skb, struct sk_buff *); + * fkb_p = CAST_VIRT_TO_REAL_PNBUFF(pNBuff2Fkb, struct fkbuff *); + * or, + * fkb_p = PNBUFF_2_FKBUFF(pNBuff2Fkb); + *------------------------------------------------------------------------------ + */ + +#define CAST_REAL_TO_VIRT_PNBUFF(pRealNBuff,realType) \ + ( (pNBuff_t) (PBUF_2_PNBUFF((pRealNBuff),(realType))) ) + +#define CAST_VIRT_TO_REAL_PNBUFF(pVirtNBuff,realType) \ + ( (realType) PNBUFF_2_PBUF(pVirtNBuff) ) + +#define PNBUFF_2_FKBUFF(pNBuff) CAST_VIRT_TO_REAL_PNBUFF((pNBuff), struct fkbuff*) + + + +/* + *------------------------------------------------------------------------------ + * FKB: Fast Kernel Buffers placed directly into Rx DMA Buffer + * May be used ONLY for common APIs such as those available in BSD-Style mbuf + *------------------------------------------------------------------------------ + */ + +struct fkbuff +{ + /* List pointer must be the first field */ + union { + void * word0; + FkBuff_t * list; /* SLL of free FKBs for cloning */ + FkBuff_t * master_p; /* Clone FKB to point to master FKB */ + atomic_long_t users; /* (private) # of references to FKB */ + }; + union { /* Use _is_kptr_ to determine if ptr */ + union { + void *ptr; + struct blog_t *blog_p; /* Pointer to a blog */ + uint8_t *dirty_p; /* Pointer to packet payload dirty incache*/ + uint32_t flags; /* Access all flags */ + }; + /* + * First nibble denotes a pointer or flag usage. + * Lowest two significant bits denote the type of pinter + * Remaining 22 bits may be used as flags + */ + struct { + uint32_t ptr_type : 8;/* Identifies whether pointer */ + uint32_t unused :21;/* Future use for flags */ + uint32_t in_skb : 1;/* flag: FKB passed inside a SKB */ + uint32_t other_ptr: 1;/* future use, to override another pointer*/ + uint32_t dptr_tag : 1;/* Pointer type is a dirty pointer */ + }; + }; + uint8_t * data; /* Pointer to packet data */ + + union { + /* here the bits 31-24 are valid only for native fkbs's + * these bits bits will be cleared when using fkbInSkb + * Note that it is critical to have the Little Endian/Big endian + * declaration since FKB will use length as bit field and SKB will use + * length as a word Need to maintain the same bit positions across MIPS + * and ARM. + */ + struct{ + BE_DECL( + uint32_t rx_csum_verified:1; + uint32_t spdtst:1; + uint32_t data_hw_recycle_capable:1; + uint32_t data_hw_recycle_done:1; + uint32_t reserved:4; + uint32_t len:24; /* Packet length */ + ) + LE_DECL( + uint32_t len:24; + uint32_t reserved:4; + uint32_t data_hw_recycle_done:1; + uint32_t data_hw_recycle_capable:1; + uint32_t spdtst:1; + uint32_t rx_csum_verified:1; + ) + }; + uint32_t len_word; + }; + + union { + /* only the lower 32 bit in mark is used in 64 bit system, + * but we delcare it as unsigned long for the ease for fcache + * to handle it in different architecture, since it is part + * of union with a dst_entry pointer */ + unsigned long mark; /* Custom arg1, e.g. 
tag or mark field */ + void *queue; /* Single link list queue of FKB | SKB */ + void *dst_entry; /* rtcache entry for locally termiated pkts */ + uint32_t fc_ctxt; /* hybrid flow cache context */ + }; + union { + uint32_t priority; /* Custom arg2, packet priority, tx info */ + /*TODO define wl as just uint32_t */ + wlFlowInf_t wl; /* WLAN Flow Info */ + uint32_t flowid; /* used for locally terminated pkts */ + }; + + RecycleFuncP recycle_hook; /* Nbuff recycle handler */ + union { + /* recycle hook for Clone FKB is used in DHD pointing to extra info + * BE CAREFULL when using this recylce_context for free etc.... + */ + void *dhd_pkttag_info_p; + unsigned long recycle_context; /* Rx network device/channel or pool */ + uint32_t fpm_num; + }; + +} ____cacheline_aligned; /* 2 cache lines wide */ + +#define FKB_CLEAR_LEN_WORD_FLAGS(len_word) (len_word &= 0x00FFFFFF) + + +/* + *------------------------------------------------------------------------------ + * An fkbuff may be referred to as a: + * master - a pre-allocated rxBuffer, inplaced ahead of the headroom. + * cloned - allocated from a free pool of fkbuff and points to a master. + * + * in_skb - when a FKB is passed as a member of a SKB structure. + *------------------------------------------------------------------------------ + */ +#define FKB_IN_SKB (1 << 2) /* Bit#2 is in_skb */ + +/* Return flags with the in_skb tag set */ +static inline uint32_t _set_in_skb_tag_(uint32_t flags) +{ + return (flags | FKB_IN_SKB); +} + +/* Fetch the in_skb tag in flags */ +static inline uint32_t _get_in_skb_tag_(void *ptr, uint32_t flags) +{ + if (_is_kptr_(ptr)) + return 0; + return (flags & FKB_IN_SKB); +} + +/* Determine whether the in_skb tag is set in flags */ +static inline uint32_t _is_in_skb_tag_(void *ptr, uint32_t flags) +{ + return ( _get_in_skb_tag_(ptr, flags) ? 1 : 0 ); +} + +#define CHK_IQ_PRIO (1 << 3) /* Bit#3 is check IQ Prio */ + +/* Return flags with the in_skb_tag and chk_iq_prio set */ +static inline uint32_t _set_in_skb_n_chk_iq_prio_tag_(uint32_t flags) +{ + return (flags | FKB_IN_SKB | CHK_IQ_PRIO); +} + +/* Return flags with the chk_iq_prio set */ +static inline uint32_t _set_chk_iq_prio_tag_(uint32_t flags) +{ + return (flags | CHK_IQ_PRIO); +} + +/* Fetch the chk_iq_prio tag in flags */ +static inline uint32_t _get_chk_iq_prio_tag_(uint32_t flags) +{ + return (flags & CHK_IQ_PRIO); +} + +/* Determine whether the chk_iq_prio tag is set in flags */ +static inline uint32_t _is_chk_iq_prio_tag_(uint32_t flags) +{ + return ( _get_chk_iq_prio_tag_(flags) ? 1 : 0 ); +} + + +/* + *------------------------------------------------------------------------------ + * APIs to convert between a real kernel pointer and a dirty pointer. + *------------------------------------------------------------------------------ + */ + +#define FKB_DPTR_TAG (1 << 0) /* Bit#0 is dptr_tag */ + +/* Test whether a pointer is a dirty pointer type */ +static inline uint32_t is_dptr_tag_(uint8_t * ptr) +{ + return ( ( (uint32_t) ((uintptr_t)ptr & FKB_DPTR_TAG) ) ? 
1 : 0); +} + +/* Encode a real kernel pointer to a dirty pointer type */ +static inline uint8_t * _to_dptr_from_kptr_(uint8_t * kernel_ptr) +{ + if((uintptr_t)(kernel_ptr) & FKB_DPTR_TAG) + kernel_ptr++; + /* Tag a kernel pointer's dirty_ptr bit, to denote a FKB dirty pointer */ + return ( (uint8_t*) ((uintptr_t)(kernel_ptr) | FKB_DPTR_TAG) ); +} + +/* Decode a dirty pointer type into a real kernel pointer */ +static inline uint8_t * _to_kptr_from_dptr_(uint8_t * dirty_ptr) +{ + FKB_AUDIT( + if ( dirty_ptr && !is_dptr_tag_(dirty_ptr) ) + printk("FKB ASSERT %s !is_dptr_tag_(%lu)\n", + __FUNCTION__, (uintptr_t)dirty_ptr); ); + + /* Fetch kernel pointer from encoded FKB dirty_ptr, + by clearing dirty_ptr bit */ + return ( (uint8_t*) ((uintptr_t)(dirty_ptr) & (~FKB_DPTR_TAG)) ); +} + +#define FKB_OPTR_TAG (1<<1) /* Bit#1 other_ptr tag */ + +#define FKB_BLOG_TAG_MASK (FKB_DPTR_TAG | FKB_OPTR_TAG) + +/* Verify whether a FKB pointer is pointing to a Blog */ +#define _IS_BPTR_(fkb_ptr) \ + ( _is_kptr_(fkb_ptr) && ! ((uintptr_t)(fkb_ptr) & FKB_BLOG_TAG_MASK) ) + + +/* + *------------------------------------------------------------------------------ + * + * Types of FKB objects + * + * - A Master FKB object contains memory for the rx buffer, with a FkBuff_t + * placed at the head of the buffer. A Master FKB object may serve to + * replenish a network devices receive ring, when packet buffers are not + * promptly recycled. A Master FKB may also be used for packet replication + * where in one of the transmitted packet replicas may need a unique + * modification distinct from other replicas. In such a case, the FKB must + * be first "unshared" by a deep packet buffer copy into a Master Fkb. + * If CONFIG_BCM_NBUFF_FKB_POOL is enabled then a Free Pool of Master + * FKB objects is maintained. Master FKB may be alocated and recycled from + * this Master FKB Pool.The Master FKB Pool may also be used for + * replinishing a network device driver's rx buffer ring. + * + * - A Cloned FKB object does not contain memory for the rx buffer. + * Used by fkb_clone, to create multiple references to a packet buffer. + * Multiple references to a packet buffer may be used for packet replication. + * A FKB allocated from the FKB Cloned Pool will have master_p pointing to + * a Master FKB and the recycle_hook member set to NULL. + * + *------------------------------------------------------------------------------ + */ +typedef enum { + FkbMaster_e = 0, + FkbCloned_e = 1, + FkbMaxType_e +} FkbObject_t; + +/* + * Function : _get_master_users_ + * Description: Given a pointer to a Master FKB, fetch the users count + * Caution : Does not check whether the FKB is a Master or not! + */ +static inline uint32_t _get_master_users_(FkBuff_t * fkbM_p) +{ + uint32_t users; + users = atomic_read(&fkbM_p->users); + + FKB_AUDIT( + if ( users == 0 ) + printk("FKB ASSERT cpu<%u> %s(%p) users == 0, recycle<%pS>\n", + smp_processor_id(), __FUNCTION__, + fkbM_p, fkbM_p->recycle_hook); ); + return users; +} + +/* + * Function : _is_fkb_cloned_pool_ + * Description: Test whether an "allocated" FKB is from the FKB Cloned Pool. 
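+ * A cloned FKB is identified by master_p holding a kernel pointer while
+ * recycle_hook is NULL, which is exactly how a clone allocated from the
+ * FKB Cloned Pool is set up.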
+ */ +static inline uint32_t _is_fkb_cloned_pool_(FkBuff_t * fkb_p) +{ + if ( _is_kptr_(fkb_p->master_p) + && (fkb_p->recycle_hook == (RecycleFuncP)NULL) ) + { + FKB_AUDIT( + /* ASSERT if the FKB is actually linked in a FKB pool */ + if ( _is_kptr_(fkb_p->master_p->list) ) + { + printk("FKB ASSERT cpu<%u> %s :" + " _is_kptr_((%p)->%p->%p)" + " master<%p>.recycle<%pS>\n", + smp_processor_id(), __FUNCTION__, fkb_p, + fkb_p->master_p, fkb_p->master_p->list, + fkb_p->master_p, + fkb_p->master_p->recycle_hook); + } + /* ASSERT that Master FKB users count is greater than 0 */ + if ( _get_master_users_(fkb_p->master_p) == 0 ) + { + printk("FKB ASSERT cpu<%u> %s :" + " _get_master_users_(%p->%p) == 0\n", + smp_processor_id(), __FUNCTION__, + fkb_p, fkb_p->master_p); + return 0; + } ); + + return 1; /* Allocated FKB is from the FKB Cloned Pool */ + } + else + return 0; +} + +/* + * Function : _get_fkb_users_ + * Description: Given a pointer to a FKB (Master or Cloned), fetch users count + */ +static inline uint32_t _get_fkb_users_(FkBuff_t * fkb_p) +{ + if ( _is_kptr_(fkb_p->master_p) ) /* Cloned FKB */ + { + FKB_AUDIT( + if ( !_is_fkb_cloned_pool_(fkb_p) ) /* double check Cloned FKB */ + { + printk("FKB ASSERT cpu<%u> %s :" + " !_is_fkb_cloned_pool_(%p)" + " master<%p>.recycle<%pS>\n", + smp_processor_id(), __FUNCTION__, + fkb_p, fkb_p->master_p, + fkb_p->master_p->recycle_hook); + return 0; + } ); + + return _get_master_users_(fkb_p->master_p); + } + else /* Master FKB */ + return _get_master_users_(fkb_p); +} + +/* + * Function : _get_fkb_master_ptr_ + * Description: Fetch the pointer to the Master FKB. + */ +static inline FkBuff_t * _get_fkb_master_ptr_(FkBuff_t * fkb_p) +{ + if ( _is_kptr_(fkb_p->master_p) ) /* Cloned FKB */ + { + FKB_AUDIT( + if ( !_is_fkb_cloned_pool_(fkb_p) ) /* double check Cloned FKB */ + { + printk("FKB ASSERT cpu<%u> %s " + " !_is_fkb_cloned_pool_(%p)" + " master<%p>.recycle<%pS>\n", + smp_processor_id(), __FUNCTION__, + fkb_p, fkb_p->master_p, + fkb_p->master_p->recycle_hook); + return FKB_NULL; + } ); + + return fkb_p->master_p; + } + else /* Master FKB */ + { + FKB_AUDIT( + if ( _get_master_users_(fkb_p) == 0 ) /* assert Master FKB users */ + { + printk("FKB ASSERT cpu<%u> %s " + " _get_master_users_(%p) == 0\n", + smp_processor_id(), __FUNCTION__, fkb_p); + return FKB_NULL; + } ); + + return fkb_p; + } +} + +/* + *------------------------------------------------------------------------------ + * Placement of a FKB object in the Rx DMA buffer: + * + * RX DMA Buffer: |----- FKB ----|--- reserve headroom ---|---...... 
+ * ^ ^ ^ + * pFkb pHead pData + * pBuf + *------------------------------------------------------------------------------ + */ +#define PFKBUFF_PHEAD_OFFSET sizeof(FkBuff_t) +#define PFKBUFF_TO_PHEAD(pFkb) ((uint8_t*)((FkBuff_t*)(pFkb) + 1)) +#define PHEAD_TO_PFKBUFF(pHead) \ + (FkBuff_t *)((uint8_t*)(pHead)-PFKBUFF_PHEAD_OFFSET) + +#define PDATA_TO_PFKBUFF(pData,headroom) \ + (FkBuff_t *)((uint8_t*)(pData)-(headroom)-PFKBUFF_PHEAD_OFFSET) +#define PFKBUFF_TO_PDATA(pFkb,headroom) \ + (uint8_t*)((uint8_t*)(pFkb) + PFKBUFF_PHEAD_OFFSET + (headroom)) + +#define NBUFF_ALIGN_MASK_8 0x07 +pNBuff_t nbuff_align_data(pNBuff_t pNBuff, uint8_t **data_pp, + uint32_t len, unsigned long alignMask); + +/* + *------------------------------------------------------------------------------ + * FKB Functional Interfaces + *------------------------------------------------------------------------------ + */ + +/* + * Function : fkb_in_skb_test + * Description: Verifies that the layout of SKB member fields corresponding to + * a FKB have the same layout. This allows a FKB to be passed via + * a SKB. + */ + +extern int fkb_in_skb_test( int fkb_in_skb_offset, + int list_offset, int blog_p_offset, + int data_offset, int len_offset, int mark_offset, + int priority_offset, int recycle_hook_offset, + int recycle_context_offset ); + +/* + * Global FKB Subsystem Constructor + * fkb_construct() validates that the layout of fkbuff members in sk_buff + * is the same. An sk_buff contains an fkbuff and permits a quick translation + * to and from a fkbuff. It also preallocates the pools of FKBs. + */ +extern int fkb_construct(int fkb_in_skb_offset); + +/* + * Function : fkb_stats + * Description: Report FKB Pool statistics, see CC_CONFIG_FKB_STATS + */ +extern void fkb_stats(void); + +/* + * Function : fkb_alloc + * Description: Allocate a Cloned/Master FKB object from preallocated pool + */ +extern FkBuff_t * fkb_alloc( FkbObject_t object ); + +/* + * Function : fkb_free + * Description: Free a FKB object to its respective preallocated pool. + */ +extern void fkb_free(FkBuff_t * fkb_p); + +/* + * Function : fkb_unshare + * Description: If a FKB is pointing to a buffer with multiple references + * to this buffer, then create a copy of the buffer and return a FKB with a + * single reference to this buffer. + */ +extern FkBuff_t * fkb_unshare(FkBuff_t * fkb_p); + +/* + * Function : fkbM_borrow + * Description: Allocate a Master FKB object from the pre-allocated pool. + */ +extern FkBuff_t * fkbM_borrow(void); + +/* + * Function : fkb_set_ref + * Description: Set reference count to an FKB. + */ +static inline void _fkb_set_ref(FkBuff_t * fkb_p, const int count) +{ + atomic_long_set(&fkb_p->users, count); +} +FKB_FN( fkb_set_ref, + void fkb_set_ref(FkBuff_t * fkb_p, const int count), + _fkb_set_ref(fkb_p, count) ) + +/* + * Function : fkb_inc_ref + * Description: Increment reference count to an FKB. + */ +static inline void _fkb_inc_ref(FkBuff_t * fkb_p) +{ + atomic_long_inc(&fkb_p->users); +} +FKB_FN( fkb_inc_ref, + void fkb_inc_ref(FkBuff_t * fkb_p), + _fkb_inc_ref(fkb_p) ) + +/* + * Function : fkb_dec_ref + * Description: Decrement reference count to an FKB. 
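+ *
+ * Illustrative use of the reference counting helpers (sketch only; buffer
+ * recycling and error handling are omitted):
+ *
+ *   fkb_set_ref(fkbM_p, 1);   // master FKB starts with a single user
+ *   fkb_inc_ref(fkbM_p);      // a second replica now references the data
+ *   ...
+ *   fkb_dec_ref(fkbM_p);      // one replica has been transmitted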
+ */ +static inline void _fkb_dec_ref(FkBuff_t * fkb_p) +{ + atomic_long_dec(&fkb_p->users); + /* For debug, may want to assert that users does not become negative */ +} +FKB_FN( fkb_dec_ref, + void fkb_dec_ref(FkBuff_t * fkb_p), + _fkb_dec_ref(fkb_p) ) + + +/* + * Function : fkb_preinit + * Description: A network device driver may use this function to place a + * FKB object into rx buffers, when they are created. FKB objects preceeds + * the reserved headroom. + */ +extern void fkb_preinit(uint8_t * pBuf, RecycleFuncP recycle_hook, + unsigned long recycle_context); + +/* + * Function : fkb_init + * Description: Initialize the FKB context for a received packet. Invoked by a + * network device on extract the packet from a buffer descriptor and associating + * a FKB context to the received packet. + */ +extern FkBuff_t *_fkb_init(uint8_t * pBuf, uint32_t headroom, + uint8_t * pData, uint32_t len); + +FKB_FN( fkb_init, + FkBuff_t * fkb_init(uint8_t * pBuf, uint32_t headroom, + uint8_t * pData, uint32_t len), + return _fkb_init(pBuf, headroom, pData, len) ) + +/* + * Function : fkb_qinit + * Description: Same as fkb_init, with the exception that a recycle queue + * context is associated with the FKB, each time the packet is receieved. + */ +static inline FkBuff_t * _fkb_qinit(uint8_t * pBuf, uint32_t headroom, + uint8_t * pData, uint32_t len, unsigned long qcontext) +{ + FkBuff_t * fkb_p = PDATA_TO_PFKBUFF(pBuf, headroom); + fkb_dbg(1, "fkb_p<%p> qcontext<%lx>", fkb_p, qcontext ); + fkb_p->recycle_context = qcontext; + + return _fkb_init(pBuf, headroom, pData, len); +} +FKB_FN( fkb_qinit, + FkBuff_t * fkb_qinit(uint8_t * pBuf, uint32_t headroom, + uint8_t * pData, uint32_t len, unsigned long qcontext), + return _fkb_qinit(pBuf, headroom, pData, len, qcontext) ) + +/* + * Function : fkb_release + * Description: Release any associated blog and set ref count to 0. A fkb + * may be released multiple times (not decrement reference count). + */ +static inline void _fkb_release(FkBuff_t * fkb_p) +{ + fkb_dbg(1, "fkb_p<%p> fkb_p->blog_p<%px>", fkb_p, fkb_p->blog_p ); +#if defined(CONFIG_BLOG) + if ( _IS_BPTR_( fkb_p->blog_p ) ) + blog_put(fkb_p->blog_p); +#endif + fkb_p->ptr = (void*)NULL; /* reset dirty_p, blog_p */ + + fkb_set_ref( fkb_p, 0 ); /* fkb_release may be invoked multiple times */ +} +FKB_FN( fkb_release, + void fkb_release(FkBuff_t * fkb_p), + _fkb_release(fkb_p) ) + +/* + * Function : fkb_headroom + * Description: Determine available headroom for the packet in the buffer. + */ +static inline int _fkb_headroom(const FkBuff_t *fkb_p) +{ + return (int)( (uintptr_t)(fkb_p->data) - (uintptr_t)(fkb_p+1) ); +} +FKB_FN( fkb_headroom, + int fkb_headroom(const FkBuff_t *fkb_p), + return _fkb_headroom(fkb_p) ) + +/* + * Function : fkb_init_headroom + * Description: The available headroom the packet in the buffer at fkb_init time. + */ +static inline int _fkb_init_headroom(void) +{ + return BCM_PKT_HEADROOM; +} +FKB_FN( fkb_init_headroom, + int fkb_init_headroom(void), + return _fkb_init_headroom() ) + + +/* + * Function : fkb_push + * Description: Prepare space for data at head of the packet buffer. + */ +static inline uint8_t * _fkb_push(FkBuff_t * fkb_p, uint32_t len) +{ + fkb_p->len += len; + fkb_p->data -= len; + return fkb_p->data; +} +FKB_FN( fkb_push, + uint8_t * fkb_push(FkBuff_t * fkb_p, uint32_t len), + return _fkb_push(fkb_p, len) ) + +/* + * Function : fkb_pull + * Description: Delete data from the head of packet buffer. 
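+ *
+ * Illustrative sketch (the helper performs no bounds checking, so the
+ * caller must know the 4-byte tag below is really present):
+ *
+ *   uint8_t *payload = fkb_pull(fkb_p, 4);  // strip a 4-byte header
+ *   ...
+ *   fkb_push(fkb_p, 4);                     // restore it before forwarding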
+ */ +static inline uint8_t * _fkb_pull(FkBuff_t * fkb_p, uint32_t len) +{ + fkb_p->len -= len; + fkb_p->data += len; + return fkb_p->data; +} +FKB_FN( fkb_pull, + uint8_t * fkb_pull(FkBuff_t * fkb_p, uint32_t len), + return _fkb_pull(fkb_p, len) ) + +/* + * Function : fkb_put + * Description: Prepare space for data at tail of the packet buffer. + */ +static inline uint8_t * _fkb_put(FkBuff_t * fkb_p, uint32_t len) +{ + uint8_t * tail_p = fkb_p->data + fkb_p->len; + fkb_p->len += len; + return tail_p; +} +FKB_FN( fkb_put, + uint8_t * fkb_put(FkBuff_t * fkb_p, uint32_t len), + return _fkb_put(fkb_p, len) ) + +/* + * Function : fkb_pad + * Description: Pad the packet by requested number of bytes. + */ +static inline uint32_t _fkb_pad(FkBuff_t * fkb_p, uint32_t padding) +{ + memset((uint8_t *)(fkb_p->data + fkb_p->len), 0, padding); + fkb_p->len += padding; + return fkb_p->len; +} +FKB_FN( fkb_pad, + uint32_t fkb_pad(FkBuff_t * fkb_p, uint32_t padding), + return _fkb_pad(fkb_p, padding) ) + +/* + * Function : fkb_len + * Description: Determine the length of the packet. + */ +static inline uint32_t _fkb_len(FkBuff_t * fkb_p) +{ + return fkb_p->len; +} +FKB_FN( fkb_len, + uint32_t fkb_len(FkBuff_t * fkb_p), + return _fkb_len(fkb_p) ) + +/* + * Function : fkb_data + * Description: Fetch the start of the packet. + */ +static inline uint8_t * _fkb_data(FkBuff_t * fkb_p) +{ + return fkb_p->data; +} +FKB_FN( fkb_data, + uint8_t * fkb_data(FkBuff_t * fkb_p), + return _fkb_data(fkb_p) ) + +#if defined(CONFIG_BLOG) +/* + * Function : fkb_blog + * Description: Fetch the associated blog. + */ +static inline struct blog_t * _fkb_blog(FkBuff_t * fkb_p) +{ + return fkb_p->blog_p; +} +FKB_FN( fkb_blog, + struct blog_t * fkb_blog(FkBuff_t * fkb_p), + return _fkb_blog(fkb_p) ) +#endif + +/* + * Function : fkb_clone + * Description: Allocate a FKB from the Cloned Pool and make it reference the + * same packet. fkbInSkb case allocates a new master FKB and copy the data + * (basically a FKB copy). 
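+ * The clone references the master's packet data rather than copying it, so
+ * call fkb_unshare() first if the payload of a shared buffer must be
+ * modified.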
+ * + */ +extern FkBuff_t * _fkb_clone(FkBuff_t *fkb_p); + +FKB_FN( fkb_clone, + FkBuff_t * fkb_clone(FkBuff_t * fkbM_p), + return _fkb_clone(fkbM_p) ) + +extern void fkb_flush(FkBuff_t * fkb_p, uint8_t * data_p, int len, int cache_op); + +/* + *------------------------------------------------------------------------------ + * Virtual accessors to common members of network kernel buffer + *------------------------------------------------------------------------------ + */ + +/* __BUILD_NBUFF_SET_ACCESSOR: generates function nbuff_set_MEMBER() */ +#define __BUILD_NBUFF_SET_ACCESSOR( TYPE, MEMBER ) \ +static inline void nbuff_set_##MEMBER(pNBuff_t pNBuff, TYPE MEMBER) \ +{ \ + void * pBuf = PNBUFF_2_PBUF(pNBuff); \ + if ( IS_SKBUFF_PTR(pNBuff) ) \ + ((struct sk_buff *)pBuf)->MEMBER = MEMBER; \ + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ \ + else \ + ((FkBuff_t *)pBuf)->MEMBER = MEMBER; \ +} + +/* __BUILD_NBUFF_GET_ACCESSOR: generates function nbuff_get_MEMBER() */ +#define __BUILD_NBUFF_GET_ACCESSOR( TYPE, MEMBER ) \ +static inline TYPE nbuff_get_##MEMBER(pNBuff_t pNBuff) \ +{ \ + void * pBuf = PNBUFF_2_PBUF(pNBuff); \ + if ( IS_SKBUFF_PTR(pNBuff) ) \ + return (TYPE)(((struct sk_buff *)pBuf)->MEMBER); \ + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ \ + else \ + return (TYPE)(((FkBuff_t *)pBuf)->MEMBER); \ +} + +/* + * Common set/get accessor of base network buffer fields: + * nbuff_set_data(), nbuff_set_len(), nbuff_set_mark(), nbuff_set_priority() + * nbuff_get_data(), nbuff_get_len(), nbuff_get_mark(), nbuff_get_priority() + */ +__BUILD_NBUFF_SET_ACCESSOR(uint8_t *, data) +__BUILD_NBUFF_SET_ACCESSOR(uint32_t, len) +__BUILD_NBUFF_SET_ACCESSOR(uint32_t, mark) /* Custom network buffer arg1 */ +__BUILD_NBUFF_SET_ACCESSOR(void *, queue) /* Custom network buffer arg1 */ +__BUILD_NBUFF_SET_ACCESSOR(uint32_t, priority) /* Custom network buffer arg2 */ + +__BUILD_NBUFF_GET_ACCESSOR(uint8_t *, data) +__BUILD_NBUFF_GET_ACCESSOR(uint32_t, len) +__BUILD_NBUFF_GET_ACCESSOR(uint32_t, mark) /* Custom network buffer arg1 */ +__BUILD_NBUFF_GET_ACCESSOR(void *, queue) /* Custom network buffer arg1 */ +__BUILD_NBUFF_GET_ACCESSOR(uint32_t, priority) /* Custom network buffer arg2 */ + +/* + * Function : nbuff_get_context + * Description: Extracts the data and len fields from a pNBuff_t. + */ +static inline void * nbuff_get_context(pNBuff_t pNBuff, + uint8_t ** data_p, uint32_t *len_p) +{ + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( pBuf == (void*) NULL ) + return pBuf; + if ( IS_SKBUFF_PTR(pNBuff) ) + { + *data_p = ((struct sk_buff *)pBuf)->data; + *len_p = ((struct sk_buff *)pBuf)->len; + } + else + { + *data_p = ((FkBuff_t *)pBuf)->data; + *len_p = ((FkBuff_t *)pBuf)->len; + } + fkb_dbg(1, "pNBuff<%p> pBuf<%p> data_p<%p>", + pNBuff, pBuf, *data_p ); + return pBuf; +} + +/* + * Function : nbuff_get_params + * Description: Extracts the data, len, mark and priority field from a network + * buffer. 
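+ *
+ * Illustrative call site (sketch only; the surrounding transmit handler is
+ * hypothetical and not part of this header):
+ *
+ *   uint8_t *data; uint32_t len, mark, prio;
+ *   if (nbuff_get_params(pNBuff, &data, &len, &mark, &prio) == NULL)
+ *       return;   // NULL network buffer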
+ */ +static inline void * nbuff_get_params(pNBuff_t pNBuff, + uint8_t ** data_p, uint32_t *len_p, + uint32_t * mark_p, uint32_t *priority_p) +{ + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( pBuf == (void*) NULL ) + return pBuf; + if ( IS_SKBUFF_PTR(pNBuff) ) + { + *data_p = ((struct sk_buff *)pBuf)->data; + *len_p = ((struct sk_buff *)pBuf)->len; + *mark_p = ((struct sk_buff *)pBuf)->mark; + *priority_p = ((struct sk_buff *)pBuf)->priority; + } + else + { + *data_p = ((FkBuff_t *)pBuf)->data; + *len_p = ((FkBuff_t *)pBuf)->len; + *mark_p = ((FkBuff_t *)pBuf)->mark; + *priority_p = ((FkBuff_t *)pBuf)->priority; + } + fkb_dbg(1, "pNBuff<%px> pBuf<%px> data_p<%px>", + pNBuff, pBuf, *data_p ); + return pBuf; +} + +/* adds recycle flags/context to nbuff_get_params used in impl4 enet */ +/* + * Function : nbuff_get_params_ext + * Description: Extracts the data, len, mark, priority and + * recycle flags/context field from a network buffer. + */ +static inline void * nbuff_get_params_ext(pNBuff_t pNBuff, uint8_t **data_p, + uint32_t *len_p, uint32_t *mark_p, + uint32_t *priority_p, + uint32_t *rflags_p) +{ + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( pBuf == (void*) NULL ) + return pBuf; + if ( IS_SKBUFF_PTR(pNBuff) ) + { + *data_p = ((struct sk_buff *)pBuf)->data; + *len_p = ((struct sk_buff *)pBuf)->len; + *mark_p = ((struct sk_buff *)pBuf)->mark; + *priority_p = ((struct sk_buff *)pBuf)->priority; +#if defined(CONFIG_BLOG) + *rflags_p = ((struct sk_buff *)pBuf)->recycle_flags; +#endif + } + else + { + *data_p = ((FkBuff_t *)pBuf)->data; + *len_p = ((FkBuff_t *)pBuf)->len; + *mark_p = ((FkBuff_t *)pBuf)->mark; + *priority_p = ((FkBuff_t *)pBuf)->priority; +#if defined(CONFIG_BLOG) + *rflags_p = ((FkBuff_t *)pBuf)->recycle_context; +#endif + } + fkb_dbg(1, "pNBuff<%px> pBuf<%px> data_p<%px>", + pNBuff, pBuf, *data_p ); + return pBuf; +} + +/* + *------------------------------------------------------------------------------ + * Virtual common functional apis of a network kernel buffer + *------------------------------------------------------------------------------ + */ + +/* + * Function : nbuff_push + * Description: Make space at the start of a network buffer. + * CAUTION : In the case of a FKB, no check for headroom is done. + */ +static inline uint8_t * nbuff_push(pNBuff_t pNBuff, uint32_t len) +{ + uint8_t * data; + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( IS_SKBUFF_PTR(pNBuff) ) + data = skb_push(((struct sk_buff *)pBuf), len); + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ + else + data = fkb_push((FkBuff_t*)pBuf, len); + fkb_dbg(1, "pNBuff<%px> pBuf<%px> data<%px> len<%u>", + pNBuff, pBuf, data, len ); + return data; +} + +/* + * Function : nbuff_pull + * Description: Delete data from start of a network buffer. + */ +static inline uint8_t * nbuff_pull(pNBuff_t pNBuff, uint32_t len) +{ + uint8_t * data; + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( IS_SKBUFF_PTR(pNBuff) ) + data = skb_pull(((struct sk_buff *)pBuf), len); + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ + else + data = fkb_pull((FkBuff_t *)pBuf, len); + fkb_dbg(1, "pNBuff<%px> pBuf<%px> data<%px> len<%u>", + pNBuff, pBuf, data, len ); + return data; +} + +/* + * Function : nbuff_put + * Description: Make space at the tail of a network buffer. + * CAUTION: In the case of a FKB, no check for tailroom is done. 
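+ * For an SKB the call is delegated to skb_put(), which does validate the
+ * available tail room; the FKB path simply advances len.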
+ */ +static inline uint8_t * nbuff_put(pNBuff_t pNBuff, uint32_t len) +{ + uint8_t * tail; + void * pBuf = PNBUFF_2_PBUF(pNBuff); + if ( IS_SKBUFF_PTR(pNBuff) ) + tail = skb_put(((struct sk_buff *)pBuf), len); + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ + else + tail = fkb_put((FkBuff_t *)pBuf, len); + fkb_dbg(1, "pNBuff<%px> pBuf<%px> tail<%px> len<%u>", + pNBuff, pBuf, tail, len ); + return tail; +} + +extern void nbuff_free_ex(pNBuff_t pNBuff, int in_thread); +extern void nbuff_free(pNBuff_t pNBuff); + +/* + * Function : nbuff_unshare + * Description: If there are more than one references to the data buffer + * associated with the network buffer, create a deep copy of the data buffer + * and return a network buffer context to it. The returned network buffer + * may be then used to modify the data packet without impacting the original + * network buffer and its data buffer. + * + * If the data packet had a single network buffer referencing it, then the + * original network buffer is returned. + */ +static inline pNBuff_t nbuff_unshare(pNBuff_t pNBuff) +{ + void * pBuf = PNBUFF_2_PBUF(pNBuff); + fkb_dbg(1, "pNBuff<%px> pBuf<%px>", pNBuff, pBuf); + if ( IS_SKBUFF_PTR(pNBuff) ) + { + struct sk_buff *skb_p; + skb_p = skb_unshare( (struct sk_buff *)pBuf, GFP_ATOMIC); + pNBuff = SKBUFF_2_PNBUFF(skb_p); + } + else + { + FkBuff_t * fkb_p; + fkb_p = fkb_unshare( (FkBuff_t *)pBuf ); + pNBuff = FKBUFF_2_PNBUFF(fkb_p); + } + + fkb_dbg(2, "<<"); + return pNBuff; +} + + /* Function : nbuff_is_shared + * Description: check if there are more than one references to the data buffer + * associated with the network buffer + */ +static inline bool nbuff_is_shared(pNBuff_t pNBuff) +{ + if ( IS_SKBUFF_PTR(pNBuff) ) + { + struct sk_buff *skb_p = PNBUFF_2_SKBUFF(pNBuff); + return (skb_shared(skb_p) || skb_cloned(skb_p)); + } + else + { + FkBuff_t *fkb_p = PNBUFF_2_FKBUFF(pNBuff); + if ( unlikely(_is_fkb_cloned_pool_(fkb_p)) ) /* Cloned FKB */ + return atomic_read(&(fkb_p->master_p->users)) > 1; + else + return atomic_read(&fkb_p->users) > 1; + } +} + + /* Function : nbuff_is_hw_recycle_capable + * Description: check if the data buffer associated with the network buffer + * can be recycled by HW/Accelerator + */ +static inline bool nbuff_is_hw_recycle_capable(pNBuff_t pNBuff) +{ + if ( IS_SKBUFF_PTR(pNBuff) ) + { + struct sk_buff *skb_p = PNBUFF_2_SKBUFF(pNBuff); + + if(skb_p->recycle_flags & SKB_DATA_HW_RECYCLE_CAPABLE) + return true; + } + else + { + FkBuff_t *fkb_p = PNBUFF_2_FKBUFF(pNBuff); + if(fkb_p->data_hw_recycle_capable) + return true; + } + return false; +} + +/* Function : nbuff_is_mark_hw_recycle_done + * Description: Mark the network buffer to indicate the associated data buffer + * is recyled by HW/Accelerator, i.e after this data buffer should not be + * accessed any more + */ +static inline void nbuff_mark_hw_recycle_done(pNBuff_t pNBuff) +{ + if ( IS_SKBUFF_PTR(pNBuff) ) + { + struct sk_buff *skb_p = PNBUFF_2_SKBUFF(pNBuff); + skb_p->recycle_flags |= SKB_DATA_HW_RECYCLE_DONE; + } + else + { + FkBuff_t *fkb_p = PNBUFF_2_FKBUFF(pNBuff); + fkb_p->data_hw_recycle_done=1; + } +} + +/* Function : nbuff_data_hw_recycle_prep + * Description: If data buffer associated with the netwrok buffer is capable + * HW recyle then free th network buffer now and the data buffer will be + * recycled by HW/Accelerator later + */ +static inline bool nbuff_data_hw_recycle_prep(pNBuff_t pNBuff) +{ + if(nbuff_is_hw_recycle_capable(pNBuff) && !nbuff_is_shared(pNBuff)) + { + nbuff_mark_hw_recycle_done(pNBuff); + 
/*this will free only skb or fkb structure */ + nbuff_free(pNBuff); + return true; + } + return false; + + /*TODO do we need an optimized version of this i.e reduce number of skbuff/fkbuff checks + * or the compiler otimization is good enough */ +} + +/* + * Function : nbuff_invalidate_headroom + * Description: invalidate datacache lines of memory prefixing "data" pointer. + * Invalidation does not include the dcache line "data" is in. This dcache line + * must be flushed, not invalidated. + */ +static inline void nbuff_invalidate_headroom(pNBuff_t pNBuff, uint8_t * data) +{ + + /* Invalidate functions used here will round up end pointer to cache line + * boundry. That's the reason for L1_CACHE_BYTES substruction. + */ + int32_t inv_len = 0; + fkb_dbg(1, "pNBuff<%p> data<%p>", pNBuff, data); + + if ( IS_SKBUFF_PTR(pNBuff) ) + { + inv_len = skb_avail_headroom( PNBUFF_2_SKBUFF(pNBuff) ) - L1_CACHE_BYTES; + cache_invalidate_region(PNBUFF_2_SKBUFF(pNBuff)->head, data - L1_CACHE_BYTES); + } + else + { + FkBuff_t * fkb_p = (FkBuff_t *)PNBUFF_2_PBUF(pNBuff); + + if ( _is_fkb_cloned_pool_(fkb_p) ) + fkb_p = fkb_p->master_p; + + inv_len = data - PFKBUFF_TO_PHEAD(fkb_p) - L1_CACHE_BYTES; + fkb_flush(fkb_p, PFKBUFF_TO_PHEAD(fkb_p), inv_len, FKB_CACHE_INV); + } + fkb_dbg(1, " len<%d>", inv_len); + fkb_dbg(2, "<<"); +} + +extern void nbuff_flush(pNBuff_t pNBuff, uint8_t * data, int len); +extern void nbuff_flushfree(pNBuff_t pNBuff); + +/* + * Function : nbuff_xlate + * Description: Convert a FKB to a SKB. The SKB is data filled with the + * data, len, mark, priority, and recycle hook and context. + * + * Other SKB fields for SKB API manipulation are also initialized. + * SKB fields for network stack manipulation are NOT initialized. + * + * This function is typically used only in a network device drivers' hard + * start xmit function handler. A hard start xmit function handler may receive + * a network buffer of a FKB type and may not wish to rework the implementation + * to use nbuff APIs. In such an event, a nbuff may be translated to a skbuff. + */ +struct sk_buff * fkb_xlate(FkBuff_t * fkb_p); +static inline struct sk_buff * nbuff_xlate( pNBuff_t pNBuff ) +{ + void * pBuf = PNBUFF_2_PBUF(pNBuff); + fkb_dbg(1, "pNBuff<%px> pBuf<%px>", pNBuff, pBuf); + + if ( IS_SKBUFF_PTR(pNBuff) ) + return (struct sk_buff *)pBuf; + /* else if IS_FPBUFF_PTR, else if IS_TGBUFF_PTR */ + else + return fkb_xlate( (FkBuff_t *)pBuf ); +} + + +/* Miscellaneous helper routines */ +static inline void u16cpy( void * dst_p, const void * src_p, uint32_t bytes ) +{ + uint16_t * dst16_p = (uint16_t*)dst_p; + uint16_t * src16_p = (uint16_t*)src_p; + do { // assuming: (bytes % sizeof(uint16_t) == 0 !!! + *dst16_p++ = *src16_p++; + } while ( bytes -= sizeof(uint16_t) ); +} + +static inline void u16datacpy( void * dst_p, const void * src_p, uint32_t bytes ) +{ + uint16_t * dst16_p = (uint16_t*)dst_p; + uint16_t * src16_p = (uint16_t*)src_p; + do { // assuming: (bytes % sizeof(uint16_t) == 0 !!! + *dst16_p++ = htons (*src16_p++); + } while ( bytes -= sizeof(uint16_t) ); +} + +static inline int u16cmp( void * dst_p, const void * src_p, + uint32_t bytes ) +{ + uint16_t * dst16_p = (uint16_t*)dst_p; + uint16_t * src16_p = (uint16_t*)src_p; + do { // assuming: (bytes % sizeof(uint16_t) == 0 !!! 
+ if ( *dst16_p++ != *src16_p++ ) + return -1; + } while ( bytes -= sizeof(uint16_t) ); + + return 0; +} + +static inline int nbuff_pad(pNBuff_t pNBuff, int padLen) +{ + if ( IS_SKBUFF_PTR(pNBuff) ) + { + int ret = skb_put_padto((struct sk_buff *)pNBuff, padLen + ((struct sk_buff *)pNBuff)->len); + if (ret) + printk(KERN_ERR "nbuff_pad() skb_put_padto err=%d!!\n", ret); + return ret; + } + else + { + fkb_pad(PNBUFF_2_FKBUFF(pNBuff), padLen); + } + return 0; +} + +#ifdef DUMP_DATA +/* dumpHexData dump out the hex base binary data */ +static inline void dumpHexData1(uint8_t *pHead, uint32_t len) +{ + uint32_t i; + uint8_t *c = pHead; + for (i = 0; i < len; ++i) { + if (i % 16 == 0) + printk("\n"); + printk("0x%02X, ", *c++); + } + printk("\n"); +} + +static inline void dump_pkt(const char * fname, uint8_t * pBuf, uint32_t len) +{ + //int dump_len = ( len < 64) ? len : 64; + int dump_len = len ; + printk("%s: data<0x%lu len<%u>", fname, (uintptr_t)pBuf, len); + dumpHexData1(pBuf, dump_len); + cache_flush_len((void*)pBuf, dump_len); +} +#define DUMP_PKT(pBuf,len) dump_pkt(__FUNCTION__, (pBuf), (len)) +#else /* !defined(DUMP_DATA) */ +#define DUMP_PKT(pBuf,len) do {} while(0) +#endif + +#endif /* defined(__NBUFF_H_INCLUDED__) */ diff --git a/include/linux/nbuff_types.h b/include/linux/nbuff_types.h new file mode 100644 index 0000000000000000000000000000000000000000..672f79c6189b21e94ac72e0ff4f6cd55f8890994 --- /dev/null +++ b/include/linux/nbuff_types.h @@ -0,0 +1,68 @@ +#ifndef __NBUFF_TYPES_H_INCLUDED__ +#define __NBUFF_TYPES_H_INCLUDED__ + +/* +<:copyright-BRCM:2013:DUAL/GPL:standard + + Copyright (c) 2013 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +/* + ******************************************************************************* + * + * File Name : nbuff_types.h + * Description: Simple nbuff type defines. 
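+ *              Illustrative encoding (types defined below): a word-aligned
+ *              FkBuff_t pointer such as 0x...c0, tagged with FKBUFF_PTR (3),
+ *              becomes the pNBuff_t value 0x...c3; the two lsbits give the
+ *              buffer type and NBUFF_PTR_MASK recovers the raw pointer.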
+ * + ******************************************************************************* */ + +#define MUST_BE_ZERO 0 + +/* virtual network buffer pointer to SKB|FPB|TGB|FKB */ +typedef void * pNBuff_t; +#define PNBUFF_NULL ((pNBuff_t)NULL) + +typedef enum NBuffPtrType +{ + SKBUFF_PTR = MUST_BE_ZERO, /* Default Linux networking socket buffer */ + FPBUFF_PTR, /* Experimental BRCM IuDMA freepool buffer*/ + TGBUFF_PTR, /* LAB Traffic generated network buffer */ + FKBUFF_PTR, /* Lightweight fast kernel network buffer */ + /* Do not add new ptr types */ +} NBuffPtrType_t; + + /* 2lsbits in pointer encode NbuffType_t */ +#define NBUFF_TYPE_MASK 0x3ul +#define NBUFF_PTR_MASK (~NBUFF_TYPE_MASK) +#define NBUFF_PTR_TYPE(pNBuff) ((uintptr_t)(pNBuff) & NBUFF_TYPE_MASK) + + +#define IS_SKBUFF_PTR(pNBuff) ( NBUFF_PTR_TYPE(pNBuff) == SKBUFF_PTR ) +#define IS_FPBUFF_PTR(pNBuff) ( NBUFF_PTR_TYPE(pNBuff) == FPBUFF_PTR ) +#define IS_TGBUFF_PTR(pNBuff) ( NBUFF_PTR_TYPE(pNBuff) == TGBUFF_PTR ) +#define IS_FKBUFF_PTR(pNBuff) ( NBUFF_PTR_TYPE(pNBuff) == FKBUFF_PTR ) + + +#endif /* defined(__NBUFF_TYPES_H_INCLUDED__) */ diff --git a/include/linux/ndi.h b/include/linux/ndi.h new file mode 100644 index 0000000000000000000000000000000000000000..d2fba5013469a56dcf589aee099ef4576b7c0b4a --- /dev/null +++ b/include/linux/ndi.h @@ -0,0 +1,90 @@ +#ifndef _LINUX_NDI_H +#define _LINUX_NDI_H + +/* 256 was chosen as the max length of a hostname in a DHCP packet is 255. */ +#define NDI_HOSTNAME_MAX_LEN 256 + +#if defined(CONFIG_BCM_KF_NDI) +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#if IS_ENABLED(CONFIG_BCM_DPI) +#include <linux/dpi.h> +#endif + +enum { + NDI_DEV_IGNORE_BIT = 0, + NDI_DEV_STALE_BIT, +}; + +struct ndi_dev { + u32 id; + struct in_addr ip4; + struct in6_addr ip6; + u8 mac[ETH_ALEN]; + char hostname[NDI_HOSTNAME_MAX_LEN]; +#if IS_ENABLED(CONFIG_BCM_DPI) + struct dpi_dev dpi; +#endif + struct net_device *netdev; + u8 state; + u8 probe_count; + unsigned long flags; + + atomic_t refcount; + struct hlist_node node; +}; + +struct ndi_info { + struct ndi_dev *dev; +}; + +#endif /* CONFIG_BCM_KF_NDI */ + +enum { + NDINLGRP_NONE, + NDINLGRP_DEV, + + __NDINLGRP_MAX +#define NDINLGRP_MAX (__NDINLGRP_MAX - 1) +}; + +enum { + NDINL_BASE = 16, + + NDINL_NEWDEVICE = 16, + NDINL_DELDEVICE, + NDINL_GETDEVICE, + NDINL_SETDEVICE, + + __NDINL_MAX, +#define NDINL_MAX (__NDINL_MAX - 1) +}; + +/* + * The following describe the netlink attributes used by NDI when + * transferring data to/from userspace. 
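+ * These values form a kernel/userspace ABI, so new attributes should be
+ * appended immediately before __NDIA_DEV_MAX rather than inserted in the
+ * middle of the enum.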
+ */ +enum { + NDIA_DEV_UNSPEC, + NDIA_DEV_ID, + NDIA_DEV_IP4, + NDIA_DEV_IP6, + NDIA_DEV_MAC, + NDIA_DEV_HOSTNAME, + NDIA_DEV_ONLINE, + /* dpi fields */ + NDIA_DEV_DPI_VENDOR, + NDIA_DEV_DPI_OS, + NDIA_DEV_DPI_OS_CLASS, + NDIA_DEV_DPI_ID, + NDIA_DEV_DPI_CATEGORY, + NDIA_DEV_DPI_FAMILY, + NDIA_DEV_DPI_PRIO, + __NDIA_DEV_MAX +}; +#define NDIA_DEV_MAX (__NDIA_DEV_MAX - 1) + +#endif /* _LINUX_NDI_H */ + diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2a8105d204a9616293a94ec2ed47c36ddec9745d..d1596eff7db6b59434e3040d3145d40f04281c62 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -85,6 +85,13 @@ enum { NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */ +#endif + +#if defined(CONFIG_BCM_KF_EXTSTATS) + NETIF_F_EXTSTATS_BIT, /* Support extended statistics */ +#endif /* * Add your fresh new feature above and remember to update @@ -155,6 +162,13 @@ enum { #define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC) +#endif + +#if defined(CONFIG_BCM_KF_EXTSTATS) +#define NETIF_F_EXTSTATS __NETIF_F(EXTSTATS) +#endif /* Finds the next feature with the highest number of the range of start till 0. */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 58ee9d2d6a3caea8045b66d60f4f679d5d7fbe3e..81d362ca057063c96f69a48e0b09676c32874ad7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -25,6 +25,7 @@ #ifndef _LINUX_NETDEVICE_H #define _LINUX_NETDEVICE_H + #include <linux/timer.h> #include <linux/bug.h> #include <linux/delay.h> @@ -53,10 +54,19 @@ #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> + +#if defined(CONFIG_BCM_KF_NETDEV_EXT) +#include <linux/bcm_netdevice.h> +#endif /* CONFIG_BCM_KF_NETDEV_EXT */ + struct netpoll_info; struct device; struct phy_device; struct dsa_port; +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +struct macsec_context; +struct macsec_ops; +#endif struct sfp_bus; /* 802.11 specific */ @@ -186,6 +196,16 @@ struct net_device_stats { unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; +#if defined(CONFIG_BCM_KF_EXTSTATS) + unsigned long tx_multicast_packets; /* multicast packets transmitted */ + unsigned long rx_multicast_bytes; /* multicast bytes recieved */ + unsigned long tx_multicast_bytes; /* multicast bytes transmitted */ + unsigned long rx_broadcast_packets; /* broadcast packets recieved */ + unsigned long tx_broadcast_packets; /* broadcast packets transmitted */ + /* NOTE: Unicast packets are not counted but are instead calculated as needed + using total - (broadcast + multicast) */ + unsigned long rx_unknown_packets; /* unknown protocol packets recieved */ +#endif }; @@ -1791,6 +1811,11 @@ struct net_device { struct net_device_stats stats; + +#if defined(CONFIG_BCM_KF_NETDEV_EXT) + struct bcm_netdev_ext bcm_nd_ext; +#endif + atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; @@ -2033,6 +2058,12 @@ struct net_device { struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +#if IS_ENABLED(CONFIG_BCM_MACSEC) + /* MACsec management functions */ + const struct macsec_ops *macsec_ops; 
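Referring back to the CONFIG_BCM_KF_EXTSTATS counters added to struct net_device_stats above: a small sketch, not from the patch, of how unicast counts could be derived per the note that unicast is calculated rather than counted (rx_packets, tx_packets and the RX 'multicast' counter are the existing mainline fields):

static inline unsigned long bcm_rx_unicast(const struct net_device_stats *s)
{
	/* total RX minus broadcast and multicast packets */
	return s->rx_packets - s->rx_broadcast_packets - s->multicast;
}

static inline unsigned long bcm_tx_unicast(const struct net_device_stats *s)
{
	/* total TX minus broadcast and multicast packets */
	return s->tx_packets - s->tx_broadcast_packets - s->tx_multicast_packets;
}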
+#endif +#endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -4259,6 +4290,9 @@ void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); +#if defined(CONFIG_BCM_KF_NETDEV_EXT) +struct net_device *bcm_netdev_master_upper_dev_get_nolock(struct net_device *dev); +#endif /* CONFIG_BCM_KF_NETDEV_EXT */ int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, struct netlink_ext_ack *extack); int netdev_master_upper_dev_link(struct net_device *dev, @@ -4354,6 +4388,11 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, bool more) { skb->xmit_more = more ? 1 : 0; +#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) + if(is_netdev_accel_gdx_tx(dev)){ + blog_emit_generic( skb, dev, BLOG_GENPHY); + } +#endif return ops->ndo_start_xmit(skb, dev); } @@ -4614,6 +4653,7 @@ static inline bool netif_reduces_vlan_mtu(struct net_device *dev) extern struct pernet_operations __net_initdata loopback_net_ops; + /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* netdev_printk helpers, similar to dev_printk */ diff --git a/include/linux/netfilter/bcm_nf_defrag_ipv6.h b/include/linux/netfilter/bcm_nf_defrag_ipv6.h new file mode 100644 index 0000000000000000000000000000000000000000..0b00a98e511182058ec0f9799ffa003ebbe78241 --- /dev/null +++ b/include/linux/netfilter/bcm_nf_defrag_ipv6.h @@ -0,0 +1,8 @@ +#ifndef _BCM_NF_DEFRAG_IPV6_H +#define _BCM_NF_DEFRAG_IPV6_H + +extern void +nf_ct_frag6_ident_reuse(struct frag_queue *fq, struct sk_buff *skb, + struct net_device *dev); + +#endif /* _BCM_NF_DEFRAG_IPV6_H */ diff --git a/include/linux/netfilter/bcm_nfnetlink_conntrack.h b/include/linux/netfilter/bcm_nfnetlink_conntrack.h new file mode 100644 index 0000000000000000000000000000000000000000..a1051478648a78adbedfef0112f4a39c46fbd071 --- /dev/null +++ b/include/linux/netfilter/bcm_nfnetlink_conntrack.h @@ -0,0 +1,9 @@ +#ifndef _BCM_NFNETLINK_CONNTRACK_H +#define _BCM_NFNETLINK_CONNTRACK_H + +extern int bcm_ctnetlink_size(const struct nf_conn *ct); +extern int bcm_ctnetlink_dump(struct sk_buff *skb, const struct nf_conn *ct); +extern int bcm_ctnetlink_change(struct nf_conn *ct, + const struct nlattr * const cda[]); + +#endif /* _BCM_NFNETLINK_CONNTRACK_H */ diff --git a/include/linux/netfilter/nf_conntrack_ipsec.h b/include/linux/netfilter/nf_conntrack_ipsec.h new file mode 100644 index 0000000000000000000000000000000000000000..4a709a8fb94afbf9af69f423d7e5735c12130bc5 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_ipsec.h @@ -0,0 +1,43 @@ +/* IPSEC constants and structs */ +#ifndef _NF_CONNTRACK_IPSEC_H +#define _NF_CONNTRACK_IPSEC_H + +#include <linux/netfilter/nf_conntrack_common.h> + +/* conntrack private data */ +struct nf_ct_ipsec_master +{ + __be32 initcookie; /* initcookie of ISAKMP */ + __be32 lan_ip; /* LAN IP */ +}; + +struct nf_nat_ipsec +{ + __be32 lan_ip; /* LAN IP */ +}; + +#ifdef __KERNEL__ + +#define IPSEC_PORT 500 +#define MAX_VPN_CONNECTION 8 + +struct isakmp_pkt_hdr +{ + __be32 initcookie; +}; + + +/* crap needed for nf_conntrack_compat.h */ +struct nf_conn; +struct nf_conntrack_expect; + +extern int +(*nf_nat_ipsec_hook_outbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo); + +extern int +(*nf_nat_ipsec_hook_inbound)(struct sk_buff *skb, struct 
nf_conn *ct, + enum ip_conntrack_info ctinfo, __be32 lan_ip); + +#endif /* __KERNEL__ */ +#endif /* _NF_CONNTRACK_IPSEC_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_esp.h b/include/linux/netfilter/nf_conntrack_proto_esp.h new file mode 100644 index 0000000000000000000000000000000000000000..2717a52cc504f7f266b28ce6df3b826fb2ca9905 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_proto_esp.h @@ -0,0 +1,23 @@ +#if defined(CONFIG_BCM_KF_PROTO_ESP) +#ifndef _CONNTRACK_PROTO_ESP_H +#define _CONNTRACK_PROTO_ESP_H +#include <asm/byteorder.h> + +/* ESP PROTOCOL HEADER */ + +struct esphdr { + __u32 spi; +}; + +struct nf_ct_esp { + unsigned int stream_timeout; + unsigned int timeout; +}; + +#ifdef __KERNEL__ +#include <net/netfilter/nf_conntrack_tuple.h> + +#endif /* __KERNEL__ */ +#endif /* _CONNTRACK_PROTO_ESP_H */ +#endif + diff --git a/include/linux/netfilter/nf_conntrack_rtsp.h b/include/linux/netfilter/nf_conntrack_rtsp.h new file mode 100644 index 0000000000000000000000000000000000000000..e087a5141f776e5d3d7bf81bd19323d6217e381c --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_rtsp.h @@ -0,0 +1,91 @@ +/* +* <:copyright-BRCM:2012:DUAL/GPL:standard +* +* Copyright (c) 2012 Broadcom +* All Rights Reserved +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed +* to you under the terms of the GNU General Public License version 2 +* (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +* with the following added to such license: +* +* As a special exception, the copyright holders of this software give +* you permission to link this software with independent modules, and +* to copy and distribute the resulting executable under terms of your +* choice, provided that you also meet, for each linked independent +* module, the terms and conditions of the license of that module. +* An independent module is a module which is not derived from this +* software. The special exception does not apply to any modifications +* of the software. +* +* Not withstanding the above, under no circumstances may you combine +* this software in any way with any other Broadcom software provided +* under a license other than the GPL, without Broadcom's express prior +* written consent. 
+* +:> +*/ + +#ifndef _NF_CONNTRACK_RTSP_H +#define _NF_CONNTRACK_RTSP_H + +#ifdef __KERNEL__ + +/* This structure exists only once per master */ +struct nf_ct_rtsp_master { + /* The client has sent PAUSE message and not replied */ + int paused; +}; + +/* Single data channel */ +extern int (*nat_rtsp_channel_hook) (struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp, + int *delta); + +/* A pair of data channels (RTP/RTCP) */ +extern int (*nat_rtsp_channel2_hook) (struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp, + char dash, int *delta); + +/* Modify parameters like client_port in Transport for single data channel */ +extern int (*nat_rtsp_modify_port_hook) (struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, + __be16 rtpport, int *delta); + +/* Modify parameters like client_port in Transport for multiple data channels*/ +extern int (*nat_rtsp_modify_port2_hook) (struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, + __be16 rtpport, __be16 rtcpport, + char dash, int *delta); + +/* Modify parameters like destination in Transport */ +extern int (*nat_rtsp_modify_addr_hook) (struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + int matchoff, int matchlen, + int *delta); +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_RTSP_H */ + diff --git a/include/linux/pci.h b/include/linux/pci.h index a4bbce871e08e1de2f9ae98188caeed4fd6c06f6..ece6efb2540e9a096c1c0f4ef9e952d013b3d6ea 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1541,6 +1541,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev); */ #ifdef CONFIG_PCI_DOMAINS extern int pci_domains_supported; +#ifdef CONFIG_BCM_KF_PCI_RESET_DOMAIN_NR +void pci_reset_domain_nr(void); +#endif /* CONFIG_BCM_KF_PCI_RESET_DOMAIN_NR */ #else enum { pci_domains_supported = 0 }; static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } diff --git a/include/linux/pf_ring.h b/include/linux/pf_ring.h new file mode 100644 index 0000000000000000000000000000000000000000..68d13db838831359476eddea5b725c2bfcd6f42f --- /dev/null +++ b/include/linux/pf_ring.h @@ -0,0 +1,1401 @@ +/* +<:copyright-BRCM:2021:GPL/GPL:standard + + Copyright (c) 2021 Broadcom + All Rights Reserved + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License, version 2, as published by +the Free Software Foundation (the "GPL"). + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + + +A copy of the GPL is available at http://www.broadcom.com/licenses/GPLv2.php, or by +writing to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. + +:> +*/ + +/* + * + * Definitions for packet ring + * + * 2004-2020 - ntop.org + * + */ + +#ifndef __RING_H +#define __RING_H + +/** + * @file pf_ring.h + * + * @brief PF_RING kernel module header file. 
+ * @details This header file should NOT be included in PF_RING-based applications directly. + */ + +#ifdef __KERNEL__ +#include <linux/in6.h> +#else +#include <netinet/in.h> +#endif /* __KERNEL__ */ + +#if defined(CONFIG_BCM_KF_SGS) || !defined(__KERNEL__) +#include <linux/pf_ring_sgs.h> +#endif + +/* Versioning */ +#define RING_VERSION "7.5.0" +#define RING_VERSION_NUM 0x070500 + +/* Increment whenever we change slot or packet header layout (e.g. we add/move a field) */ +#define RING_FLOWSLOT_VERSION 17 + +#define RING_MAGIC +#define RING_MAGIC_VALUE 0x88 + +#define MIN_NUM_SLOTS 512 +#define DEFAULT_NUM_SLOTS 4096 +#define DEFAULT_BUCKET_LEN 128 +#define MAX_NUM_DEVICES 256 + +#define MAX_NUM_RING_SOCKETS 256 + +/* Watermark */ +#define DEFAULT_MIN_PKT_QUEUED 1 /* 128 */ +#define DEFAULT_POLL_WATERMARK_TIMEOUT 0 + +#define FILTERING_SAMPLING_RATIO 10 + +/* Set */ +#define SO_ADD_TO_CLUSTER 99 +#define SO_REMOVE_FROM_CLUSTER 100 +#define SO_SET_STRING 101 +#define SO_ADD_FILTERING_RULE 102 +#define SO_REMOVE_FILTERING_RULE 103 +#define SO_TOGGLE_FILTER_POLICY 104 +#define SO_SET_SAMPLING_RATE 105 +#define SO_ACTIVATE_RING 106 +#define SO_RING_BUCKET_LEN 107 +#define SO_SET_CHANNEL_ID 108 +#define SO_PURGE_IDLE_HASH_RULES 109 /* inactivity (sec) */ +#define SO_SET_APPL_NAME 110 +#define SO_SET_PACKET_DIRECTION 111 +#define SO_SET_MASTER_RING 112 +#define SO_ADD_HW_FILTERING_RULE 113 +#define SO_DEL_HW_FILTERING_RULE 114 +#define SO_DISCARD_INJECTED_PKTS 115 /* discard stack injected packets */ +#define SO_DEACTIVATE_RING 116 +#define SO_SET_POLL_WATERMARK 117 +#define SO_SET_VIRTUAL_FILTERING_DEVICE 118 +#define SO_REHASH_RSS_PACKET 119 +#define SO_SET_FILTERING_SAMPLING_RATE 120 +#define SO_SET_POLL_WATERMARK_TIMEOUT 121 +#define SO_SHUTDOWN_RING 124 +#define SO_PURGE_IDLE_RULES 125 /* inactivity (sec) */ +#define SO_SET_SOCKET_MODE 126 +#define SO_USE_SHORT_PKT_HEADER 127 +#define SO_ENABLE_RX_PACKET_BOUNCE 131 +#define SO_SET_APPL_STATS 133 +#define SO_SET_STACK_INJECTION_MODE 134 /* stack injection/interception from userspace */ +#define SO_CREATE_CLUSTER_REFEREE 135 +#define SO_PUBLISH_CLUSTER_OBJECT 136 +#define SO_LOCK_CLUSTER_OBJECT 137 +#define SO_UNLOCK_CLUSTER_OBJECT 138 +#define SO_SET_CUSTOM_BOUND_DEV_NAME 139 +#define SO_SET_IFF_PROMISC 140 +#define SO_SET_VLAN_ID 141 + +/* Get */ +#define SO_GET_RING_VERSION 170 +#define SO_GET_FILTERING_RULE_STATS 171 +#define SO_GET_HASH_FILTERING_RULE_STATS 172 +#define SO_GET_ZC_DEVICE_INFO 173 +#define SO_GET_NUM_RX_CHANNELS 174 +#define SO_GET_RING_ID 175 +#define SO_GET_BPF_EXTENSIONS 176 +#define SO_GET_BOUND_DEVICE_ADDRESS 177 +#define SO_GET_NUM_QUEUED_PKTS 178 +#define SO_GET_PKT_HEADER_LEN 179 +#define SO_GET_LOOPBACK_TEST 180 +#define SO_GET_BUCKET_LEN 181 +#define SO_GET_DEVICE_TYPE 182 +#define SO_GET_EXTRA_DMA_MEMORY 183 +#define SO_GET_BOUND_DEVICE_IFINDEX 184 +#define SO_GET_DEVICE_IFINDEX 185 +#define SO_GET_APPL_STATS_FILE_NAME 186 +#define SO_GET_LINK_STATUS 187 + +/* Other *sockopt */ +#define SO_SELECT_ZC_DEVICE 190 + +/* Error codes */ +#define PF_RING_ERROR_GENERIC -1 +#define PF_RING_ERROR_INVALID_ARGUMENT -2 +#define PF_RING_ERROR_NO_PKT_AVAILABLE -3 +#define PF_RING_ERROR_NO_TX_SLOT_AVAILABLE -4 +#define PF_RING_ERROR_WRONG_CONFIGURATION -5 +#define PF_RING_ERROR_END_OF_DEMO_MODE -6 +#define PF_RING_ERROR_NOT_SUPPORTED -7 +#define PF_RING_ERROR_INVALID_LIB_VERSION -8 +#define PF_RING_ERROR_UNKNOWN_ADAPTER -9 +#define PF_RING_ERROR_NOT_ENOUGH_MEMORY -10 +#define PF_RING_ERROR_INVALID_STATUS -11 +#define 
PF_RING_ERROR_RING_NOT_ENABLED -12 + +#define REFLECTOR_NAME_LEN 8 + +#ifndef IN6ADDR_ANY_INIT +#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } +#endif + +/* *********************************** */ + +/* + Note that as offsets *can* be negative, + please do not change them to unsigned +*/ +struct pkt_offset { + /* This 'eth_offset' offset *must* be added to all offsets below + * ONLY if you are inside the kernel. Ignore it in user-space. */ + int16_t eth_offset; + + int16_t vlan_offset; + int16_t l3_offset; + int16_t l4_offset; + int16_t payload_offset; +} __attribute__((packed)); + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif + +#define REFLECT_PACKET_DEVICE_NONE 0 + +typedef union { + struct in6_addr v6; /* IPv6 src/dst IP addresses (Network byte order) */ + u_int32_t v4; /* IPv4 src/dst IP addresses */ +} ip_addr; + +#define ipv4_tos ip_tos +#define ipv6_tos ip_tos +#define ipv4_src ip_src.v4 +#define ipv4_dst ip_dst.v4 +#define ipv6_src ip_src.v6 +#define ipv6_dst ip_dst.v6 +#define host4_low host_low.v4 +#define host4_high host_high.v4 +#define host6_low host_low.v6 +#define host6_high host_high.v6 +#define host4_peer_a host_peer_a.v4 +#define host4_peer_b host_peer_b.v4 +#define host6_peer_a host_peer_a.v6 +#define host6_peer_b host_peer_b.v6 + +struct eth_vlan_hdr { + u_int16_t h_vlan_id; /* Tag Control Information (QoS, VLAN ID) */ + u_int16_t h_proto; /* packet type ID field */ +} __attribute__((packed)); + +#define NEXTHDR_HOP 0 +#define NEXTHDR_IPV6 41 +#define NEXTHDR_ROUTING 43 +#define NEXTHDR_FRAGMENT 44 +#define NEXTHDR_ESP 50 +#define NEXTHDR_AUTH 51 +#define NEXTHDR_NONE 59 +#define NEXTHDR_DEST 60 +#define NEXTHDR_MOBILITY 135 + +struct kcompact_ipv6_hdr { + u_int32_t flow_lbl:24, + priority:4, + version:4; + u_int16_t payload_len; + u_int8_t nexthdr; + u_int8_t hop_limit; + struct in6_addr saddr; + struct in6_addr daddr; +} __attribute__((packed)); + +struct kcompact_ipv6_opt_hdr { + u_int8_t nexthdr; + u_int8_t hdrlen; + u_int8_t padding[6]; +} __attribute__((packed)); + +#define GRE_HEADER_CHECKSUM 0x8000 +#define GRE_HEADER_ROUTING 0x4000 +#define GRE_HEADER_KEY 0x2000 +#define GRE_HEADER_SEQ_NUM 0x1000 +#define GRE_HEADER_VERSION 0x0007 + +struct gre_header { + u_int16_t flags_and_version; + u_int16_t proto; + /* Optional fields */ +} __attribute__((packed)); + +#define GTP_SIGNALING_PORT 2123 +#define GTP_U_DATA_PORT 2152 + +#define GTP_VERSION_1 0x1 +#define GTP_VERSION_2 0x2 +#define GTP_PROTOCOL_TYPE 0x1 + +struct gtp_v1_hdr { +#define GTP_FLAGS_VERSION 0xE0 +#define GTP_FLAGS_VERSION_SHIFT 5 +#define GTP_FLAGS_PROTOCOL_TYPE 0x10 +#define GTP_FLAGS_RESERVED 0x08 +#define GTP_FLAGS_EXTENSION 0x04 +#define GTP_FLAGS_SEQ_NUM 0x02 +#define GTP_FLAGS_NPDU_NUM 0x01 + u_int8_t flags; + u_int8_t message_type; + u_int16_t payload_len; + u_int32_t teid; +} __attribute__((__packed__)); + +/* Optional: GTP_FLAGS_EXTENSION | GTP_FLAGS_SEQ_NUM | GTP_FLAGS_NPDU_NUM */ +struct gtp_v1_opt_hdr { + u_int16_t seq_num; + u_int8_t npdu_num; + u_int8_t next_ext_hdr; +} __attribute__((__packed__)); + +/* Optional: GTP_FLAGS_EXTENSION && next_ext_hdr != 0 */ +struct gtp_v1_ext_hdr { +#define GTP_EXT_HDR_LEN_UNIT_BYTES 4 + u_int8_t len; /* 4-byte unit */ + /* + * u_char contents[len*4-2]; + * u_int8_t next_ext_hdr; + */ +} __attribute__((__packed__)); + +#define NO_TUNNEL_ID 0xFFFFFFFF + +/* GPRS Tunneling Protocol */ +typedef struct { + u_int32_t tunnel_id; /* GTP/GRE tunnelId or NO_TUNNEL_ID for no filtering */ + u_int8_t tunneled_ip_version; /* Layer 4 protocol */ 
+ u_int8_t tunneled_proto; /* Layer 4 protocol */ + ip_addr tunneled_ip_src, tunneled_ip_dst; + u_int16_t tunneled_l4_src_port, tunneled_l4_dst_port; +} __attribute__((packed)) + tunnel_info; + +#define MOBILE_IP_PORT 434 + +struct mobile_ip_hdr { + u_int8_t message_type, next_header; + u_int16_t reserved; +} __attribute__((packed)); + +typedef enum { + long_pkt_header = 0, /* it includes PF_RING-extensions over the original pcap header */ + short_pkt_header /* Short pcap-like header */ +} pkt_header_len; + +struct pkt_parsing_info { + /* Core fields (also used by NetFlow) */ + u_int8_t dmac[ETH_ALEN], smac[ETH_ALEN]; /* MAC src/dst addresses */ + u_int16_t eth_type; /* Ethernet type */ + u_int16_t vlan_id; /* VLAN Id or NO_VLAN */ + u_int16_t qinq_vlan_id; /* VLAN Id or NO_VLAN */ + u_int8_t ip_version; + u_int8_t l3_proto, ip_tos; /* Layer 3 protocol, TOS */ + ip_addr ip_src, ip_dst; /* IPv4/6 src/dst IP addresses */ + u_int16_t l4_src_port, l4_dst_port;/* Layer 4 src/dst ports */ + u_int8_t icmp_type, icmp_code; /* Variables for ICMP packets */ + struct { + u_int8_t flags; /* TCP flags (0 if not available) */ + u_int32_t seq_num, ack_num; /* TCP sequence number */ + } tcp; + tunnel_info tunnel; + int32_t last_matched_rule_id; /* If > 0 identifies a rule that matched the packet */ + struct pkt_offset offset; /* Offsets of L3/L4/payload elements */ +} __attribute__((packed)); + +#define UNKNOWN_INTERFACE -1 +#define FAKE_PACKET -2 /* It indicates that the returned packet + is faked, and that the info is basically + a message from PF_RING + */ + +struct pfring_extended_pkthdr { + u_int64_t timestamp_ns; /* Packet timestamp at ns precision. Note that if your NIC supports + hardware timestamp, this is the place to read timestamp from */ +#define PKT_FLAGS_CHECKSUM_OFFLOAD 1 << 0 /* IP/TCP checksum offload enabled */ +#define PKT_FLAGS_CHECKSUM_OK 1 << 1 /* Valid checksum (with IP/TCP checksum offload enabled) */ +#define PKT_FLAGS_IP_MORE_FRAG 1 << 2 /* IP More fragments flag set */ +#define PKT_FLAGS_IP_FRAG_OFFSET 1 << 3 /* IP fragment offset set (not 0) */ +#define PKT_FLAGS_VLAN_HWACCEL 1 << 4 /* VLAN stripped by hw */ +#define PKT_FLAGS_FLOW_OFFLOAD_UPDATE 1 << 6 /* Flow update metadata, see generic_flow_update struct (keep flag compatible with ZC) */ +#define PKT_FLAGS_FLOW_OFFLOAD_PACKET 1 << 7 /* Flow raw packet, pkt_hash contains the flow_id (keep flag compatible with ZC) */ +#define PKT_FLAGS_FLOW_OFFLOAD_MARKER 1 << 8 /* Flow raw packet belongs to a flow that has been marked (keep flag compatible with ZC) */ + u_int32_t flags; + + u_int8_t rx_direction; /* 1=RX: packet received by the NIC, 0=TX: packet transmitted by the NIC */ + int32_t if_index; /* index of the interface on which the packet has been received. 
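A consumer-side sketch, not part of the patch, of the PKT_FLAGS_* bits and timestamp carried in struct pfring_extended_pkthdr above (helper names are illustrative):

static inline int bcm_pkt_vlan_stripped(const struct pfring_extended_pkthdr *ext)
{
	/* VLAN tag removed by the NIC, per PKT_FLAGS_VLAN_HWACCEL above */
	return (ext->flags & PKT_FLAGS_VLAN_HWACCEL) != 0;
}

static inline int bcm_pkt_is_flow_update(const struct pfring_extended_pkthdr *ext)
{
	/* metadata record described by struct generic_flow_update below */
	return (ext->flags & PKT_FLAGS_FLOW_OFFLOAD_UPDATE) != 0;
}

static inline u_int64_t bcm_pkt_ts_ns(const struct pfring_extended_pkthdr *ext)
{
	/* ns-precision timestamp; carries the hardware stamp when the NIC provides one */
	return ext->timestamp_ns;
}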
+ It can be also used to report other information */ + u_int32_t pkt_hash; /* Hash based on the packet header */ + + /* --- short header ends here --- */ + + struct { + int32_t bounce_interface; /* Interface Id where this packet will bounce after processing + if its values is other than UNKNOWN_INTERFACE */ + struct sk_buff *reserved; /* Kernel only pointer */ +#if (defined(CONFIG_BCM_KF_SGS) && defined(CONFIG_ARM)) || defined(USER_ARCH_ARM32) + u_int32_t reserved1; +#endif + } tx; +#if (defined(CONFIG_BCM_KF_SGS) && defined(CONFIG_ARM)) || defined(USER_ARCH_ARM32) + u_int8_t align[4]; +#endif + + /* NOTE: leave it as last field of the memset on parse_pkt() will fail */ + struct pkt_parsing_info parsed_pkt; /* packet parsing info */ +} __attribute__((packed)); + +/* NOTE: Keep 'struct pfring_pkthdr' in sync with 'struct pcap_pkthdr' */ + +struct pfring_pkthdr { + /* pcap header */ + struct timeval ts; /* time stamp */ +#if (defined(CONFIG_BCM_KF_SGS) && defined(CONFIG_ARM)) || defined(USER_ARCH_ARM32) + u_int32_t padding[2]; +#endif + u_int32_t caplen; /* length of portion present */ + u_int32_t len; /* length of whole packet (off wire) */ +#if defined(CONFIG_BCM_KF_SGS) || !defined(__KERNEL__) + struct sgs_pkthdr sgs; +#endif + struct pfring_extended_pkthdr extended_hdr; /* PF_RING extended header */ +} __attribute__((packed)); + +/* *********************************** */ + +#define MAX_NUM_LIST_ELEMENTS MAX_NUM_RING_SOCKETS /* sizeof(bits_set) [see below] */ + +#ifdef __KERNEL__ +typedef struct { + u_int32_t num_elements, top_element_id; + rwlock_t list_lock; + void *list_elements[MAX_NUM_LIST_ELEMENTS]; +} lockless_list; + +void init_lockless_list(lockless_list *l); +int lockless_list_add(lockless_list *l, void *elem); +int lockless_list_remove(lockless_list *l, void *elem); +void* lockless_list_get_next(lockless_list *l, u_int32_t *last_list_idx); +void* lockless_list_get_first(lockless_list *l, u_int32_t *last_list_idx); +void lockless_list_empty(lockless_list *l, u_int8_t free_memory); +void term_lockless_list(lockless_list *l, u_int8_t free_memory); +#endif + +/* ************************************************* */ + +typedef struct { + int32_t if_index; /* Index of the interface on which the packet has been received */ + u_int8_t smac[ETH_ALEN], dmac[ETH_ALEN]; /* Use '0' (zero-ed MAC address) for any MAC address. + This is applied to both source and destination. */ + u_int16_t vlan_id; /* Use 0 for any vlan */ + u_int16_t eth_type; /* Use 0 for any ethernet type */ + u_int8_t proto; /* Use 0 for any l3 protocol */ + ip_addr shost, dhost; /* User '0' for any host. This is applied to both source and destination. 
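Most of these core fields treat a zeroed value as 'match any' (see the field comments); a minimal sketch, not part of the patch, of narrowing an otherwise wildcard rule to TCP traffic (helper name is hypothetical):

static inline void bcm_core_fields_any_tcp(filtering_rule_core_fields *cf)
{
	memset(cf, 0, sizeof(*cf));	/* zeroed fields mean 'match any' per the comments above */
	cf->proto = IPPROTO_TCP;	/* narrow to TCP only */
}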
*/ + ip_addr shost_mask, dhost_mask; /* IPv4/6 network mask */ + u_int16_t sport_low, sport_high; /* All ports between port_low...port_high means 'any' port */ + u_int16_t dport_low, dport_high; /* All ports between port_low...port_high means 'any' port */ + struct { + u_int8_t flags; /* TCP flags (0 if not available) */ + } tcp; +} __attribute__((packed)) +filtering_rule_core_fields; + +/* ************************************************* */ + +typedef struct { + +#define FILTER_TUNNEL_ID_FLAG 1 << 0 + u_int16_t optional_fields; /* Use this mask to activate optional fields */ + + struct { + u_int32_t tunnel_id; /* GTP/GRE tunnelId or NO_TUNNEL_ID for no filtering */ + ip_addr shost, dhost; /* Filter on tunneled IPs */ + ip_addr shost_mask, dhost_mask; /* IPv4/6 network mask */ + } tunnel; + + char payload_pattern[32]; /* If strlen(payload_pattern) > 0, the packet payload + must match the specified pattern */ +} __attribute__((packed)) +filtering_rule_extended_fields; + +/* ************************************************* */ + +typedef enum { + forward_packet_and_stop_rule_evaluation = 0, + dont_forward_packet_and_stop_rule_evaluation, + execute_action_and_continue_rule_evaluation, + execute_action_and_stop_rule_evaluation, + forward_packet_add_rule_and_stop_rule_evaluation, /* auto-filled hash rule */ + reflect_packet_and_stop_rule_evaluation, + reflect_packet_and_continue_rule_evaluation, + bounce_packet_and_stop_rule_evaluation, + bounce_packet_and_continue_rule_evaluation +} rule_action_behaviour; + +typedef enum { + pkt_detail_flow, + pkt_detail_aggregation +} pkt_detail_mode; + +typedef enum { + rx_and_tx_direction = 0, + rx_only_direction, + tx_only_direction +} packet_direction; + +typedef enum { + send_and_recv_mode = 0, + send_only_mode, + recv_only_mode +} socket_mode; + +typedef struct { + unsigned long jiffies_last_match; /* Jiffies of the last rule match (updated by pf_ring) */ + struct net_device *reflector_dev; /* Reflector device */ +} __attribute__((packed)) +filtering_internals; + +typedef struct { +#define FILTERING_RULE_AUTO_RULE_ID 0xFFFF + u_int16_t rule_id; /* Rules are processed in order from lowest to higest id */ + + rule_action_behaviour rule_action; /* What to do in case of match */ + u_int8_t balance_id, balance_pool; /* If balance_pool > 0, then pass the packet above only if the + (hash(proto, sip, sport, dip, dport) % balance_pool) = balance_id */ + u_int8_t locked; /* Do not purge with pfring_purge_idle_rules() */ + u_int8_t bidirectional; /* Swap peers when checking if they match the rule. Default: monodir */ + filtering_rule_core_fields core_fields; + filtering_rule_extended_fields extended_fields; + char reflector_device_name[REFLECTOR_NAME_LEN]; + + filtering_internals internals; /* PF_RING internal fields */ +} __attribute__((packed)) +filtering_rule; + +/* *********************************** */ + +/* 82599 packet steering filters */ + +typedef struct { + u_int8_t proto; + u_int32_t s_addr, d_addr; + u_int16_t s_port, d_port; + u_int16_t queue_id; +} __attribute__((packed)) +intel_82599_five_tuple_filter_hw_rule; + +typedef struct { + u_int16_t vlan_id; + u_int8_t proto; + u_int32_t s_addr, d_addr; + u_int16_t s_port, d_port; + u_int16_t queue_id; +} __attribute__((packed)) +intel_82599_perfect_filter_hw_rule; + +/* + Rules are defined per port. 
Each redirector device + has 4 ports (numbeder 0..3): + + 0 +--------------+ 2 +--------------+ + LAN <===> | | <===> | 1/10G | + | Redirector | | Ethernet | + LAN <===> | Switch | <===> | Adapter | + 1 +--------------+ 3 +--------------+ + + Drop Rule + Discard incoming packets matching the filter + on 'rule_port' + + Redirect Rule + Divert incoming packets matching the filter + on 'rule_port' to 'rule_target_port'. + + Mirror Rule + Copy incoming packets matching the filter + on 'rule_port' to 'rule_target_port'. The original + packet will continue its journey (i.e. packet are + actually duplicated) +*/ + +typedef enum { + drop_rule, + redirect_rule, + mirror_rule +} silicom_redirector_rule_type; + +typedef struct { + silicom_redirector_rule_type rule_type; + u_int8_t rule_port; /* Port on which the rule is defined */ + u_int8_t rule_target_port; /* Target port (ignored for drop rules) */ + u_int16_t vlan_id_low, vlan_id_high; + u_int8_t l3_proto; + ip_addr src_addr, dst_addr; + u_int32_t src_mask, dst_mask; + u_int16_t src_port_low, src_port_high; + u_int16_t dst_port_low, dst_port_high; +} __attribute__((packed)) +silicom_redirector_hw_rule; + +typedef enum { + accolade_drop, + accolade_pass +} accolade_rule_action_type; + +/* Accolade supports mode 1 filtering on almost all cards (up to 32 rules), + * and mode 2 filtering on selected adapters (up to 1K rules). + * PF_RING automatically select mode 2 when available, and mode 1 as fallback. + * Mode 1 and 2 support different fields, please refer to the fields description. */ +typedef struct { + accolade_rule_action_type action; /* in mode 2 this should be always the opposite of the default action */ + u_int32_t port_mask; /* ports on which the rule is defined (default 0xf) - mode 1 only */ + u_int8_t ip_version; + u_int8_t protocol; /* l4 */ + u_int16_t vlan_id; /* mode 2 only (if vlan_id is set, mpls_label is ignored due to hw limitations) */ + u_int32_t mpls_label; /* mode 2 only */ + ip_addr src_addr, dst_addr; + u_int32_t src_addr_bits, dst_addr_bits; + u_int16_t src_port_low; + u_int16_t src_port_high; /* mode 1 only */ + u_int16_t dst_port_low; + u_int16_t dst_port_high; /* mode 1 only */ + u_int8_t l4_port_not; /* rule match if src_port_low/dst_port_low are defined and they do not match - mode 2 only */ +} __attribute__((packed)) +accolade_hw_rule; + +typedef enum { + flow_drop_rule, + flow_mark_rule +} generic_flow_rule_action_type; + +typedef struct { + generic_flow_rule_action_type action; + u_int32_t flow_id; /* flow id from flow metadata */ + u_int32_t thread; /* id of the thread setting the rule */ +} __attribute__((packed)) +generic_flow_id_hw_rule; + +typedef struct { + generic_flow_rule_action_type action; + ip_addr src_ip; + ip_addr dst_ip; + u_int16_t src_port; + u_int16_t dst_port; + u_int8_t ip_version; + u_int8_t protocol; + u_int8_t interface; /* from extended_hdr.if_index */ +} __attribute__((packed)) +generic_flow_tuple_hw_rule; + +typedef enum { + intel_82599_five_tuple_rule, + intel_82599_perfect_filter_rule, + silicom_redirector_rule, + generic_flow_id_rule, + generic_flow_tuple_rule, + accolade_rule, + accolade_default +} hw_filtering_rule_type; + +typedef struct { + hw_filtering_rule_type rule_family_type; + u_int16_t rule_id; + + union { + intel_82599_five_tuple_filter_hw_rule five_tuple_rule; + intel_82599_perfect_filter_hw_rule perfect_rule; + silicom_redirector_hw_rule redirector_rule; + generic_flow_id_hw_rule flow_id_rule; + generic_flow_tuple_hw_rule flow_tuple_rule; + accolade_hw_rule accolade_rule; 
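The rule_family_type member declared above tells consumers which entry of the rule_family union is valid; a minimal dispatch sketch, not part of the patch (function and handling are hypothetical):

static void bcm_apply_hw_rule(const hw_filtering_rule *r)
{
	switch (r->rule_family_type) {
	case intel_82599_perfect_filter_rule:
		/* r->rule_family.perfect_rule: 82599 perfect-match filter */
		break;
	case generic_flow_tuple_rule:
		/* r->rule_family.flow_tuple_rule: tuple-based flow rule */
		break;
	default:
		/* remaining families follow the same pattern */
		break;
	}
}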
+ } rule_family; +} __attribute__((packed)) +hw_filtering_rule; + +#define MAGIC_HW_FILTERING_RULE_REQUEST 0x29010020 /* deprecated? */ + +#ifdef __KERNEL__ + +#define ETHTOOL_PFRING_SRXFTCHECK 0x10000000 +#define ETHTOOL_PFRING_SRXFTRLDEL 0x10000031 +#define ETHTOOL_PFRING_SRXFTRLINS 0x10000032 + +#if defined(I82599_HW_FILTERING_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40)) +#define FLOW_EXT 0x80000000 +union _kcompat_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kcompat_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kcompat_ethtool_rx_flow_spec { + __u32 flow_type; + union _kcompat_ethtool_flow_union h_u; + struct _kcompat_ethtool_flow_ext h_ext; + union _kcompat_ethtool_flow_union m_u; + struct _kcompat_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kcompat_ethtool_rx_flow_spec +#endif /* defined(I82599_HW_FILTERING_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40)) */ + +#endif /* __KERNEL__ */ + +typedef enum { + add_hw_rule, + remove_hw_rule +} hw_filtering_rule_command; + +/* *********************************** */ + +struct pfring_timespec { + u_int32_t tv_sec; + u_int32_t tv_nsec; +} __attribute__((packed)); + +typedef struct { + u_int32_t flow_id; + + u_int8_t ip_version; + u_int8_t l4_protocol; + + u_int8_t tos; + u_int8_t tcp_flags; + + ip_addr src_ip; + ip_addr dst_ip; + + u_int16_t vlan_id; + u_int16_t reserved; /* padding */ + + u_int16_t src_port; + u_int16_t dst_port; + + u_int32_t fwd_packets; + u_int32_t fwd_bytes; + u_int32_t rev_packets; + u_int32_t rev_bytes; + + struct pfring_timespec fwd_ts_first; + struct pfring_timespec fwd_ts_last; + struct pfring_timespec rev_ts_first; + struct pfring_timespec rev_ts_last; +} __attribute__((packed)) +generic_flow_update; + +typedef struct { + generic_flow_rule_action_type action; + u_int32_t flow_id; +} __attribute__((packed)) +generic_flow_feedback; + +/* *********************************** */ + +extern struct pf_ring_socket *pfr; /* Forward */ + +/* *********************************** */ + +typedef int (*five_tuple_rule_handler)(struct pf_ring_socket *pfr, + hw_filtering_rule *rule, + hw_filtering_rule_command request); +typedef int (*perfect_filter_hw_rule_handler)(struct pf_ring_socket *pfr, + hw_filtering_rule *rule, + hw_filtering_rule_command request); + +typedef struct { + five_tuple_rule_handler five_tuple_handler; + perfect_filter_hw_rule_handler perfect_filter_handler; +} __attribute__((packed)) +hw_filtering_device_handler; + +/* *********************************** */ + +/* Hash size used for precise packet matching */ +#define DEFAULT_RING_HASH_SIZE 4096 + +/* + * The hashtable contains only perfect matches: no + * wildacards or so are accepted. 
(bidirectional) + */ +typedef struct { + u_int16_t rule_id; /* Future use */ + u_int16_t vlan_id; + u_int8_t ip_version; + u_int8_t proto; /* Layer 3 protocol */ + ip_addr host_peer_a, host_peer_b; + u_int16_t port_peer_a, port_peer_b; + + rule_action_behaviour rule_action; /* What to do in case of match */ + char reflector_device_name[REFLECTOR_NAME_LEN]; + + filtering_internals internals; /* PF_RING internal fields */ +} __attribute__((packed)) +hash_filtering_rule; + +typedef struct { + u_int64_t match; + u_int64_t filtered; + u_int64_t match_forward; + u_int32_t inactivity; /* sec */ +} __attribute__((packed)) +hash_filtering_rule_stats; + +/* ************************************************* */ + +typedef struct _sw_filtering_hash_bucket { + hash_filtering_rule rule; + u_int64_t match; /* number of packets matching the rule */ + u_int64_t filtered; /* number of packets filtered by the rule */ + u_int64_t match_forward; /* number of packets sampled by the rule (equivalent to match minus filtered) */ + struct _sw_filtering_hash_bucket *next; +} __attribute__((packed)) +sw_filtering_hash_bucket; + +/* *********************************** */ + +#define RING_MIN_SLOT_SIZE (60+sizeof(struct pfring_pkthdr)) +#define RING_MAX_SLOT_SIZE (2000+sizeof(struct pfring_pkthdr)) + +#if !defined(__cplusplus) + +#define min_val(a,b) ((a < b) ? a : b) +#define max_val(a,b) ((a > b) ? a : b) + +#endif + +/* *********************************** */ +#if defined(CONFIG_BCM_KF_SGS) || !defined(__KERNEL__) +struct response { + union { + unsigned long status; + u_int64_t status_u64; + }; + union { + struct sk_buff *skb; + u_int64_t skb_u64; + }; +}; + +struct response_queue { + u_int64_t read; /* managed by kernel */ + u_int64_t size; /* managed by kernel */ + u_int64_t write; /* managed by userspace */ + struct response data[(65536 - 48) / sizeof(struct response)]; +} __attribute__((packed)); +#endif + +/* False sharing reference: http://en.wikipedia.org/wiki/False_sharing */ + +typedef struct flowSlotInfo { + /* first page, managed by kernel */ + u_int16_t version, sample_rate; + u_int32_t min_num_slots, slot_len, data_len; + u_int64_t tot_mem; + volatile u_int64_t insert_off; + u_int64_t kernel_remove_off; + u_int64_t tot_pkts, tot_lost; + volatile u_int64_t tot_insert; + u_int64_t kernel_tot_read; + u_int64_t tot_fwd_ok, tot_fwd_notok; + u_int64_t good_pkt_sent, pkt_send_error; + /* <-- 64 bytes here, should be enough to avoid some L1 VIVT coherence issues (32 ~ 64bytes lines) */ + char padding[128-104]; + /* <-- 128 bytes here, should be enough to avoid false sharing in most L2 (64 ~ 128bytes lines) */ + char k_padding[4096-128]; + /* <-- 4096 bytes here, to get a page aligned block writable by kernel side only */ + + /* second page, managed by userland */ + volatile u_int64_t tot_read; + volatile u_int64_t remove_off /* managed by userland */; +#if defined(CONFIG_BCM_KF_SGS) || !defined(__KERNEL__) + struct response_queue rq; + /* <-- 69632 (68k) bytes here, to get a page aligned block writable by userland only */ +#endif +} __attribute__((packed)) +FlowSlotInfo; + +/* **************************************** */ + +#ifdef __KERNEL__ +FlowSlotInfo *getRingPtr(void); +int allocateRing(char *deviceName, u_int numSlots, u_int bucketLen, u_int sampleRate); +unsigned int pollRing(struct file *fp, struct poll_table_struct * wait); +void deallocateRing(void); +#endif /* __KERNEL__ */ + +/* *********************************** */ + +#define PF_RING 27 /* (0x1b) Packet Ring */ +#define SOCK_RING PF_RING + +/* ioctl() 
*/ +#define SIORINGPOLL 0x8888 + +/* ************************************************* */ + +#ifdef __KERNEL__ +struct ring_sock { + struct sock sk; /* It MUST be the first element */ + struct pf_ring_socket *pf_ring_sk; + /* FIXX Do we really need the following items? */ + //struct packet_type prot_hook; + //spinlock_t bind_lock; +}; +#endif + +/* *********************************** */ + +typedef int (*zc_dev_wait_packet)(void *adapter, int mode); +typedef int (*zc_dev_notify)(void *rx_adapter_ptr, void *tx_adapter_ptr, u_int8_t device_in_use); + +typedef enum { + add_device_mapping = 0, remove_device_mapping +} zc_dev_operation; + +/* IMPORTANT NOTE + * add new family types ALWAYS at the end + * (i.e. append) of this datatype */ +typedef enum { + intel_e1000e = 0, + intel_igb, + intel_ixgbe, + intel_ixgbe_82598, + intel_ixgbe_82599, + intel_igb_82580, + intel_e1000, + intel_ixgbe_82599_ts, + intel_i40e, + intel_fm10k, + intel_ixgbe_vf, + intel_ixgbe_x550 +} zc_dev_model; + +typedef struct { + u_int32_t packet_memory_num_slots; + u_int32_t packet_memory_slot_len; + u_int32_t descr_packet_memory_tot_len; + u_int16_t registers_index; + u_int16_t stats_index; + u_int32_t vector; + u_int32_t num_queues; +} __attribute__((packed)) +mem_ring_info; + +typedef struct { + mem_ring_info rx; + mem_ring_info tx; + u_int32_t phys_card_memory_len; + zc_dev_model device_model; +} __attribute__((packed)) +zc_memory_info; + +typedef struct { + zc_memory_info mem_info; + u_int16_t channel_id; + void *rx_descr_packet_memory; /* Invalid in userland */ + void *tx_descr_packet_memory; /* Invalid in userland */ + char *phys_card_memory; /* Invalid in userland */ + struct net_device *dev; /* Invalid in userland */ + struct device *hwdev; /* Invalid in userland */ + u_char device_address[6]; +#ifdef __KERNEL__ + wait_queue_head_t *packet_waitqueue; +#else + void *packet_waitqueue; +#endif + u_int8_t *interrupt_received, in_use; + void *rx_adapter_ptr, *tx_adapter_ptr; + zc_dev_wait_packet wait_packet_function_ptr; + zc_dev_notify usage_notification; +} __attribute__((packed)) +zc_dev_info; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +typedef struct { + zc_dev_operation operation; + char device_name[IFNAMSIZ]; + int32_t channel_id; + zc_dev_model device_model; /* out */ +} __attribute__((packed)) +zc_dev_mapping; + +/* ************************************************* */ + +#define RING_ANY_CHANNEL ((u_int64_t)-1) +#define MAX_NUM_RX_CHANNELS 64 /* channel_id_mask is a 64 bit mask */ +#define UNKNOWN_NUM_RX_CHANNELS 1 + +#define RING_ANY_VLAN ((u_int16_t)0xFFFF) +#define RING_NO_VLAN ((u_int16_t)0) + +/* ************************************************* */ + +typedef enum { + cluster_per_flow = 0, /* 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan> */ + cluster_round_robin, + cluster_per_flow_2_tuple, /* 2-tuple: <src ip, dst ip > */ + cluster_per_flow_4_tuple, /* 4-tuple: <src ip, src port, dst ip, dst port > */ + cluster_per_flow_5_tuple, /* 5-tuple: <src ip, src port, dst ip, dst port, proto > */ + cluster_per_flow_tcp_5_tuple, /* 5-tuple only with TCP, 2 tuple with all other protos */ + /* same as above, computing on tunnel content when present */ + cluster_per_inner_flow, /* 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan> */ + cluster_per_inner_flow_2_tuple, /* 2-tuple: <src ip, dst ip > */ + cluster_per_inner_flow_4_tuple, /* 4-tuple: <src ip, src port, dst ip, dst port > */ + cluster_per_inner_flow_5_tuple, /* 5-tuple: <src ip, src port, dst ip, dst port, proto > */ + 
cluster_per_inner_flow_tcp_5_tuple,/* 5-tuple only with TCP, 2 tuple with all other protos */ + /* new types, for L2-only protocols */ + cluster_per_flow_ip_5_tuple, /* 5-tuple only with IP, 2 tuple with non-IP <src mac, dst mac> */ + cluster_per_inner_flow_ip_5_tuple, /* 5-tuple only with IP, 2 tuple with non-IP <src mac, dst mac> */ + cluster_per_flow_ip_with_dup_tuple /* 1-tuple: <src ip> and <dst ip> with duplication */ +} cluster_type; + +#define MAX_CLUSTER_TYPE_ID cluster_per_flow_ip_with_dup_tuple + +struct add_to_cluster { + u_int clusterId; + cluster_type the_type; +} __attribute__((packed)); + +typedef enum { + standard_nic_family = 0, /* No Hw Filtering */ + intel_82599_family +} pfring_device_type; + +typedef struct { + char device_name[IFNAMSIZ]; + pfring_device_type device_type; + + /* Entry in the /proc filesystem */ + struct proc_dir_entry *proc_entry; +} __attribute__((packed)) +virtual_filtering_device_info; + +/* ************************************************* */ + +struct create_cluster_referee_info { + u_int32_t cluster_id; + u_int32_t recovered; /* fresh or recovered */ +} __attribute__((packed)); + +struct public_cluster_object_info { + u_int32_t cluster_id; + u_int32_t object_type; + u_int32_t object_id; +} __attribute__((packed)); + +struct lock_cluster_object_info { + u_int32_t cluster_id; + u_int32_t object_type; + u_int32_t object_id; + u_int32_t lock_mask; + u_int32_t reserved; +} __attribute__((packed)); + +/* ************************************************* */ + +typedef enum { + cluster_slave = 0, + cluster_master = 1 +} cluster_client_type; + +/* ************************************************* */ + +#ifdef __KERNEL__ + +#if(LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)) +#ifndef netdev_notifier_info_to_dev +#define netdev_notifier_info_to_dev(a) ((struct net_device*)a) +#endif +#endif + +#define CLUSTER_LEN 64 + +/* + * A ring cluster is used group together rings used by various applications + * so that they look, from the PF_RING point of view, as a single ring. + * This means that developers can use clusters for sharing packets across + * applications using various policies as specified in the hashing_mode + * parameter. + */ +struct ring_cluster { + u_int32_t cluster_id; /* 0 = no cluster */ + u_int32_t num_cluster_elements; + cluster_type hashing_mode; + u_short hashing_id; + struct sock *sk[CLUSTER_LEN]; +}; + +/* + * Linked-list of ring clusters + */ +typedef struct { + struct ring_cluster cluster; + struct list_head list; +} ring_cluster_element; + +#define MAX_NUM_ZC_BOUND_SOCKETS MAX_NUM_RING_SOCKETS + +typedef struct { + u8 num_bound_sockets; + zc_dev_info zc_dev; + struct list_head list; + /* + In the ZC world only one application can open and enable the + device@channel per direction. The array below is used to keep + pointers to the sockets bound to device@channel. + No more than one socket can be enabled for RX and one for TX. 
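Tying the cluster_type values and struct add_to_cluster above to SO_ADD_TO_CLUSTER defined earlier: a user-space sketch, assuming the usual PF_RING convention of driving these options through setsockopt() at level 0 on a PF_RING socket (error handling kept minimal):

static int bcm_join_cluster(void)
{
	struct add_to_cluster ac = {
		.clusterId = 10,		/* id shared by all co-operating apps */
		.the_type  = cluster_per_flow,	/* 6-tuple hashing across sockets     */
	};
	int fd = socket(PF_RING, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, 0, SO_ADD_TO_CLUSTER, &ac, sizeof(ac)) < 0)
		return -1;
	return fd;	/* packets for this cluster are now load-balanced to fd */
}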
+ */ + struct pf_ring_socket *bound_sockets[MAX_NUM_ZC_BOUND_SOCKETS]; + rwlock_t lock; +} zc_dev_list; + +#define MAX_NUM_IFINDEX 0x7FFFFFFF +#define MAX_NUM_DEV_IDX 1024 + +/* + * Linked-list of virtual filtering devices + */ +typedef struct { + virtual_filtering_device_info info; + struct list_head list; +} virtual_filtering_device_element; + +/* ************************************************* */ + +typedef struct { + u_int8_t set; + u_int8_t direct_mapping; + int32_t ifindex; +} ifindex_map_item; + +/* ************************************************* */ + +typedef struct { + struct net_device *dev; + + /* Note: we keep device_name here for a couple of reasons: + * 1. some device types might NOT have a net_device handler + * 2. when a device name changes we need to remember the old name */ + char device_name[IFNAMSIZ]; + + pfring_device_type device_type; /* Device Type */ + int32_t dev_index; + + u_int8_t do_not_remove_promisc; /* promisc was set before any socket */ + atomic_t promisc_users; /* number of rings with promisc set bound to this device */ + + /* Entry in the /proc filesystem */ + struct proc_dir_entry *proc_entry; + struct proc_dir_entry *proc_info_entry; + + /* ZC */ + u_int8_t is_zc_device; + zc_dev_model zc_dev_model; + u_int num_zc_dev_rx_queues; /* 0 for non ZC devices */ + u_int32_t num_zc_rx_slots; + u_int32_t num_zc_tx_slots; + + /* Hardware Filters */ + struct { + u_int16_t num_filters; + hw_filtering_device_handler filter_handlers; + } hw_filters; + + struct list_head device_list; +} pf_ring_device; + +/* ************************************************* */ + +struct dma_memory_info { + u_int32_t num_chunks, chunk_len; + u_int32_t num_slots, slot_len; + unsigned long *virtual_addr; /* chunks pointers */ + u_int64_t *dma_addr; /* per-slot DMA adresses */ + struct device *hwdev; /* dev for DMA mapping */ +}; + +/* ************************************************* */ + +typedef struct { + u_int32_t object_type; + u_int32_t object_id; + u_int32_t lock_bitmap; + + struct list_head list; +} cluster_object; + +struct cluster_referee { + u_int32_t id; + u_int32_t users; + u_int8_t master_running; + struct list_head objects_list; + + struct list_head list; +}; + +/* ************************************************* */ + +typedef int (*do_handle_sw_filtering_hash_bucket)(struct pf_ring_socket *pfr, + sw_filtering_hash_bucket* rule, + u_char add_rule); + +typedef int (*do_add_packet_to_ring)(struct pf_ring_socket *pfr, + u_int8_t real_skb, + struct pfring_pkthdr *hdr, struct sk_buff *skb, + int displ, u_int8_t parse_pkt_first); + +typedef int (*do_add_raw_packet_to_ring)(struct pf_ring_socket *pfr, + struct pfring_pkthdr *hdr, + u_char *data, u_int data_len, + u_int8_t parse_pkt_first); + +typedef u_int32_t (*do_rehash_rss)(struct sk_buff *skb, struct pfring_pkthdr *hdr); + +/* ************************************************* */ + +#define NUM_FRAGMENTS_HASH_SLOTS 4096 +#define MAX_CLUSTER_FRAGMENTS_LEN 8*NUM_FRAGMENTS_HASH_SLOTS + +struct hash_fragment_node { + /* Key */ + u_int32_t ipv4_src_host, ipv4_dst_host; + u_int16_t ip_fragment_id; + + /* Value */ + u_int8_t cluster_app_id; /* Identifier of the app where the main fragment has been placed */ + + /* Expire */ + unsigned long expire_jiffies; /* Time at which this entry will be expired */ + + /* collision list */ + struct list_head frag_list; +}; + +/* ************************************************* */ + +/* + * Ring options + */ +struct pf_ring_socket { + rwlock_t ring_config_lock; + + u_int8_t ring_active, 
ring_shutdown, num_rx_channels, num_bound_devices; + pf_ring_device *ring_dev; + + /* last device set with bind, needed to heck channels when multiple + * devices are used with quick-mode */ + pf_ring_device *last_bind_dev; + + DECLARE_BITMAP(pf_dev_mask, MAX_NUM_DEV_IDX /* bits */); + int ring_pid; + u_int32_t ring_id; + char *appl_name; /* String that identifies the application bound to the socket */ + packet_direction direction; /* Specify the capture direction for packets */ + socket_mode mode; /* Specify the link direction to enable (RX, TX, both) */ + pkt_header_len header_len; + u_int8_t stack_injection_mode; + u_int8_t discard_injected_pkts; + u_int8_t promisc_enabled; + u_int8_t __padding; + + struct sock *sk; + + /* /proc */ + char sock_proc_name[64]; /* /proc/net/pf_ring/<sock_proc_name> */ + char sock_proc_stats_name[64]; /* /proc/net/pf_ring/stats/<sock_proc_stats_name> */ + char statsString[1024]; + char custom_bound_device_name[32]; + + /* Poll Watermark */ + u_int32_t num_poll_calls; + u_int16_t poll_num_pkts_watermark; + u_int16_t poll_watermark_timeout; + u_long queue_nonempty_timestamp; + + /* Master Ring */ + struct pf_ring_socket *master_ring; + + /* Used to transmit packets after they have been received + from user space */ + struct { + u_int8_t enable_tx_with_bounce; + rwlock_t consume_tx_packets_lock; + int32_t last_tx_dev_idx; + struct net_device *last_tx_dev; + } tx; + + /* ZC (Direct NIC Access) */ + zc_dev_mapping zc_mapping; + zc_dev_info *zc_dev; + zc_dev_list *zc_device_entry; + + /* Extra DMA memory */ + struct dma_memory_info *extra_dma_memory; + + /* Cluster */ + u_int32_t cluster_id /* 0 = no cluster */; + + /* Channel */ + int64_t channel_id_mask; /* -1 = any channel */ + u_int16_t num_channels_per_ring; + + /* rehash rss function pointer */ + do_rehash_rss rehash_rss; + + /* Ring Slots */ + u_char *ring_memory; + u_int16_t slot_header_len; + u_int32_t bucket_len, slot_tot_mem; + FlowSlotInfo *slots_info; /* Points to ring_memory */ + u_char *ring_slots; /* Points to ring_memory+sizeof(FlowSlotInfo) */ + + /* Packet Sampling */ + u_int32_t pktToSample, sample_rate; + + /* Virtual Filtering Device */ + virtual_filtering_device_element *v_filtering_dev; + + /* VLAN ID */ + u_int16_t vlan_id; /* 0 = all VLANs are accepted */ + + int32_t bpfFilter; /* bool */ + + /* Sw Filtering Rules - default policy */ + u_int8_t sw_filtering_rules_default_accept_policy; /* 1=default policy is accept, drop otherwise */ + + /* Sw Filtering Rules - hash */ + sw_filtering_hash_bucket **sw_filtering_hash; + u_int64_t sw_filtering_hash_match; + u_int64_t sw_filtering_hash_miss; + u_int64_t sw_filtering_hash_filtered; + u_int32_t num_sw_filtering_hash; + + /* Sw Filtering Rules - wildcard */ + u_int32_t num_sw_filtering_rules; + struct list_head sw_filtering_rules; + + /* Hw Filtering Rules */ + u_int16_t num_hw_filtering_rules; + struct list_head hw_filtering_rules; + + /* Filtering sampling */ + u_int32_t filtering_sample_rate; + u_int32_t filtering_sampling_size; + + /* Locks */ + atomic_t num_ring_users; + wait_queue_head_t ring_slots_waitqueue; + rwlock_t ring_index_lock, ring_rules_lock; + + /* Indexes (Internal) */ + u_int32_t insert_page_id, insert_slot_id; + + /* Function pointer */ + do_add_packet_to_ring add_packet_to_ring; + do_add_raw_packet_to_ring add_raw_packet_to_ring; + + /* Kernel consumer */ + char *kernel_consumer_options, *kernel_consumer_private; + + /* Userspace cluster (ZC) */ + struct cluster_referee *cluster_referee; + cluster_client_type cluster_role; 
+ +#if defined(CONFIG_BCM_KF_SGS) + struct sgs_pfr_socket sgs; +#endif +}; + +/* **************************************** */ + +typedef struct { + struct net *net; + + /* /proc entry for ring module */ + struct proc_dir_entry *proc; + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *proc_dev_dir; + struct proc_dir_entry *proc_stats_dir; + + struct list_head list; +} pf_ring_net; + +/* **************************************** */ + +#define MAX_NUM_PATTERN 32 + +typedef struct { + filtering_rule rule; + +#ifdef CONFIG_TEXTSEARCH + struct ts_config *pattern[MAX_NUM_PATTERN]; +#endif + struct list_head list; +} sw_filtering_rule_element; + +typedef struct { + hw_filtering_rule rule; + struct list_head list; +} hw_filtering_rule_element; + +/* **************************************** */ + +/* Exported functions - used by drivers */ + +int pf_ring_skb_ring_handler(struct sk_buff *skb, + u_int8_t recv_packet, + u_int8_t real_skb /* 1=real skb, 0=faked skb */, + int32_t channel_id, + u_int32_t num_rx_channels); + +void pf_ring_zc_dev_handler(zc_dev_operation operation, + mem_ring_info *rx_info, + mem_ring_info *tx_info, + void *rx_descr_packet_memory, + void *tx_descr_packet_memory, + void *phys_card_memory, + u_int phys_card_memory_len, + u_int channel_id, + struct net_device *dev, + struct device *hwdev, + zc_dev_model device_model, + u_char *device_address, + wait_queue_head_t *packet_waitqueue, + u_int8_t *interrupt_received, + void *rx_adapter_ptr, void *tx_adapter_ptr, + zc_dev_wait_packet wait_packet_function_ptr, + zc_dev_notify dev_notify_function_ptr); + +#endif /* __KERNEL__ */ + +/* *********************************** */ + +#endif /* __RING_H */ diff --git a/include/linux/pf_ring_sgs.h b/include/linux/pf_ring_sgs.h new file mode 100644 index 0000000000000000000000000000000000000000..a8687c86069b7bca41fcd00b9aa5ef8754d348e4 --- /dev/null +++ b/include/linux/pf_ring_sgs.h @@ -0,0 +1,113 @@ +/* + * <:copyright-BRCM:2021:DUAL/GPL:standard + * + * Copyright (c) 2021 Broadcom + * All Rights Reserved + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed + * to you under the terms of the GNU General Public License version 2 + * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, + * with the following added to such license: + * + * As a special exception, the copyright holders of this software give + * you permission to link this software with independent modules, and + * to copy and distribute the resulting executable under terms of your + * choice, provided that you also meet, for each linked independent + * module, the terms and conditions of the license of that module. + * An independent module is a module which is not derived from this + * software. The special exception does not apply to any modifications + * of the software. + * + * Not withstanding the above, under no circumstances may you combine + * this software in any way with any other Broadcom software provided + * under a license other than the GPL, without Broadcom's express prior + * written consent. 
+ * + * :> + */ + +#ifndef _PF_RING_SGS_H_ +#define _PF_RING_SGS_H_ + +#include <linux/netfilter/nf_conntrack_tuple_common.h> + + +#define PF_RING_IOCTL_WAKEUP 0 +#define PF_RING_IOCTL_EVICT 1 + +#define PF_SGS_INIT_FROM_WAN_BIT 0 +#define PF_SGS_IS_LOCAL_BIT 1 +#define PF_SGS_NO_RESPONSE_BIT 2 + +enum sgs_cmd { + SGS_CMD_TERMINATE, + SGS_CMD_ACCELERATE +}; + +enum sgs_status { + PF_FLOW_BLOCK_BIT, + PF_FLOW_ACCEL_BIT, + PF_PACKET_DROP_BIT +}; + +struct dpi_dev_info { + u_int32_t dev_id; + u_int16_t category; + u_int16_t family; + u_int16_t vendor; + u_int16_t os; + u_int16_t os_class; + u_int16_t prio; +}; + +struct flow_stat_dir { + u_int64_t packets; + u_int64_t bytes; +}; + +struct flow_stat { + struct flow_stat_dir counters[IP_CT_DIR_MAX]; +}; + +struct sgs_pkthdr { + u_int32_t skb_len; + u_int32_t dir; + u_int64_t mac; + u_int64_t skb; + u_int64_t ct; + /* per-connection info */ + u_int32_t packet_count; + struct flow_stat stat; + u_int32_t app_id; + u_int32_t ndi_id; + u_int32_t flags; + u_int64_t start_time; /* start time of the nf_conntrack */ +}; + + +#if defined(CONFIG_BCM_KF_SGS) +struct locked_list { + struct spinlock lock; + struct list_head head; + unsigned int size; +}; + +#define DEFINE_LOCKED_LIST(name) \ + struct locked_list name = { \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .head = LIST_HEAD_INIT(name.head), \ + } + +struct sgs_pfr_socket { + char name[128]; + struct locked_list skb_list; + struct sk_buff *cmd_skb; + long seq_num; + struct proc_dir_entry *dir; + struct task_struct *response_thread; + wait_queue_head_t wait_queue; +}; +#endif + +#endif /* _PF_RING_SGS_H_ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index f92d5ae6d04e705da7b449eb46167435bf65b9b7..99da87ab9a01985a6a44d1ff73678b4bfc80fd03 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -661,6 +661,10 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; +#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \ + defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT) + cpumask_t cpus_hint; +#endif #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; diff --git a/include/linux/sgs.h b/include/linux/sgs.h new file mode 100644 index 0000000000000000000000000000000000000000..c3111707e94aa1a1c7d8b8eedc2efc78ba431dcb --- /dev/null +++ b/include/linux/sgs.h @@ -0,0 +1,57 @@ +/* + * <:copyright-BRCM:2021:DUAL/GPL:standard + * + * Copyright (c) 2021 Broadcom + * All Rights Reserved + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed + * to you under the terms of the GNU General Public License version 2 + * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, + * with the following added to such license: + * + * As a special exception, the copyright holders of this software give + * you permission to link this software with independent modules, and + * to copy and distribute the resulting executable under terms of your + * choice, provided that you also meet, for each linked independent + * module, the terms and conditions of the license of that module. + * An independent module is a module which is not derived from this + * software. The special exception does not apply to any modifications + * of the software. + * + * Not withstanding the above, under no circumstances may you combine + * this software in any way with any other Broadcom software provided + * under a license other than the GPL, without Broadcom's express prior + * written consent. 
+ * + * :> + */ + +#ifndef _LINUX_SGS_H_ +#define _LINUX_SGS_H_ + +#define SGS_CT_ACCEL_BIT 0 +#define SGS_CT_BLOCK_BIT 1 +#define SGS_CT_SESSION_BIT 2 +#define SGS_CT_TERMINATED_BIT 3 +#define SGS_CT_IS_LOCAL_BIT 30 +#define SGS_CT_INIT_FROM_WAN_BIT 31 + +struct sgs_ct_info { + unsigned long tcp_reset_seq; + unsigned long flags; + unsigned long packet_count; + int reset_count; +}; + +struct nf_conn; + +struct sgs_core_hooks { + void (*delete)(struct nf_conn *ct); +}; + +int sgs_core_hooks_register(struct sgs_core_hooks *h); +void sgs_nf_ct_delete_from_lists(struct nf_conn *ct); +void sgs_core_hooks_unregister(void); + +#endif /* _LINUX_SGS_H_ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f97734f34746ac6e2948b6df69c5476b742379d6..3fafbc64b2a792dfd4c2974e7c0de60812e5de8d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -250,7 +250,7 @@ struct nf_conntrack { }; #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) || defined(CONFIG_BCM_KF_SKB_EXT) struct nf_bridge_info { refcount_t use; enum { @@ -491,6 +491,11 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, * the end of the header data, ie. at skb->end. */ struct skb_shared_info { +#if defined(CONFIG_BCM_KF_NBUFF) + /* to preserve compat with binary only modules, do not change the + * position of this field relative to the start of the structure. */ + __u8 *dirty_p; +#endif /* defined(CONFIG_BCM_KF_NBUFF) */ __u8 __unused; __u8 meta_len; __u8 nr_frags; @@ -587,6 +592,10 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif +#if defined(CONFIG_BCM_KF_SKB_EXT) +#include <linux/bcm_skbuff.h> +#endif + /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -662,7 +671,14 @@ typedef unsigned char *sk_buff_data_t; * @users: User count - see {datagram,tcp}.c */ +#if defined(CONFIG_BCM_KF_SKB_EXT) +/* + * Please note struct sk_buff is moved to bcm_skbuff.h. Here is no longer used. + */ +struct sk_buff_orig { +#else struct sk_buff { +#endif union { struct { /* These two members must be first. */ @@ -697,7 +713,11 @@ struct sk_buff { * want to keep them across layers you have to do a skb_clone() * first. This is owned by whoever has the skb queued ATM. */ +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) char cb[48] __aligned(8); +#else + char cb[80] __aligned(8); +#endif union { struct { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 16158fe097a82c7ba22e232f72ced9ca6d8ca4ff..a166a620480e0097ebe45ec1865568f6120a025e 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -816,6 +816,37 @@ struct spi_transfer { u16 delay_usecs; u32 speed_hz; +#if defined(CONFIG_BCM_KF_SPI) + /* added for controllers that support an ignore count for read + operations. This is useful if the read requires command bytes + and you want to ignore the read data on the bus during the + transmission of those bytes. Note that only prepend_cnt bytes + of data will be written from tx_buf. 
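+	   For example, a read that is preceded by an opcode and address:
+	   the prepend_cnt command bytes are taken from tx_buf, and any data
+	   seen on the bus while they are being clocked out is ignored
+	   rather than returned in rx_buf.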
+ */ + u8 prepend_cnt; + + /* added for multibit support + @multi_bit_en - enable multibit operation for this transfer + @multi_bit_start_offset - start offset for multibit data + */ + u8 multi_bit_en; + u8 multi_bit_start_offset; + + /* added for controllers that do not support large transfers + the controller will break up the transfer into smaller + transfers to avoid additional data copies + Note that hdr_len should not be included in len + @hdr_len - length of header + @unit_size - data for each transfer will be divided into multiples of + unit_size + @adr_len - length of address field (max 4 bytes) + @adr_offset - offset of first addr byte in header + */ + u8 hdr_len; + u8 unit_size; + u8 addr_len; + u8 addr_offset; +#endif struct list_head transfer_list; }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1192f1e76015f2104990baa96fb638f303389ca9..396394f320b259887e46c7f6b39a4135287a58c1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -58,7 +58,11 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open */ #define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */ #define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */ +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) #define TCP_FASTOPEN_COOKIE_SIZE 8 /* the size employed by this impl. */ +#else +#define TCP_FASTOPEN_COOKIE_SIZE 4 /* the size employed by this impl. */ +#endif /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { @@ -83,6 +87,58 @@ struct tcp_sack_block { u32 end_seq; }; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +struct tcp_out_options { + u16 options; /* bit field of OPTION_* */ + u16 mss; /* 0 to disable */ + u8 ws; /* window scale, 0 to disable */ + u8 num_sack_blocks; /* number of SACK blocks to include */ + u8 hash_size; /* bytes in hash_location */ + __u8 *hash_location; /* temporary pointer, overloaded */ + __u32 tsval, tsecr; /* need to include OPTION_TS */ + struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ +#ifdef CONFIG_MPTCP + u16 mptcp_options; /* bit field of MPTCP related OPTION_* */ + u8 dss_csum:1, /* dss-checksum required? 
*/ + add_addr_v4:1, + add_addr_v6:1, + mptcp_ver:4; + + union { + struct { + __u64 sender_key; /* sender's key for mptcp */ + __u64 receiver_key; /* receiver's key for mptcp */ + } mp_capable; + + struct { + __u64 sender_truncated_mac; + __u32 sender_nonce; + /* random number of the sender */ + __u32 token; /* token for mptcp */ + u8 low_prio:1; + } mp_join_syns; + }; + + struct { + __u64 trunc_mac; + struct in_addr addr; + u16 port; + u8 addr_id; + } add_addr4; + + struct { + __u64 trunc_mac; + struct in6_addr addr; + u16 port; + u8 addr_id; + } add_addr6; + + u16 remove_addrs; /* list of address id */ + u8 addr_id; /* address id (mp_join or add_address) */ +#endif /* CONFIG_MPTCP */ +}; + +#endif /*These are used to set the sack_ok field in struct tcp_options_received */ #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ @@ -106,6 +162,11 @@ struct tcp_options_received { u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ }; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +struct mptcp_cb; +struct mptcp_tcp_sock; + +#endif static inline void tcp_clear_options(struct tcp_options_received *rx_opt) { rx_opt->tstamp_ok = rx_opt->sack_ok = 0; @@ -144,6 +205,10 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) return (struct tcp_request_sock *)req; } +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +struct tcp_md5sig_key; + +#endif struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; @@ -400,6 +465,46 @@ struct tcp_sock { */ struct request_sock *fastopen_rsk; u32 *saved_syn; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + + /* MPTCP/TCP-specific callbacks */ + const struct tcp_sock_ops *ops; + + struct mptcp_cb *mpcb; + struct sock *meta_sk; + /* We keep these flags even if CONFIG_MPTCP is not checked, because + * it allows checking MPTCP capability just by checking the mpc flag, + * rather than adding ifdefs everywhere. + */ + u32 mpc:1, /* Other end is multipath capable */ + inside_tk_table:1, /* Is the tcp_sock inside the token-table? */ + send_mp_fclose:1, + request_mptcp:1, /* Did we send out an MP_CAPABLE? + * (this speeds up mptcp_doit() in tcp_recvmsg) + */ + pf:1, /* Potentially Failed state: when this flag is set, we + * stop using the subflow + */ + mp_killed:1, /* Killed with a tcp_done in mptcp? */ + is_master_sk:1, + close_it:1, /* Must close socket in mptcp_data_ready? 
*/ + closing:1, + mptcp_ver:4, + mptcp_sched_setsockopt:1, + mptcp_pm_setsockopt:1, + record_master_info:1, + tcp_disconnect:1; + struct mptcp_tcp_sock *mptcp; +#ifdef CONFIG_MPTCP +#define MPTCP_SCHED_NAME_MAX 16 +#define MPTCP_PM_NAME_MAX 16 + struct hlist_nulls_node tk_table; + u32 mptcp_loc_token; + u64 mptcp_loc_key; + char mptcp_sched_name[MPTCP_SCHED_NAME_MAX]; + char mptcp_pm_name[MPTCP_PM_NAME_MAX]; +#endif /* CONFIG_MPTCP */ +#endif }; enum tsq_enum { @@ -411,6 +516,10 @@ enum tsq_enum { TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call * tcp_v{4|6}_mtu_reduced() */ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + MPTCP_PATH_MANAGER_DEFERRED, /* MPTCP deferred creation of new subflows */ + MPTCP_SUB_DEFERRED, /* A subflow got deferred - process them */ +#endif }; enum tsq_flags { @@ -420,6 +529,10 @@ enum tsq_flags { TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + TCPF_PATH_MANAGER_DEFERRED = (1UL << MPTCP_PATH_MANAGER_DEFERRED), + TCPF_SUB_DEFERRED = (1UL << MPTCP_SUB_DEFERRED), +#endif }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) @@ -442,6 +555,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + struct mptcp_tw *mptcp_tw; +#endif }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/linux/tracker.h b/include/linux/tracker.h new file mode 100644 index 0000000000000000000000000000000000000000..c4cad1bca0878227366ba4ceb9785ece63f036c0 --- /dev/null +++ b/include/linux/tracker.h @@ -0,0 +1,84 @@ +/* +* Copyright (c) 2020 Broadcom +* All Rights Reserved +* +<:label-BRCM:2020:DUAL/GPL:standard + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ +#ifndef TRACKER_H +#define TRACKER_H + +/* The tracker allows to track arbitrary pointers across the kernel lifetime. + * + * Expected usage is to sprinkle track_printf's in the interesting points of + * ptr usage. + * + * The resulting strings are accessible in the shell by catting /proc/tracker. + * + * When investigating specific badness tracker_print and tracker_find are useful + * to dump or work with this information from inside of the kernel. + */ + +/* Initialize tracker. 
Should be called once */ +void track_init(void); + +/* Dumps information regarding ptr to dmesg */ +void tracker_print(void *ptr); + +/* Looks for pointer ptr_any. If found, tracker_find calls cb callback for each recorded state. + * In addition to state the callback gets arbitrary ctx passed by the caller. + * + * This function is useful to analyse the pointer states during runtime. Here is example + * diff which helped us track freeing non-allocated pointer in linux kernel: + * + * +static void seen_alloc(void *ctx, const char *st) { + * + int *alloced = ctx; + * + + * + if (strncmp(st, "header_alloc", strlen("header_alloc")) == 0) + * + *alloced = 1; + * +} + * + + * static void drop_sysctl_table(struct ctl_table_header *header) + * { + * struct ctl_dir *parent = header->parent; + * + int alloced = 0; + * + * if (--header->nreg) + * return; + * + * + tracker_find(header, seen_alloc, &alloced); + * + if (!alloced) { + * + printk("boriss: non alloced header %px\n", header); + * + WARN_ON(1); + * + } + * + */ +void tracker_find(void *ptr_any, void (*cb)(void *ctx, const char *st), void *ctx); + +/* printf into new state string attached to the pointer ptr. The resulting string will + * be dumped by tracker_print and passed to the callback in tracker_find. + */ +void track_printf(void *ptr, const char *fmt, ...); + +#endif /* TRACKER_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 452ca06ed253470a02b58af774e42f8010390b7b..8e84bda98083884bf8543beddd2307fa56b7c30d 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -63,7 +63,15 @@ struct usbnet { unsigned interrupt_count; struct mutex interrupt_mutex; struct usb_anchor deferred; +#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_THREAD)) + struct task_struct *usbnet_thread; + int usbnet_thread_resched; + wait_queue_head_t thread_wq; + int pending_rx_skb_thresh; + int pending_rx_skb_count; +#else struct tasklet_struct bh; +#endif struct pcpu_sw_netstats __percpu *stats64; diff --git a/include/linux/vlanctl_bind.h b/include/linux/vlanctl_bind.h new file mode 100755 index 0000000000000000000000000000000000000000..20a1ad2197a92e1031249659e27efd2236439b65 --- /dev/null +++ b/include/linux/vlanctl_bind.h @@ -0,0 +1,123 @@ +/* +* Copyright (c) 2003-2014 Broadcom Corporation +* All Rights Reserved +* +<:label-BRCM:2014:DUAL/GPL:standard + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + + +#ifndef _VLANCTL_BIND_ +#define _VLANCTL_BIND_ + +typedef enum { + VLANCTL_BIND_CLIENT_UNKNOWN, +#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE) + VLANCTL_BIND_CLIENT_RUNNER, +#endif /* RDPA */ + VLANCTL_BIND_CLIENT_MAX +} vlanctl_bind_client_t; + + +/* + * vlanctl_bind defines three(!) hooks: + * NotifHook: When blog_notify is invoked, the bound hook is invoked. Based on + * event type the bound Blog client may perform a custom action. + * SC Hook: If this hook is defined, blog_activate() will pass a blog with + * necessary information for statical configuration. + * SD Hook: If this hook is defined, blog_deactivate() will pass a pointer + * to a network object with BlogActivateKey information. The + * respective flow entry will be deleted. + */ +typedef union { + struct { + uint8_t unused : 5; + uint8_t SN_HOOK : 1; + uint8_t SC_HOOK : 1; + uint8_t SD_HOOK : 1; + } bmap; + uint8_t hook_info; +} vlanctl_bind_t; + +typedef struct { + struct net_device *vlan_dev; + unsigned int vid; + int enable; +} vlanctl_vlan_t; + +typedef struct { + uint8_t mac[6]; + int enable; +} vlanctl_route_mac_t; + +typedef struct { + struct net_device *aggregate_vlan_dev; + struct net_device *deaggregate_vlan_dev; +} vlanctl_vlan_aggregate_t; + +typedef enum { + VLANCTL_BIND_NOTIFY_TPID, /* set interface tpid */ + VLANCTL_BIND_NOTIFY_VLAN, /* set vlan object */ + VLANCTL_BIND_NOTIFY_ROUTE_MAC, /* route mac create and delete */ + VLANCTL_BIND_NOTIFY_VLAN_AGGREGATE, /* set vlan aggregation */ + VLANCTL_BIND_DROP_PRECEDENCE_SET, /* rdpa_mw_drop_precedence_set */ +} vlanctl_bind_Notify_t; + +#if defined(CONFIG_BLOG) + +typedef uint32_t (* vlanctl_bind_ScHook_t)(Blog_t * blog_p, BlogTraffic_t traffic); + +typedef Blog_t * (* vlanctl_bind_SdHook_t)(uint32_t key, BlogTraffic_t traffic); + +typedef void (* vlanctl_bind_SnHook_t)(vlanctl_bind_Notify_t event, void *ptr); + +void vlanctl_bind_config(vlanctl_bind_ScHook_t vlanctl_bind_sc, + vlanctl_bind_SdHook_t vlanctl_bind_sd, + vlanctl_bind_SnHook_t vlanctl_bind_sn, + vlanctl_bind_client_t client, + vlanctl_bind_t bind); + + +int vlanctl_bind_activate(vlanctl_bind_client_t client); + +int vlanctl_notify(vlanctl_bind_Notify_t event, void *ptr, vlanctl_bind_client_t client); + +/* + *------------------------------------------------------------------------------ + * vlanctl_activate(): static configuration function of blog application + * pass a filled blog to the hook for configuration + *------------------------------------------------------------------------------ + */ +extern uint32_t vlanctl_activate( Blog_t * blog_p, vlanctl_bind_client_t client ); + +/* + *------------------------------------------------------------------------------ + * vlanctl_deactivate(): static deconfiguration function of blog application + *------------------------------------------------------------------------------ + */ +extern Blog_t * vlanctl_deactivate( uint32_t key, vlanctl_bind_client_t client ); + +#endif /* CONFIG_BLOG */ + +#endif /* ! 
_VLANCTL_BIND_ */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 44985c4a1e86214dca6579b42bda3c6d666bcdc9..21b71f9f98f674c77418e913c6e0f8f57bffe77b 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -216,4 +216,8 @@ extern void watchdog_unregister_device(struct watchdog_device *); /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); +#if defined(CONFIG_BCM_KF_WDT) +extern void watchdog_force_disable( void ); +#endif + #endif /* ifndef _LINUX_WATCHDOG_H */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index db2a87981dd46e74b2b79c120feb1350b84e45c4..fe81463f93cdcdd12c262c1217a466d293eca381 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -490,4 +490,8 @@ int if6_proc_init(void); void if6_proc_exit(void); #endif +#if defined(CONFIG_BCM_KF_IPV6) +#include <net/bcm_addrconf.h> +#endif + #endif diff --git a/include/net/bcm_addrconf.h b/include/net/bcm_addrconf.h new file mode 100644 index 0000000000000000000000000000000000000000..a70385f1d4f88d22893409952b8030e74f6cc08f --- /dev/null +++ b/include/net/bcm_addrconf.h @@ -0,0 +1,64 @@ +/* +* Copyright (c) 2003-2019 Broadcom +* All Rights Reserved +* +<:label-BRCM:2019:DUAL/GPL:standard + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ +#ifndef _BCM_ADDRCONF_H +#define _BCM_ADDRCONF_H + +void ipv6_del_addr(struct inet6_ifaddr *ifp); +int ipv6_generate_eui64(u8 *eui, struct net_device *dev); + +static inline int isULA(const struct in6_addr *addr) +{ + __be32 st; + + st = addr->s6_addr32[0]; + + /* RFC 4193 */ + if ((st & htonl(0xFE000000)) == htonl(0xFC000000)) + return 1; + else + return 0; +} + +static inline int isSpecialAddr(const struct in6_addr *addr) +{ + __be32 st; + + st = addr->s6_addr32[0]; + + /* RFC 5156 */ + if (((st & htonl(0xFFFFFFFF)) == htonl(0x20010db8)) || + ((st & htonl(0xFFFFFFF0)) == htonl(0x20010010))) + return 1; + else + return 0; +} + +int addrconf_update_lladdr(struct net_device *dev); + +#endif /* _BCM_ADDRCONF_H */ diff --git a/include/net/bcm_bridge.h b/include/net/bcm_bridge.h new file mode 100755 index 0000000000000000000000000000000000000000..805b3d24ccf26f64c26ba10bb2ea34f0f1f83d34 --- /dev/null +++ b/include/net/bcm_bridge.h @@ -0,0 +1,33 @@ +/* +<:copyright-BRCM:2022:DUAL/GPL:standard + + Copyright (c) 2022 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ +#ifndef _BCM_BRIDGE_H +#define _BCM_BRIDGE_H +int bcm_br_hook_handle_frame_finish(struct sk_buff *skb, int state); +int bcm_br_hook_should_deliver(struct sk_buff *skb, const struct net_bridge_port *p); +#endif /* _BCM_BRIDGE_H */ diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index 732bc3b4606bf7df8c7d96fda16fc173d48c1d50..1ad3e3e3a8d666ac3ad889d6be30a6dc83dff22e 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -37,6 +37,11 @@ #define AD_LACP_SLOW 0 #define AD_LACP_FAST 1 +#if defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) +#define BOND_ASYNC_LINKSPEED_OFF 0 +#define BOND_ASYNC_LINKSPEED_ON 1 +#endif /* defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) */ + typedef struct mac_addr { u8 mac_addr_value[ETH_ALEN]; } __packed mac_addr_t; diff --git a/include/net/bond_options.h b/include/net/bond_options.h index d79d28f5318c239731317739a5933a356c21930e..99102ececd77429530cc598746488cda852d70d3 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -67,6 +67,9 @@ enum { BOND_OPT_AD_ACTOR_SYSTEM, BOND_OPT_AD_USER_PORT_KEY, BOND_OPT_NUM_PEER_NOTIF_ALIAS, +#if defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) + BOND_OPT_ASYNC_LINKSPEED, +#endif /* defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) */ BOND_OPT_LAST }; diff --git a/include/net/bonding.h b/include/net/bonding.h index c458f084f7bb98468674938cf4cb08aebbef1a99..cb91eeac19a4c176e1af916872549d8cba8c8bab 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -132,6 +132,9 @@ struct bond_params { /* 2 bytes of padding : see ether_addr_equal_64bits() */ u8 ad_actor_system[ETH_ALEN + 2]; +#if defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) + int async_linkspeed; +#endif /* defined(CONFIG_BCM_KF_KBONDING) && defined(CONFIG_BCM_KERNEL_BONDING) */ }; struct bond_parm_tbl { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b96debd18e14213dc9d2b6fc9435e5b0f8bfbd15..52f717773c58d88fea307d348e2d058fc155deda 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -314,10 +314,17 @@ struct ieee80211_sta_he_cap { * * @types_mask: interface types mask * @he_cap: holds the HE capabilities +#ifdef CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT + * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a + * 6 GHz band channel (and 0 may be valid value). 
+#endif */ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; +#ifdef CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT + struct ieee80211_he_6ghz_capa he_6ghz_capa; +#endif /* CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT */ }; /** diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 99f8580344d03d831604a9b8d262fee90dfa4ce9..607f7b13c4f7cec5ecfc4933a376a58d9c12beda 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -48,6 +48,9 @@ struct flow_dissector_key_tags { struct flow_dissector_key_vlan { u16 vlan_id:12, +#ifdef CONFIG_BCM_KF_ENHANCED_TC + vlan_dei:1, +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ vlan_priority:3; __be16 vlan_tpid; }; @@ -201,6 +204,16 @@ struct flow_dissector_key_ip { __u8 ttl; }; +#ifdef CONFIG_BCM_KF_ENHANCED_TC +/** + * struct flow_dissector_key_num_of_vlans: + * @num_of_vlans: num_of_vlans value + */ +struct flow_dissector_key_num_of_vlans { + u8 num_of_vlans; +}; +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -226,6 +239,9 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ +#ifdef CONFIG_BCM_KF_ENHANCED_TC + FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */ +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 3ca969cbd16117fe15b1521333c5d7a28c8709f7..1ff1a59fcddfa71c927015ef03b5f6d612cbd002 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -2,6 +2,10 @@ #ifndef _INET_COMMON_H #define _INET_COMMON_H +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#include <net/sock.h> + +#endif extern const struct proto_ops inet_stream_ops; extern const struct proto_ops inet_dgram_ops; @@ -14,6 +18,10 @@ struct sock; struct sockaddr; struct socket; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +int inet_create(struct net *net, struct socket *sock, int protocol, int kern); +int inet6_create(struct net *net, struct socket *sock, int protocol, int kern); +#endif int inet_release(struct socket *sock); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index da8a582ab032eb429f4b735a1bfc6b80140209f0..c77c82b5ed8feee8dc1050b7e3dbb4cab44a9eed 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -29,6 +29,9 @@ struct inet_bind_bucket; struct tcp_congestion_ops; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +struct tcp_options_received; +#endif /* * Pointers to address related TCP functions diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a80fd0ac4563283246f4f53cea1ac0cd17b41dab..1d5d65a14e835257378b135a17c9634fed8a2c62 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -83,7 +83,11 @@ struct inet_request_sock { #define ireq_state req.__req_common.skc_state #define ireq_family req.__req_common.skc_family +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) u16 snd_wscale : 4, +#else + u32 snd_wscale : 4, +#endif rcv_wscale : 4, tstamp_ok : 1, sack_ok : 1, @@ -91,6 +95,10 @@ struct 
inet_request_sock { ecn_ok : 1, acked : 1, no_srccheck: 1, +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + mptcp_rqsk : 1, + saw_mpc : 1, +#endif smc_ok : 1; u32 ir_mark; union { diff --git a/include/net/ip.h b/include/net/ip.h index d1a4efedbc0391cf603df045c347c63ac197db19..36481b6eb24a24e44366ec74acb48877783d7962 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -603,6 +603,9 @@ enum ip_defrag_users { IP_DEFRAG_VS_FWD, IP_DEFRAG_AF_PACKET, IP_DEFRAG_MACVLAN, +#if defined(CONFIG_BCM_KF_MAP) && IS_ENABLED(CONFIG_BCM_MAP) + IP_DEFRAG_MAP, +#endif }; /* Return true if the value of 'user' is between 'lower_bond' diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 1f77fb4dc79df6bc4e41d6d2f4d49ace32082ca4..e25d7b85eb885413cace452e07222c42450aba93 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -14,6 +14,9 @@ enum ip6_defrag_users { __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, IP6_DEFRAG_CONNTRACK_BRIDGE_IN, __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, +#if defined(CONFIG_BCM_KF_MAP) && IS_ENABLED(CONFIG_BCM_MAP) + IP6_DEFRAG_MAP, +#endif }; /* diff --git a/include/net/macsec.h b/include/net/macsec.h new file mode 100644 index 0000000000000000000000000000000000000000..381b12b5bcca4a467bbedec51aea26bc1734ca84 --- /dev/null +++ b/include/net/macsec.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * MACsec netdev header, used for h/w accelerated implementations. + * + * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> + */ +#ifndef _NET_MACSEC_H_ +#define _NET_MACSEC_H_ + +#include <linux/u64_stats_sync.h> +#include <uapi/linux/if_link.h> +#include <uapi/linux/if_macsec.h> + +#define MACSEC_DEFAULT_PN_LEN 4 +#define MACSEC_XPN_PN_LEN 8 + +#define MACSEC_SALT_LEN 12 +#define MACSEC_NUM_AN 4 /* 2 bits for the association number */ + +typedef u64 __bitwise sci_t; +typedef u32 __bitwise ssci_t; + +typedef union salt { + struct { + u32 ssci; + u64 pn; + } __packed; + u8 bytes[MACSEC_SALT_LEN]; +} __packed salt_t; + +typedef union pn { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 lower; + u32 upper; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 upper; + u32 lower; +#else +#error "Please fix <asm/byteorder.h>" +#endif + }; + u64 full64; +} pn_t; + +/** + * struct macsec_key - SA key + * @id: user-provided key identifier + * @tfm: crypto struct, key storage + * @salt: salt used to generate IV in XPN cipher suites + */ +struct macsec_key { + u8 id[MACSEC_KEYID_LEN]; + struct crypto_aead *tfm; + salt_t salt; +}; + +struct macsec_rx_sc_stats { + __u64 InOctetsValidated; + __u64 InOctetsDecrypted; + __u64 InPktsUnchecked; + __u64 InPktsDelayed; + __u64 InPktsOK; + __u64 InPktsInvalid; + __u64 InPktsLate; + __u64 InPktsNotValid; + __u64 InPktsNotUsingSA; + __u64 InPktsUnusedSA; +}; + +struct macsec_rx_sa_stats { + __u32 InPktsOK; + __u32 InPktsInvalid; + __u32 InPktsNotValid; + __u32 InPktsNotUsingSA; + __u32 InPktsUnusedSA; +}; + +struct macsec_tx_sa_stats { + __u32 OutPktsProtected; + __u32 OutPktsEncrypted; +}; + +struct macsec_tx_sc_stats { + __u64 OutPktsProtected; + __u64 OutPktsEncrypted; + __u64 OutOctetsProtected; + __u64 OutOctetsEncrypted; +}; + +struct macsec_dev_stats { + __u64 OutPktsUntagged; + __u64 InPktsUntagged; + __u64 OutPktsTooLong; + __u64 InPktsNoTag; + __u64 InPktsBadTag; + __u64 InPktsUnknownSCI; + __u64 InPktsNoSCI; + __u64 InPktsOverrun; +}; + +/** + * struct macsec_rx_sa - receive secure association + * @active: + * @next_pn: packet 
number expected for the next packet + * @lock: protects next_pn manipulations + * @key: key structure + * @ssci: short secure channel identifier + * @stats: per-SA stats + */ +struct macsec_rx_sa { + struct macsec_key key; + ssci_t ssci; + spinlock_t lock; + union { + pn_t next_pn_halves; + u64 next_pn; + }; + refcount_t refcnt; + bool active; + struct macsec_rx_sa_stats __percpu *stats; + struct macsec_rx_sc *sc; + struct rcu_head rcu; +}; + +struct pcpu_rx_sc_stats { + struct macsec_rx_sc_stats stats; + struct u64_stats_sync syncp; +}; + +struct pcpu_tx_sc_stats { + struct macsec_tx_sc_stats stats; + struct u64_stats_sync syncp; +}; + +/** + * struct macsec_rx_sc - receive secure channel + * @sci: secure channel identifier for this SC + * @active: channel is active + * @sa: array of secure associations + * @stats: per-SC stats + */ +struct macsec_rx_sc { + struct macsec_rx_sc __rcu *next; + sci_t sci; + bool active; + struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN]; + struct pcpu_rx_sc_stats __percpu *stats; + refcount_t refcnt; + struct rcu_head rcu_head; +}; + +/** + * struct macsec_tx_sa - transmit secure association + * @active: + * @next_pn: packet number to use for the next packet + * @lock: protects next_pn manipulations + * @key: key structure + * @ssci: short secure channel identifier + * @stats: per-SA stats + */ +struct macsec_tx_sa { + struct macsec_key key; + ssci_t ssci; + spinlock_t lock; + union { + pn_t next_pn_halves; + u64 next_pn; + }; + refcount_t refcnt; + bool active; + struct macsec_tx_sa_stats __percpu *stats; + struct rcu_head rcu; +}; + +/** + * struct macsec_tx_sc - transmit secure channel + * @active: + * @encoding_sa: association number of the SA currently in use + * @encrypt: encrypt packets on transmit, or authenticate only + * @send_sci: always include the SCI in the SecTAG + * @end_station: + * @scb: single copy broadcast flag + * @sa: array of secure associations + * @stats: stats for this TXSC + */ +struct macsec_tx_sc { + bool active; + u8 encoding_sa; + bool encrypt; + bool send_sci; + bool end_station; + bool scb; + struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN]; + struct pcpu_tx_sc_stats __percpu *stats; +}; + +/** + * struct macsec_secy - MACsec Security Entity + * @netdev: netdevice for this SecY + * @n_rx_sc: number of receive secure channels configured on this SecY + * @sci: secure channel identifier used for tx + * @key_len: length of keys used by the cipher suite + * @icv_len: length of ICV used by the cipher suite + * @validate_frames: validation mode + * @xpn: enable XPN for this SecY + * @operational: MAC_Operational flag + * @protect_frames: enable protection for this SecY + * @replay_protect: enable packet number checks on receive + * @replay_window: size of the replay window + * @tx_sc: transmit secure channel + * @rx_sc: linked list of receive secure channels + */ +struct macsec_secy { + struct net_device *netdev; + unsigned int n_rx_sc; + sci_t sci; + u16 key_len; + u16 icv_len; + enum macsec_validation_type validate_frames; + bool xpn; + bool operational; + bool protect_frames; + bool replay_protect; + u32 replay_window; + struct macsec_tx_sc tx_sc; + struct macsec_rx_sc __rcu *rx_sc; +}; + +/** + * struct macsec_context - MACsec context for hardware offloading + */ +struct macsec_context { + union { + struct net_device *netdev; + struct phy_device *phydev; + }; + enum macsec_offload offload; + + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct { + unsigned char assoc_num; + u8 key[MACSEC_KEYID_LEN]; + union { + struct 
macsec_rx_sa *rx_sa; + struct macsec_tx_sa *tx_sa; + }; + } sa; + union { + struct macsec_tx_sc_stats *tx_sc_stats; + struct macsec_tx_sa_stats *tx_sa_stats; + struct macsec_rx_sc_stats *rx_sc_stats; + struct macsec_rx_sa_stats *rx_sa_stats; + struct macsec_dev_stats *dev_stats; + } stats; + + u8 prepare:1; +}; + +/** + * struct macsec_ops - MACsec offloading operations + */ +struct macsec_ops { + /* Device wide */ + int (*mdo_dev_open)(struct macsec_context *ctx); + int (*mdo_dev_stop)(struct macsec_context *ctx); + /* SecY */ + int (*mdo_add_secy)(struct macsec_context *ctx); + int (*mdo_upd_secy)(struct macsec_context *ctx); + int (*mdo_del_secy)(struct macsec_context *ctx); + /* Security channels */ + int (*mdo_add_rxsc)(struct macsec_context *ctx); + int (*mdo_upd_rxsc)(struct macsec_context *ctx); + int (*mdo_del_rxsc)(struct macsec_context *ctx); + /* Security associations */ + int (*mdo_add_rxsa)(struct macsec_context *ctx); + int (*mdo_upd_rxsa)(struct macsec_context *ctx); + int (*mdo_del_rxsa)(struct macsec_context *ctx); + int (*mdo_add_txsa)(struct macsec_context *ctx); + int (*mdo_upd_txsa)(struct macsec_context *ctx); + int (*mdo_del_txsa)(struct macsec_context *ctx); + /* Statistics */ + int (*mdo_get_dev_stats)(struct macsec_context *ctx); + int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx); + int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx); + int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx); + int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx); +}; + +void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa); + +#endif /* _NET_MACSEC_H_ */ \ No newline at end of file diff --git a/include/net/mptcp.h b/include/net/mptcp.h new file mode 100644 index 0000000000000000000000000000000000000000..cd94cd345f181ae80bc137a057eaee0aa731b3d6 --- /dev/null +++ b/include/net/mptcp.h @@ -0,0 +1,1499 @@ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* + * MPTCP implementation + * + * Initial Design & Implementation: + * Sébastien Barré <sebastien.barre@uclouvain.be> + * + * Current Maintainer & Author: + * Christoph Paasch <christoph.paasch@uclouvain.be> + * + * Additional authors: + * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi> + * Gregory Detal <gregory.detal@uclouvain.be> + * Fabien Duchêne <fabien.duchene@uclouvain.be> + * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de> + * Lavkesh Lahngir <lavkesh51@gmail.com> + * Andreas Ripke <ripke@neclab.eu> + * Vlad Dogaru <vlad.dogaru@intel.com> + * Octavian Purdila <octavian.purdila@intel.com> + * John Ronan <jronan@tssg.org> + * Catalin Nicutar <catalin.nicutar@gmail.com> + * Brandon Heller <brandonh@stanford.edu> + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _MPTCP_H +#define _MPTCP_H + +#include <linux/inetdevice.h> +#include <linux/ipv6.h> +#include <linux/list.h> +#include <linux/net.h> +#include <linux/netpoll.h> +#include <linux/siphash.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/tcp.h> +#include <linux/kernel.h> + +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <crypto/hash.h> +#include <net/tcp.h> + +#if defined(__LITTLE_ENDIAN_BITFIELD) + #define ntohll(x) be64_to_cpu(x) + #define htonll(x) cpu_to_be64(x) +#elif defined(__BIG_ENDIAN_BITFIELD) + #define ntohll(x) (x) + #define htonll(x) (x) +#endif + +struct mptcp_loc4 { + u8 loc4_id; + u8 low_prio:1; + int if_idx; + struct in_addr addr; +}; + +struct mptcp_rem4 { + u8 rem4_id; + __be16 port; + struct in_addr addr; +}; + +struct mptcp_loc6 { + u8 loc6_id; + u8 low_prio:1; + int if_idx; + struct in6_addr addr; +}; + +struct mptcp_rem6 { + u8 rem6_id; + __be16 port; + struct in6_addr addr; +}; + +struct mptcp_request_sock { + struct tcp_request_sock req; + struct hlist_nulls_node hash_entry; + + union { + struct { + /* Only on initial subflows */ + u64 mptcp_loc_key; + u64 mptcp_rem_key; + u32 mptcp_loc_token; + }; + + struct { + /* Only on additional subflows */ + u32 mptcp_rem_nonce; + u32 mptcp_loc_nonce; + u64 mptcp_hash_tmac; + }; + }; + + u8 loc_id; + u8 rem_id; /* Address-id in the MP_JOIN */ + u8 dss_csum:1, + is_sub:1, /* Is this a new subflow? */ + low_prio:1, /* Interface set to low-prio? */ + rcv_low_prio:1, + mptcp_ver:4; +}; + +struct mptcp_options_received { + u16 saw_mpc:1, + dss_csum:1, + drop_me:1, + + is_mp_join:1, + join_ack:1, + + saw_low_prio:2, /* 0x1 - low-prio set for this subflow + * 0x2 - low-prio set for another subflow + */ + low_prio:1, + + saw_add_addr:2, /* Saw at least one add_addr option: + * 0x1: IPv4 - 0x2: IPv6 + */ + more_add_addr:1, /* Saw one more add-addr. */ + + saw_rem_addr:1, /* Saw at least one rem_addr option */ + more_rem_addr:1, /* Saw one more rem-addr. */ + + mp_fail:1, + mp_fclose:1; + u8 rem_id; /* Address-id in the MP_JOIN */ + u8 prio_addr_id; /* Address-id in the MP_PRIO */ + + const unsigned char *add_addr_ptr; /* Pointer to add-address option */ + const unsigned char *rem_addr_ptr; /* Pointer to rem-address option */ + + u32 data_ack; + u32 data_seq; + u16 data_len; + + u8 mptcp_ver; /* MPTCP version */ + + /* Key inside the option (from mp_capable or fast_close) */ + u64 mptcp_sender_key; + u64 mptcp_receiver_key; + + u32 mptcp_rem_token; /* Remote token */ + + u32 mptcp_recv_nonce; + u64 mptcp_recv_tmac; + u8 mptcp_recv_mac[20]; +}; + +struct mptcp_tcp_sock { + struct hlist_node node; + struct hlist_node cb_list; + struct mptcp_options_received rx_opt; + + /* Those three fields record the current mapping */ + u64 map_data_seq; + u32 map_subseq; + u16 map_data_len; + u16 slave_sk:1, + fully_established:1, + second_packet:1, + attached:1, + send_mp_fail:1, + include_mpc:1, + mapping_present:1, + map_data_fin:1, + low_prio:1, /* use this socket as backup */ + rcv_low_prio:1, /* Peer sent low-prio option to us */ + send_mp_prio:1, /* Trigger to send mp_prio on this socket */ + pre_established:1; /* State between sending 3rd ACK and + * receiving the fourth ack of new subflows. 
+ */ + + /* isn: needed to translate abs to relative subflow seqnums */ + u32 snt_isn; + u32 rcv_isn; + u8 path_index; + u8 loc_id; + u8 rem_id; + u8 sk_err; + +#define MPTCP_SCHED_SIZE 16 + u8 mptcp_sched[MPTCP_SCHED_SIZE] __aligned(8); + + int init_rcv_wnd; + u32 infinite_cutoff_seq; + struct delayed_work work; + u32 mptcp_loc_nonce; + struct tcp_sock *tp; + u32 last_end_data_seq; + + /* MP_JOIN subflow: timer for retransmitting the 3rd ack */ + struct timer_list mptcp_ack_timer; + + /* HMAC of the third ack */ + char sender_mac[20]; +}; + +struct mptcp_tw { + struct list_head list; + u64 loc_key; + u64 rcv_nxt; + struct mptcp_cb __rcu *mpcb; + u8 meta_tw:1, + in_list:1; +}; + +#define MPTCP_PM_NAME_MAX 16 +struct mptcp_pm_ops { + struct list_head list; + + /* Signal the creation of a new MPTCP-session. */ + void (*new_session)(const struct sock *meta_sk); + void (*release_sock)(struct sock *meta_sk); + void (*fully_established)(struct sock *meta_sk); + void (*close_session)(struct sock *meta_sk); + void (*new_remote_address)(struct sock *meta_sk); + int (*get_local_id)(const struct sock *meta_sk, sa_family_t family, + union inet_addr *addr, bool *low_prio); + void (*addr_signal)(struct sock *sk, unsigned *size, + struct tcp_out_options *opts, struct sk_buff *skb); + void (*add_raddr)(struct mptcp_cb *mpcb, const union inet_addr *addr, + sa_family_t family, __be16 port, u8 id); + void (*rem_raddr)(struct mptcp_cb *mpcb, u8 rem_id); + void (*init_subsocket_v4)(struct sock *sk, struct in_addr addr); + void (*init_subsocket_v6)(struct sock *sk, struct in6_addr addr); + void (*established_subflow)(struct sock *sk); + void (*delete_subflow)(struct sock *sk); + void (*prio_changed)(struct sock *sk, int low_prio); + + char name[MPTCP_PM_NAME_MAX]; + struct module *owner; +}; + +#define MPTCP_SCHED_NAME_MAX 16 +struct mptcp_sched_ops { + struct list_head list; + + struct sock * (*get_subflow)(struct sock *meta_sk, + struct sk_buff *skb, + bool zero_wnd_test); + struct sk_buff * (*next_segment)(struct sock *meta_sk, + int *reinject, + struct sock **subsk, + unsigned int *limit); + void (*init)(struct sock *sk); + void (*release)(struct sock *sk); + + char name[MPTCP_SCHED_NAME_MAX]; + struct module *owner; +}; + +struct mptcp_cb { + /* list of sockets in this multipath connection */ + struct hlist_head conn_list; + /* list of sockets that need a call to release_cb */ + struct hlist_head callback_list; + + /* Lock used for protecting the different rcu-lists of mptcp_cb */ + spinlock_t mpcb_list_lock; + + /* High-order bits of 64-bit sequence numbers */ + u32 snd_high_order[2]; + u32 rcv_high_order[2]; + + u16 send_infinite_mapping:1, + in_time_wait:1, + list_rcvd:1, /* XXX TO REMOVE */ + addr_signal:1, /* Path-manager wants us to call addr_signal */ + dss_csum:1, + server_side:1, + infinite_mapping_rcv:1, + infinite_mapping_snd:1, + dfin_combined:1, /* Was the DFIN combined with subflow-fin? */ + passive_close:1, + snd_hiseq_index:1, /* Index in snd_high_order of snd_nxt */ + rcv_hiseq_index:1; /* Index in rcv_high_order of rcv_nxt */ + +#define MPTCP_SCHED_DATA_SIZE 8 + u8 mptcp_sched[MPTCP_SCHED_DATA_SIZE] __aligned(8); + const struct mptcp_sched_ops *sched_ops; + + struct sk_buff_head reinject_queue; + /* First cache-line boundary is here minus 8 bytes. But from the + * reinject-queue only the next and prev pointers are regularly + * accessed. Thus, the whole data-path is on a single cache-line. 
+ */ + + u64 csum_cutoff_seq; + u64 infinite_rcv_seq; + + /***** Start of fields, used for connection closure */ + unsigned char mptw_state; + u8 dfin_path_index; + + struct list_head tw_list; + + /***** Start of fields, used for subflow establishment and closure */ + refcount_t mpcb_refcnt; + + /* Mutex needed, because otherwise mptcp_close will complain that the + * socket is owned by the user. + * E.g., mptcp_sub_close_wq is taking the meta-lock. + */ + struct mutex mpcb_mutex; + + /***** Start of fields, used for subflow establishment */ + struct sock *meta_sk; + + /* Master socket, also part of the conn_list, this + * socket is the one that the application sees. + */ + struct sock *master_sk; + + __u64 mptcp_loc_key; + __u64 mptcp_rem_key; + __u32 mptcp_loc_token; + __u32 mptcp_rem_token; + +#define MPTCP_PM_SIZE 608 + u8 mptcp_pm[MPTCP_PM_SIZE] __aligned(8); + const struct mptcp_pm_ops *pm_ops; + + unsigned long path_index_bits; + + __u8 mptcp_ver; + + /* Original snd/rcvbuf of the initial subflow. + * Used for the new subflows on the server-side to allow correct + * autotuning + */ + int orig_sk_rcvbuf; + int orig_sk_sndbuf; + u32 orig_window_clamp; + + struct tcp_info *master_info; +}; + +#define MPTCP_VERSION_0 0 +#define MPTCP_VERSION_1 1 + +#define MPTCP_SUB_CAPABLE 0 +#define MPTCP_SUB_LEN_CAPABLE_SYN 12 +#define MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN 12 +#define MPTCP_SUB_LEN_CAPABLE_ACK 20 +#define MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN 20 + +#define MPTCP_SUB_JOIN 1 +#define MPTCP_SUB_LEN_JOIN_SYN 12 +#define MPTCP_SUB_LEN_JOIN_SYN_ALIGN 12 +#define MPTCP_SUB_LEN_JOIN_SYNACK 16 +#define MPTCP_SUB_LEN_JOIN_SYNACK_ALIGN 16 +#define MPTCP_SUB_LEN_JOIN_ACK 24 +#define MPTCP_SUB_LEN_JOIN_ACK_ALIGN 24 + +#define MPTCP_SUB_DSS 2 +#define MPTCP_SUB_LEN_DSS 4 +#define MPTCP_SUB_LEN_DSS_ALIGN 4 + +/* Lengths for seq and ack are the ones without the generic MPTCP-option header, + * as they are part of the DSS-option. + * To get the total length, just add the different options together. + */ +#define MPTCP_SUB_LEN_SEQ 10 +#define MPTCP_SUB_LEN_SEQ_CSUM 12 +#define MPTCP_SUB_LEN_SEQ_ALIGN 12 + +#define MPTCP_SUB_LEN_SEQ_64 14 +#define MPTCP_SUB_LEN_SEQ_CSUM_64 16 +#define MPTCP_SUB_LEN_SEQ_64_ALIGN 16 + +#define MPTCP_SUB_LEN_ACK 4 +#define MPTCP_SUB_LEN_ACK_ALIGN 4 + +#define MPTCP_SUB_LEN_ACK_64 8 +#define MPTCP_SUB_LEN_ACK_64_ALIGN 8 + +/* This is the "default" option-length we will send out most often. + * MPTCP DSS-header + * 32-bit data sequence number + * 32-bit data ack + * + * It is necessary to calculate the effective MSS we will be using when + * sending data. 
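+ * For the default case above (DSS header plus 32-bit data sequence number
+ * and data-ack) that is 4 + 12 + 4 = 20 bytes of TCP option space per
+ * segment, which the effective-MSS calculation has to account for.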
+ */ +#define MPTCP_SUB_LEN_DSM_ALIGN (MPTCP_SUB_LEN_DSS_ALIGN + \ + MPTCP_SUB_LEN_SEQ_ALIGN + \ + MPTCP_SUB_LEN_ACK_ALIGN) + +#define MPTCP_SUB_ADD_ADDR 3 +#define MPTCP_SUB_LEN_ADD_ADDR4 8 +#define MPTCP_SUB_LEN_ADD_ADDR4_VER1 16 +#define MPTCP_SUB_LEN_ADD_ADDR6 20 +#define MPTCP_SUB_LEN_ADD_ADDR6_VER1 28 +#define MPTCP_SUB_LEN_ADD_ADDR4_ALIGN 8 +#define MPTCP_SUB_LEN_ADD_ADDR4_ALIGN_VER1 16 +#define MPTCP_SUB_LEN_ADD_ADDR6_ALIGN 20 +#define MPTCP_SUB_LEN_ADD_ADDR6_ALIGN_VER1 28 + +#define MPTCP_SUB_REMOVE_ADDR 4 +#define MPTCP_SUB_LEN_REMOVE_ADDR 4 + +#define MPTCP_SUB_PRIO 5 +#define MPTCP_SUB_LEN_PRIO 3 +#define MPTCP_SUB_LEN_PRIO_ADDR 4 +#define MPTCP_SUB_LEN_PRIO_ALIGN 4 + +#define MPTCP_SUB_FAIL 6 +#define MPTCP_SUB_LEN_FAIL 12 +#define MPTCP_SUB_LEN_FAIL_ALIGN 12 + +#define MPTCP_SUB_FCLOSE 7 +#define MPTCP_SUB_LEN_FCLOSE 12 +#define MPTCP_SUB_LEN_FCLOSE_ALIGN 12 + + +#define OPTION_MPTCP (1 << 5) + +/* Max number of fastclose retransmissions */ +#define MPTCP_FASTCLOSE_RETRIES 3 + +#ifdef CONFIG_MPTCP + +/* Used for checking if the mptcp initialization has been successful */ +extern bool mptcp_init_failed; + +/* MPTCP options */ +#define OPTION_TYPE_SYN (1 << 0) +#define OPTION_TYPE_SYNACK (1 << 1) +#define OPTION_TYPE_ACK (1 << 2) +#define OPTION_MP_CAPABLE (1 << 3) +#define OPTION_DATA_ACK (1 << 4) +#define OPTION_ADD_ADDR (1 << 5) +#define OPTION_MP_JOIN (1 << 6) +#define OPTION_MP_FAIL (1 << 7) +#define OPTION_MP_FCLOSE (1 << 8) +#define OPTION_REMOVE_ADDR (1 << 9) +#define OPTION_MP_PRIO (1 << 10) + +/* MPTCP flags: both TX and RX */ +#define MPTCPHDR_SEQ 0x01 /* DSS.M option is present */ +#define MPTCPHDR_FIN 0x02 /* DSS.F option is present */ +#define MPTCPHDR_SEQ64_INDEX 0x04 /* index of seq in mpcb->snd_high_order */ +/* MPTCP flags: RX only */ +#define MPTCPHDR_ACK 0x08 +#define MPTCPHDR_SEQ64_SET 0x10 /* Did we received a 64-bit seq number? */ +#define MPTCPHDR_SEQ64_OFO 0x20 /* Is it not in our circular array? */ +#define MPTCPHDR_DSS_CSUM 0x40 +/* MPTCP flags: TX only */ +#define MPTCPHDR_INF 0x08 +#define MPTCP_REINJECT 0x10 /* Did we reinject this segment? 
*/ + +struct mptcp_option { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ver:4, + sub:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + ver:4; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif +}; + +struct mp_capable { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ver:4, + sub:4; + __u8 h:1, + rsv:5, + b:1, + a:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + ver:4; + __u8 a:1, + b:1, + rsv:5, + h:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u64 sender_key; + __u64 receiver_key; +} __attribute__((__packed__)); + +struct mp_join { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 b:1, + rsv:3, + sub:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + rsv:3, + b:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u8 addr_id; + union { + struct { + u32 token; + u32 nonce; + } syn; + struct { + __u64 mac; + u32 nonce; + } synack; + struct { + __u8 mac[20]; + } ack; + } u; +} __attribute__((__packed__)); + +struct mp_dss { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rsv1:4, + sub:4, + A:1, + a:1, + M:1, + m:1, + F:1, + rsv2:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 sub:4, + rsv1:4, + rsv2:3, + F:1, + m:1, + M:1, + a:1, + A:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif +}; + +struct mp_add_addr { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ipver:4, + sub:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + ipver:4; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u8 addr_id; + union { + struct { + struct in_addr addr; + __be16 port; + __u8 mac[8]; + } v4; + struct { + struct in6_addr addr; + __be16 port; + __u8 mac[8]; + } v6; + } u; +} __attribute__((__packed__)); + +struct mp_remove_addr { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 rsv:4, + sub:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + rsv:4; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + /* list of addr_id */ + __u8 addrs_id; +}; + +struct mp_fail { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rsv1:4, + sub:4, + rsv2:8; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 sub:4, + rsv1:4, + rsv2:8; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __be64 data_seq; +} __attribute__((__packed__)); + +struct mp_fclose { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rsv1:4, + sub:4, + rsv2:8; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 sub:4, + rsv1:4, + rsv2:8; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u64 key; +} __attribute__((__packed__)); + +struct mp_prio { + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 b:1, + rsv:3, + sub:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 sub:4, + rsv:3, + b:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u8 addr_id; +} __attribute__((__packed__)); + +static inline int mptcp_sub_len_dss(const struct mp_dss *m, const int csum) +{ + return 4 + m->A * (4 + m->a * 4) + m->M * (10 + m->m * 4 + csum * 2); +} + +#define MPTCP_SYSCTL 1 + +extern int sysctl_mptcp_enabled; +extern int sysctl_mptcp_version; +extern int sysctl_mptcp_checksum; +extern int sysctl_mptcp_debug; +extern int sysctl_mptcp_syn_retries; + +extern struct workqueue_struct *mptcp_wq; + +#define mptcp_debug(fmt, args...) 
\ + do { \ + if (unlikely(sysctl_mptcp_debug)) \ + pr_err(fmt, ##args); \ + } while (0) + +static inline struct sock *mptcp_to_sock(const struct mptcp_tcp_sock *mptcp) +{ + return (struct sock *)mptcp->tp; +} + +#define mptcp_for_each_sub(__mpcb, __mptcp) \ + hlist_for_each_entry_rcu(__mptcp, &((__mpcb)->conn_list), node) + +/* Must be called with the appropriate lock held */ +#define mptcp_for_each_sub_safe(__mpcb, __mptcp, __tmp) \ + hlist_for_each_entry_safe(__mptcp, __tmp, &((__mpcb)->conn_list), node) + +/* Iterates over all bit set to 1 in a bitset */ +#define mptcp_for_each_bit_set(b, i) \ + for (i = ffs(b) - 1; i >= 0; i = ffs(b >> (i + 1) << (i + 1)) - 1) + +#define mptcp_for_each_bit_unset(b, i) \ + mptcp_for_each_bit_set(~b, i) + +#define MPTCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mptcp.mptcp_statistics, field) +#define MPTCP_INC_STATS_BH(net, field) __SNMP_INC_STATS((net)->mptcp.mptcp_statistics, field) + +enum +{ + MPTCP_MIB_NUM = 0, + MPTCP_MIB_MPCAPABLEPASSIVE, /* Received SYN with MP_CAPABLE */ + MPTCP_MIB_MPCAPABLEACTIVE, /* Sent SYN with MP_CAPABLE */ + MPTCP_MIB_MPCAPABLEACTIVEACK, /* Received SYN/ACK with MP_CAPABLE */ + MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */ + MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */ + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */ + MPTCP_MIB_MPCAPABLERETRANSFALLBACK,/* Client-side stopped sending MP_CAPABLE after too many SYN-retransmissions */ + MPTCP_MIB_CSUMENABLED, /* Created MPTCP-connection with DSS-checksum enabled */ + MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */ + MPTCP_MIB_MPFAILRX, /* Received an MP_FAIL */ + MPTCP_MIB_CSUMFAIL, /* Received segment with invalid checksum */ + MPTCP_MIB_FASTCLOSERX, /* Recevied a FAST_CLOSE */ + MPTCP_MIB_FASTCLOSETX, /* Sent a FAST_CLOSE */ + MPTCP_MIB_FBACKSUB, /* Fallback upon ack without data-ack on new subflow */ + MPTCP_MIB_FBACKINIT, /* Fallback upon ack without data-ack on initial subflow */ + MPTCP_MIB_FBDATASUB, /* Fallback upon data without DSS at the beginning on new subflow */ + MPTCP_MIB_FBDATAINIT, /* Fallback upon data without DSS at the beginning on initial subflow */ + MPTCP_MIB_REMADDRSUB, /* Remove subflow due to REMOVE_ADDR */ + MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */ + MPTCP_MIB_JOINFALLBACK, /* Received MP_JOIN on session that has fallen back to reg. 
TCP */ + MPTCP_MIB_JOINSYNTX, /* Sent a SYN + MP_JOIN */ + MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */ + MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */ + MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */ + MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */ + MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */ + MPTCP_MIB_JOINACKFAIL, /* Third ACK on new subflow did not contain an MP_JOIN */ + MPTCP_MIB_JOINACKRTO, /* Retransmission timer for third ACK + MP_JOIN timed out */ + MPTCP_MIB_JOINACKRXMIT, /* Retransmitted an ACK + MP_JOIN */ + MPTCP_MIB_NODSSWINDOW, /* Received too many packets without a DSS-option */ + MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */ + MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */ + MPTCP_MIB_DSSTCPMISMATCH, /* DSS-mapping did not map with TCP's sequence numbers */ + MPTCP_MIB_DSSTRIMHEAD, /* Trimmed segment at the head (coalescing middlebox) */ + MPTCP_MIB_DSSSPLITTAIL, /* Trimmed segment at the tail (coalescing middlebox) */ + MPTCP_MIB_PURGEOLD, /* Removed old skb from the rcv-queue due to missing DSS-mapping */ + MPTCP_MIB_ADDADDRRX, /* Received an ADD_ADDR */ + MPTCP_MIB_ADDADDRTX, /* Sent an ADD_ADDR */ + MPTCP_MIB_REMADDRRX, /* Received a REMOVE_ADDR */ + MPTCP_MIB_REMADDRTX, /* Sent a REMOVE_ADDR */ + __MPTCP_MIB_MAX +}; + +#define MPTCP_MIB_MAX __MPTCP_MIB_MAX +struct mptcp_mib { + unsigned long mibs[MPTCP_MIB_MAX]; +}; + +extern struct lock_class_key meta_key; +extern char *meta_key_name; +extern struct lock_class_key meta_slock_key; +extern char *meta_slock_key_name; + +extern siphash_key_t mptcp_secret; + +/* This is needed to ensure that two subsequent key/nonce-generation result in + * different keys/nonces if the IPs and ports are the same. + */ +extern u32 mptcp_seed; + +#define MPTCP_HASH_SIZE 1024 + +extern struct hlist_nulls_head tk_hashtable[MPTCP_HASH_SIZE]; + +/* Request-sockets can be hashed in the tk_htb for collision-detection or in + * the regular htb for join-connections. We need to define different NULLS + * values so that we can correctly detect a request-socket that has been + * recycled. See also c25eb3bfb9729. 
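+ * MPTCP_REQSK_NULLS_BASE below is used to give the token hashtable its own
+ * range of nulls values, distinct from the values used by the regular
+ * hashtable.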
+ */ +#define MPTCP_REQSK_NULLS_BASE (1U << 29) + + +void mptcp_data_ready(struct sock *sk); +void mptcp_write_space(struct sock *sk); + +void mptcp_add_meta_ofo_queue(const struct sock *meta_sk, struct sk_buff *skb, + struct sock *sk); +void mptcp_cleanup_rbuf(struct sock *meta_sk, int copied); +int mptcp_add_sock(struct sock *meta_sk, struct sock *sk, u8 loc_id, u8 rem_id, + gfp_t flags); +void mptcp_del_sock(struct sock *sk); +void mptcp_update_metasocket(const struct sock *meta_sk); +void mptcp_reinject_data(struct sock *orig_sk, int clone_it); +void mptcp_update_sndbuf(const struct tcp_sock *tp); +void mptcp_send_fin(struct sock *meta_sk); +void mptcp_send_active_reset(struct sock *meta_sk, gfp_t priority); +bool mptcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp); +void tcp_parse_mptcp_options(const struct sk_buff *skb, + struct mptcp_options_received *mopt); +void mptcp_parse_options(const uint8_t *ptr, int opsize, + struct mptcp_options_received *mopt, + const struct sk_buff *skb, + struct tcp_sock *tp); +void mptcp_syn_options(const struct sock *sk, struct tcp_out_options *opts, + unsigned *remaining); +void mptcp_synack_options(struct request_sock *req, + struct tcp_out_options *opts, + unsigned *remaining); +void mptcp_established_options(struct sock *sk, struct sk_buff *skb, + struct tcp_out_options *opts, unsigned *size); +void mptcp_options_write(__be32 *ptr, struct tcp_sock *tp, + const struct tcp_out_options *opts, + struct sk_buff *skb); +void mptcp_close(struct sock *meta_sk, long timeout); +bool mptcp_doit(struct sock *sk); +int mptcp_create_master_sk(struct sock *meta_sk, __u64 remote_key, + __u8 mptcp_ver, u32 window); +int mptcp_check_req_fastopen(struct sock *child, struct request_sock *req); +int mptcp_check_req_master(struct sock *sk, struct sock *child, + struct request_sock *req, const struct sk_buff *skb, + int drop, u32 tsoff); +struct sock *mptcp_check_req_child(struct sock *meta_sk, + struct sock *child, + struct request_sock *req, + struct sk_buff *skb, + const struct mptcp_options_received *mopt); +u32 __mptcp_select_window(struct sock *sk); +void mptcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, + __u32 *rcv_wnd, __u32 *window_clamp, + int wscale_ok, __u8 *rcv_wscale, + __u32 init_rcv_wnd); +unsigned int mptcp_current_mss(struct sock *meta_sk); +int mptcp_select_size(const struct sock *meta_sk, bool first_skb, bool zc); +void mptcp_hmac_sha1(const u8 *key_1, const u8 *key_2, u32 *hash_out, + int arg_num, ...); +void mptcp_clean_rtx_infinite(const struct sk_buff *skb, struct sock *sk); +void mptcp_fin(struct sock *meta_sk); +void mptcp_meta_retransmit_timer(struct sock *meta_sk); +void mptcp_sub_retransmit_timer(struct sock *sk); +int mptcp_write_wakeup(struct sock *meta_sk, int mib); +void mptcp_sub_close_wq(struct work_struct *work); +void mptcp_sub_close(struct sock *sk, unsigned long delay); +struct sock *mptcp_select_ack_sock(const struct sock *meta_sk); +void mptcp_prepare_for_backlog(struct sock *sk, struct sk_buff *skb); +int mptcp_backlog_rcv(struct sock *meta_sk, struct sk_buff *skb); +void mptcp_ack_handler(struct timer_list *t); +bool mptcp_check_rtt(const struct tcp_sock *tp, int time); +int mptcp_check_snd_buf(const struct tcp_sock *tp); +bool mptcp_handle_options(struct sock *sk, const struct tcphdr *th, + const struct sk_buff *skb); +void __init mptcp_init(void); +void mptcp_destroy_sock(struct sock *sk); +int mptcp_rcv_synsent_state_process(struct sock *sk, struct sock 
**skptr, + const struct sk_buff *skb, + const struct mptcp_options_received *mopt); +unsigned int mptcp_xmit_size_goal(const struct sock *meta_sk, u32 mss_now, + int large_allowed); +int mptcp_init_tw_sock(struct sock *sk, struct tcp_timewait_sock *tw); +void mptcp_twsk_destructor(struct tcp_timewait_sock *tw); +void mptcp_time_wait(struct sock *sk, int state, int timeo); +void mptcp_disconnect(struct sock *meta_sk); +bool mptcp_should_expand_sndbuf(const struct sock *sk); +int mptcp_retransmit_skb(struct sock *meta_sk, struct sk_buff *skb); +void mptcp_tsq_flags(struct sock *sk); +void mptcp_tsq_sub_deferred(struct sock *meta_sk); +struct mp_join *mptcp_find_join(const struct sk_buff *skb); +void mptcp_hash_remove_bh(struct tcp_sock *meta_tp); +struct sock *mptcp_hash_find(const struct net *net, const u32 token); +int mptcp_lookup_join(struct sk_buff *skb, struct inet_timewait_sock *tw); +int mptcp_do_join_short(struct sk_buff *skb, + const struct mptcp_options_received *mopt, + struct net *net); +void mptcp_reqsk_destructor(struct request_sock *req); +void mptcp_connect_init(struct sock *sk); +void mptcp_sub_force_close(struct sock *sk); +int mptcp_sub_len_remove_addr_align(u16 bitfield); +void mptcp_join_reqsk_init(const struct mptcp_cb *mpcb, + const struct request_sock *req, + struct sk_buff *skb); +void mptcp_reqsk_init(struct request_sock *req, const struct sock *sk, + const struct sk_buff *skb, bool want_cookie); +int mptcp_conn_request(struct sock *sk, struct sk_buff *skb); +void mptcp_enable_sock(struct sock *sk); +void mptcp_disable_sock(struct sock *sk); +void mptcp_disable_static_key(void); +void mptcp_cookies_reqsk_init(struct request_sock *req, + struct mptcp_options_received *mopt, + struct sk_buff *skb); +void mptcp_mpcb_put(struct mptcp_cb *mpcb); +int mptcp_finish_handshake(struct sock *child, struct sk_buff *skb); +int mptcp_get_info(const struct sock *meta_sk, char __user *optval, int optlen); +void mptcp_clear_sk(struct sock *sk, int size); + +/* MPTCP-path-manager registration/initialization functions */ +int mptcp_register_path_manager(struct mptcp_pm_ops *pm); +void mptcp_unregister_path_manager(struct mptcp_pm_ops *pm); +void mptcp_init_path_manager(struct mptcp_cb *mpcb); +void mptcp_cleanup_path_manager(struct mptcp_cb *mpcb); +void mptcp_fallback_default(struct mptcp_cb *mpcb); +void mptcp_get_default_path_manager(char *name); +int mptcp_set_scheduler(struct sock *sk, const char *name); +int mptcp_set_path_manager(struct sock *sk, const char *name); +int mptcp_set_default_path_manager(const char *name); +extern struct mptcp_pm_ops mptcp_pm_default; + +/* MPTCP-scheduler registration/initialization functions */ +int mptcp_register_scheduler(struct mptcp_sched_ops *sched); +void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched); +void mptcp_init_scheduler(struct mptcp_cb *mpcb); +void mptcp_cleanup_scheduler(struct mptcp_cb *mpcb); +void mptcp_get_default_scheduler(char *name); +int mptcp_set_default_scheduler(const char *name); +bool mptcp_is_available(struct sock *sk, const struct sk_buff *skb, + bool zero_wnd_test); +bool mptcp_is_def_unavailable(struct sock *sk); +bool subflow_is_active(const struct tcp_sock *tp); +bool subflow_is_backup(const struct tcp_sock *tp); +struct sock *get_available_subflow(struct sock *meta_sk, struct sk_buff *skb, + bool zero_wnd_test); +extern struct mptcp_sched_ops mptcp_sched_default; + +/* Initializes function-pointers and MPTCP-flags */ +static inline void mptcp_init_tcp_sock(struct sock *sk) +{ + if 
(!mptcp_init_failed && sysctl_mptcp_enabled == MPTCP_SYSCTL) + mptcp_enable_sock(sk); +} + +static inline int mptcp_pi_to_flag(int pi) +{ + return 1 << (pi - 1); +} + +static inline +struct mptcp_request_sock *mptcp_rsk(const struct request_sock *req) +{ + return (struct mptcp_request_sock *)req; +} + +static inline +struct request_sock *rev_mptcp_rsk(const struct mptcp_request_sock *req) +{ + return (struct request_sock *)req; +} + +static inline bool mptcp_can_sendpage(struct sock *sk) +{ + struct mptcp_tcp_sock *mptcp; + + if (tcp_sk(sk)->mpcb->dss_csum) + return false; + + mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) { + struct sock *sk_it = mptcp_to_sock(mptcp); + + if (!(sk_it->sk_route_caps & NETIF_F_SG)) + return false; + } + + return true; +} + +static inline void mptcp_push_pending_frames(struct sock *meta_sk) +{ + /* We check packets out and send-head here. TCP only checks the + * send-head. But, MPTCP also checks packets_out, as this is an + * indication that we might want to do opportunistic reinjection. + */ + if (tcp_sk(meta_sk)->packets_out || tcp_send_head(meta_sk)) { + struct tcp_sock *tp = tcp_sk(meta_sk); + + /* We don't care about the MSS, because it will be set in + * mptcp_write_xmit. + */ + __tcp_push_pending_frames(meta_sk, 0, tp->nonagle); + } +} + +static inline void mptcp_send_reset(struct sock *sk) +{ + if (tcp_need_reset(sk->sk_state)) + tcp_sk(sk)->ops->send_active_reset(sk, GFP_ATOMIC); + mptcp_sub_force_close(sk); +} + +static inline void mptcp_sub_force_close_all(struct mptcp_cb *mpcb, + struct sock *except) +{ + struct mptcp_tcp_sock *mptcp; + struct hlist_node *tmp; + + mptcp_for_each_sub_safe(mpcb, mptcp, tmp) { + struct sock *sk_it = mptcp_to_sock(mptcp); + + if (sk_it != except) + mptcp_send_reset(sk_it); + } +} + +static inline bool mptcp_is_data_seq(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_SEQ; +} + +static inline bool mptcp_is_data_fin(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_FIN; +} + +/* Is it a data-fin while in infinite mapping mode? + * In infinite mode, a subflow-fin is in fact a data-fin. + */ +static inline bool mptcp_is_data_fin2(const struct sk_buff *skb, + const struct tcp_sock *tp) +{ + return mptcp_is_data_fin(skb) || + (tp->mpcb->infinite_mapping_rcv && + (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)); +} + +static inline u8 mptcp_get_64_bit(u64 data_seq, struct mptcp_cb *mpcb) +{ + u64 data_seq_high = (u32)(data_seq >> 32); + + if (mpcb->rcv_high_order[0] == data_seq_high) + return 0; + else if (mpcb->rcv_high_order[1] == data_seq_high) + return MPTCPHDR_SEQ64_INDEX; + else + return MPTCPHDR_SEQ64_OFO; +} + +/* Sets the data_seq and returns pointer to the in-skb field of the data_seq. + * If the packet has a 64-bit dseq, the pointer points to the last 32 bits. 
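 + * + * Worked example for the 64-bit case handled below: an in-skb data_seq of + * 0x0000000200000007 yields *data_seq == 0x00000007, the returned pointer + * addresses that low 32-bit word, and mptcp_get_64_bit() classifies the high + * word 0x00000002 against mpcb->rcv_high_order (0, MPTCPHDR_SEQ64_INDEX or + * MPTCPHDR_SEQ64_OFO).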
+ */ +static inline __u32 *mptcp_skb_set_data_seq(const struct sk_buff *skb, + u32 *data_seq, + struct mptcp_cb *mpcb) +{ + __u32 *ptr = (__u32 *)(skb_transport_header(skb) + TCP_SKB_CB(skb)->dss_off); + + if (TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_SEQ64_SET) { + u64 data_seq64 = get_unaligned_be64(ptr); + + if (mpcb) + TCP_SKB_CB(skb)->mptcp_flags |= mptcp_get_64_bit(data_seq64, mpcb); + + *data_seq = (u32)data_seq64; + ptr++; + } else { + *data_seq = get_unaligned_be32(ptr); + } + + return ptr; +} + +static inline struct sock *mptcp_meta_sk(const struct sock *sk) +{ + return tcp_sk(sk)->meta_sk; +} + +static inline struct tcp_sock *mptcp_meta_tp(const struct tcp_sock *tp) +{ + return tcp_sk(tp->meta_sk); +} + +static inline int is_meta_tp(const struct tcp_sock *tp) +{ + return tp->mpcb && mptcp_meta_tp(tp) == tp; +} + +static inline int is_meta_sk(const struct sock *sk) +{ + return sk->sk_state != TCP_NEW_SYN_RECV && + sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP && + mptcp(tcp_sk(sk)) && mptcp_meta_sk(sk) == sk; +} + +static inline int is_master_tp(const struct tcp_sock *tp) +{ + return !mptcp(tp) || (!tp->mptcp->slave_sk && !is_meta_tp(tp)); +} + +static inline void mptcp_init_mp_opt(struct mptcp_options_received *mopt) +{ + mopt->saw_mpc = 0; + mopt->dss_csum = 0; + mopt->drop_me = 0; + + mopt->is_mp_join = 0; + mopt->join_ack = 0; + + mopt->saw_low_prio = 0; + mopt->low_prio = 0; + + mopt->saw_add_addr = 0; + mopt->more_add_addr = 0; + + mopt->saw_rem_addr = 0; + mopt->more_rem_addr = 0; + + mopt->mp_fail = 0; + mopt->mp_fclose = 0; +} + +static inline void mptcp_reset_mopt(struct tcp_sock *tp) +{ + struct mptcp_options_received *mopt = &tp->mptcp->rx_opt; + + mopt->saw_low_prio = 0; + mopt->saw_add_addr = 0; + mopt->more_add_addr = 0; + mopt->saw_rem_addr = 0; + mopt->more_rem_addr = 0; + mopt->join_ack = 0; + mopt->mp_fail = 0; + mopt->mp_fclose = 0; +} + +static inline __be32 mptcp_get_highorder_sndbits(const struct sk_buff *skb, + const struct mptcp_cb *mpcb) +{ + return htonl(mpcb->snd_high_order[(TCP_SKB_CB(skb)->mptcp_flags & + MPTCPHDR_SEQ64_INDEX) ? 1 : 0]); +} + +static inline u64 mptcp_get_data_seq_64(const struct mptcp_cb *mpcb, int index, + u32 data_seq_32) +{ + return ((u64)mpcb->rcv_high_order[index] << 32) | data_seq_32; +} + +static inline u64 mptcp_get_rcv_nxt_64(const struct tcp_sock *meta_tp) +{ + struct mptcp_cb *mpcb = meta_tp->mpcb; + return mptcp_get_data_seq_64(mpcb, mpcb->rcv_hiseq_index, + meta_tp->rcv_nxt); +} + +static inline void mptcp_check_sndseq_wrap(struct tcp_sock *meta_tp, int inc) +{ + if (unlikely(meta_tp->snd_nxt > meta_tp->snd_nxt + inc)) { + struct mptcp_cb *mpcb = meta_tp->mpcb; + mpcb->snd_hiseq_index = mpcb->snd_hiseq_index ? 0 : 1; + mpcb->snd_high_order[mpcb->snd_hiseq_index] += 2; + } +} + +static inline void mptcp_check_rcvseq_wrap(struct tcp_sock *meta_tp, + u32 old_rcv_nxt) +{ + if (unlikely(old_rcv_nxt > meta_tp->rcv_nxt)) { + struct mptcp_cb *mpcb = meta_tp->mpcb; + mpcb->rcv_high_order[mpcb->rcv_hiseq_index] += 2; + mpcb->rcv_hiseq_index = mpcb->rcv_hiseq_index ? 
0 : 1; + } +} + +static inline int mptcp_sk_can_send(const struct sock *sk) +{ + return tcp_passive_fastopen(sk) || + ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && + !tcp_sk(sk)->mptcp->pre_established); +} + +static inline int mptcp_sk_can_recv(const struct sock *sk) +{ + return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2); +} + +static inline int mptcp_sk_can_send_ack(const struct sock *sk) +{ + return !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV | + TCPF_CLOSE | TCPF_LISTEN)) && + !tcp_sk(sk)->mptcp->pre_established; +} + +static inline bool mptcp_can_sg(const struct sock *meta_sk) +{ + struct mptcp_tcp_sock *mptcp; + + if (tcp_sk(meta_sk)->mpcb->dss_csum) + return false; + + mptcp_for_each_sub(tcp_sk(meta_sk)->mpcb, mptcp) { + struct sock *sk = mptcp_to_sock(mptcp); + + if (!mptcp_sk_can_send(sk)) + continue; + if (!(sk->sk_route_caps & NETIF_F_SG)) + return false; + } + return true; +} + +static inline void mptcp_set_rto(struct sock *sk) +{ + struct inet_connection_sock *micsk = inet_csk(mptcp_meta_sk(sk)); + struct tcp_sock *tp = tcp_sk(sk); + struct mptcp_tcp_sock *mptcp; + __u32 max_rto = 0; + + /* We are in recovery-phase on the MPTCP-level. Do not update the + * RTO, because this would kill exponential backoff. + */ + if (micsk->icsk_retransmits) + return; + + mptcp_for_each_sub(tp->mpcb, mptcp) { + struct sock *sk_it = mptcp_to_sock(mptcp); + + if ((mptcp_sk_can_send(sk_it) || sk_it->sk_state == TCP_SYN_RECV) && + inet_csk(sk_it)->icsk_retransmits == 0 && + inet_csk(sk_it)->icsk_backoff == 0 && + inet_csk(sk_it)->icsk_rto > max_rto) + max_rto = inet_csk(sk_it)->icsk_rto; + } + if (max_rto) { + micsk->icsk_rto = max_rto << 1; + + /* A successful rto-measurement - reset backoff counter */ + micsk->icsk_backoff = 0; + } +} + +static inline void mptcp_sub_close_passive(struct sock *sk) +{ + struct sock *meta_sk = mptcp_meta_sk(sk); + struct tcp_sock *tp = tcp_sk(sk), *meta_tp = tcp_sk(meta_sk); + + /* Only close if the app did a send-shutdown (passive close), and we + * received the data-ack of the data-fin. + */ + if (tp->mpcb->passive_close && meta_tp->snd_una == meta_tp->write_seq) + mptcp_sub_close(sk, 0); +} + +static inline void mptcp_fallback_close(struct mptcp_cb *mpcb, + struct sock *except) +{ + mptcp_sub_force_close_all(mpcb, except); + + if (mpcb->pm_ops->close_session) + mpcb->pm_ops->close_session(mptcp_meta_sk(except)); +} + +static inline bool mptcp_fallback_infinite(struct sock *sk, int flag) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct mptcp_cb *mpcb = tp->mpcb; + + /* If data has been acknowledged on the meta-level, fully_established + * will have been set before and thus we will not fall back to infinite + * mapping.
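 + * + * As the code below shows: only a non-master subflow makes this helper return + * true (counted as MPTCP_MIB_FBACKSUB); on the master subflow it switches the + * connection into infinite mapping itself and returns false.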
+ */ + if (likely(tp->mptcp->fully_established)) + return false; + + if (!(flag & MPTCP_FLAG_DATA_ACKED)) + return false; + + /* Don't fall back twice ;) */ + if (mpcb->infinite_mapping_snd) + return false; + + pr_debug("%s %#x will fallback - pi %d, src %pI4:%u dst %pI4:%u rcv_nxt %u from %pS\n", + __func__, mpcb->mptcp_loc_token, tp->mptcp->path_index, + &inet_sk(sk)->inet_saddr, ntohs(inet_sk(sk)->inet_sport), + &inet_sk(sk)->inet_daddr, ntohs(inet_sk(sk)->inet_dport), + tp->rcv_nxt, __builtin_return_address(0)); + if (!is_master_tp(tp)) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKSUB); + return true; + } + + mpcb->infinite_mapping_snd = 1; + mpcb->infinite_mapping_rcv = 1; + mpcb->infinite_rcv_seq = mptcp_get_rcv_nxt_64(mptcp_meta_tp(tp)); + tp->mptcp->fully_established = 1; + + mptcp_fallback_close(mpcb, sk); + + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKINIT); + + return false; +} + +static inline bool mptcp_v6_is_v4_mapped(const struct sock *sk) +{ + return sk->sk_family == AF_INET6 && + ipv6_addr_type(&inet6_sk(sk)->saddr) == IPV6_ADDR_MAPPED; +} + +/* We are in, or are about to enter, infinite mapping mode */ +static inline bool mptcp_in_infinite_mapping_weak(const struct mptcp_cb *mpcb) +{ + return mpcb->infinite_mapping_rcv || + mpcb->infinite_mapping_snd || + mpcb->send_infinite_mapping; +} + +static inline bool mptcp_can_new_subflow(const struct sock *meta_sk) +{ + /* Has been removed from the tk-table. Thus, no new subflows. + * + * Check for close-state is necessary, because we may have been closed + * without passing by mptcp_close(). + * + * When falling back, no new subflows are allowed either. + */ + return meta_sk->sk_state != TCP_CLOSE && + tcp_sk(meta_sk)->inside_tk_table && + !tcp_sk(meta_sk)->mpcb->infinite_mapping_rcv && + !tcp_sk(meta_sk)->mpcb->send_infinite_mapping; +} + +static inline int mptcp_subflow_count(const struct mptcp_cb *mpcb) +{ + struct mptcp_tcp_sock *mptcp; + int i = 0; + + mptcp_for_each_sub(mpcb, mptcp) + i++; + + return i; +} + +/* TCP and MPTCP mpc flag-depending functions */ +u16 mptcp_select_window(struct sock *sk); +void mptcp_tcp_set_rto(struct sock *sk); + +/* TCP and MPTCP flag-depending functions */ +bool mptcp_prune_ofo_queue(struct sock *sk); + +#else /* CONFIG_MPTCP */ +#define mptcp_debug(fmt, args...)
\ + do { \ + } while (0) + +static inline struct sock *mptcp_to_sock(const struct mptcp_tcp_sock *mptcp) +{ + return NULL; +} + +#define mptcp_for_each_sub(__mpcb, __mptcp) \ + if (0) + +#define MPTCP_INC_STATS(net, field) \ + do { \ + } while(0) + +static inline bool mptcp_is_data_fin(const struct sk_buff *skb) +{ + return false; +} +static inline bool mptcp_is_data_seq(const struct sk_buff *skb) +{ + return false; +} +static inline struct sock *mptcp_meta_sk(const struct sock *sk) +{ + return NULL; +} +static inline struct tcp_sock *mptcp_meta_tp(const struct tcp_sock *tp) +{ + return NULL; +} +static inline int is_meta_sk(const struct sock *sk) +{ + return 0; +} +static inline int is_master_tp(const struct tcp_sock *tp) +{ + return 0; +} +static inline void mptcp_del_sock(const struct sock *sk) {} +static inline void mptcp_update_metasocket(const struct sock *meta_sk) {} +static inline void mptcp_reinject_data(struct sock *orig_sk, int clone_it) {} +static inline void mptcp_update_sndbuf(const struct tcp_sock *tp) {} +static inline void mptcp_clean_rtx_infinite(const struct sk_buff *skb, + const struct sock *sk) {} +static inline void mptcp_sub_close(struct sock *sk, unsigned long delay) {} +static inline void mptcp_set_rto(const struct sock *sk) {} +static inline void mptcp_send_fin(const struct sock *meta_sk) {} +static inline void mptcp_parse_options(const uint8_t *ptr, const int opsize, + struct mptcp_options_received *mopt, + const struct sk_buff *skb, + const struct tcp_sock *tp) {} +static inline void mptcp_syn_options(const struct sock *sk, + struct tcp_out_options *opts, + unsigned *remaining) {} +static inline void mptcp_synack_options(struct request_sock *req, + struct tcp_out_options *opts, + unsigned *remaining) {} + +static inline void mptcp_established_options(struct sock *sk, + struct sk_buff *skb, + struct tcp_out_options *opts, + unsigned *size) {} +static inline void mptcp_options_write(__be32 *ptr, struct tcp_sock *tp, + const struct tcp_out_options *opts, + struct sk_buff *skb) {} +static inline void mptcp_close(struct sock *meta_sk, long timeout) {} +static inline bool mptcp_doit(struct sock *sk) +{ + return false; +} +static inline int mptcp_check_req_fastopen(struct sock *child, + struct request_sock *req) +{ + return 1; +} +static inline int mptcp_check_req_master(const struct sock *sk, + const struct sock *child, + const struct request_sock *req, + const struct sk_buff *skb, + int drop, + u32 tsoff) +{ + return 1; +} +static inline struct sock *mptcp_check_req_child(const struct sock *meta_sk, + const struct sock *child, + const struct request_sock *req, + struct sk_buff *skb, + const struct mptcp_options_received *mopt) +{ + return NULL; +} +static inline unsigned int mptcp_current_mss(struct sock *meta_sk) +{ + return 0; +} +static inline void mptcp_sub_close_passive(struct sock *sk) {} +static inline bool mptcp_fallback_infinite(const struct sock *sk, int flag) +{ + return false; +} +static inline void mptcp_init_mp_opt(const struct mptcp_options_received *mopt) {} +static inline void mptcp_prepare_for_backlog(struct sock *sk, struct sk_buff *skb) {} +static inline bool mptcp_check_rtt(const struct tcp_sock *tp, int time) +{ + return false; +} +static inline int mptcp_check_snd_buf(const struct tcp_sock *tp) +{ + return 0; +} +static inline void mptcp_push_pending_frames(struct sock *meta_sk) {} +static inline void mptcp_send_reset(const struct sock *sk) {} +static inline void mptcp_sub_force_close_all(struct mptcp_cb *mpcb, + struct sock *except) {} +static 
inline bool mptcp_handle_options(struct sock *sk, + const struct tcphdr *th, + struct sk_buff *skb) +{ + return false; +} +static inline void mptcp_reset_mopt(struct tcp_sock *tp) {} +static inline void __init mptcp_init(void) {} +static inline bool mptcp_can_sg(const struct sock *meta_sk) +{ + return false; +} +static inline unsigned int mptcp_xmit_size_goal(const struct sock *meta_sk, + u32 mss_now, int large_allowed) +{ + return 0; +} +static inline void mptcp_destroy_sock(struct sock *sk) {} +static inline int mptcp_rcv_synsent_state_process(struct sock *sk, + struct sock **skptr, + struct sk_buff *skb, + const struct mptcp_options_received *mopt) +{ + return 0; +} +static inline bool mptcp_can_sendpage(struct sock *sk) +{ + return false; +} +static inline int mptcp_init_tw_sock(struct sock *sk, + struct tcp_timewait_sock *tw) +{ + return 0; +} +static inline void mptcp_twsk_destructor(struct tcp_timewait_sock *tw) {} +static inline void mptcp_disconnect(struct sock *meta_sk) {} +static inline void mptcp_tsq_flags(struct sock *sk) {} +static inline void mptcp_tsq_sub_deferred(struct sock *meta_sk) {} +static inline void mptcp_hash_remove_bh(struct tcp_sock *meta_tp) {} +static inline void mptcp_remove_shortcuts(const struct mptcp_cb *mpcb, + const struct sk_buff *skb) {} +static inline void mptcp_init_tcp_sock(struct sock *sk) {} +static inline void mptcp_disable_static_key(void) {} +static inline void mptcp_cookies_reqsk_init(struct request_sock *req, + struct mptcp_options_received *mopt, + struct sk_buff *skb) {} +static inline void mptcp_mpcb_put(struct mptcp_cb *mpcb) {} +static inline void mptcp_fin(struct sock *meta_sk) {} +static inline bool mptcp_in_infinite_mapping_weak(const struct mptcp_cb *mpcb) +{ + return false; +} +static inline bool mptcp_can_new_subflow(const struct sock *meta_sk) +{ + return false; +} + +#endif /* CONFIG_MPTCP */ + +#endif /* _MPTCP_H */ +#endif diff --git a/include/net/mptcp_v4.h b/include/net/mptcp_v4.h new file mode 100644 index 0000000000000000000000000000000000000000..97b14ea09c892f3d218373d90fa0e964278bf9e1 --- /dev/null +++ b/include/net/mptcp_v4.h @@ -0,0 +1,78 @@ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* + * MPTCP implementation + * + * Initial Design & Implementation: + * Sébastien Barré <sebastien.barre@uclouvain.be> + * + * Current Maintainer & Author: + * Christoph Paasch <christoph.paasch@uclouvain.be> + * + * Additional authors: + * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi> + * Gregory Detal <gregory.detal@uclouvain.be> + * Fabien Duchêne <fabien.duchene@uclouvain.be> + * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de> + * Lavkesh Lahngir <lavkesh51@gmail.com> + * Andreas Ripke <ripke@neclab.eu> + * Vlad Dogaru <vlad.dogaru@intel.com> + * Octavian Purdila <octavian.purdila@intel.com> + * John Ronan <jronan@tssg.org> + * Catalin Nicutar <catalin.nicutar@gmail.com> + * Brandon Heller <brandonh@stanford.edu> + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef MPTCP_V4_H_ +#define MPTCP_V4_H_ + + +#include <linux/in.h> +#include <linux/skbuff.h> +#include <net/mptcp.h> +#include <net/request_sock.h> +#include <net/sock.h> + +extern struct request_sock_ops mptcp_request_sock_ops; +extern const struct inet_connection_sock_af_ops mptcp_v4_specific; +extern struct tcp_request_sock_ops mptcp_request_sock_ipv4_ops; +extern struct tcp_request_sock_ops mptcp_join_request_sock_ipv4_ops; + +#ifdef CONFIG_MPTCP + +int mptcp_v4_do_rcv(struct sock *meta_sk, struct sk_buff *skb); +struct sock *mptcp_v4_search_req(const __be16 rport, const __be32 raddr, + const __be32 laddr, const struct net *net); +int __mptcp_init4_subsockets(struct sock *meta_sk, const struct mptcp_loc4 *loc, + __be16 sport, struct mptcp_rem4 *rem, + struct sock **subsk); +int mptcp_pm_v4_init(void); +void mptcp_pm_v4_undo(void); +u32 mptcp_v4_get_nonce(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); +u64 mptcp_v4_get_key(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, + u32 seed); + +static inline int mptcp_init4_subsockets(struct sock *meta_sk, + const struct mptcp_loc4 *loc, + struct mptcp_rem4 *rem) +{ + return __mptcp_init4_subsockets(meta_sk, loc, 0, rem, NULL); +} + +#else + +static inline int mptcp_v4_do_rcv(const struct sock *meta_sk, + const struct sk_buff *skb) +{ + return 0; +} + +#endif /* CONFIG_MPTCP */ + +#endif /* MPTCP_V4_H_ */ +#endif diff --git a/include/net/mptcp_v6.h b/include/net/mptcp_v6.h new file mode 100644 index 0000000000000000000000000000000000000000..3e5c4bbacb1ec2298888350ab0241f6d654da2ff --- /dev/null +++ b/include/net/mptcp_v6.h @@ -0,0 +1,79 @@ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* + * MPTCP implementation + * + * Initial Design & Implementation: + * Sébastien Barré <sebastien.barre@uclouvain.be> + * + * Current Maintainer & Author: + * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi> + * + * Additional authors: + * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi> + * Gregory Detal <gregory.detal@uclouvain.be> + * Fabien Duchêne <fabien.duchene@uclouvain.be> + * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de> + * Lavkesh Lahngir <lavkesh51@gmail.com> + * Andreas Ripke <ripke@neclab.eu> + * Vlad Dogaru <vlad.dogaru@intel.com> + * Octavian Purdila <octavian.purdila@intel.com> + * John Ronan <jronan@tssg.org> + * Catalin Nicutar <catalin.nicutar@gmail.com> + * Brandon Heller <brandonh@stanford.edu> + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _MPTCP_V6_H +#define _MPTCP_V6_H + +#include <linux/in6.h> +#include <net/if_inet6.h> + +#include <net/mptcp.h> + + +#ifdef CONFIG_MPTCP +extern const struct inet_connection_sock_af_ops mptcp_v6_mapped; +extern const struct inet_connection_sock_af_ops mptcp_v6_specific; +extern struct request_sock_ops mptcp6_request_sock_ops; +extern struct tcp_request_sock_ops mptcp_request_sock_ipv6_ops; +extern struct tcp_request_sock_ops mptcp_join_request_sock_ipv6_ops; + +int mptcp_v6_do_rcv(struct sock *meta_sk, struct sk_buff *skb); +struct sock *mptcp_v6_search_req(const __be16 rport, const struct in6_addr *raddr, + const struct in6_addr *laddr, const struct net *net); +int __mptcp_init6_subsockets(struct sock *meta_sk, const struct mptcp_loc6 *loc, + __be16 sport, struct mptcp_rem6 *rem, + struct sock **subsk); +int mptcp_pm_v6_init(void); +void mptcp_pm_v6_undo(void); +__u32 mptcp_v6_get_nonce(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport); +u64 mptcp_v6_get_key(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport, u32 seed); + +static inline int mptcp_init6_subsockets(struct sock *meta_sk, + const struct mptcp_loc6 *loc, + struct mptcp_rem6 *rem) +{ + return __mptcp_init6_subsockets(meta_sk, loc, 0, rem, NULL); +} + +#else /* CONFIG_MPTCP */ + +#define mptcp_v6_mapped ipv6_mapped + +static inline int mptcp_v6_do_rcv(struct sock *meta_sk, struct sk_buff *skb) +{ + return 0; +} + +#endif /* CONFIG_MPTCP */ + +#endif /* _MPTCP_V6_H */ +#endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index bc88ac6c2e1d76dfca088de5da9d2189992abdbb..a7788020289ee59b0114e645500be3bbdd1aad6a 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -19,6 +19,9 @@ #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#include <net/netns/mptcp.h> +#endif #include <net/netns/ieee802154_6lowpan.h> #include <net/netns/sctp.h> #include <net/netns/dccp.h> @@ -110,6 +113,11 @@ struct net { #if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#if IS_ENABLED(CONFIG_MPTCP) + struct netns_mptcp mptcp; +#endif +#endif #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) struct netns_ieee802154_lowpan ieee802154_lowpan; #endif diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index ac4d70aeee1294648c2229d95597c22af8c04dc6..700320546919b6fffc65fa0347e5961fc8ffd804 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -23,10 +23,19 @@ #include <linux/netfilter/nf_conntrack_dccp.h> #include <linux/netfilter/nf_conntrack_sctp.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> +#if defined(CONFIG_BCM_KF_PROTO_IPSEC) && \ + (defined(CONFIG_NF_CONNTRACK_IPSEC) || defined(CONFIG_NF_CONNTRACK_IPSEC_MODULE)) +#include <linux/netfilter/nf_conntrack_ipsec.h> +#include <linux/netfilter/nf_conntrack_proto_esp.h> +#endif #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> #include <net/netfilter/nf_conntrack_tuple.h> +#if defined(CONFIG_BCM_KF_NETFILTER) +#include <linux/bcm_nfconn_ext.h> +#endif + /* per conntrack: protocol private data */ union nf_conntrack_proto { /* insert conntrack proto private data here */ @@ -34,6 +43,10 @@ union nf_conntrack_proto { struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct nf_ct_gre gre; +#if defined(CONFIG_BCM_KF_PROTO_ESP) && \ + (defined(CONFIG_NF_CT_PROTO_ESP) || 
defined(CONFIG_NF_CT_PROTO_ESP_MODULE)) + struct nf_ct_esp esp; +#endif unsigned int tmpl_padto; }; @@ -66,6 +79,10 @@ struct nf_conn { spinlock_t lock; u16 cpu; +#if defined(CONFIG_BCM_KF_NETFILTER) + struct bcm_nf_conn_ext bcm_ext; +#endif + #ifdef CONFIG_NF_CONNTRACK_ZONES struct nf_conntrack_zone zone; #endif @@ -261,14 +278,47 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) #define nfct_time_stamp ((u32)(jiffies)) +#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG) +extern bool bcm_nf_blog_ct_is_expired(struct nf_conn *ct); + /* jiffies until ct expires, 0 if already expired */ -static inline unsigned long nf_ct_expires(const struct nf_conn *ct) +static inline unsigned long nf_ct_expires(struct nf_conn *ct) { s32 timeout = ct->timeout - nfct_time_stamp; + if (timeout <= 0) { + bcm_nf_blog_ct_is_expired(ct); + timeout = ct->timeout - nfct_time_stamp; + } + return timeout > 0 ? timeout : 0; } +static inline bool nf_ct_is_expired(struct nf_conn *ct) +{ + /* when the connection timeout has expired, check if the connection + * is accelerated and update the timeout accordingly + */ + + if ((__s32)(ct->timeout - nfct_time_stamp) <= 0) + return bcm_nf_blog_ct_is_expired(ct); + else + return 0; +} + +static inline bool nf_ct_should_gc(struct nf_conn *ct) +{ + return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) && + !nf_ct_is_dying(ct); +} +#else +/* jiffies until ct expires, 0 if already expired */ +static inline unsigned long nf_ct_expires(const struct nf_conn *ct) +{ + s32 timeout = ct->timeout - nfct_time_stamp; + + return timeout > 0 ? timeout : 0; +} static inline bool nf_ct_is_expired(const struct nf_conn *ct) { return (__s32)(ct->timeout - nfct_time_stamp) <= 0; @@ -280,6 +330,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) { return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct); } +#endif struct kernel_param; diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index bf0444e111a63a3e2b6520a21849a6c9bc83a6b3..bd3ae2893c2378f0053e004b280b1025786a446a 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -62,6 +62,12 @@ struct nf_conntrack_tuple { struct { __be16 key; } gre; +#if defined(CONFIG_BCM_KF_PROTO_ESP) && \ + (defined(CONFIG_NF_CT_PROTO_ESP) || defined(CONFIG_NF_CT_PROTO_ESP_MODULE)) + struct { + __be16 spi; + } esp; +#endif } u; /* The protocol.
*/ diff --git a/include/net/netns/mptcp.h b/include/net/netns/mptcp.h new file mode 100644 index 0000000000000000000000000000000000000000..2b8bb143a6dd911c92e50f95979128185b3ebf98 --- /dev/null +++ b/include/net/netns/mptcp.h @@ -0,0 +1,54 @@ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* + * MPTCP implementation - MPTCP namespace + * + * Initial Design & Implementation: + * Sébastien Barré <sebastien.barre@uclouvain.be> + * + * Current Maintainer: + * Christoph Paasch <christoph.paasch@uclouvain.be> + * + * Additional authors: + * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi> + * Gregory Detal <gregory.detal@uclouvain.be> + * Fabien Duchêne <fabien.duchene@uclouvain.be> + * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de> + * Lavkesh Lahngir <lavkesh51@gmail.com> + * Andreas Ripke <ripke@neclab.eu> + * Vlad Dogaru <vlad.dogaru@intel.com> + * Octavian Purdila <octavian.purdila@intel.com> + * John Ronan <jronan@tssg.org> + * Catalin Nicutar <catalin.nicutar@gmail.com> + * Brandon Heller <brandonh@stanford.edu> + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef __NETNS_MPTCP_H__ +#define __NETNS_MPTCP_H__ + +#include <linux/compiler.h> + +enum { + MPTCP_PM_FULLMESH = 0, + MPTCP_PM_MAX +}; + +struct mptcp_mib; + +struct netns_mptcp { + DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics); + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_net_mptcp; +#endif + + void *path_managers[MPTCP_PM_MAX]; +}; + +#endif /* __NETNS_MPTCP_H__ */ +#endif diff --git a/include/net/sock.h b/include/net/sock.h index 75677050c82edb0a334f91175a438dc71317a8f1..c290cb4705963b5811c65a1be99c9dbe0cdce79d 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -817,6 +817,9 @@ enum sock_flags { SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ SOCK_TXTIME, +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + SOCK_MPTCP, /* MPTCP set on this socket */ +#endif }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1124,6 +1127,9 @@ struct proto { void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + void (*clear_sk)(struct sock *sk, int size); +#endif /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index 22ae260d686951f4005c1e9c903c244a92da31b0..2f455a9d5e920c324801c18c74a9255fc420b777 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -18,6 +18,10 @@ struct tcf_vlan_params { u16 tcfv_push_vid; __be16 tcfv_push_proto; u8 tcfv_push_prio; +#ifdef CONFIG_BCM_KF_ENHANCED_TC + bool tcfv_push_prio_exists; + bool tcfv_vlan_id_exists; +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ struct rcu_head rcu; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 3f0d654984cf43fbbc5a51ebd4d654803d0e3649..972a347bc6edc9acf6ff1eadcac8787928716d23 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -185,6 +185,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_MD5SIG 19 /* MD5 Signature 
(RFC2385) */ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#define TCPOPT_MPTCP 30 +#endif #define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */ #define TCPOPT_EXP 254 /* Experimental */ /* Magic number to be after the option value for sharing TCP @@ -241,6 +244,33 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); */ #define TFO_SERVER_WO_SOCKOPT1 0x400 +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* Flags from tcp_input.c for tcp_ack */ +#define FLAG_DATA 0x01 /* Incoming frame contained data. */ +#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ +#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ +#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ +#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ +#define FLAG_DATA_SACKED 0x20 /* New SACK. */ +#define FLAG_ECE 0x40 /* ECE in this ACK */ +#define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */ +#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ +#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ +#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ +#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ +#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ +#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ +#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ +#define FLAG_ACK_MAYBE_DELAYED 0x10000 /* Likely a delayed ACK */ + +#define MPTCP_FLAG_DATA_ACKED 0x20000 + +#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) +#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) +#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK) +#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) +#endif /* sysctl variables for tcp */ extern int sysctl_tcp_max_orphans; @@ -313,6 +343,98 @@ extern struct proto tcp_prot; #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/**** START - Exports needed for MPTCP ****/ +extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops; +extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops; + +struct mptcp_options_received; + +void tcp_cleanup_rbuf(struct sock *sk, int copied); +void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited); +int tcp_close_state(struct sock *sk); +void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, + const struct sk_buff *skb); +int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib); +void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb); +int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask); +unsigned int tcp_mss_split_point(const struct sock *sk, + const struct sk_buff *skb, + unsigned int mss_now, + unsigned int max_segs, + int nonagle); +bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, + unsigned int cur_mss, int nonagle); +bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, + unsigned int cur_mss); +unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb); +int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now); +int 
__pskb_trim_head(struct sk_buff *skb, int len); +void tcp_queue_skb(struct sock *sk, struct sk_buff *skb); +void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags); +void tcp_reset(struct sock *sk); +bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, + const u32 ack_seq, const u32 nwin); +bool tcp_urg_mode(const struct tcp_sock *tp); +void tcp_ack_probe(struct sock *sk); +void tcp_rearm_rto(struct sock *sk); +int tcp_write_timeout(struct sock *sk); +bool retransmits_timed_out(struct sock *sk, + unsigned int boundary, + unsigned int timeout); +void tcp_write_err(struct sock *sk); +void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr); +void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb); +void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now); + +void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req); +void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb); +struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb); +void tcp_v4_reqsk_destructor(struct request_sock *req); + +void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req); +void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); +struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb); +int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); +int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); +void tcp_v6_destroy_sock(struct sock *sk); +void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); +void tcp_v6_hash(struct sock *sk); +struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb); +struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); +void tcp_v6_reqsk_destructor(struct request_sock *req); + +unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, + int large_allowed); +u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb); + +void skb_clone_fraglist(struct sk_buff *skb); + +void inet_twsk_free(struct inet_timewait_sock *tw); +int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb); +/* These states need RST on ABORT according to RFC793 */ +static inline bool tcp_need_reset(int state) +{ + return (1 << state) & + (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | + TCPF_FIN_WAIT2 | TCPF_SYN_RECV); +} + +int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, + bool *fragstolen); +void tcp_ofo_queue(struct sock *sk); +void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb); +int linear_payload_sz(bool first_skb); +/**** END - Exports needed for MPTCP ****/ + +#endif void tcp_tasklet_init(void); void tcp_v4_err(struct sk_buff *skb, u32); @@ -412,7 +534,13 @@ int tcp_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); void tcp_parse_options(const struct net *net, const struct sk_buff *skb, struct tcp_options_received *opt_rx, +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) int estab, struct tcp_fastopen_cookie *foc); +#else + struct mptcp_options_received *mopt_rx, + int estab, struct tcp_fastopen_cookie *foc, + struct tcp_sock *tp); +#endif const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* @@ -421,6 +549,9 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void 
tcp_v4_mtu_reduced(struct sock *sk); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +void tcp_v6_mtu_reduced(struct sock *sk); +#endif void tcp_req_err(struct sock *sk, u32 seq, bool abort); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, @@ -538,7 +669,12 @@ static inline u32 tcp_cookie_time(void) u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss); +#else +__u32 cookie_v4_init_sequence(struct request_sock *req, const struct sock *sk, + const struct sk_buff *skb, __u16 *mss); +#endif u64 cookie_init_timestamp(struct request_sock *req); bool cookie_timestamp_decode(const struct net *net, struct tcp_options_received *opt); @@ -552,7 +688,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss); +#else +__u32 cookie_v6_init_sequence(struct request_sock *req, const struct sock *sk, + const struct sk_buff *skb, __u16 *mss); +#endif #endif /* tcp_output.c */ @@ -588,10 +729,20 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto); void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +u16 tcp_select_window(struct sock *sk); +bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp); + +#endif /* tcp_input.c */ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +void tcp_set_rto(struct sock *sk); +bool tcp_should_expand_sndbuf(const struct sock *sk); +#endif void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); @@ -635,7 +786,11 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) } /* tcp.c */ +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) void tcp_get_info(struct sock *, struct tcp_info *); +#else +void tcp_get_info(struct sock *, struct tcp_info *, bool no_lock); +#endif /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, @@ -823,6 +978,14 @@ struct tcp_skb_cb { u16 tcp_gso_size; }; }; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + +#ifdef CONFIG_MPTCP + __u8 mptcp_flags; /* flags for the MPTCP layer */ + __u8 dss_off; /* Number of 4-byte words until + * seq-number */ +#endif +#endif __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK. 
*/ @@ -841,6 +1004,16 @@ struct tcp_skb_cb { has_rxtstamp:1, /* SKB has a RX timestamp */ unused:5; __u32 ack_seq; /* Sequence number ACK'd */ +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + +#ifdef CONFIG_MPTCP + union { /* For MPTCP outgoing frames */ + __u32 path_mask; /* paths that tried to send this skb */ + __u32 dss[6]; /* DSS options */ + }; +#endif + +#endif union { struct { /* There is space for up to 24 bytes */ @@ -1361,6 +1534,21 @@ static inline int tcp_win_from_space(const struct sock *sk, int space) space - (space>>tcp_adv_win_scale); } +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#ifdef CONFIG_MPTCP +extern struct static_key mptcp_static_key; +static inline bool mptcp(const struct tcp_sock *tp) +{ + return static_key_false(&mptcp_static_key) && tp->mpc; +} +#else +static inline bool mptcp(const struct tcp_sock *tp) +{ + return 0; +} +#endif + +#endif /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { @@ -1911,6 +2099,32 @@ struct tcp_sock_af_ops { #endif }; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +/* TCP/MPTCP-specific functions */ +struct tcp_sock_ops { + u32 (*__select_window)(struct sock *sk); + u16 (*select_window)(struct sock *sk); + void (*select_initial_window)(const struct sock *sk, int __space, + __u32 mss, __u32 *rcv_wnd, + __u32 *window_clamp, int wscale_ok, + __u8 *rcv_wscale, __u32 init_rcv_wnd); + int (*select_size)(const struct sock *sk, bool first_skb, bool zc); + void (*init_buffer_space)(struct sock *sk); + void (*set_rto)(struct sock *sk); + bool (*should_expand_sndbuf)(const struct sock *sk); + void (*send_fin)(struct sock *sk); + bool (*write_xmit)(struct sock *sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp); + void (*send_active_reset)(struct sock *sk, gfp_t priority); + int (*write_wakeup)(struct sock *sk, int mib); + void (*retransmit_timer)(struct sock *sk); + void (*time_wait)(struct sock *sk, int state, int timeo); + void (*cleanup_rbuf)(struct sock *sk, int copied); + void (*cwnd_validate)(struct sock *sk, bool is_cwnd_limited); +}; +extern const struct tcp_sock_ops tcp_specific; + +#endif struct tcp_request_sock_ops { u16 mss_clamp; #ifdef CONFIG_TCP_MD5SIG @@ -1921,12 +2135,24 @@ struct tcp_request_sock_ops { const struct sock *sk, const struct sk_buff *skb); #endif +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) void (*init_req)(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb); +#else + int (*init_req)(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb, + bool want_cookie); +#endif #ifdef CONFIG_SYN_COOKIES +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) __u32 (*cookie_init_seq)(const struct sk_buff *skb, __u16 *mss); +#else + __u32 (*cookie_init_seq)(struct request_sock *req, const struct sock *sk, + const struct sk_buff *skb, __u16 *mss); +#endif #endif struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, const struct request_sock *req); @@ -1940,15 +2166,25 @@ struct tcp_request_sock_ops { #ifdef CONFIG_SYN_COOKIES static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + struct request_sock *req, +#endif const struct sock *sk, struct sk_buff *skb, __u16 *mss) { tcp_synq_overflow(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); +#if !defined(CONFIG_BCM_KF_MPTCP) || 
!defined(CONFIG_BCM_MPTCP) return ops->cookie_init_seq(skb, mss); +#else + return ops->cookie_init_seq(req, sk, skb, mss); +#endif } #else static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + struct request_sock *req, +#endif const struct sock *sk, struct sk_buff *skb, __u16 *mss) { diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h index 2875e169d7445e65b65110081215b5f62d6e911c..68e500f8e6815ce95d28d9540c7da496006ae69d 100644 --- a/include/net/tcp_states.h +++ b/include/net/tcp_states.h @@ -26,6 +26,13 @@ enum { TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ TCP_NEW_SYN_RECV, +/* HACK: Enable this unconditionally (it is enabled in BCM's build system). + * There are static asserts failing if the enums do not match (see comment above). + */ +/* #if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) */ +#if 1 + TCP_RST_WAIT, +#endif TCP_MAX_STATES /* Leave at the end! */ }; @@ -47,6 +54,9 @@ enum { TCPF_LISTEN = (1 << TCP_LISTEN), TCPF_CLOSING = (1 << TCP_CLOSING), TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV), +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + TCPF_RST_WAIT = (1 << TCP_RST_WAIT), +#endif }; #endif /* _LINUX_TCP_STATES_H */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index a8f6020f1196edc9940cbb6c605a06279db4fd36..4dda89d3abb40e336db456a96eb5523550929ea4 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -58,6 +58,10 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, /* address family specific functions */ extern const struct inet_connection_sock_af_ops ipv4_specific; +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +extern const struct inet_connection_sock_af_ops ipv6_mapped; +extern const struct inet_connection_sock_af_ops ipv6_specific; +#endif void inet6_destroy_sock(struct sock *sk); diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ac55b328d61b22107cb2e3c2ab3591ca093ad9fb..281d4278b4e7e39c4064147d8796dcf341aadd70 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -10,6 +10,9 @@ #include <linux/tracepoint.h> #include <net/ipv6.h> #include <net/tcp.h> +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +#include <net/mptcp.h> +#endif #include <linux/sock_diag.h> #define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ @@ -178,6 +181,15 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust, TP_ARGS(sk) ); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) +DEFINE_EVENT(tcp_event_sk_skb, mptcp_retransmit, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + + TP_ARGS(sk, skb) +); + +#endif TRACE_EVENT(tcp_retransmit_synack, TP_PROTO(const struct sock *sk, const struct request_sock *req), @@ -245,6 +257,9 @@ TRACE_EVENT(tcp_probe, __field(__u32, srtt) __field(__u32, rcv_wnd) __field(__u64, sock_cookie) +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + __field(__u8, mptcp) +#endif ), TP_fast_assign( @@ -271,13 +286,25 @@ TRACE_EVENT(tcp_probe, __entry->ssthresh = tcp_current_ssthresh(sk); __entry->srtt = tp->srtt_us >> 3; __entry->sock_cookie = sock_gen_cookie(sk); +#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP) + __entry->mptcp = mptcp(tp) ? 
tp->mptcp->path_index : 0; +#endif ), +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx", +#else + TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx mptcp=%d", +#endif __entry->saddr, __entry->daddr, __entry->mark, __entry->data_len, __entry->snd_nxt, __entry->snd_una, __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd, +#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP) __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie) +#else + __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie, + __entry->mptcp) +#endif ); #endif /* _TRACE_TCP_H */ diff --git a/include/uapi/linux/atmdev.h b/include/uapi/linux/atmdev.h index a5c15cf23bd78b7bca429783292cd293a9381078..18c8b46ad822d8712c6039fcce222ca13fb10926 100644 --- a/include/uapi/linux/atmdev.h +++ b/include/uapi/linux/atmdev.h @@ -117,6 +117,14 @@ struct atm_dev_stats { #define ATM_BACKEND_PPP 1 /* PPPoATM - RFC2364 */ #define ATM_BACKEND_BR2684 2 /* Bridged RFC1483/2684 */ +#if defined(CONFIG_BCM_KF_PPP) +#define ATM_BACKEND_RT2684 3 /* Routed RFC1483/2684 */ +#define ATM_BACKEND_BR2684_BCM 4 /* Bridged RFC1483/2684 uses Broadcom ATMAPI*/ +#define ATM_BACKEND_PPP_BCM 5 /* PPPoA uses Broadcom bcmxtmrt driver */ +#define ATM_BACKEND_PPP_BCM_DISCONN 6 /* PPPoA LCP disconnect */ +#define ATM_BACKEND_PPP_BCM_CLOSE_DEV 7 /* PPPoA close device */ +#endif + /* for ATM_GETTYPE */ #define ATM_ITFTYP_LEN 8 /* maximum length of interface type name */ diff --git a/include/uapi/linux/bcm_atmdev.h b/include/uapi/linux/bcm_atmdev.h new file mode 100644 index 0000000000000000000000000000000000000000..a3febf166757817c343d44df38ae82b1d2234ba2 --- /dev/null +++ b/include/uapi/linux/bcm_atmdev.h @@ -0,0 +1,41 @@ +#ifndef __UAPI_BCM_ATMDEV_H__ +#define __UAPI_BCM_ATMDEV_H__ +/* +<:copyright-BRCM:2019:DUAL/GPL:standard + + Copyright (c) 2019 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + +//#define ATM_EXTBACKENDIF _IOW('a',ATMIOC_SPECIAL+6,atm_backend_t) +//#define ATM_SETEXTFILT _IOW('a',ATMIOC_SPECIAL+7,atm_backend_t) + +#define ATM_BACKEND_RT2684 3 /* Routed RFC1483/2684 */ +#define ATM_BACKEND_BR2684_BCM 4 /* Bridged RFC1483/2684 uses Broadcom ATMAPI*/ +#define ATM_BACKEND_PPP_BCM 5 /* PPPoA uses Broadcom bcmxtmrt driver */ +#define ATM_BACKEND_PPP_BCM_DISCONN 6 /* PPPoA LCP disconnect */ +#define ATM_BACKEND_PPP_BCM_CLOSE_DEV 7 /* PPPoA close device */ + +#endif //__UAPI_BCM_ATMDEV_H__ diff --git a/include/uapi/linux/bcm_colors.h b/include/uapi/linux/bcm_colors.h new file mode 100644 index 0000000000000000000000000000000000000000..f8f913e505f905dc7c157dc1aab45fad12210456 --- /dev/null +++ b/include/uapi/linux/bcm_colors.h @@ -0,0 +1,93 @@ +/* + * <:copyright-BRCM:2016:DUAL/GPL:standard + * + * Copyright (c) 2016 Broadcom + * All Rights Reserved + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed + * to you under the terms of the GNU General Public License version 2 + * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, + * with the following added to such license: + * + * As a special exception, the copyright holders of this software give + * you permission to link this software with independent modules, and + * to copy and distribute the resulting executable under terms of your + * choice, provided that you also meet, for each linked independent + * module, the terms and conditions of the license of that module. + * An independent module is a module which is not derived from this + * software. The special exception does not apply to any modifications + * of the software. + * + * Not withstanding the above, under no circumstances may you combine + * this software in any way with any other Broadcom software provided + * under a license other than the GPL, without Broadcom's express prior + * written consent. + * + * :> + */ + +/*----------------------------------------------------------------------* + * NOTE: FOR USERSPACE USERS: + *----------------------------------------------------------------------* + *ALL USERSPACE DEVELOPERS MUST INCLUDE THIS FILE IN THEIR APPLICATIONS. 
+ * + * EXAMPLE: + * #include <bcm_local_kernel_include/linux/bcm_colors.h> + *----------------------------------------------------------------------*/ + +/* + *-------------------------------------------------------------------------- + * Color encodings for console printing: + * + * This feature is controlled from top level make menuconfig, under + * Debug Selection>Enable Colorized Prints + * + * You may select a color specific to your subsystem by: + * #define CLRsys CLRg + * + * Usage: PRINT(CLRr "format" CLRNL); + *-------------------------------------------------------------------------- + */ + +#ifndef __UAPI_BCM_COLORS_H__ +#define __UAPI_BCM_COLORS_H__ + +#ifdef CONFIG_BCM_COLORIZE_PRINTS +#define BCMCOLOR(clr_code) clr_code +#else +#define BCMCOLOR(clr_code) +#endif + +/* White background */ +#define CLRr BCMCOLOR("\e[0;31m") /* red */ +#define CLRg BCMCOLOR("\e[0;32m") /* green */ +#define CLRy BCMCOLOR("\e[0;33m") /* yellow */ +#define CLRb BCMCOLOR("\e[0;34m") /* blue */ +#define CLRm BCMCOLOR("\e[0;35m") /* magenta */ +#define CLRc BCMCOLOR("\e[0;36m") /* cyan */ + +/* blacK "inverted" background */ +#define CLRrk BCMCOLOR("\e[0;31;40m") /* red on blacK */ +#define CLRgk BCMCOLOR("\e[0;32;40m") /* green on blacK */ +#define CLRyk BCMCOLOR("\e[0;33;40m") /* yellow on blacK */ +#define CLRmk BCMCOLOR("\e[0;35;40m") /* magenta on blacK */ +#define CLRck BCMCOLOR("\e[0;36;40m") /* cyan on blacK */ +#define CLRwk BCMCOLOR("\e[0;37;40m") /* whilte on blacK */ + +/* Colored background */ +#define CLRcb BCMCOLOR("\e[0;36;44m") /* cyan on blue */ +#define CLRyr BCMCOLOR("\e[0;33;41m") /* yellow on red */ +#define CLRym BCMCOLOR("\e[0;33;45m") /* yellow on magen */ + +/* Generic foreground colors */ +#define CLRhigh CLRm /* Highlight color */ +#define CLRbold CLRcb /* Bold color */ +#define CLRbold2 CLRym /* Bold2 color */ +#define CLRerr CLRyr /* Error color */ +#define CLRnorm BCMCOLOR("\e[0m") /* Normal color */ +#define CLRnl CLRnorm "\n" /* Normal + newline */ + +/* Each subsystem may define CLRsys */ + +#endif /* __BCM_COLORS_H__ */ diff --git a/include/uapi/linux/bcm_if_ether.h b/include/uapi/linux/bcm_if_ether.h new file mode 100644 index 0000000000000000000000000000000000000000..bbba398bee434be0df70dc4cb42d80939d8f951a --- /dev/null +++ b/include/uapi/linux/bcm_if_ether.h @@ -0,0 +1,37 @@ +/* +<:copyright-BRCM:2019:DUAL/GPL:standard + + Copyright (c) 2019 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. 
+ +:> +*/ + +#ifndef _LINUX_BCM_IF_ETHER_H +#define _LINUX_BCM_IF_ETHER_H + +#define ETH_P_8021AG 0x8902 /* 802.1ag Connectivity Fault Mgmt */ +#define ETH_P_8023AH 0x8809 /* 802.3ah Ethernet OAM */ + + +#endif diff --git a/include/uapi/linux/bcm_maclimit.h b/include/uapi/linux/bcm_maclimit.h new file mode 100644 index 0000000000000000000000000000000000000000..dfe0a63e840f380f257889cba81c1d9c78d80a43 --- /dev/null +++ b/include/uapi/linux/bcm_maclimit.h @@ -0,0 +1,41 @@ +#ifndef __BCM_MACLIMIT_H_INCLUDED__ +#define __BCM_MACLIMIT_H_INCLUDED__ +/* +<:copyright-BRCM:2020:DUAL/GPL:standard + + Copyright (c) 2020 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ +struct mac_limit +{ + unsigned int enable; //dev mac limit enabled + unsigned int max; //dev and lower-devs max allow + unsigned int max_zero_drop; //max zero value is drop or not + unsigned int drop_count; //exceed max drop count + unsigned int min; //dev mac learning min commit + unsigned int reserve; //reserved for lower-devs' min + unsigned int learning_count; //dev and lower-devs learning count +}; +#endif \ No newline at end of file diff --git a/include/uapi/linux/bcm_netlink.h b/include/uapi/linux/bcm_netlink.h new file mode 100644 index 0000000000000000000000000000000000000000..a3ea9ece708dffe85cd8bd56d9e274c3483d4c0d --- /dev/null +++ b/include/uapi/linux/bcm_netlink.h @@ -0,0 +1,47 @@ +#ifndef __BCM_NETLINK_H_INCLUDED__ +#define __BCM_NETLINK_H_INCLUDED__ +/* +<:copyright-BRCM:2019:DUAL/GPL:standard + + Copyright (c) 2019 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. 
+ +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ +#define NETLINK_WLCT 24 +#define NETLINK_BRCM_MONITOR 25 /*send events to userspace monitor task(broadcom specific)*/ +#define NETLINK_BRCM_EPON 26 +#define NETLINK_NDI 27 /* network identifier driver */ +#define NETLINK_DPI_QOS 28 /* DPI QoS */ +#define NETLINK_IGSC 29 /* for WIFI multicast igs sdb listing */ +#define NETLINK_BCM_MCAST 30 /* for multicast */ +#define NETLINK_WLCSM 31 /* for brcm wireless cfg[nvram]/statics/management extention */ + +/* Note that MAX netlink message ids is 32. Defined by MAX_LINKS + * macro in kernel/linux-<ver>/include/uapi/linux/netlink.h. + * Max netlink message ids have been reached and any new + * netlink message requirements must look at using NETLINK_GENERIC + * with a sub-type + */ + +#endif /* __BCM_NETLINK_H_INCLUDED__ */ diff --git a/include/uapi/linux/bcm_realtime.h b/include/uapi/linux/bcm_realtime.h new file mode 100644 index 0000000000000000000000000000000000000000..0d462bce7aa64699f3bc7f56ff97bb754094b0fc --- /dev/null +++ b/include/uapi/linux/bcm_realtime.h @@ -0,0 +1,77 @@ +/* +<:copyright-BRCM:2011:DUAL/GPL:standard + + Copyright (c) 2011 Broadcom + All Rights Reserved + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. + +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + +#ifndef _BCM_REALTIME_H_ +#define _BCM_REALTIME_H_ + +/* + * This file defines the real time priority levels used by the various + * threads in the system. It is important that all threads coordinate + * their priority levels so that the desired effect is achieved. + * These priorities are also related cgroups, so check the cgroups + * groupings and cpu allocations (if cgroups is enabled). + */ + +/** highest priority threads in the system. + * + * Threads at this priority require the absolute minium latency. However, + * they should only run very briefly (<2ms per run). + * These threads should also run at sched policy FIFO. + */ +#define BCM_RTPRIO_HIGH 75 + + +/** priority for the voip DSP. + * + * Note this is not for all voip threads, just the DSP thread. + * The other voice threads should be run at the other priorities that are + * defined. + */ +#define BCM_RTPRIO_VOIPDSP 35 + + +/** priority for all data forwarding. + * + * This is for data and video streaming. Not clear if we need to split out + * sub-categories here such as video, versus web data, versus voice. 
+ * Probably need to use cgroups if a system needs to handle many types of + * streams. + * Threads running at this priority should use sched policy Round-Robin. + */ +#define BCM_RTPRIO_DATA 5 + +/** priority for all tasks that handle control messages related to data path. + * + * ex: bpm tasl handling allocation/free of data buffers. + * Threads running at this priority should use sched policy Round-Robin. + */ +#define BCM_RTPRIO_DATA_CONTROL 10 + +#endif /* _BCM_REALTIME_H_ */ + diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8481fc7676c0b50447343d6a43a163249eb8b3f0..e548e94483378efb08ec2df27a13dfe1b69c6079 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2688,6 +2688,13 @@ enum { BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, +/* HACK: Enable this unconditionally (it is enabled in BCM's build system). + * There are static asserts failing if the enums do not match (see comment above). + */ +/* #if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) */ +#if 1 + BPF_TCP_RST_WAIT, +#endif BPF_TCP_MAX_STATES /* Leave at the end! */ }; diff --git a/include/uapi/linux/compiler.h b/include/uapi/linux/compiler.h new file mode 100644 index 0000000000000000000000000000000000000000..55f1cb7d31371e90fb13f24de73e51fef4dbdd1e --- /dev/null +++ b/include/uapi/linux/compiler.h @@ -0,0 +1,31 @@ +#if !defined(CONFIG_BCM_IN_KERNEL) +/* +<:copyright-gpl + Copyright 2014 Broadcom Corp. All Rights Reserved. + + This program is free software; you can distribute it and/or modify it + under the terms of the GNU General Public License (Version 2) as + published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+:> + */ + +/* For userspace app to include compiler.h */ +#ifndef __UAPI_LINUX_COMPILER_H +#define __UAPI_LINUX_COMPILER_H + +#ifndef __user +#define __user +#endif + +#endif + +#endif diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index 7fea0fd7d6f54debe3cd4356a1e7ca04ef9c9c7d..8eb1b28267138699f8370e66e341985eabbd017f 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -132,6 +132,11 @@ enum net_device_flags { #define IFF_ECHO IFF_ECHO #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ +#if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) +#define IFF_NOMULTIPATH 0x80000 /* Disable for MPTCP */ +#define IFF_MPBACKUP 0x100000 /* Use as backup path for MPTCP */ + +#endif #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 43391e2d1153adb701433d6794702b73f2d60297..b5cbc671c2e67d1e119ca365221afe6c24a7ca84 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -37,6 +37,17 @@ struct rtnl_link_stats { __u32 rx_compressed; __u32 tx_compressed; +#if defined(CONFIG_BCM_KF_EXTSTATS) + __u32 tx_multicast_packets; /* multicast packets transmitted */ + __u32 rx_multicast_bytes; /* multicast bytes recieved */ + __u32 tx_multicast_bytes; /* multicast bytes transmitted */ + __u32 rx_broadcast_packets; /* broadcast packets recieved */ + __u32 tx_broadcast_packets; /* broadcast packets transmitted */ + /* NOTE: Unicast packets are not counted but are instead calculated as needed + using total - (broadcast + multicast) */ + __u32 rx_unknown_packets; /* unknown protocol packets recieved */ +#endif + __u32 rx_nohandler; /* dropped, no handler found */ }; @@ -72,6 +83,17 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; +#if defined(CONFIG_BCM_KF_EXTSTATS) + __u64 tx_multicast_packets; /* multicast packets transmitted */ + __u64 rx_multicast_bytes; /* multicast bytes recieved */ + __u64 tx_multicast_bytes; /* multicast bytes transmitted */ + __u64 rx_broadcast_packets; /* broadcast packets recieved */ + __u64 tx_broadcast_packets; /* broadcast packets transmitted */ + /* NOTE: Unicast packets are not counted but are instead calculated as needed + using total - (broadcast + multicast) */ + __u64 rx_unknown_packets; /* unknown protocol packets recieved */ +#endif + __u64 rx_nohandler; /* dropped, no handler found */ }; @@ -457,6 +479,9 @@ enum { IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + IFLA_MACSEC_OFFLOAD, +#endif __IFLA_MACSEC_MAX, }; @@ -480,6 +505,15 @@ enum macsec_validation_type { MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +enum macsec_offload { + MACSEC_OFFLOAD_OFF = 0, + MACSEC_OFFLOAD_PHY = 1, + MACSEC_OFFLOAD_MAC = 2, + __MACSEC_OFFLOAD_END, + MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, +}; +#endif /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 98e4d5d7c45ca7686b8506927294998980315bb7..cd9714410a9b7ab3ce92d005a04fb5ce4745f8d2 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -25,6 +25,10 @@ /* cipher IDs as per IEEE802.1AEbn-2011 */ #define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL #define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL +#if 
defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +#define MACSEC_CIPHER_ID_GCM_AES_XPN_128 0x0080C20001000003ULL +#define MACSEC_CIPHER_ID_GCM_AES_XPN_256 0x0080C20001000004ULL +#endif /* deprecated cipher ID for GCM-AES-128 */ #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL @@ -45,6 +49,9 @@ enum macsec_attrs { MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */ MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */ MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */ +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + MACSEC_ATTR_OFFLOAD, /* config, nested, macsec_offload_attrs */ +#endif __MACSEC_ATTR_END, NUM_MACSEC_ATTR = __MACSEC_ATTR_END, MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1, @@ -92,11 +99,26 @@ enum macsec_sa_attrs { MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */ MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ MACSEC_SA_ATTR_PAD, +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + MACSEC_SA_ATTR_SSCI, /* config/dump, u32 - XPN only */ + MACSEC_SA_ATTR_SALT, /* config, 96-bit - XPN only */ +#endif __MACSEC_SA_ATTR_END, NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END, MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, }; +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +enum macsec_offload_attrs { + MACSEC_OFFLOAD_ATTR_UNSPEC, + MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */ + MACSEC_OFFLOAD_ATTR_PAD, + __MACSEC_OFFLOAD_ATTR_END, + NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END, + MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1, +}; +#endif + enum macsec_nl_commands { MACSEC_CMD_GET_TXSC, MACSEC_CMD_ADD_RXSC, @@ -108,6 +130,9 @@ enum macsec_nl_commands { MACSEC_CMD_ADD_RXSA, MACSEC_CMD_DEL_RXSA, MACSEC_CMD_UPD_RXSA, +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + MACSEC_CMD_UPD_OFFLOAD, +#endif }; /* u64 per-RXSC stats */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h new file mode 100644 index 0000000000000000000000000000000000000000..0c109b448df16e2ddeced505dacca47a658f431e --- /dev/null +++ b/include/uapi/linux/mptcp.h @@ -0,0 +1,151 @@ +#if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Netlink API for Multipath TCP + * + * Author: Gregory Detal <gregory.detal@tessares.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_MPTCP_H +#define _LINUX_MPTCP_H + +#define MPTCP_GENL_NAME "mptcp" +#define MPTCP_GENL_EV_GRP_NAME "mptcp_events" +#define MPTCP_GENL_CMD_GRP_NAME "mptcp_commands" +#define MPTCP_GENL_VER 0x1 + +/* + * ATTR types defined for MPTCP + */ +enum { + MPTCP_ATTR_UNSPEC = 0, + + MPTCP_ATTR_TOKEN, /* u32 */ + MPTCP_ATTR_FAMILY, /* u16 */ + MPTCP_ATTR_LOC_ID, /* u8 */ + MPTCP_ATTR_REM_ID, /* u8 */ + MPTCP_ATTR_SADDR4, /* u32 */ + MPTCP_ATTR_SADDR6, /* struct in6_addr */ + MPTCP_ATTR_DADDR4, /* u32 */ + MPTCP_ATTR_DADDR6, /* struct in6_addr */ + MPTCP_ATTR_SPORT, /* u16 */ + MPTCP_ATTR_DPORT, /* u16 */ + MPTCP_ATTR_BACKUP, /* u8 */ + MPTCP_ATTR_ERROR, /* u8 */ + MPTCP_ATTR_FLAGS, /* u16 */ + MPTCP_ATTR_TIMEOUT, /* u32 */ + MPTCP_ATTR_IF_IDX, /* s32 */ + + __MPTCP_ATTR_AFTER_LAST +}; + +#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1) + +/* + * Events generated by MPTCP: + * - MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport + * A new connection has been created. It is the good time to allocate + * memory and send ADD_ADDR if needed. Depending on the traffic-patterns + * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent. + * + * - MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport + * A connection is established (can start new subflows). + * + * - MPTCP_EVENT_CLOSED: token + * A connection has stopped. + * + * - MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] + * A new address has been announced by the peer. + * + * - MPTCP_EVENT_REMOVED: token, rem_id + * An address has been lost by the peer. + * + * - MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6, + * daddr4 | daddr6, sport, dport, backup, + * if_idx [, error] + * A new subflow has been established. 'error' should not be set. + * + * - MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, backup, if_idx [, error] + * A subflow has been closed. An error (copy of sk_err) could be set if an + * error has been detected for this subflow. + * + * - MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, backup, if_idx [, error] + * The priority of a subflow has changed. 'error' should not be set. + * + * Commands for MPTCP: + * - MPTCP_CMD_ANNOUNCE: token, loc_id, family, saddr4 | saddr6 [, sport] + * Announce a new address to the peer. + * + * - MPTCP_CMD_REMOVE: token, loc_id + * Announce that an address has been lost to the peer. + * + * - MPTCP_CMD_SUB_CREATE: token, family, loc_id, rem_id, [saddr4 | saddr6, + * daddr4 | daddr6, dport [, sport, backup, if_idx]] + * Create a new subflow. + * + * - MPTCP_CMD_SUB_DESTROY: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport + * Close a subflow. + * + * - MPTCP_CMD_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, backup + * Change the priority of a subflow. + * + * - MPTCP_CMD_SET_FILTER: flags + * Set the filter on events. Set MPTCPF_* flags to only receive specific + * events. Default is to receive all events. + * + * - MPTCP_CMD_EXIST: token + * Check if this token is linked to an existing socket. 
+ */ +enum { + MPTCP_CMD_UNSPEC = 0, + + MPTCP_EVENT_CREATED, + MPTCP_EVENT_ESTABLISHED, + MPTCP_EVENT_CLOSED, + + MPTCP_CMD_ANNOUNCE, + MPTCP_CMD_REMOVE, + MPTCP_EVENT_ANNOUNCED, + MPTCP_EVENT_REMOVED, + + MPTCP_CMD_SUB_CREATE, + MPTCP_CMD_SUB_DESTROY, + MPTCP_EVENT_SUB_ESTABLISHED, + MPTCP_EVENT_SUB_CLOSED, + + MPTCP_CMD_SUB_PRIORITY, + MPTCP_EVENT_SUB_PRIORITY, + + MPTCP_CMD_SET_FILTER, + + MPTCP_CMD_EXIST, + + __MPTCP_CMD_AFTER_LAST +}; + +#define MPTCP_CMD_MAX (__MPTCP_CMD_AFTER_LAST - 1) + +enum { + MPTCPF_EVENT_CREATED = (1 << 1), + MPTCPF_EVENT_ESTABLISHED = (1 << 2), + MPTCPF_EVENT_CLOSED = (1 << 3), + MPTCPF_EVENT_ANNOUNCED = (1 << 4), + MPTCPF_EVENT_REMOVED = (1 << 5), + MPTCPF_EVENT_SUB_ESTABLISHED = (1 << 6), + MPTCPF_EVENT_SUB_CLOSED = (1 << 7), + MPTCPF_EVENT_SUB_PRIORITY = (1 << 8), +}; + +#endif /* _LINUX_MPTCP_H */ +#endif diff --git a/include/uapi/linux/ndi.h b/include/uapi/linux/ndi.h new file mode 100644 index 0000000000000000000000000000000000000000..a641eda3ee3c39162ea1a521e4476cfa1180c3cd --- /dev/null +++ b/include/uapi/linux/ndi.h @@ -0,0 +1,81 @@ +#ifndef _LINUX_NDI_H +#define _LINUX_NDI_H + +/* 256 was chosen as the max length of a hostname in a DHCP packet is 255. */ +#define NDI_HOSTNAME_MAX_LEN 256 + +#if defined(CONFIG_BCM_KF_NDI) +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/dpi.h> + +struct ndi_dev { + u32 id; + struct in_addr ip4; + struct in6_addr ip6; + u8 mac[ETH_ALEN]; + char hostname[NDI_HOSTNAME_MAX_LEN]; + struct dpi_dev dpi; + + atomic_t refcount; + struct net_device *local_dev; /* the interface at which the + ndi_dev is detected */ + u8 state; + u8 probe_count; + struct hlist_node node; +}; + +struct ndi_info { + struct ndi_dev *dev; +}; + +#endif /* CONFIG_BCM_KF_NDI */ + +enum { + NDINLGRP_NONE, + NDINLGRP_DEV, + + __NDINLGRP_MAX +#define NDINLGRP_MAX (__NDINLGRP_MAX - 1) +}; + +enum { + NDINL_BASE = 16, + + NDINL_NEWDEVICE = 16, + NDINL_DELDEVICE, + NDINL_GETDEVICE, + NDINL_SETDEVICE, + + __NDINL_MAX, +#define NDINL_MAX (__NDINL_MAX - 1) +}; + +/* + * The following describe the netlink attributes used by NDI when + * transferring data to/from userspace. + */ +enum { + NDIA_DEV_UNSPEC, + NDIA_DEV_ID, + NDIA_DEV_IP4, + NDIA_DEV_IP6, + NDIA_DEV_MAC, + NDIA_DEV_HOSTNAME, + NDIA_DEV_ONLINE, + /* dpi fields */ + NDIA_DEV_DPI_VENDOR, + NDIA_DEV_DPI_OS, + NDIA_DEV_DPI_OS_CLASS, + NDIA_DEV_DPI_ID, + NDIA_DEV_DPI_CATEGORY, + NDIA_DEV_DPI_FAMILY, + NDIA_DEV_DPI_PRIO, + __NDIA_DEV_MAX +}; +#define NDIA_DEV_MAX (__NDIA_DEV_MAX - 1) + +#endif /* _LINUX_NDI_H */ + diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 336014bf8868c3f04b92cdbf31bdf2ccafc68a71..d6261374e0d24c1c8bea78cf584471bce29dc5d8 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -105,6 +105,15 @@ enum ip_conntrack_status { IPS_OFFLOAD_BIT = 14, IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT), +#if defined(CONFIG_BCM_KF_NETFILTER) || !defined(CONFIG_BCM_IN_KERNEL) + /* Conntrack eligible for Blogging */ + IPS_BLOG_BIT = 15, + IPS_BLOG = (1 << IPS_BLOG_BIT), + + /* ingress qos */ + IPS_IQOS_BIT = 16, + IPS_IQOS = (1 << IPS_IQOS_BIT), +#endif /* Be careful here, modifying these bits can make things messy, * so don't let users modify them directly. 
*/ @@ -112,7 +121,11 @@ enum ip_conntrack_status { IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), +#if defined(CONFIG_BCM_KF_NETFILTER) + __IPS_MAX_BIT = 17, +#else __IPS_MAX_BIT = 15, +#endif }; /* Connection tracking event types */ @@ -130,6 +143,9 @@ enum ip_conntrack_events { IPCT_SECMARK, /* new security mark has been set */ IPCT_LABEL, /* new connlabel has been set */ IPCT_SYNPROXY, /* synproxy has been set */ +#if defined(CONFIG_BCM_KF_DPI) + IPCT_DPI, /* dpi classification for ct is complete */ +#endif #ifdef __KERNEL__ __IPCT_MAX #endif diff --git a/include/uapi/linux/netfilter/nf_conntrack_pt.h b/include/uapi/linux/netfilter/nf_conntrack_pt.h new file mode 100644 index 0000000000000000000000000000000000000000..108cf9825a5677b99bab1c9721b121466fe28d9a --- /dev/null +++ b/include/uapi/linux/netfilter/nf_conntrack_pt.h @@ -0,0 +1,9 @@ +#ifndef _NF_CONNTRACK_PT_H +#define _NF_CONNTRACK_PT_H +/* PT tracking. */ +#define PT_MAX_ENTRIES 100 +#define PT_MAX_PORTS 1000 +#define PT_MAX_EXPECTED 255 +#define PT_TIMEOUT 180 + +#endif /* _NF_CONNTRACK_PT_H */ diff --git a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h index 64390fac6f7eacbea0181ac3eb156593f28c876e..5bff8a1cff6828cb18c052dd74916e38b2f4464f 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h @@ -39,6 +39,12 @@ union nf_conntrack_man_proto { struct { __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ } gre; +#if defined(CONFIG_BCM_KF_PROTO_ESP) && \ + (defined(CONFIG_NF_CT_PROTO_ESP) || defined(CONFIG_NF_CT_PROTO_ESP_MODULE)) + struct { + __be16 spi; + } esp; +#endif }; #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? 
IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index 1d41810d17e2caffc6c7c31e586300d02da6947e..4e11fc08b39f3dc8c02e795faafae57ffa9639d8 100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -55,6 +55,12 @@ enum ctattr_type { CTA_LABELS, CTA_LABELS_MASK, CTA_SYNPROXY, +#if defined(CONFIG_BCM_KF_DPI) || !defined(CONFIG_BCM_IN_KERNEL) + CTA_DPI, +#endif +#if defined(CONFIG_BCM_KF_SGS) || !defined(CONFIG_BCM_IN_KERNEL) + CTA_SGS, +#endif __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) @@ -276,4 +282,27 @@ enum ctattr_expect_stats { }; #define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1) +#if defined(CONFIG_BCM_KF_DPI) || !defined(CONFIG_BCM_IN_KERNEL) +enum ctattr_dpi { + CTA_DPI_UNSPEC, + CTA_DPI_APP_ID, + CTA_DPI_MAC, + CTA_DPI_STATUS, + CTA_DPI_URL, + __CTA_DPI_MAX, +}; +#define CTA_DPI_MAX (__CTA_DPI_MAX - 1) +#endif + +#if defined(CONFIG_BCM_KF_SGS) +enum ctattr_sgs { + CTA_SGS_UNSPEC, + CTA_SGS_SES_KEY, + CTA_SGS_SES_START, + CTA_SGS_SES_PAD, + __CTA_SGS_MAX, +}; +#define CTA_SGS_MAX (__CTA_SGS_MAX - 1) +#endif + #endif /* _IPCONNTRACK_NETLINK_H */ diff --git a/include/uapi/linux/netfilter/xt_blog.h b/include/uapi/linux/netfilter/xt_blog.h new file mode 100755 index 0000000000000000000000000000000000000000..b7914fd3c64ff1145670c24c986c9b45d798d4e1 --- /dev/null +++ b/include/uapi/linux/netfilter/xt_blog.h @@ -0,0 +1,11 @@ +#ifndef _XT_BLOG_H +#define _XT_BLOG_H + +#include <linux/types.h> + +struct xt_blog { + __u8 tcp_pure_ack; + __u8 invert; +}; + +#endif /*_XT_BLOG_H*/ diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h index d4d1943dcd111c77e8dc93c1bda65608bffdff49..31948e066f7f9cc0f7bccee9ecba92d15c724d8f 100644 --- a/include/uapi/linux/netfilter/xt_connlimit.h +++ b/include/uapi/linux/netfilter/xt_connlimit.h @@ -10,6 +10,9 @@ struct xt_connlimit_data; enum { XT_CONNLIMIT_INVERT = 1 << 0, XT_CONNLIMIT_DADDR = 1 << 1, +#if defined(CONFIG_BCM_KF_NETFILTER) || !defined(CONFIG_BCM_IN_KERNEL) + XT_CONNLIMIT_DADDR_DPORT = 1 << 2, +#endif }; struct xt_connlimit_info { diff --git a/include/uapi/linux/netfilter/xt_flowlabel.h b/include/uapi/linux/netfilter/xt_flowlabel.h new file mode 100644 index 0000000000000000000000000000000000000000..f3df198dcd684fdbdc8a6851ec35c67c46eb1f67 --- /dev/null +++ b/include/uapi/linux/netfilter/xt_flowlabel.h @@ -0,0 +1,52 @@ +/* +* Copyright (c) 2003-2019 Broadcom +* All Rights Reserved +* +<:label-BRCM:2019:DUAL/GPL:standard + +Unless you and Broadcom execute a separate written software license +agreement governing use of this software, this software is licensed +to you under the terms of the GNU General Public License version 2 +(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, +with the following added to such license: + + As a special exception, the copyright holders of this software give + you permission to link this software with independent modules, and + to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent + module, the terms and conditions of the license of that module. + An independent module is a module which is not derived from this + software. The special exception does not apply to any modifications + of the software. 
+ +Not withstanding the above, under no circumstances may you combine +this software in any way with any other Broadcom software provided +under a license other than the GPL, without Broadcom's express prior +written consent. + +:> +*/ + + +/* IP tables module for matching the value of the IPv6 flowlabel field + * + * BRCM, Feb, 1. 2019. + */ + + +#ifndef _XT_FLOWLABEL_H +#define _XT_FLOWLABEL_H + +#include <linux/types.h> + +#define XT_FLOWLABEL_MAX cpu_to_be32(0x000FFFFF) + + +/* match info */ +struct xt_flowlabel_info { + __be32 flowlabel; + __u8 invert; +}; + + +#endif /* _XT_FLOWLABEL_H */ diff --git a/include/uapi/linux/netfilter/xt_mac_extend.h b/include/uapi/linux/netfilter/xt_mac_extend.h new file mode 100644 index 0000000000000000000000000000000000000000..1fbbc87b27c2f4cc07574a53a606e054dd8740bc --- /dev/null +++ b/include/uapi/linux/netfilter/xt_mac_extend.h @@ -0,0 +1,15 @@ +#ifndef _XT_MAC_EXTEND_H +#define _XT_MAC_EXTEND_H + +/* extend from xt_mac.h for MAC address extend match operations, + * i.e, MAC/mask. + * BRCM, Jan, 31. 2019. + */ + + +struct xt_mac_info_extend { + unsigned char srcaddr[ETH_ALEN]; + unsigned char msk[ETH_ALEN]; + int invert; +}; +#endif /*_XT_MAC_EXTEND_H*/ diff --git a/include/uapi/linux/netfilter/xt_mscs.h b/include/uapi/linux/netfilter/xt_mscs.h new file mode 100644 index 0000000000000000000000000000000000000000..baefd7dfb7df329fb6be2d5c318ce23ad814f8fe --- /dev/null +++ b/include/uapi/linux/netfilter/xt_mscs.h @@ -0,0 +1,15 @@ +#ifndef _XT_MSCS_H +#define _XT_MSCS_H + +#include <linux/types.h> + +struct xt_mscs_tginfo { + __u8 up_limit; + __u8 mirror; +}; + +struct xt_mscs_mtinfo { + __u8 up_bitmap; +}; + +#endif /*_XT_MSCS_H*/ diff --git a/include/uapi/linux/netfilter/xt_tcpudp.h b/include/uapi/linux/netfilter/xt_tcpudp.h index 658c169998197f7e8c02ca590e4ca8d9661c1336..326146f1585f20ee52be639fb84a8871b9ae102e 100644 --- a/include/uapi/linux/netfilter/xt_tcpudp.h +++ b/include/uapi/linux/netfilter/xt_tcpudp.h @@ -12,6 +12,9 @@ struct xt_tcp { __u8 flg_mask; /* TCP flags mask byte */ __u8 flg_cmp; /* TCP flags compare byte */ __u8 invflags; /* Inverse flags */ +#if defined(CONFIG_BCM_KF_NETFILTER) + __u8 pure_ack; /* Pure ACK packet */ +#endif }; /* Values for "inv" field in struct ipt_tcp. 
*/ diff --git a/include/uapi/linux/netfilter_bridge/ebt_blog.h b/include/uapi/linux/netfilter_bridge/ebt_blog.h new file mode 100755 index 0000000000000000000000000000000000000000..181f17c3ce575a03d16dfb8abec7be8ec692c797 --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_blog.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_BRIDGE_EBT_BLOG_H +#define __LINUX_BRIDGE_EBT_BLOG_H + +#include <linux/types.h> + +struct ebt_blog_info +{ + __u8 tcp_pure_ack; + __u8 invert; +}; + +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_ftos_t.h b/include/uapi/linux/netfilter_bridge/ebt_ftos_t.h new file mode 100644 index 0000000000000000000000000000000000000000..2e5099c7c2a58e43dd5c3b37db1e93b60b910f3d --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_ftos_t.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_BRIDGE_EBT_FTOS_T_H +#define __LINUX_BRIDGE_EBT_FTOS_T_H + +struct ebt_ftos_t_info +{ + int ftos_set; + unsigned char ftos; + // EBT_ACCEPT, EBT_DROP or EBT_CONTINUE or EBT_RETURN + int target; +}; +#define EBT_FTOS_TARGET "ftos" + +#define FTOS_TARGET 0x01 +#define FTOS_SETFTOS 0x02 +#define FTOS_WMMFTOS 0x04 +#define FTOS_8021QFTOS 0x08 + +#define DSCP_MASK_SHIFT 5 +#define PRIO_LOC_NFMARK 0 +#define PRIO_LOC_NFMASK 7 + +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_ip6_extend.h b/include/uapi/linux/netfilter_bridge/ebt_ip6_extend.h new file mode 100644 index 0000000000000000000000000000000000000000..e855c8f9d89787dc0afd4ffd2c6ccb24ba190dc7 --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_ip6_extend.h @@ -0,0 +1,42 @@ +/* + * ebt_ip6 + * + * Authors: + * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> + * Manohar Castelino <manohar.r.castelino@intel.com> + * + * Jan 11, 2008 + * + * Extend by Broadcom at Jan 31, 2019 + */ +#ifndef __LINUX_BRIDGE_EBT_IP6_EXTEND_H +#define __LINUX_BRIDGE_EBT_IP6_EXTEND_H + +#include <linux/types.h> + +#define EBT_IP6_TCLASS_EXTEND 0x01 +#define EBT_IP6_FLOWLABEL_EXTEND 0x02 +#define EBT_IP6_RANGE_SRC 0x04 +#define EBT_IP6_RANGE_DST 0x08 + +#define EBT_IP6_MASK_EXTEND (EBT_IP6_TCLASS_EXTEND | EBT_IP6_FLOWLABEL_EXTEND | \ + EBT_IP6_RANGE_SRC | EBT_IP6_RANGE_DST) +#define EBT_IP6_MATCH_EXTEND "ip6-extend" + + +struct ebt_ip6range { + struct in6_addr ip_min, ip_max; +}; + +/* the same values are used for the invflags */ +struct ebt_ip6_extend_info { + __u8 flow_lbl[3]; + __u8 tclass[2]; + __u8 tclassmsk; + struct ebt_ip6range range_src; + struct ebt_ip6range range_dst; + __u8 bitmask; + __u8 invflags; +}; + +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_ip_extend.h b/include/uapi/linux/netfilter_bridge/ebt_ip_extend.h new file mode 100644 index 0000000000000000000000000000000000000000..bc2749c56d44ac7d218907646018520714246d1d --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_ip_extend.h @@ -0,0 +1,48 @@ +/* + * ebt_ip + * + * Authors: + * Bart De Schuymer <bart.de.schuymer@pandora.be> + * + * April, 2002 + * + * Changes: + * added ip-sport and ip-dport + * Innominate Security Technologies AG <mhopf@innominate.com> + * September, 2002 + * + * Extend by Broadcom at Jan 31, 2019 + */ +#ifndef __LINUX_BRIDGE_EBT_IP_EXTEND_H +#define __LINUX_BRIDGE_EBT_IP_EXTEND_H + +#include <linux/types.h> + + +#define EBT_IP_TOS_EXTEND 0x01 +#define EBT_IP_DSCP_EXTEND 0x02 +#define EBT_IP_RANGE_SRC 0x04 +#define EBT_IP_RANGE_DST 0x08 + +#define EBT_IP_MASK_EXTEND (EBT_IP_TOS_EXTEND | EBT_IP_DSCP_EXTEND | \ + EBT_IP_RANGE_SRC | EBT_IP_RANGE_DST) +#define EBT_IP_MATCH_EXTEND "ip-extend" + + +struct ebt_iprange { + /* Inclusive: network order. 
*/ + __be32 ip_min, ip_max; +}; + +/* the same values are used for the invflags */ +struct ebt_ip_extend_info { + __u8 tos[2]; + __u8 tosmask; + __u8 dscp; + struct ebt_iprange range_src; + struct ebt_iprange range_dst; + __u8 bitmask; + __u8 invflags; +}; + +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_qos_map.h b/include/uapi/linux/netfilter_bridge/ebt_qos_map.h new file mode 100644 index 0000000000000000000000000000000000000000..d45d7ff31d08cdf1c23478a45b908bbef2475e9a --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_qos_map.h @@ -0,0 +1,9 @@ +#ifndef __LINUX_QOS_MAP_H +#define __LINUX_QOS_MAP_H + +struct ebt_qos_map_info +{ + int dscp2pbit; + int dscp2q; +}; +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_reject.h b/include/uapi/linux/netfilter_bridge/ebt_reject.h new file mode 100644 index 0000000000000000000000000000000000000000..8b55769ef4836f5116c029f482dff1c344128646 --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_reject.h @@ -0,0 +1,14 @@ +#if defined(CONFIG_BCM_KF_NETFILTER) +#ifndef __LINUX_BRIDGE_EBT_REJECT_H +#define __LINUX_BRIDGE_EBT_REJECT_H + +enum ebt_reject_with { + EBT_ICMP6_POLICY_FAIL +}; + +struct ebt_reject_info { + int with; /* reject type */ +}; + +#endif +#endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_skbvlan_m.h b/include/uapi/linux/netfilter_bridge/ebt_skbvlan_m.h new file mode 100644 index 0000000000000000000000000000000000000000..13fdff40284a805d73c3d850b4c975cdd5e35c35 --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_skbvlan_m.h @@ -0,0 +1,45 @@ +#ifndef __LINUX_BRIDGE_EBT_SKBVLAN_H +#define __LINUX_BRIDGE_EBT_SKBVLAN_H + +#include <linux/types.h> + +#define EBT_SKBVLAN_ID 0x0001 +#define EBT_SKBVLAN_PRIO 0x0002 +#define EBT_SKBVLAN_ENCAP 0x0004 +#define EBT_SKBVLAN_VLAN_TAG_0 0x0008 +#define EBT_SKBVLAN_VLAN_TAG_1 0x0010 +#define EBT_SKBVLAN_VLAN_TAG_2 0x0020 +#define EBT_SKBVLAN_VLAN_TAG_3 0x0040 +#define EBT_SKBVLAN_VLAN_TPID_0 0x0080 +#define EBT_SKBVLAN_VLAN_TPID_1 0x0100 +#define EBT_SKBVLAN_MASK (EBT_SKBVLAN_ID | EBT_SKBVLAN_PRIO | EBT_SKBVLAN_ENCAP | \ + EBT_SKBVLAN_VLAN_TAG_0 | EBT_SKBVLAN_VLAN_TAG_1 | EBT_SKBVLAN_VLAN_TAG_2 | \ + EBT_SKBVLAN_VLAN_TAG_3 |EBT_SKBVLAN_VLAN_TPID_0 | EBT_SKBVLAN_VLAN_TPID_1) + + + +#define EBT_SKBVLAN_MATCH "skbvlan" + +struct ebt_skbvlan_m_info { + __u16 id; /* VLAN ID {1-4095} */ + __u8 prio; /* VLAN User Priority {0-7} */ + __be16 encap; /* VLAN Encapsulated frame code {0-65535} */ + __u16 bitmask; /* Args bitmask bit 1=1 - ID arg, + bit 2=1 User-Priority arg, bit 3=1 encap*/ + __u16 invflags; /* Inverse bitmask bit 1=1 - inversed ID arg, + bit 2=1 - inversed Pirority arg */ + __be32 vlantag0[2]; + __be32 vlanmask0; + __be32 vlantag1[2]; + __be32 vlanmask1; + __be32 vlantag2[2]; + __be32 vlanmask2; + __be32 vlantag3[2]; + __be32 vlanmask3; + __be16 vlantpid0; + __be16 vlantpid1; +}; + + +#endif /* __LINUX_BRIDGE_EBT_SKBVLAN_H */ + diff --git a/include/uapi/linux/netfilter_bridge/ebt_time.h b/include/uapi/linux/netfilter_bridge/ebt_time.h new file mode 100644 index 0000000000000000000000000000000000000000..f47b531d7b9b8322d2fa71527c37950a054ec23c --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_time.h @@ -0,0 +1,14 @@ +#ifndef __LINUX_BRIDGE_EBT_TIME_H +#define __LINUX_BRIDGE_EBT_TIME_H + + +struct ebt_time_info { + u_int8_t days_match; /* 1 bit per day. 
-SMTWTFS */ + u_int16_t time_start; /* 0 < time_start < 23*60+59 = 1439 */ + u_int16_t time_stop; /* 0:0 < time_stat < 23:59 */ + u_int8_t kerneltime; /* ignore skb time (and use kerneltime) or not. */ +}; + +#define EBT_TIME_MATCH "time" + +#endif /* __LINUX_BRIDGE_EBT_TIME_H */ diff --git a/include/uapi/linux/netfilter_bridge/ebt_u32.h b/include/uapi/linux/netfilter_bridge/ebt_u32.h new file mode 100644 index 0000000000000000000000000000000000000000..2cd1c043b80455d440e390e280d33ca3f70266fa --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_u32.h @@ -0,0 +1,55 @@ +/* + * ebt_u32 + * + * Authors: + * extend by Broadcom at Jan 24, 2019 + * + * + */ + +#ifndef __LINUX_BRIDGE_EBT_U32_H +#define __LINUX_BRIDGE_EBT_U32_H + +#include <linux/types.h> + +enum ebt_u32_ops { + EBT_U32_AND, + EBT_U32_LEFTSH, + EBT_U32_RIGHTSH, + EBT_U32_AT, +}; + +struct ebt_u32_location_element { + __u32 number; + __u8 nextop; +}; + +struct ebt_u32_value_element { + __u32 min; + __u32 max; +}; + +/* + * Any way to allow for an arbitrary number of elements? + * For now, I settle with a limit of 10 each. + */ +#define EBT_U32_MAXSIZE 10 + +#define EBT_U32_MATCH "u32" + + +struct ebt_u32_test { + struct ebt_u32_location_element location[EBT_U32_MAXSIZE+1]; + struct ebt_u32_value_element value[EBT_U32_MAXSIZE+1]; + __u8 nnums; + __u8 nvalues; +}; + +struct ebt_u32_info { + struct ebt_u32_test tests[EBT_U32_MAXSIZE+1]; + __u8 ntests; + __u8 invert; +}; + +#endif + diff --git a/include/uapi/linux/netfilter_bridge/ebt_vtag_t.h b/include/uapi/linux/netfilter_bridge/ebt_vtag_t.h new file mode 100644 index 0000000000000000000000000000000000000000..cd803cc10aaeaa3277bd0579d4f6c8dee61fd610 --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_vtag_t.h @@ -0,0 +1,13 @@ +#ifndef __EBT_VTAG_T_H__ +#define __EBT_VTAG_T_H__ + + +struct ebt_vtag_t_info +{ + int vtag; + /* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */ + int target; +}; + +#endif //__EBT_VTAG_T_H__ + diff --git a/include/uapi/linux/netfilter_bridge/ebt_wmm_mark_t.h b/include/uapi/linux/netfilter_bridge/ebt_wmm_mark_t.h new file mode 100644 index 0000000000000000000000000000000000000000..629f39c58a65950b51b61d849e9555d86fff9a2f --- /dev/null +++ b/include/uapi/linux/netfilter_bridge/ebt_wmm_mark_t.h @@ -0,0 +1,27 @@ +#ifndef __LINUX_BRIDGE_EBT_MARK_T_H +#define __LINUX_BRIDGE_EBT_MARK_T_H + +#define WMM_MARK_DSCP 1 +#define WMM_MARK_8021D 2 + +#define WMM_MARK_DSCP_STR "dscp" +#define WMM_MARK_8021D_STR "vlan" + +#define PRIO_LOC_NFMARK 0 +#define PRIO_LOC_NFMASK 7 + +#define WMM_DSCP_MASK_SHIFT 5 +#define WMM_MARK_VALUE_NONE -1 + + +struct ebt_wmm_mark_t_info +{ + int mark; + int markpos; + int markset; + /* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */ + int target; +}; +#define EBT_WMM_MARK_TARGET "wmm-mark" + +#endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index fa43dd5a7b3dcc84d29ecf441e28986e2a6bcec4..caba6e1b631687c858799da9a775bd631d47e9e5 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3232,6 +3232,10 @@ enum nl80211_mpath_info { * defined in HE capabilities IE * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently * defined +#ifdef CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT + * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16), + * given for all 6 GHz band channels +#endif * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use */ enum nl80211_band_iftype_attr { @@ -3242,6 +3246,9 @@ enum nl80211_band_iftype_attr { 
NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, +#ifdef CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, +#endif /* CONFIG_BCM_KF_NL80211_HE_6G_CAP_SUPPORT */ /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, @@ -4284,6 +4291,10 @@ enum nl80211_key_attributes { NL80211_KEY_DEFAULT_MGMT, NL80211_KEY_TYPE, NL80211_KEY_DEFAULT_TYPES, +#ifdef CONFIG_BCM_KF_MISC_BACKPORTS + NL80211_KEY_MODE, + NL80211_KEY_DEFAULT_BEACON, +#endif /* CONFIG_BCM_KF_MISC_BACKPORTS */ /* keep last */ __NL80211_KEY_AFTER_LAST, @@ -4339,6 +4350,9 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) +#ifdef CONFIG_BCM_KF_NL80211_6G_BAND_SUPPORT + * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.1 GHz) +#endif * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4346,6 +4360,13 @@ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, +#ifndef CONFIG_BCM_KF_MISC_BACKPORTS +#ifdef CONFIG_BCM_KF_NL80211_6G_BAND_SUPPORT + NL80211_BAND_6GHZ, +#endif /* CONFIG_BCM_KF_NL80211_6G_BAND_SUPPORT */ +#else + NL80211_BAND_6GHZ, +#endif /* CONFIG_BCM_KF_MISC_BACKPORTS */ NUM_NL80211_BANDS, }; @@ -5259,6 +5280,19 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_TXQS, NL80211_EXT_FEATURE_SCAN_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, +#ifdef CONFIG_BCM_KF_MISC_BACKPORTS + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING, + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, + NL80211_EXT_FEATURE_EXT_KEY_ID, + NL80211_EXT_FEATURE_STA_TX_PWR, + NL80211_EXT_FEATURE_SAE_OFFLOAD, + NL80211_EXT_FEATURE_VLAN_OFFLOAD, + NL80211_EXT_FEATURE_AQL, + NL80211_EXT_FEATURE_BEACON_PROTECTION, +#endif /* CONFIG_BCM_KF_MISC_BACKPORTS */ /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index be382fb0592d8e74a74a490d3914ab0207b81cdc..3ff9595ea0d00b25c132f9b4be16b6c922036f6f 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -483,6 +483,37 @@ enum { TCA_FLOWER_KEY_ENC_OPTS, TCA_FLOWER_KEY_ENC_OPTS_MASK, +#ifdef CONFIG_BCM_KF_ENHANCED_TC + /* + * these are the keys userspace/gpl/apps/iproute2/iproute2-5.9.0/include/uapi/linux/pkt_cls.h + * knows about. We need to keep it in sync because of TCA_FLOWER_KEY_NUM_OF_VLANS. 
+ */ + TCA_FLOWER_IN_HW_COUNT, + + TCA_FLOWER_KEY_PORT_SRC_MIN, /* be16 */ + TCA_FLOWER_KEY_PORT_SRC_MAX, /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */ + + TCA_FLOWER_KEY_CT_STATE, /* u16 */ + TCA_FLOWER_KEY_CT_STATE_MASK, /* u16 */ + TCA_FLOWER_KEY_CT_ZONE, /* u16 */ + TCA_FLOWER_KEY_CT_ZONE_MASK, /* u16 */ + TCA_FLOWER_KEY_CT_MARK, /* u32 */ + TCA_FLOWER_KEY_CT_MARK_MASK, /* u32 */ + TCA_FLOWER_KEY_CT_LABELS, /* u128 */ + TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */ + + TCA_FLOWER_KEY_MPLS_OPTS, + + TCA_FLOWER_KEY_HASH, /* u32 */ + TCA_FLOWER_KEY_HASH_MASK, /* u32 */ + + TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */ + TCA_FLOWER_KEY_VLAN_DEI, /* u8 */ + TCA_FLOWER_KEY_CVLAN_DEI, /* u8 */ +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/sgs.h b/include/uapi/linux/sgs.h new file mode 100644 index 0000000000000000000000000000000000000000..7f43d68d8af21fae5ea645714dd7fab285711a75 --- /dev/null +++ b/include/uapi/linux/sgs.h @@ -0,0 +1,56 @@ +/* + * <:copyright-BRCM:2019:DUAL/GPL:standard + * + * Copyright (c) 2019 Broadcom + * All Rights Reserved + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed + * to you under the terms of the GNU General Public License version 2 + * (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, + * with the following added to such license: + * + * As a special exception, the copyright holders of this software give + * you permission to link this software with independent modules, and + * to copy and distribute the resulting executable under terms of your + * choice, provided that you also meet, for each linked independent + * module, the terms and conditions of the license of that module. + * An independent module is a module which is not derived from this + * software. The special exception does not apply to any modifications + * of the software. + * + * Not withstanding the above, under no circumstances may you combine + * this software in any way with any other Broadcom software provided + * under a license other than the GPL, without Broadcom's express prior + * written consent. 
+ * + * :> + */ + +#ifndef _LINUX_SGS_H_ +#define _LINUX_SGS_H_ + +#define SGS_CT_ACCEPT_BIT 0 +#define SGS_CT_BLOCK_BIT 1 +#define SGS_CT_SESSION_BIT 2 +#define SGS_CT_TERMINATED_BIT 3 + +#define SGS_MAGIC 0x600df00d /* Good Food */ +struct sgs_info { + unsigned long valid; + unsigned long flags; + unsigned long rcvcnt; + int rstcnt; +}; + +struct nf_conn; + +struct sgs_core_hooks { + void (*delete)(struct nf_conn *ct); +}; + +int sgs_core_hooks_register(struct sgs_core_hooks *h); +void sgs_nf_ct_delete_from_lists(struct nf_conn *ct); +void sgs_core_hooks_unregister(void); + +#endif /* _LINUX_SGS_H_ */ diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h index 0d7b5fd6605b049ecc2d997ed192a6c20611f5dd..b15e24d91ae1b9e0fc96affe62d4d4af6fdabef2 100644 --- a/include/uapi/linux/tc_act/tc_vlan.h +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -19,6 +19,10 @@ #define TCA_VLAN_ACT_PUSH 2 #define TCA_VLAN_ACT_MODIFY 3 +#ifdef CONFIG_BCM_KF_ENHANCED_TC +#define TCA_VLAN_PRIORITY_COPY 0xff +#endif /* CONFIG_BCM_KF_ENHANCED_TC */ + struct tc_vlan { tc_gen; int v_action; diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index e02d31986ff911b0547bd954abcc7339f4668ca6..9d11ed5e7c9ef19b74f31609af3ed501e451d474 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -18,10 +18,26 @@ #ifndef _UAPI_LINUX_TCP_H #define _UAPI_LINUX_TCP_H +#if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) + +#ifndef __KERNEL__ +#include <sys/socket.h> +#endif + +#include <asm/byteorder.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/socket.h> +#include <linux/types.h> + +#else + #include <linux/types.h> #include <asm/byteorder.h> #include <linux/socket.h> +#endif + struct tcphdr { __be16 source; __be16 dest; @@ -131,6 +147,15 @@ enum { #define TCP_REPAIR_OFF 0 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ +#if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) +#define MPTCP_ENABLED 42 +#define MPTCP_SCHEDULER 43 +#define MPTCP_PATH_MANAGER 44 +#define MPTCP_INFO 45 + +#define MPTCP_INFO_FLAG_SAVE_MASTER 0x01 + +#endif struct tcp_repair_opt { __u32 opt_code; __u32 opt_val; @@ -268,6 +293,55 @@ enum { TCP_NLA_REORD_SEEN, /* reordering events seen */ }; +#if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) +struct mptcp_meta_info { + __u8 mptcpi_state; + __u8 mptcpi_retransmits; + __u8 mptcpi_probes; + __u8 mptcpi_backoff; + + __u32 mptcpi_rto; + __u32 mptcpi_unacked; + + /* Times. 
*/ + __u32 mptcpi_last_data_sent; + __u32 mptcpi_last_data_recv; + __u32 mptcpi_last_ack_recv; + + __u32 mptcpi_total_retrans; + + __u64 mptcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + __u64 mptcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ +}; + +struct mptcp_sub_info { + union { + struct sockaddr src; + struct sockaddr_in src_v4; + struct sockaddr_in6 src_v6; + }; + + union { + struct sockaddr dst; + struct sockaddr_in dst_v4; + struct sockaddr_in6 dst_v6; + }; +}; + +struct mptcp_info { + __u32 tcp_info_len; /* Length of each struct tcp_info in subflows pointer */ + __u32 sub_len; /* Total length of memory pointed to by subflows pointer */ + __u32 meta_len; /* Length of memory pointed to by meta_info */ + __u32 sub_info_len; /* Length of each struct mptcp_sub_info in subflow_info pointer */ + __u32 total_sub_info_len; /* Total length of memory pointed to by subflow_info */ + + struct mptcp_meta_info *meta_info; + struct tcp_info *initial; + struct tcp_info *subflows; /* Pointer to array of tcp_info structs */ + struct mptcp_sub_info *subflow_info; +}; + +#endif /* for TCP_MD5SIG socket option */ #define TCP_MD5SIG_MAXKEYLEN 80 diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index aff5b5e59845a837d038532259673bd56fa49ddd..fbbe90b03a0a572707a896b3de05f2de0ab38cd0 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -104,6 +104,9 @@ struct mtd_write_req { #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ #define MTD_NO_ERASE 0x1000 /* No erase necessary */ #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ +#if defined(CONFIG_BCM_KF_MTD_BCMNAND) +#define MTD_NAND_NOP1 0x10000000 /* SLC NAND that only supports single page write (NOP=1) */ +#endif /* Some common devices / combinations of capabilities */ #define MTD_CAP_ROM 0 diff --git a/net/ipv4/netfilter/nft_reject_ipv4.ko b/net/ipv4/netfilter/nft_reject_ipv4.ko new file mode 100644 index 0000000000000000000000000000000000000000..58b529a745f7e5e8d23a8cc30b862fa4ed26fc2d --- /dev/null +++ b/net/ipv4/netfilter/nft_reject_ipv4.ko @@ -0,0 +1,6 @@ +This is a dummy file to please the OpenWrt build system which no longer +supports 4.19. Until we have found a better solution, this will please the +OpenWrt build system. Since the kernel or the modules compiled from this repo +never end up in the image that ends up on the device, the effort spent on +maintaining the package dependency hierarchy for 4.19 in the OpenWrt build +system is futile. diff --git a/net/ipv6/netfilter/nft_reject_ipv6.ko b/net/ipv6/netfilter/nft_reject_ipv6.ko new file mode 100644 index 0000000000000000000000000000000000000000..58b529a745f7e5e8d23a8cc30b862fa4ed26fc2d --- /dev/null +++ b/net/ipv6/netfilter/nft_reject_ipv6.ko @@ -0,0 +1,6 @@ +This is a dummy file to please the OpenWrt build system which no longer +supports 4.19. Until we have found a better solution, this will please the +OpenWrt build system. Since the kernel or the modules compiled from this repo +never end up in the image that ends up on the device, the effort spent on +maintaining the package dependency hierarchy for 4.19 in the OpenWrt build +system is futile. 
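For reference, the struct mptcp_info added to include/uapi/linux/tcp.h earlier in this patch implies a pointer-plus-length contract: userspace pre-allocates the meta, initial, subflow and subflow-info buffers, records their sizes in the *_len fields, and getsockopt(MPTCP_INFO) fills in as much as fits. A minimal userspace sketch follows, assuming the out-of-tree multipath-tcp.org semantics for the MPTCP_INFO socket option; MAX_SUBFLOWS and dump_mptcp_info() are invented names for illustration, not part of the patch.

/* Illustrative sketch only -- not part of this patch.
 * Assumes the caller-allocated-buffer contract suggested by the
 * struct mptcp_info field comments (tcp_info_len, sub_len, ...). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>      /* IPPROTO_TCP */
#include <linux/tcp.h>       /* struct tcp_info, struct mptcp_info, MPTCP_INFO */

#define MAX_SUBFLOWS 8       /* arbitrary bound chosen for this example */

static void dump_mptcp_info(int fd)
{
    struct mptcp_meta_info meta;
    struct tcp_info initial;
    struct tcp_info subflows[MAX_SUBFLOWS];
    struct mptcp_sub_info sub_info[MAX_SUBFLOWS];
    struct mptcp_info minfo;
    socklen_t len = sizeof(minfo);

    memset(&minfo, 0, sizeof(minfo));
    minfo.tcp_info_len        = sizeof(struct tcp_info);
    minfo.sub_len             = sizeof(subflows);
    minfo.meta_len            = sizeof(meta);
    minfo.sub_info_len        = sizeof(struct mptcp_sub_info);
    minfo.total_sub_info_len  = sizeof(sub_info);
    minfo.meta_info           = &meta;
    minfo.initial             = &initial;
    minfo.subflows            = subflows;
    minfo.subflow_info        = sub_info;

    if (getsockopt(fd, IPPROTO_TCP, MPTCP_INFO, &minfo, &len) < 0) {
        perror("getsockopt(MPTCP_INFO)");
        return;
    }

    printf("meta state %u, unacked %u, total retrans %u\n",
           (unsigned)meta.mptcpi_state,
           (unsigned)meta.mptcpi_unacked,
           (unsigned)meta.mptcpi_total_retrans);
}

The key point the sketch shows is that the kernel never allocates memory for this option: every pointer in struct mptcp_info must reference caller-owned storage whose size is declared in the corresponding length field.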
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9e060c6a01ac299bd02aa87ef891c4d18d66c4a8..c13230129bdcabe5d132a8afedaa2e33dd43b63f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2686,6 +2686,13 @@ enum { BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, +/* HACK: Enable this unconditionally (it is enabled in BCM's build system). + * There are static asserts failing if the enums do not match (see comment above). + */ +/* #if ((defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)) || !defined(CONFIG_BCM_IN_KERNEL)) */ +#if 1 + BPF_TCP_RST_WAIT, +#endif BPF_TCP_MAX_STATES /* Leave at the end! */ }; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 43391e2d1153adb701433d6794702b73f2d60297..5c4bb230585f176b6d62b719003da19a7d01dd75 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -457,6 +457,9 @@ enum { IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) + IFLA_MACSEC_OFFLOAD, +#endif __IFLA_MACSEC_MAX, }; @@ -480,6 +483,16 @@ enum macsec_validation_type { MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; +#if defined(CONFIG_BCM_KF_MACSEC_BACKPORT) +enum macsec_offload { + MACSEC_OFFLOAD_OFF = 0, + MACSEC_OFFLOAD_PHY = 1, + MACSEC_OFFLOAD_MAC = 2, + __MACSEC_OFFLOAD_END, + MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, +}; +#endif + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC,
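The BPF_TCP_RST_WAIT hack above (and its twin in include/uapi/linux/bpf.h) exists because the kernel cross-checks the BPF mirror of the TCP state enum at build time, as the in-tree comment notes. Below is a minimal sketch of that kind of compile-time assertion; check_tcp_state_mirror() and its placement are invented for illustration and are not code from this patch, but the BUILD_BUG_ON() pattern matches what the comment refers to: if TCP_RST_WAIT were added only under the BCM config while BPF_TCP_RST_WAIT stayed out, the two *_MAX_STATES values would diverge and a check like this would fail the build.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/build_bug.h>
#include <linux/bpf.h>        /* BPF_TCP_* mirror enum */
#include <net/tcp_states.h>   /* TCP_* states, TCP_MAX_STATES */

static inline void check_tcp_state_mirror(void)
{
	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED  != (int)TCP_ESTABLISHED);
	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
	/* This is the check that forces TCP_RST_WAIT and BPF_TCP_RST_WAIT
	 * to be added (or omitted) together. */
	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES   != (int)TCP_MAX_STATES);
}

Enabling both additions unconditionally, as the "#if 1" hunks do, keeps the UAPI headers consistent regardless of whether the BCM MPTCP config options are visible to the build.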