Subject: Re: [net-next PATCH 5/9] octeontx2-af: Add packet path between representor and VF
On Tue, Apr 16, 2024 at 10:38 AM Geetha sowjanya <gakula@marvell.com> wrote:
>
> This patch installs TCAM rules to steer traffic between representors
> and VFs when switchdev mode is set. To support this, a HW loopback
> channel is reserved; packets are routed between the representor and
> the VFs through this channel. A new "ESW_CFG" mbox message is defined
> to notify the AF to install these rules.
>
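A minimal standalone sketch (not part of this patch; the helper name and the
0xff mask are my assumptions) of the VLAN TCI encoding that
rvu_rep_install_rx_rule()/rvu_rep_install_tx_rule() below appear to use for
steering over the reserved loopback channel: the low bits carry the
representor index, and bit 8 marks traffic flowing from the representee
towards the representor (the "rte" case in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical name; bit 8 set == representee -> representor direction */
#define REP_TCI_TO_REPRESENTOR	(1u << 8)

static uint16_t rep_encode_tci(uint16_t rep_id, bool to_representor)
{
	/* Low 8 bits: representor index (assumes rep_id < 256) */
	uint16_t tci = rep_id & 0xff;

	if (to_representor)
		tci |= REP_TCI_TO_REPRESENTOR;

	return tci;
}

int main(void)
{
	/* Representor index 3, both directions of the loopback path */
	printf("representee -> representor: 0x%04x\n", rep_encode_tci(3, true));  /* 0x0103 */
	printf("representor -> representee: 0x%04x\n", rep_encode_tci(3, false)); /* 0x0003 */
	return 0;
}
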
> Signed-off-by: Geetha sowjanya <gakula@marvell.com>
> ---
> .../net/ethernet/marvell/octeontx2/af/mbox.h | 7 +
> .../net/ethernet/marvell/octeontx2/af/rvu.h | 7 +-
> .../marvell/octeontx2/af/rvu_devlink.c | 6 +
> .../ethernet/marvell/octeontx2/af/rvu_nix.c | 7 +-
> .../ethernet/marvell/octeontx2/af/rvu_rep.c | 241 +++++++++++++++++-
> .../marvell/octeontx2/af/rvu_switch.c | 18 +-
> .../net/ethernet/marvell/octeontx2/nic/rep.c | 19 ++
> 7 files changed, 297 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> index c77c02730cf9..3b36da28a8f4 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> @@ -144,6 +144,7 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
> M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
> M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
> M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
> +M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
> /* CGX mbox IDs (range 0x200 - 0x3FF) */ \
> M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
> M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
> @@ -1532,6 +1533,12 @@ struct get_rep_cnt_rsp {
> u64 rsvd;
> };
>
> +struct esw_cfg_req {
> + struct mbox_msghdr hdr;
> + u8 ena;
> + u64 rsvd;
> +};
> +
> struct flow_msg {
> unsigned char dmac[6];
> unsigned char smac[6];
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> index 1d76d52d7a5d..c8572d79a968 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> @@ -596,6 +596,7 @@ struct rvu {
> u16 rep_pcifunc;
> int rep_cnt;
> u16 *rep2pfvf_map;
> + u8 rep_mode;
> };
>
> static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
> @@ -1025,7 +1026,7 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
> /* RVU Switch */
> void rvu_switch_enable(struct rvu *rvu);
> void rvu_switch_disable(struct rvu *rvu);
> -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
> +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
> void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
>
> int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
> @@ -1039,4 +1040,8 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
> void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
> void rvu_mcs_exit(struct rvu *rvu);
>
> +/* Representor APIs */
> +int rvu_rep_pf_init(struct rvu *rvu);
> +int rvu_rep_install_mcam_rules(struct rvu *rvu);
> +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
> #endif /* RVU_H */
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
> index 96c04f7d93f8..8a3b7fb61883 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
> @@ -1464,6 +1464,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
> struct rvu *rvu = rvu_dl->rvu;
> struct rvu_switch *rswitch;
>
> + if (rvu->rep_mode)
> + return -EOPNOTSUPP;
> +
> rswitch = &rvu->rswitch;
> *mode = rswitch->mode;
>
> @@ -1477,6 +1480,9 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
> struct rvu *rvu = rvu_dl->rvu;
> struct rvu_switch *rswitch;
>
> + if (rvu->rep_mode)
> + return -EOPNOTSUPP;
> +
> rswitch = &rvu->rswitch;
> switch (mode) {
> case DEVLINK_ESWITCH_MODE_LEGACY:
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> index 4ef5bb7b337f..75d5c1bc00e1 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> @@ -2738,7 +2738,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
> int schq;
> u64 cfg;
>
> - if (!is_pf_cgxmapped(rvu, pf))
> + if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
> return;
>
> cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
> @@ -4368,8 +4368,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
> if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
> ether_addr_copy(pfvf->default_mac, req->mac_addr);
>
> - rvu_switch_update_rules(rvu, pcifunc);
> -
> return 0;
> }
>
> @@ -5159,7 +5157,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
> pfvf = rvu_get_pfvf(rvu, pcifunc);
> set_bit(NIXLF_INITIALIZED, &pfvf->flags);
>
> - rvu_switch_update_rules(rvu, pcifunc);
> + rvu_switch_update_rules(rvu, pcifunc, true);
>
> return rvu_cgx_start_stop_io(rvu, pcifunc, true);
> }
> @@ -5187,6 +5185,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
> if (err)
> return err;
>
> + rvu_switch_update_rules(rvu, pcifunc, false);
> rvu_cgx_tx_enable(rvu, pcifunc, true);
>
> return 0;
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
> index d07cb356d3d6..5c015e8dfbbe 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
> @@ -13,6 +13,246 @@
> #include "rvu.h"
> #include "rvu_reg.h"
>
> +static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
> +{
> + int id;
> +
> + for (id = 0; id < rvu->rep_cnt; id++)
> + if (rvu->rep2pfvf_map[id] == pcifunc)
> + return id;
> + return -ENODEV;
> +}
> +
> +static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
> + u16 vlan_tci, int *vidx)
> +{
> + struct nix_vtag_config req = {0};
> + struct nix_vtag_config_rsp rsp = {0};
> + u64 etype = ETH_P_8021Q;
> + int err;
> +
> + /* Insert vlan tag */
> + req.hdr.pcifunc = pcifunc;
> + req.vtag_size = VTAGSIZE_T4;
> + req.cfg_type = 0; /* tx vlan cfg */
> + req.tx.cfg_vtag0 = true;
> + req.tx.vtag0 = etype << 48 | ntohs(vlan_tci);
> +
> + err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
> + if (err) {
> + dev_err(rvu->dev, "Tx vlan config failed\n");
> + return err;
> + }
> + *vidx = rsp.vtag0_idx;
> + return 0;
> +}
> +
> +static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
> +{
> + struct nix_vtag_config req = {0};
> + struct nix_vtag_config_rsp rsp;
> +
> + /* config strip, capture and size */
> + req.hdr.pcifunc = pcifunc;
> + req.vtag_size = VTAGSIZE_T4;
> + req.cfg_type = 1; /* rx vlan cfg */
> + req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
> + req.rx.strip_vtag = true;
> + req.rx.capture_vtag = false;
> +
> + return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
> +}
> +
> +static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
> + u16 entry, bool rte)
> +{
> + struct npc_install_flow_req req = { 0 };
> + struct npc_install_flow_rsp rsp = { 0 };
> + struct rvu_pfvf *pfvf;
> + u16 vlan_tci, rep_id;
> +
> + pfvf = rvu_get_pfvf(rvu, pcifunc);
> +
> + /* To steer the traffic from Representee to Representor */
> + rep_id = (u16)rvu_rep_get_vlan_id(rvu, pcifunc);
> + if (rte) {
> + vlan_tci = rep_id | 0x1ull << 8;
> + req.vf = rvu->rep_pcifunc;
> + req.op = NIX_RX_ACTIONOP_UCAST;
> + req.index = rep_id;
> + } else {
> + vlan_tci = rep_id;
> + req.vf = pcifunc;
> + req.op = NIX_RX_ACTION_DEFAULT;
> + }
> +
> + rvu_rep_rx_vlan_cfg(rvu, req.vf);
> + req.entry = entry;
> + req.hdr.pcifunc = 0; /* AF is requester */
> + req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
> + req.vtag0_valid = true;
> + req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
> + req.packet.vlan_etype = ETH_P_8021Q;
> + req.mask.vlan_etype = ETH_P_8021Q;
> + req.packet.vlan_tci = vlan_tci;
> + req.mask.vlan_tci = 0xffff;
> +
> + req.channel = RVU_SWITCH_LBK_CHAN;
> + req.chan_mask = 0xffff;
> + req.intf = pfvf->nix_rx_intf;
> +
> + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
> +}
> +
> +static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
> + bool rte)
> +{
> + struct npc_install_flow_req req = { 0 };
> + struct npc_install_flow_rsp rsp = { 0 };
> + struct rvu_pfvf *pfvf;
> + int vidx, err;
> + u16 vlan_tci;
> + u8 lbkid;
> +
> + pfvf = rvu_get_pfvf(rvu, pcifunc);
> + vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
> + if (rte)
> + vlan_tci |= 0x1ull << 8;
> +
> + err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
> + if (err)
> + return err;
> +
> + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
> + req.hdr.pcifunc = 0; /* AF is requester */
> + if (rte) {
> + req.vf = pcifunc;
> + } else {
> + req.vf = rvu->rep_pcifunc;
> + req.packet.sq_id = vlan_tci;
> + req.mask.sq_id = 0xffff;
> + }
> +
> + req.entry = entry;
> + req.intf = pfvf->nix_tx_intf;
> + req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
> + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
> + req.set_cntr = 1;
> + req.vtag0_def = vidx;
> + req.vtag0_op = 1;
> + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
> +}
> +
> +int rvu_rep_install_mcam_rules(struct rvu *rvu)
> +{
> + struct rvu_switch *rswitch = &rvu->rswitch;
> + u16 start = rswitch->start_entry;
> + struct rvu_hwinfo *hw = rvu->hw;
> + u16 pcifunc, entry = 0;
> + int pf, vf, numvfs;
> + int err, nixlf, i;
> + u8 rep;
> +
> + for (pf = 1; pf < hw->total_pfs; pf++) {
> + if (!is_pf_cgxmapped(rvu, pf))
> + continue;
> +
> + pcifunc = pf << RVU_PFVF_PF_SHIFT;
> + rvu_get_nix_blkaddr(rvu, pcifunc);
> + rep = true;
> + for (i = 0; i < 2; i++) {
> + err = rvu_rep_install_rx_rule(rvu, pcifunc, start + entry, rep);
> + if (err)
> + return err;
> + rswitch->entry2pcifunc[entry++] = pcifunc;
> +
> + err = rvu_rep_install_tx_rule(rvu, pcifunc, start + entry, rep);
> + if (err)
> + return err;
> + rswitch->entry2pcifunc[entry++] = pcifunc;
> + rep = false;
> + }
> +
> + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
> + for (vf = 0; vf < numvfs; vf++) {
> + pcifunc = pf << RVU_PFVF_PF_SHIFT |
> + ((vf + 1) & RVU_PFVF_FUNC_MASK);
> + rvu_get_nix_blkaddr(rvu, pcifunc);
> +
> + /* Skip installing rules if nixlf is not attached */
> + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
> + if (err)
> + continue;
> + rep = true;
> + for (i = 0; i < 2; i++) {
> + err = rvu_rep_install_rx_rule(rvu, pcifunc, start + entry, rep);
> + if (err)
> + return err;
> + rswitch->entry2pcifunc[entry++] = pcifunc;
> +
> + err = rvu_rep_install_tx_rule(rvu, pcifunc, start + entry, rep);
> + if (err)
> + return err;
> + rswitch->entry2pcifunc[entry++] = pcifunc;
> + rep = false;
> + }
> + }
> + }
> + return 0;
> +}
> +
> +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
> +{
> + struct rvu_switch *rswitch = &rvu->rswitch;
> + struct npc_mcam *mcam = &rvu->hw->mcam;
> + u32 max = rswitch->used_entries;
> + int blkaddr;
> + u16 entry;
> +
> + if (!rswitch->used_entries)
> + return;
> +
> + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
> +
> + if (blkaddr < 0)
> + return;
> +
> + rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
> + mutex_lock(&mcam->lock);
> + for (entry = 0; entry < max; entry++) {
> + if (rswitch->entry2pcifunc[entry] == pcifunc)
> + npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
> + }
> + mutex_unlock(&mcam->lock);
> +}
> +
> +int rvu_rep_pf_init(struct rvu *rvu)
> +{
> + u16 pcifunc = rvu->rep_pcifunc;
> + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
> +
> + set_bit(NIXLF_INITIALIZED, &pfvf->flags);
> + rvu_switch_enable_lbk_link(rvu, pcifunc, true);
> + rvu_rep_rx_vlan_cfg(rvu, pcifunc);
> + return 0;
> +}
> +
> +int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
> + struct msg_rsp *rsp)
> +{
> + if (req->hdr.pcifunc != rvu->rep_pcifunc)
> + return 0;
> +
> + rvu->rep_mode = req->ena;
> +
> + if (req->ena)
> + rvu_switch_enable(rvu);
> + else
> + rvu_switch_disable(rvu);
> +
> + return 0;
> +}
> +
> int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
> struct get_rep_cnt_rsp *rsp)
> {
> @@ -45,4 +285,3 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
> }
> return 0;
> }
> -
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
> index ceb81eebf65e..268efb7c1c15 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
> @@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
>
> alloc_req.contig = true;
> alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
> + if (rvu->rep_mode)
> + alloc_req.count = alloc_req.count * 4;
> ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
> &alloc_rsp);
> if (ret) {
> @@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
> rswitch->used_entries = alloc_rsp.count;
> rswitch->start_entry = alloc_rsp.entry;
>
> - ret = rvu_switch_install_rules(rvu);
> + if (rvu->rep_mode) {
> + rvu_rep_pf_init(rvu);
> + ret = rvu_rep_install_mcam_rules(rvu);
> + } else {
> + ret = rvu_switch_install_rules(rvu);
> + }
> if (ret)
> goto uninstall_rules;
>
> @@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu)
> if (!rswitch->used_entries)
> return;
>
> + if (rvu->rep_mode)
> + goto free_ents;
> +
> for (pf = 1; pf < hw->total_pfs; pf++) {
> if (!is_pf_cgxmapped(rvu, pf))
> continue;
> @@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu)
> }
> }
>
> +free_ents:
> uninstall_req.start = rswitch->start_entry;
> uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
> free_req.all = 1;
> @@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu)
> kfree(rswitch->entry2pcifunc);
> }
>
> -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
> +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
> {
> struct rvu_switch *rswitch = &rvu->rswitch;
> u32 max = rswitch->used_entries;
> u16 entry;
>
> + if (rvu->rep_mode)
> + return rvu_rep_update_rules(rvu, pcifunc, ena);
> +
> if (!rswitch->used_entries)
> return;
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
> index 187b00156bcd..1329617f8d6f 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
> @@ -28,6 +28,22 @@ MODULE_DESCRIPTION(DRV_STRING);
> MODULE_LICENSE("GPL");
> MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
>
> +static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
> +{
> + struct esw_cfg_req *req;
> +
> + mutex_lock(&priv->mbox.lock);
> + req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
> + if (!req) {
> + mutex_unlock(&priv->mbox.lock);
> + return -ENOMEM;
> + }
> + req->ena = ena;
> + otx2_sync_mbox_msg(&priv->mbox);
> + mutex_unlock(&priv->mbox.lock);
> + return 0;
> +}
> +
> static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
> {
> struct rep_dev *rep = netdev_priv(dev);
> @@ -170,6 +186,8 @@ static void rvu_rep_free_netdev(struct otx2_nic *priv)
>
> void rvu_rep_destroy(struct otx2_nic *priv)
> {
> + /* Remove mcam rules */
> + rvu_eswitch_config(priv, false);
> rvu_rep_free_cq_rsrc(priv);
> rvu_rep_free_netdev(priv);
> }
> @@ -221,6 +239,7 @@ int rvu_rep_create(struct otx2_nic *priv)
> if (err)
> goto exit;
>
> + rvu_eswitch_config(priv, true);
> return 0;
> exit:
> rvu_rep_free_netdev(priv);
> --
> 2.25.1
>
>


--
Regards,
Kalesh A P