Date:    Thu, 29 Apr 2021 08:11:59 -0700
From:    Richard Cochran <>
Subject: Re: [PATCH v1 2/2] phy: nxp-c45-tja11xx: add timestamping support
On Wed, Apr 28, 2021 at 03:30:13PM +0300, Radu Pirea (NXP OSS) wrote:
> +#define VEND1_LTC_WR_NSEC_0	0x1106
> +#define VEND1_LTC_WR_NSEC_1	0x1107
> +#define VEND1_LTC_WR_SEC_0	0x1108
> +#define VEND1_LTC_WR_SEC_1	0x1109
> +
> +#define VEND1_LTC_RD_NSEC_0	0x110A
> +#define VEND1_LTC_RD_NSEC_1	0x110B
> +#define VEND1_LTC_RD_SEC_0	0x110C
> +#define VEND1_LTC_RD_SEC_1	0x110D
Weird ...
>  struct nxp_c45_phy {
> +	struct phy_device *phydev;
> +	struct mii_timestamper mii_ts;
> +	struct ptp_clock *ptp_clock;
> +	struct ptp_clock_info caps;
> +	struct sk_buff_head tx_queue;
> +	struct sk_buff_head rx_queue;
> +	/* used to read the LTC counter atomic */
> +	struct mutex ltc_read_lock;
> +	/* used to write the LTC counter atomic */
> +	struct mutex ltc_write_lock;
You are sure that the RD and WR banks are completely independent? In any case, I think a single lock would be fine, because contention is normally very low.
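Something like this (untested sketch; "ptp_lock" is just a placeholder name) would do, dropping the second mutex:

	/* Serializes access to the PTP/LTC registers. */
	struct mutex ptp_lock;

with every PTP path taking ptp_lock around its register accesses.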
> +	int hwts_tx;
> +	int hwts_rx;
>  	u32 tx_delay;
>  	u32 rx_delay;
>  };
> @@ -110,6 +211,353 @@ struct nxp_c45_phy_stats {
>  	u16 mask;
>  };
>
> +static bool nxp_c45_poll_txts(struct phy_device *phydev)
> +{
> +	return phydev->irq <= 0;
> +}
> +
> +static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
> +				  struct timespec64 *ts,
> +				  struct ptp_system_timestamp *sts)
> +{
> +	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
> +
> +	mutex_lock(&priv->ltc_read_lock);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
> +		      READ_LTC);
> +	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +				   VEND1_LTC_RD_NSEC_0);
> +	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +				    VEND1_LTC_RD_NSEC_1) << 16;
> +	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +				  VEND1_LTC_RD_SEC_0);
> +	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +				   VEND1_LTC_RD_SEC_1) << 16;
> +	mutex_unlock(&priv->ltc_read_lock);
> +
> +	return 0;
> +}
> +
> +static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
> +				 const struct timespec64 *ts)
> +{
> +	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
> +
> +	mutex_lock(&priv->ltc_write_lock);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
> +		      ts->tv_nsec);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
> +		      ts->tv_nsec >> 16);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
> +		      ts->tv_sec);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
> +		      ts->tv_sec >> 16);
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
> +		      LOAD_LTC);
> +	mutex_unlock(&priv->ltc_write_lock);
> +
> +	return 0;
> +}
> +
> +static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
> +{
> +	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
> +	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
> +	u64 subns_inc_val;
> +	bool inc;
> +
> +	inc = ppb >= 0 ? true : false;
> +	ppb = abs(ppb);
> +
> +	subns_inc_val = PPM_TO_SUBNS_INC(ppb);
> +
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
> +		      subns_inc_val);
> +	subns_inc_val >>= 16;
> +	subns_inc_val |= CLK_RATE_ADJ_LD;
> +	if (inc)
> +		subns_inc_val |= CLK_RATE_ADJ_DIR;
> +
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
> +		      subns_inc_val);
This needs a mutex to protect against concurrent callers.
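E.g. (untested, re-using the single ptp_lock suggested above):

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);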
> +	return 0;
> +}
> +
> +static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
> +{
> +	struct timespec64 now, then;
> +
> +	then = ns_to_timespec64(delta);
> +	nxp_c45_ptp_gettimex64(ptp, &now, NULL);
> +	now = timespec64_add(now, then);
> +	nxp_c45_ptp_settime64(ptp, &now);
Locking is needed here, too. You will need an unlocked version of nxp_c45_ptp_settime64(), named _nxp_c45_ptp_settime64() for example.
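Untested sketch, again assuming the single ptp_lock and a matching unlocked _nxp_c45_ptp_gettimex64():

	static void _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
					   const struct timespec64 *ts)
	{
		struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

		/* Caller must hold priv->ptp_lock. */
		phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
			      ts->tv_nsec);
		phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
			      ts->tv_nsec >> 16);
		phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
			      ts->tv_sec);
		phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
			      ts->tv_sec >> 16);
		phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
			      LOAD_LTC);
	}

	static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
	{
		struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
		struct timespec64 now, then;

		mutex_lock(&priv->ptp_lock);
		then = ns_to_timespec64(delta);
		_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
		now = timespec64_add(now, then);
		_nxp_c45_ptp_settime64(ptp, &now);
		mutex_unlock(&priv->ptp_lock);

		return 0;
	}

The public nxp_c45_ptp_settime64() then becomes a trivial lock/call/unlock wrapper around the unlocked version.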
> +	return 0;
> +}
> +
> +static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
> +				   struct nxp_c45_hwts *hwts)
> +{
> +	ts->tv_nsec = hwts->nsec;
> +	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
> +		ts->tv_sec -= BIT(2);
> +	ts->tv_sec &= ~TS_SEC_MASK;
> +	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
> +}
> +
> +static bool nxp_c45_match_ts(struct ptp_header *header,
> +			     struct nxp_c45_hwts *hwts,
> +			     unsigned int type)
> +{
> +	return ntohs(header->sequence_id) == hwts->sequence_id &&
> +	       ptp_get_msgtype(header, type) == hwts->msg_type &&
> +	       header->domain_number == hwts->domain_number;
> +}
> +
> +static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
> +			       struct nxp_c45_hwts *hwts)
> +{
> +	bool valid;
> +	u16 reg;
This function is called from both interrupt and thread context. It needs locking. I suggest a single mutex that protects concurrent access to any PHY registers.
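Roughly (untested; the register reads elided here stay exactly as in your patch, quoted below):

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto out;

	/* ... read the remaining ring registers as below ... */
out:
	mutex_unlock(&priv->ptp_lock);
	return valid;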
> +	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
> +		      RING_DONE);
> +	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
> +	valid = !!(reg & RING_DATA_0_TS_VALID);
> +	if (!valid)
> +		return valid;
> +
> +	hwts->domain_number = reg;
> +	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
> +	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
> +	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +					 VEND1_EGR_RING_DATA_1_SEQ_ID);
> +	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
> +				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
> +	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
> +	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
> +	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;
> +
> +	return valid;
> +}
> +
> +static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
> +				 struct nxp_c45_hwts *txts)
> +{
> +	struct sk_buff *skb, *tmp, *skb_match = NULL;
> +	struct skb_shared_hwtstamps shhwtstamps;
> +	struct timespec64 ts;
> +	unsigned long flags;
> +	bool ts_match;
> +	s64 ts_ns;
> +
> +	spin_lock_irqsave(&priv->tx_queue.lock, flags);
> +	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
> +		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
> +					    NXP_C45_SKB_CB(skb)->type);
> +		if (!ts_match)
> +			continue;
> +		skb_match = skb;
> +		__skb_unlink(skb, &priv->tx_queue);
> +		break;
> +	}
> +	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
> +
> +	if (skb_match) {
> +		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
> +		nxp_c45_reconstruct_ts(&ts, txts);
> +		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
> +		ts_ns = timespec64_to_ns(&ts);
> +		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
> +		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
> +	} else {
> +		phydev_warn(priv->phydev,
> +			    "the tx timestamp doesn't match with any skb\n");
> +	}
> +}
> +
> +static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
> +{
> +	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
> +	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
> +	struct skb_shared_hwtstamps *shhwtstamps_rx;
> +	struct nxp_c45_hwts hwts;
> +	bool reschedule = false;
> +	struct timespec64 ts;
> +	struct sk_buff *skb;
> +	bool txts_valid;
> +	u32 ts_raw;
> +
> +	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
> +		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
> +		if (unlikely(!txts_valid)) {
> +			/* Still more skbs in the queue */
> +			reschedule = true;
> +			break;
> +		}
> +
> +		nxp_c45_process_txts(priv, &hwts);
> +	}
> +
> +	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
> +		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
Probably better to move this call before the 'while' loop.
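I.e. something like this (untested) - one LTC read for the whole batch. Note that nxp_c45_reconstruct_ts() modifies its timespec64 argument, so each packet should work on a copy of the reference time:

	nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		struct timespec64 rx_ts = ts;

		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&rx_ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&rx_ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx_ni(skb);
	}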
> +		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
> +		hwts.sec = ts_raw >> 30;
> +		hwts.nsec = ts_raw & GENMASK(29, 0);
> +		nxp_c45_reconstruct_ts(&ts, &hwts);
> +		shhwtstamps_rx = skb_hwtstamps(skb);
> +		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
> +		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
> +		netif_rx_ni(skb);
> +	}
> +
> +	return reschedule ? 1 : -1;
> +}
Thanks,
Richard