diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 6fd6c203f..572a49e1b 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -108,7 +108,17 @@ static struct psil_ep j721s2_src_ep_map[] = {
 	PSIL_PDMA_XY_PKT(0x461e),
 	PSIL_PDMA_XY_PKT(0x461f),
 	/* MAIN_CPSW2G */
-	PSIL_ETHERNET(0x4640),
+	/* As PSIL_ETHERNET(0x4640), but pinned to mapped channel 0 */
+	{
+		.thread_id = 0x4640,
+		.ep_config = {
+			.ep_type = PSIL_EP_NATIVE,
+			.pkt_mode = 1,
+			.needs_epib = 1,
+			.psd_size = 16,
+			.mapped_channel_id = 0,
+		},
+	},
 	/* PDMA_USART_G0 - UART0-1 */
 	PSIL_PDMA_XY_PKT(0x4700),
 	PSIL_PDMA_XY_PKT(0x4701),
@@ -234,7 +244,17 @@ static struct psil_ep j721s2_dst_ep_map[] = {
 	PSIL_ETHERNET(0xf006),
 	PSIL_ETHERNET(0xf007),
 	/* MAIN_CPSW2G */
-	PSIL_ETHERNET(0xc640),
+	/* As PSIL_ETHERNET(0xc640), but pinned to mapped channel 0 */
+	{
+		.thread_id = 0xc640,
+		.ep_config = {
+			.ep_type = PSIL_EP_NATIVE,
+			.pkt_mode = 1,
+			.needs_epib = 1,
+			.psd_size = 16,
+			.mapped_channel_id = 0,
+		},
+	},
 	PSIL_ETHERNET(0xc641),
 	PSIL_ETHERNET(0xc642),
 	PSIL_ETHERNET(0xc643),
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 954967cd1..d8a33b121 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -3009,7 +3009,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 		return -ENOENT;
 
 	init_completion(&common->tdown_complete);
-	common->tx_ch_num = 1;
+	common->tx_ch_num = AM65_CPSW_MAX_TX_QUEUES;
 	common->pf_p0_rx_ptype_rrobin = false;
 	common->default_vlan = 1;