From 0e0d2f2c0723651f61e8799f9c084c3a0c08a969 Mon Sep 17 00:00:00 2001 From: Manish Bhardwaj Date: Sat, 1 Oct 2022 13:33:06 +0000 Subject: [PATCH] nvidia-oot: add support for mttcan driver Using this patch we are adding support for mttcan driver in oot kernel. JIRA ESLC-6885 Signed-off-by: Manish Bhardwaj Change-Id: I83a6d43aa99278a546778cf1700e2bd106ec42a9 Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2785360 Reviewed-by: Bitan Biswas GVS: Gerrit_Virtual_Submit --- drivers/net/Makefile | 1 + drivers/net/can/Makefile | 4 + drivers/net/can/mttcan/Makefile | 8 + drivers/net/can/mttcan/hal/m_ttcan.c | 1029 +++++++++ drivers/net/can/mttcan/hal/m_ttcan_intr.c | 62 + drivers/net/can/mttcan/hal/m_ttcan_list.c | 102 + drivers/net/can/mttcan/hal/m_ttcan_ram.c | 456 ++++ drivers/net/can/mttcan/hal/m_ttcan_tt.c | 201 ++ drivers/net/can/mttcan/include/m_ttcan.h | 566 +++++ drivers/net/can/mttcan/include/m_ttcan_ivc.h | 22 + .../net/can/mttcan/include/m_ttcan_linux.h | 126 ++ .../net/can/mttcan/include/m_ttcan_regdef.h | 872 ++++++++ drivers/net/can/mttcan/native/m_ttcan_linux.c | 1972 +++++++++++++++++ drivers/net/can/mttcan/native/m_ttcan_sys.c | 741 +++++++ 14 files changed, 6162 insertions(+) create mode 100644 drivers/net/can/Makefile create mode 100644 drivers/net/can/mttcan/Makefile create mode 100644 drivers/net/can/mttcan/hal/m_ttcan.c create mode 100644 drivers/net/can/mttcan/hal/m_ttcan_intr.c create mode 100644 drivers/net/can/mttcan/hal/m_ttcan_list.c create mode 100644 drivers/net/can/mttcan/hal/m_ttcan_ram.c create mode 100644 drivers/net/can/mttcan/hal/m_ttcan_tt.c create mode 100644 drivers/net/can/mttcan/include/m_ttcan.h create mode 100644 drivers/net/can/mttcan/include/m_ttcan_ivc.h create mode 100644 drivers/net/can/mttcan/include/m_ttcan_linux.h create mode 100644 drivers/net/can/mttcan/include/m_ttcan_regdef.h create mode 100644 drivers/net/can/mttcan/native/m_ttcan_linux.c create mode 100644 drivers/net/can/mttcan/native/m_ttcan_sys.c 
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3016ffc6..f9899296 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -3,3 +3,4 @@ obj-m += ethernet/ obj-m += tegra_hv_net.o +obj-m += can/ diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile new file mode 100644 index 00000000..216870c8 --- /dev/null +++ b/drivers/net/can/Makefile @@ -0,0 +1,4 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: GPL-2.0-only + +obj-m += mttcan/ diff --git a/drivers/net/can/mttcan/Makefile b/drivers/net/can/mttcan/Makefile new file mode 100644 index 00000000..dc65a979 --- /dev/null +++ b/drivers/net/can/mttcan/Makefile @@ -0,0 +1,8 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: GPL-2.0-only + +obj-m := mttcan.o + +mttcan-y = native/m_ttcan_linux.o native/m_ttcan_sys.o hal/m_ttcan.o +mttcan-y += hal/m_ttcan_intr.o hal/m_ttcan_list.o hal/m_ttcan_ram.o +mttcan-y += hal/m_ttcan_tt.o diff --git a/drivers/net/can/mttcan/hal/m_ttcan.c b/drivers/net/can/mttcan/hal/m_ttcan.c new file mode 100644 index 00000000..39b256b2 --- /dev/null +++ b/drivers/net/can/mttcan/hal/m_ttcan.c @@ -0,0 +1,1029 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#include "../include/m_ttcan.h" +#include +#include + +#define MTTCAN_INIT_TIMEOUT 1000 + +void ttcan_print_version(struct ttcan_controller *ttcan) +{ + u32 crel, endn; + + crel = ttcan_read32(ttcan, ADR_MTTCAN_CREL); + endn = ttcan_read32(ttcan, ADR_MTTCAN_ENDN); + + pr_info("Release %d.%d.%d from %2.2x.%2.2x.201%1.1x\n", + (crel & MTT_CREL_REL_MASK) >> MTT_CREL_REL_SHIFT, + (crel & MTT_CREL_STEP_MASK) >> MTT_CREL_STEP_SHIFT, + (crel & MTT_CREL_SUBS_MASK) >> MTT_CREL_SUBS_SHIFT, + (crel & MTT_CREL_DAY_MASK) >> MTT_CREL_DAY_SHIFT, + (crel & MTT_CREL_MON_MASK) >> MTT_CREL_MON_SHIFT, + (crel & MTT_CREL_YEAR_MASK) >> MTT_CREL_YEAR_SHIFT); + pr_debug("CAN register access %s Endian Reg 0x%x\n", + (endn == 0x87654321) ? "PASS" : "FAIL", endn); +} + +int ttcan_write32_check(struct ttcan_controller *ttcan, + int reg, u32 val, u32 mask) +{ + u32 ret_val; + + ttcan_write32(ttcan, reg, val); + + ret_val = ttcan_read32(ttcan, reg); + + if ((ret_val & mask) == (val & mask)) + return 0; + else + pr_err("%s:addr: 0x%x write 0x%x read 0x%x mask 0x%x\n", + __func__, reg, val, ret_val, mask); + return -EIO; +} + +inline void ttcan_set_ok(struct ttcan_controller *ttcan) +{ + u32 val; + + val = ttcan_xread32(ttcan, ADDR_M_TTCAN_CNTRL_REG); + val |= M_TTCAN_CNTRL_REG_COK; + ttcan_xwrite32(ttcan, ADDR_M_TTCAN_CNTRL_REG, val); +} + +int ttcan_set_init(struct ttcan_controller *ttcan) +{ + u32 cccr_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + + if ((cccr_reg & MTT_CCCR_INIT_MASK) == 0) { + /* Controller not yet initialized */ + cccr_reg |= 1; + + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while ((cccr_reg & MTT_CCCR_INIT_MASK) == 0 && timeout); + + if (!timeout) { + pr_err("Controller %s Timeout\n", __func__); + return -ETIMEDOUT; + } + } + return 0; +} + +void ttcan_bus_off_seq(struct ttcan_controller *ttcan) +{ + /* We need to wait 
for 129 bus idle sequence (129*11 bits) + * according to CAN SPEC. Considering minimal bitrate (125 kbps), + * we need to wait for maximum of 12 msec + */ + mdelay(12); +} + +int ttcan_reset_init(struct ttcan_controller *ttcan) +{ + u32 cccr_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + + if (cccr_reg & MTT_CCCR_INIT_MASK) { + /* Controller was initialized */ + cccr_reg &= ~1; + + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while ((cccr_reg & MTT_CCCR_INIT_MASK) && timeout); + + if (!timeout) { + pr_err("Controller %s Timeout\n", __func__); + return -ETIMEDOUT; + } + } + return 0; +} + +int ttcan_set_config_change_enable(struct ttcan_controller *ttcan) +{ + u32 cccr_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + /* initialize the core */ + if (ttcan_set_init(ttcan)) + return -ETIMEDOUT; + + /* set configuration change enable bit */ + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + cccr_reg |= MTT_CCCR_CCE_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while (((cccr_reg & MTT_CCCR_CCE_MASK) == 0) && timeout); + + if (!timeout) { + pr_err("Controller %s Timeout\n", __func__); + return -ETIMEDOUT; + } + + return 0; +} + +int ttcan_set_power(struct ttcan_controller *ttcan, int value) +{ + + u32 cccr_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + cccr_reg &= ~(MTT_CCCR_CSR_MASK); + cccr_reg |= ((~value) << MTT_CCCR_CSR_SHIFT) & MTT_CCCR_CSR_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while (((cccr_reg & MTT_CCCR_CSA_MASK) >> MTT_CCCR_CSA_SHIFT) + == value && timeout); + + if (!timeout) { + pr_err("Controller %s Timeout\n", __func__); + return -ETIMEDOUT; + } + + return 0; +} + 
+void ttcan_reset_config_change_enable(struct ttcan_controller *ttcan) +{ + + /* reset the core */ + if (ttcan_reset_init(ttcan)) + return; + + /*CCCR.CCE is automatically reset when CCCR.INIT is reset */ +} + +void ttcan_disable_auto_retransmission(struct ttcan_controller *ttcan, + bool enable) +{ + u32 cccr_reg; + + /* set DAR bit */ + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + if (enable) + cccr_reg |= MTT_CCCR_DAR_MASK; + else + cccr_reg &= ~MTT_CCCR_DAR_MASK; + ttcan_write32_check(ttcan, ADR_MTTCAN_CCCR, cccr_reg, MTTCAN_CCCR_MSK); +} + +int ttcan_set_loopback(struct ttcan_controller *ttcan) +{ + u32 test_reg; + u32 cccr_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + /* set TEST.LBCK (external loopback) bit */ + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + test_reg = ttcan_read32(ttcan, ADR_MTTCAN_TEST); + + if (ttcan_protected(cccr_reg)) + return -EPERM; + + cccr_reg |= MTT_CCCR_TEST_MASK; + test_reg |= MTT_TEST_LBCK_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while ((cccr_reg & MTT_CCCR_TEST_MASK) == 0 && timeout); + + if (!timeout) { + pr_err("%s: Timeout cccr = 0x%x\n", __func__, cccr_reg); + return -ETIMEDOUT; + } + + return ttcan_write32_check(ttcan, ADR_MTTCAN_TEST, + test_reg, MTTCAN_TEST_MSK); + +} + +int ttcan_set_bus_monitoring_mode(struct ttcan_controller *ttcan, bool enable) +{ + u32 cccr_reg; + + /* set MON bit(bus monitor mode */ + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + if (ttcan_protected(cccr_reg)) + return -EPERM; + if (enable) + cccr_reg |= MTT_CCCR_MON_MASK; + else + cccr_reg &= ~MTT_CCCR_MON_MASK; + return ttcan_write32_check(ttcan, ADR_MTTCAN_CCCR, + cccr_reg, MTTCAN_CCCR_MSK); +} + +int ttcan_set_normal_mode(struct ttcan_controller *ttcan) +{ + u32 cccr_reg; + u32 test_reg; + int timeout = MTTCAN_INIT_TIMEOUT; + + /* Clear loopback and monitor mode */ + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + test_reg 
= ttcan_read32(ttcan, ADR_MTTCAN_TEST); + + if (ttcan_protected(cccr_reg)) + return -EPERM; + + if (test_reg & MTT_TEST_LBCK_MASK) { + test_reg &= ~(MTT_TEST_LBCK_MASK); + if ((cccr_reg & MTT_CCCR_TEST_MASK) == 0) { + cccr_reg |= MTT_CCCR_TEST_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_CCCR, cccr_reg); + do { + cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + udelay(1); + timeout--; + } while ((cccr_reg & MTT_CCCR_TEST_MASK) == 0 + && timeout); + + if (!timeout) { + pr_err("%s: Timeout cccr = 0x%x\n", __func__, + cccr_reg); + return -ETIMEDOUT; + } + } + ttcan_write32_check(ttcan, ADR_MTTCAN_TEST, test_reg, + MTTCAN_TEST_MSK); + } + cccr_reg &= ~(MTT_CCCR_MON_MASK); + cccr_reg &= ~(MTT_CCCR_TEST_MASK); + return ttcan_write32_check(ttcan, ADR_MTTCAN_CCCR, cccr_reg, + MTTCAN_CCCR_MSK); +} + +inline u32 ttcan_read_ecr(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_ECR); +} + +int ttcan_set_bitrate(struct ttcan_controller *ttcan) +{ + unsigned int temp_reg; + int ret = 0; + u32 cccr_reg; + u32 nbtp_reg; + u32 dbtp_reg; + u32 tdcr_reg; + + nbtp_reg = ((ttcan->bt_config.nominal.phase_seg2 - 1) << + MTT_NBTP_NTSEG2_SHIFT) & MTT_NBTP_NTSEG2_MASK; + nbtp_reg |= ((ttcan->bt_config.nominal.phase_seg1 + + ttcan->bt_config.nominal.prop_seg - 1) + << MTT_NBTP_NTSEG1_SHIFT) & MTT_NBTP_NTSEG1_MASK; + + nbtp_reg |= (ttcan->bt_config.nominal.sjw - 1) << + MTT_NBTP_NSJW_SHIFT & MTT_NBTP_NSJW_MASK; + nbtp_reg |= (ttcan->bt_config.nominal.brp - 1) << + MTT_NBTP_NBRP_SHIFT & MTT_NBTP_NBRP_MASK; + + pr_debug("%s NBTP(0x%x) value (0x%x)\n", __func__, ADR_MTTCAN_NBTP, + nbtp_reg); + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_NBTP, nbtp_reg, + MTTCAN_NBTP_MSK); + if (ret) { + pr_err("%s: Normal bitrate configuration failed\n", __func__); + return ret; + } + + if (ttcan->bt_config.fd_flags & CAN_FD_FLAG) { + ttcan->bt_config.data.tdc = ttcan->tdc; + + dbtp_reg = ((ttcan->bt_config.data.phase_seg2 - 1) << + MTT_DBTP_DTSEG2_SHIFT) & MTT_DBTP_DTSEG2_MASK; + dbtp_reg 
|= ((ttcan->bt_config.data.phase_seg1 + + ttcan->bt_config.data.prop_seg - 1) << + MTT_DBTP_DTSEG1_SHIFT) & MTT_DBTP_DTSEG1_MASK; + dbtp_reg |= ((ttcan->bt_config.data.sjw - 1) << + MTT_DBTP_DSJW_SHIFT) & MTT_DBTP_DSJW_MASK; + dbtp_reg |= ((ttcan->bt_config.data.brp - 1) << + MTT_DBTP_DBRP_SHIFT) & MTT_DBTP_DBRP_MASK; + dbtp_reg |= (ttcan->bt_config.data.tdc << MTT_DBTP_TDC_SHIFT) & + MTT_DBTP_TDC_MASK; + + tdcr_reg = (ttcan->bt_config.data.tdc_offset << + MTT_TDCR_TDCO_SHIFT) & MTT_TDCR_TDCO_MASK; + + tdcr_reg |= ttcan->tdc_offset; + + pr_debug("%s DBTP(0x%x) value (0x%x)\n", __func__, + ADR_MTTCAN_DBTP, dbtp_reg); + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_DBTP, + dbtp_reg, MTTCAN_DBTP_MSK); + if (ret) { + pr_err("%s: Fast bitrate configuration failed\n", + __func__); + return ret; + } + + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_TDCR, + tdcr_reg, MTTCAN_TDCR_MSK); + if (ret) { + pr_err("%s: Fast bitrate configuration failed\n", + __func__); + return ret; + } + + temp_reg = cccr_reg = ttcan_read32(ttcan, ADR_MTTCAN_CCCR); + if (ttcan->bt_config.fd_flags & CAN_FD_FLAG) + cccr_reg |= MTT_CCCR_FDOE_MASK; + else + cccr_reg &= ~(MTT_CCCR_FDOE_MASK); + + if (ttcan->bt_config.fd_flags & CAN_BRS_FLAG) + cccr_reg |= MTT_CCCR_BRSE_MASK; + else + cccr_reg &= ~(MTT_CCCR_BRSE_MASK); + + if (ttcan->bt_config.fd_flags & CAN_FD_NON_ISO_FLAG) + cccr_reg |= MTT_CCCR_NISO_MASK; + else + cccr_reg &= ~(MTT_CCCR_NISO_MASK); + + if (temp_reg != cccr_reg) { + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_CCCR, + cccr_reg, MTTCAN_CCCR_MSK); + if (ret) { + pr_err("%s: Error in enabling FD\n", __func__); + return ret; + } + } + + } + return ret; +} + +int ttcan_tx_req_pending(struct ttcan_controller *ttcan) +{ + u32 txbrp_reg = ttcan_read32(ttcan, ADR_MTTCAN_TXBRP); + + if (txbrp_reg) + return 1; + return 0; +} + +int ttcan_tx_buff_req_pending(struct ttcan_controller *ttcan, u8 index) +{ + u32 txbrp_reg; + u32 mask = 1 << index; + + txbrp_reg = ttcan_read32(ttcan, ADR_MTTCAN_TXBRP); + 
+ if (txbrp_reg & mask) + return 1; + else + return 0; +} + +bool ttcan_tx_buffers_full(struct ttcan_controller *ttcan) +{ + u32 txbrp_reg; + u32 txfqs_full = 1; + u32 mask = (1 << ttcan->tx_config.ded_buff_num) - 1; + + /* If FIFO/queue is enabled, check if full bit is set */ + if (ttcan->tx_config.fifo_q_num) + txfqs_full = (ttcan_read32(ttcan, ADR_MTTCAN_TXFQS) & + MTT_TXFQS_TFQF_MASK) >> MTT_TXFQS_TFQF_SHIFT; + + /* Check for pending Tx requests in msg buffer */ + txbrp_reg = ttcan_read32(ttcan, ADR_MTTCAN_TXBRP) & mask; + + if ((txbrp_reg == mask) && txfqs_full) + return true; + else + return false; +} + +void ttcan_tx_ded_msg_write(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd, + u8 index) +{ + u32 ram_addr = ttcan->mram_cfg[MRAM_TXB].off + + (index * ttcan->e_size.tx_buffer); + ttcan_write_tx_msg_ram(ttcan, ram_addr, ttcanfd, index); + ttcan->tx_buf_dlc[index] = ttcanfd->d_len; +} + +void ttcan_tx_trigger_msg_transmit(struct ttcan_controller *ttcan, u8 index) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TXBAR, (1 << index)); +} + +int ttcan_tx_msg_buffer_write(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd) +{ + int msg_no; + u32 txbrp_free = ~ttcan_read32(ttcan, ADR_MTTCAN_TXBRP); + + /* mask out buffers that are previously reserved by SW */ + txbrp_free &= ~ttcan->tx_object; + + /* mask for buffers reserved for Tx message buffers */ + txbrp_free &= (1 << ttcan->tx_config.ded_buff_num) - 1; + + msg_no = ffs(txbrp_free) - 1; + if (msg_no < 0) + return -ENOMEM; + + /* Write to CAN controller message RAM */ + ttcan_tx_ded_msg_write(ttcan, ttcanfd, msg_no); + + return msg_no; +} + +int ttcan_set_tx_buffer_addr(struct ttcan_controller *ttcan) +{ + int ret = 0; + u32 txbc_reg; + u32 txesc_reg; + u32 tx_intr_en; + u32 rel_start_addr = ttcan->mram_cfg[MRAM_TXB].off >> 2; + enum ttcan_data_field_size dfs = ttcan->tx_config.dfs; + + + txbc_reg = (rel_start_addr << MTT_TXBC_TBSA_SHIFT) & MTT_TXBC_TBSA_MASK; + txbc_reg |= 
(ttcan->tx_config.ded_buff_num << MTT_TXBC_NDTB_SHIFT) & + MTT_TXBC_NDTB_MASK; + txbc_reg |= (ttcan->tx_config.fifo_q_num << MTT_TXBC_TFQS_SHIFT) & + MTT_TXBC_TFQS_MASK; + + if (ttcan->tx_config.flags & 0x1) + txbc_reg |= MTT_TXBC_TFQM_MASK; /* Queue mode */ + else + txbc_reg &= ~(MTT_TXBC_TFQM_MASK); /* FIFO mode */ + + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_TXBC, txbc_reg, + MTTCAN_TXBC_MSK); + if (ret) { + pr_err("%s: Error in setting ADR_MTTCAN_TXBC\n", __func__); + return ret; + } + + txesc_reg = (dfs << MTT_TXESC_TBDS_SHIFT) & MTT_TXESC_TBDS_MASK; + + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_TXESC, txesc_reg, + MTTCAN_TXESC_MSK); + if (ret) { + pr_err("%s: Error in setting ADR_MTTCAN_TXESC\n", __func__); + return ret; + } + + tx_intr_en = (1 << (ttcan->tx_config.ded_buff_num + + ttcan->tx_config.fifo_q_num)) - 1; + /* Enable TC interrupt for tx buffers + queue */ + ttcan_write32(ttcan, ADR_MTTCAN_TXBTIE, tx_intr_en); + /* Enable TCF interrupt for tx buffers */ + ttcan_write32(ttcan, ADR_MTTCAN_TXBCIE, tx_intr_en); + + return ret; +} + +/* Queue Message in Tx Queue + * Return + * -ve in case of error + * idx written buffer index + */ +int ttcan_tx_fifo_queue_msg(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd) +{ + u32 txfqs_reg; + u32 put_idx; + + txfqs_reg = ttcan_read32(ttcan, ADR_MTTCAN_TXFQS); + + /* Test for Tx FIFO/Queue full */ + if (txfqs_reg & MTT_TXFQS_TFQF_MASK) + return -ENOMEM; + + /* Test if Tx index is previously reserved in SW */ + put_idx = (txfqs_reg & MTT_TXFQS_TFQPI_MASK) >> MTT_TXFQS_TFQPI_SHIFT; + if (ttcan->tx_object & (1 << put_idx)) + return -ENOMEM; + + /* Write to CAN controller message RAM */ + ttcan_tx_ded_msg_write(ttcan, ttcanfd, put_idx); + + return put_idx; + +} + +/* Check tx fifo status +* return 1 if fifo full +*/ +int ttcan_tx_fifo_full(struct ttcan_controller *ttcan) +{ + u32 txfqs_reg; + txfqs_reg = ttcan_read32(ttcan, ADR_MTTCAN_TXFQS); + return (txfqs_reg & MTT_TXFQS_TFQF_MASK) >> 
MTT_TXFQS_TFQF_SHIFT; +} + +static int process_rx_mesg(struct ttcan_controller *ttcan, u32 addr) +{ + struct ttcanfd_frame ttcanfd = {0}; + ttcan_read_rx_msg_ram(ttcan, addr, &ttcanfd); + return add_msg_controller_list(ttcan, &ttcanfd, &ttcan->rx_b, BUFFER); +} + +int ttcan_read_rx_buffer(struct ttcan_controller *ttcan) +{ + u32 ndat1, ndat2; + u32 read_addr; + int msgs_read = 0; + + ndat1 = ttcan_read32(ttcan, ADR_MTTCAN_NDAT1); + ndat2 = ttcan_read32(ttcan, ADR_MTTCAN_NDAT2); + + while (ndat1 != 0 || ndat2 != 0) { + u32 bit_set1 = ffs(ndat1) - 1; + u32 bit_set2 = ffs(ndat2) - 1; + if (ndat1) { + read_addr = ttcan->mram_cfg[MRAM_RXB].off + (bit_set1 * + ttcan->e_size.rx_buffer); + if (process_rx_mesg(ttcan, read_addr)) + return msgs_read; + ttcan_write32(ttcan, ADR_MTTCAN_NDAT1, + (1 << (bit_set1))); + msgs_read++; + } + + if (ndat2) { + read_addr = ttcan->mram_cfg[MRAM_RXB].off + (bit_set2 * + ttcan->e_size.rx_buffer); + if (process_rx_mesg(ttcan, read_addr)) + return msgs_read; + ttcan_write32(ttcan, ADR_MTTCAN_NDAT2, + (1 << (bit_set2))); + msgs_read++; + } + ndat1 &= ~(1 << (bit_set1)); + ndat2 &= ~(1 << (bit_set2)); + } + + return msgs_read; +} + +/* Tx Evt Fifo */ + +unsigned int ttcan_read_txevt_fifo(struct ttcan_controller *ttcan) +{ + struct mttcan_tx_evt_element txevt; + u32 txefs; + u32 read_addr; + int q_read = 0; + int msgs_read = 0; + + txefs = ttcan_read32(ttcan, ADR_MTTCAN_TXEFS); + + if (!(txefs & MTT_TXEFS_EFFL_MASK)) { + pr_debug("%s: Tx Event FIFO empty\n", __func__); + return 0; + } + q_read = ttcan->tx_config.evt_q_num; + while ((txefs & MTT_TXEFS_EFFL_MASK) && q_read--) { + + u32 get_idx = + (txefs & MTT_TXEFS_EFGI_MASK) >> MTT_TXEFS_EFGI_SHIFT; + read_addr = + ttcan->mram_cfg[MRAM_TXE].off + + (get_idx * TX_EVENT_FIFO_ELEM_SIZE); + + pr_debug("%s:txevt: read_addr %x EFGI %x\n", __func__, + read_addr, get_idx); + + ttcan_read_txevt_ram(ttcan, read_addr, &txevt); + if (add_event_controller_list(ttcan, &txevt, + &ttcan->tx_evt) < 0) { + 
pr_err("%s: failed to add to list\n", __func__); + return msgs_read; + } + ttcan_write32(ttcan, ADR_MTTCAN_TXEFA, get_idx); + txefs = ttcan_read32(ttcan, ADR_MTTCAN_TXEFS); + msgs_read++; + } + return msgs_read; +} + +/* Rx FIFO section */ + +unsigned int ttcan_read_rx_fifo0(struct ttcan_controller *ttcan) +{ + u32 rxf0s_reg; + struct ttcanfd_frame ttcanfd = {0}; + u32 read_addr; + int q_read = 0; + unsigned int msgs_read = 0; + + rxf0s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF0S); + + if (!(rxf0s_reg & MTT_RXF0S_F0FL_MASK)) { + return msgs_read; + } + + /* Read at max queue size in one attempt */ + q_read = ttcan->mram_cfg[MRAM_RXF0].num; + + while ((rxf0s_reg & MTT_RXF0S_F0FL_MASK) && q_read--) { + u32 get_idx = (rxf0s_reg & MTT_RXF0S_F0GI_MASK) >> + MTT_RXF0S_F0GI_SHIFT; + if (ttcan->rx_config.rxq0_bmsk & (1 << get_idx)) { + /* All ready process on High priority */ + ttcan_write32(ttcan, ADR_MTTCAN_RXF0A, get_idx); + ttcan->rx_config.rxq0_bmsk &= ~(1U << get_idx); + rxf0s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF0S); + continue; + } + + read_addr = ttcan->mram_cfg[MRAM_RXF0].off + + (get_idx * ttcan->e_size.rx_fifo0); + + pr_debug("%s:fifo0: read_addr %x FOGI %x\n", __func__, + read_addr, get_idx); + + ttcan_read_rx_msg_ram(ttcan, read_addr, &ttcanfd); + if (add_msg_controller_list(ttcan, &ttcanfd, + &ttcan->rx_q0, FIFO_0) < 0) { + pr_err("%s: failed to add to list\n", __func__); + return msgs_read; + } + ttcan_write32(ttcan, ADR_MTTCAN_RXF0A, get_idx); + rxf0s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF0S); + msgs_read++; + } + return msgs_read; +} + +unsigned int ttcan_read_rx_fifo1(struct ttcan_controller *ttcan) +{ + u32 rxf1s_reg; + struct ttcanfd_frame ttcanfd = {0}; + u32 read_addr; + int q_read = 0; + int msgs_read = 0; + + rxf1s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF1S); + + if (!(rxf1s_reg & MTT_RXF1S_F1FL_MASK)) { + return msgs_read; + } + + /* Read at max queue size in one attempt */ + q_read = ttcan->mram_cfg[MRAM_RXF1].num; + + while ((rxf1s_reg & 
MTT_RXF1S_F1FL_MASK) && q_read--) { + u32 get_idx = (rxf1s_reg & MTT_RXF1S_F1GI_MASK) >> + MTT_RXF1S_F1GI_SHIFT; + if (ttcan->rx_config.rxq1_bmsk & (1 << get_idx)) { + /* All ready process on High priority */ + ttcan_write32(ttcan, ADR_MTTCAN_RXF1A, get_idx); + ttcan->rx_config.rxq1_bmsk &= ~(1U << get_idx); + rxf1s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF1S); + continue; + } + read_addr = ttcan->mram_cfg[MRAM_RXF1].off + + (get_idx * ttcan->e_size.rx_fifo1); + + pr_debug("%s:fifo1: read_addr %x FOGI %x\n", __func__, + read_addr, get_idx); + + ttcan_read_rx_msg_ram(ttcan, read_addr, &ttcanfd); + if (add_msg_controller_list(ttcan, &ttcanfd, + &ttcan->rx_q1, FIFO_1) < 0) { + pr_err("%s: failed to add to list\n", __func__); + return msgs_read; + } + ttcan_write32(ttcan, ADR_MTTCAN_RXF1A, get_idx); + rxf1s_reg = ttcan_read32(ttcan, ADR_MTTCAN_RXF1S); + msgs_read++; + } + return msgs_read; +} + +/* Returns message read else return 0 */ +unsigned int ttcan_read_rx_fifo(struct ttcan_controller *ttcan) +{ + int msgs_read = 0; + + msgs_read = ttcan_read_rx_fifo0(ttcan); + + if (ttcan->mram_cfg[MRAM_RXF1].num) + msgs_read += ttcan_read_rx_fifo1(ttcan); + + return msgs_read; +} + +unsigned int ttcan_read_hp_mesgs(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd) +{ + u32 hpms; + u32 fltr_idx; + u32 buf_idx; + u32 msi; + u32 read_addr; + + hpms = ttcan_read32(ttcan, ADR_MTTCAN_HPMS); + fltr_idx = (hpms & MTT_HPMS_FIDX_MASK) >> MTT_HPMS_FIDX_SHIFT; + buf_idx = (hpms & MTT_HPMS_BIDX_MASK) >> MTT_HPMS_BIDX_SHIFT; + msi = (hpms & MTT_HPMS_MSI_MASK) >> MTT_HPMS_MSI_SHIFT; + + if (hpms & MTT_HPMS_FLST_MASK) { + /* Extended Filter list */ + pr_debug("Xtd Filter:%d Matched\n", fltr_idx); + pr_debug("0x%llx\n", ttcan_get_xtd_id_filter(ttcan, fltr_idx)); + } else { + /* Standard Filter list */ + pr_debug("Std Filter:%d Matched\n", fltr_idx); + pr_debug("0x%x\n", ttcan_get_std_id_filter(ttcan, fltr_idx)); + } + + switch (msi) { + default: + pr_debug("High Priority 
Interrupt received, no mesg\n"); + return 0; + case 1: + pr_info("High Priority FIFO Mesg lost\n"); + return 0; + case 2: + read_addr = ttcan->mram_cfg[MRAM_RXF0].off + + (buf_idx * ttcan->e_size.rx_fifo0); + ttcan_read_rx_msg_ram(ttcan, read_addr, ttcanfd); + ttcan->rx_config.rxq0_bmsk |= 1 << buf_idx; + break; + case 3: + read_addr = ttcan->mram_cfg[MRAM_RXF1].off + + buf_idx * ttcan->e_size.rx_fifo1; + ttcan_read_rx_msg_ram(ttcan, read_addr, ttcanfd); + ttcan->rx_config.rxq1_bmsk |= 1 << buf_idx; + } + return 1; +} + +/* Rx Buff Section */ +void ttcan_set_rx_buffers_elements(struct ttcan_controller *ttcan) +{ + u32 rxbc_reg = 0; + u32 rxf0c_reg = 0; + u32 rxf1c_reg = 0; + u32 rxesc_reg = 0; + u32 rel_phy_addr; + enum ttcan_data_field_size rxbuf_dfs; + enum ttcan_data_field_size rxfifo0_dfs; + enum ttcan_data_field_size rxfifo1_dfs; + + u32 rxq0 = ttcan->mram_cfg[MRAM_RXF0].num; + u32 rxq1 = ttcan->mram_cfg[MRAM_RXF1].num; + + /* Set Rx Buffer Address */ + rel_phy_addr = ttcan->mram_cfg[MRAM_RXB].off >> 2; + rxbc_reg = (rel_phy_addr << MTT_RXBC_RBSA_SHIFT) & MTT_RXBC_RBSA_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_RXBC, rxbc_reg); + + /* Set RXFIFO 0 */ + rel_phy_addr = ttcan->mram_cfg[MRAM_RXF0].off >> 2; + rxf0c_reg = ((rxq0 / 2) << MTT_RXF0C_F0WM_SHIFT) & MTT_RXF0C_F0WM_MASK; + rxf0c_reg |= (rxq0 << MTT_RXF0C_F0S_SHIFT) & MTT_RXF0C_F0S_MASK; + rxf0c_reg |= (rel_phy_addr << MTT_RXF0C_F0SA_SHIFT) & + MTT_RXF0C_F0SA_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_RXF0C, rxf0c_reg); + + /* Set RxFIFO 1 */ + rel_phy_addr = ttcan->mram_cfg[MRAM_RXF1].off >> 2; + rxf1c_reg = ((rxq1 / 2) << MTT_RXF1C_F1WM_SHIFT) & MTT_RXF1C_F1WM_MASK; + rxf1c_reg |= (rxq1 << MTT_RXF1C_F1S_SHIFT) & MTT_RXF1C_F1S_MASK; + rxf1c_reg |= (rel_phy_addr << MTT_RXF1C_F1SA_SHIFT) & + MTT_RXF1C_F1SA_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_RXF1C, rxf1c_reg); + + + /* Set Rx element datasize */ + rxbuf_dfs = get_dfs(ttcan->rx_config.rxb_dsize); + rxfifo0_dfs = get_dfs(ttcan->rx_config.rxq0_dsize); + 
rxfifo1_dfs = get_dfs(ttcan->rx_config.rxq1_dsize); + + rxesc_reg = (rxbuf_dfs << MTT_RXESC_RBDS_SHIFT) & MTT_RXESC_RBDS_MASK; + rxesc_reg |= (rxfifo0_dfs << MTT_RXESC_F0DS_SHIFT) & + MTT_RXESC_F0DS_MASK; + rxesc_reg |= (rxfifo1_dfs << MTT_RXESC_F1DS_SHIFT) & + MTT_RXESC_F1DS_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_RXESC, rxesc_reg); + + ttcan->e_size.rx_buffer = + RXB_ELEM_HEADER_SIZE + ttcan->rx_config.rxb_dsize; + ttcan->e_size.rx_fifo0 = + RXB_ELEM_HEADER_SIZE + ttcan->rx_config.rxq0_dsize; + ttcan->e_size.rx_fifo1 = + RXB_ELEM_HEADER_SIZE + ttcan->rx_config.rxq1_dsize; +} + +/* Filters Section */ + +int ttcan_set_gfc(struct ttcan_controller *ttcan, u32 regval) +{ + int ret = 0; + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_GFC, regval, + MTTCAN_GFC_MSK); + if (ret) + pr_err("%s: unable to set GFC register\n", __func__); + + return ret; +} + +u32 ttcan_get_gfc(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_GFC); +} + +int ttcan_set_xidam(struct ttcan_controller *ttcan, u32 regval) +{ + int ret = 0; + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_XIDAM, regval, + MTTCAN_XIDAM_MSK); + if (ret) + pr_err("%s: unable to set XIDAM register\n", __func__); + return ret; +} + +u32 ttcan_get_xidam(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_XIDAM); +} + +int ttcan_set_ttrmc(struct ttcan_controller *ttcan, u32 regval) +{ + int ret = 0; + ret = ttcan_write32_check(ttcan, ADR_MTTCAN_TTRMC, regval, + MTTCAN_TTRMC_MSK); + if (ret) + pr_err("%s: unable to set TTRMC register\n", __func__); + return ret; +} + +u32 ttcan_get_ttrmc(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTRMC); +} + +void ttcan_set_std_id_filter_addr(struct ttcan_controller *ttcan) +{ + u32 sidfc_reg = 0; + u32 rel_start_addr = ttcan->mram_cfg[MRAM_SIDF].off >> 2; + u32 list_size = ttcan->mram_cfg[MRAM_SIDF].num; + + if (list_size > 128) + list_size = 128; + + sidfc_reg = (rel_start_addr << MTT_SIDFC_FLSSA_SHIFT) & + 
MTT_SIDFC_FLSSA_MASK; + sidfc_reg |= (list_size << MTT_SIDFC_LSS_SHIFT) & MTT_SIDFC_LSS_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_SIDFC, sidfc_reg); +} + +void ttcan_set_xtd_id_filter_addr(struct ttcan_controller *ttcan) +{ + u32 xidfc_reg = 0; + u32 list_size = ttcan->mram_cfg[MRAM_XIDF].num; + u32 rel_start_addr = ttcan->mram_cfg[MRAM_XIDF].off >> 2; + + if (list_size > 64) + list_size = 64; + + xidfc_reg = (rel_start_addr << MTT_XIDFC_FLESA_SHIFT) & + MTT_XIDFC_FLESA_MASK; + xidfc_reg |= (list_size << MTT_XIDFC_LSE_SHIFT) & MTT_XIDFC_LSE_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_XIDFC, xidfc_reg); + +} + +inline void ttcan_set_timestamp_offset_sel(struct ttcan_controller *ttcan) +{ + u32 val; + + val = ttcan_xread32(ttcan, ADDR_M_TTCAN_TIME_STAMP); + val |= M_TTCAN_TIME_STAMP_OFFSET_SEL; + ttcan_xwrite32(ttcan, ADDR_M_TTCAN_TIME_STAMP, val); +} + +void ttcan_set_time_stamp_conf(struct ttcan_controller *ttcan, + u16 timer_prescalar, + enum ttcan_timestamp_source timer_type) +{ + u32 tscc = 0; + + if (timer_type == TS_EXTERNAL) { + ttcan_set_timestamp_offset_sel(ttcan); + tscc = (timer_type << MTT_TSCC_TSS_SHIFT) & MTT_TSCC_TSS_MASK; + } else { + if (timer_prescalar > 15) + timer_prescalar = 15; + + tscc = (timer_prescalar << MTT_TSCC_TCP_SHIFT) + & MTT_TSCC_TCP_MASK; + tscc |= (timer_type << MTT_TSCC_TSS_SHIFT) & MTT_TSCC_TSS_MASK; + ttcan->ts_prescalar = timer_prescalar + 1; + } + + ttcan_write32(ttcan, ADR_MTTCAN_TSCC, tscc); +} + +void ttcan_set_txevt_fifo_conf(struct ttcan_controller *ttcan) +{ + u32 txefc = 0; + u32 rel_addr = ttcan->mram_cfg[MRAM_TXE].off >> 2; + u32 evf_size = ttcan->mram_cfg[MRAM_TXE].num; + + txefc = ((evf_size / 2) << MTT_TXEFC_EFWM_SHIFT) & + MTT_TXEFC_EFWM_MASK; + + txefc |= evf_size << MTT_TXEFC_EFS_SHIFT & MTT_TXEFC_EFS_MASK; + txefc |= rel_addr << MTT_TXEFC_EFSA_SHIFT & MTT_TXEFC_EFSA_MASK; + ttcan_write32(ttcan, ADR_MTTCAN_TXEFC, txefc); + ttcan->tx_config.evt_q_num = evf_size; +} + +void ttcan_set_xtd_mask_add(struct ttcan_controller 
*ttcan, int extid_mask) +{ + u32 xidam_reg = 0; + + xidam_reg = MTT_XIDAM_EIDM_MASK & 0x1FFFFFFF; + ttcan_write32(ttcan, ADR_MTTCAN_XIDAM, xidam_reg); +} + +u32 ttcan_read_tx_complete_reg(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TXBTO); +} + +void ttcan_set_tx_cancel_request(struct ttcan_controller *ttcan, u32 txbcr) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TXBCR, txbcr); +} + +u32 ttcan_read_tx_cancelled_reg(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TXBCF); +} + +u32 ttcan_read_psr(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_PSR); +} + +int ttcan_controller_init(struct ttcan_controller *ttcan, u32 irq_flag, + u32 tt_irq_flag) +{ + if (!ttcan) { + pr_err("TTCAN controller NULL\n"); + return -EINVAL; + } + + ttcan->intr_enable_reg = irq_flag; + ttcan->intr_tt_enable_reg = tt_irq_flag; + spin_lock_init(&ttcan->lock); + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +cycle_t ttcan_read_ts_cntr(const struct cyclecounter *ccnt) +#else +u64 ttcan_read_ts_cntr(const struct cyclecounter *ccnt) +#endif +{ + struct mttcan_priv *priv = container_of(ccnt, struct mttcan_priv, cc); + + return ttcan_read32(priv->ttcan, ADR_MTTCAN_TSCV); +} diff --git a/drivers/net/can/mttcan/hal/m_ttcan_intr.c b/drivers/net/can/mttcan/hal/m_ttcan_intr.c new file mode 100644 index 00000000..c3a1bc88 --- /dev/null +++ b/drivers/net/can/mttcan/hal/m_ttcan_intr.c @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#include "../include/m_ttcan.h" + +void ttcan_clear_intr(struct ttcan_controller *ttcan) +{ + ttcan_write32(ttcan, ADR_MTTCAN_IR, 0xFFFFFFFF); +} + +void ttcan_clear_tt_intr(struct ttcan_controller *ttcan) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TTIR, 0xFFFFFFFF); +} + +u32 ttcan_read_ir(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_IR); +} + +void ttcan_ir_write(struct ttcan_controller *ttcan, u32 value) +{ + return ttcan_write32(ttcan, ADR_MTTCAN_IR, value); +} + +void ttcan_ttir_write(struct ttcan_controller *ttcan, u32 value) +{ + return ttcan_write32(ttcan, ADR_MTTCAN_TTIR, value); +} + +u32 ttcan_read_ttir(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTIR); +} + +void ttcan_ier_write(struct ttcan_controller *ttcan, u32 val) +{ + ttcan_write32(ttcan, ADR_MTTCAN_IE, val); +} + +void ttcan_ttier_write(struct ttcan_controller *ttcan, u32 val) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TTIE, val); +} + +void ttcan_set_intrpts(struct ttcan_controller *ttcan, int enable) +{ + if (enable) { + ttcan_write32(ttcan, ADR_MTTCAN_IE, ttcan->intr_enable_reg); + ttcan_write32(ttcan, ADR_MTTCAN_TTIE, + ttcan->intr_tt_enable_reg); + ttcan_write32(ttcan, ADR_MTTCAN_ILE, 0x1); + } else { + ttcan_write32(ttcan, ADR_MTTCAN_IE, 0); + ttcan_write32(ttcan, ADR_MTTCAN_TTIE, 0); + ttcan_write32(ttcan, ADR_MTTCAN_ILE, 0x0); + } + pr_debug("%s:%s intr %x\n", __func__, enable ? "enabled" : "disabled", + ttcan->intr_enable_reg); +} diff --git a/drivers/net/can/mttcan/hal/m_ttcan_list.c b/drivers/net/can/mttcan/hal/m_ttcan_list.c new file mode 100644 index 00000000..cd228148 --- /dev/null +++ b/drivers/net/can/mttcan/hal/m_ttcan_list.c @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#include "../include/m_ttcan.h" + +#define TTCAN_MAX_LIST_MEMBERS 128 + +inline int is_msg_list_full(struct ttcan_controller *ttcan, + enum ttcan_rx_type rxtype) +{ + return ttcan->list_status & rxtype & 0xFF; +} + +int add_msg_controller_list(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd, + struct list_head *rx_q, + enum ttcan_rx_type rxtype) +{ + + struct ttcan_rx_msg_list *msg_list; + + msg_list = (struct ttcan_rx_msg_list *) + kzalloc(sizeof(struct ttcan_rx_msg_list), GFP_ATOMIC); + if (msg_list == NULL) { + pr_err("%s: memory allocation failed\n", __func__); + return -ENOMEM; + } + + INIT_LIST_HEAD(&msg_list->recv_list); + + memcpy(&msg_list->msg, ttcanfd, sizeof(struct ttcanfd_frame)); + + spin_lock(&ttcan->lock); + + if (is_msg_list_full(ttcan, rxtype)) { + pr_err("%s: mesg list is full\n", __func__); + kfree(msg_list); + spin_unlock(&ttcan->lock); + return -ENOMEM; + } + + list_add_tail(&msg_list->recv_list, rx_q); + + switch (rxtype) { + case BUFFER: + if (++ttcan->rxb_mem >= TTCAN_MAX_LIST_MEMBERS) + ttcan->list_status |= BUFFER; + break; + case FIFO_0: + if (++ttcan->rxq0_mem >= TTCAN_MAX_LIST_MEMBERS) + ttcan->list_status |= FIFO_0; + break; + case FIFO_1: + if (++ttcan->rxq1_mem >= TTCAN_MAX_LIST_MEMBERS) + ttcan->list_status |= FIFO_1; + default: + break; + } + spin_unlock(&ttcan->lock); + + return 0; +} + +int add_event_controller_list(struct ttcan_controller *ttcan, + struct mttcan_tx_evt_element *txevt, + struct list_head *evt_q) +{ + + struct ttcan_txevt_msg_list *evt_list; + + evt_list = + (struct ttcan_txevt_msg_list *) + kzalloc(sizeof(struct ttcan_txevt_msg_list), GFP_ATOMIC); + if (evt_list == NULL) { + pr_err("%s: memory allocation failed\n", __func__); + return -ENOMEM; + } + + INIT_LIST_HEAD(&evt_list->txevt_list); + + memcpy(&evt_list->txevt, txevt, sizeof(struct mttcan_tx_evt_element)); + + spin_lock(&ttcan->lock); + + if (is_msg_list_full(ttcan, TX_EVT)) { + pr_err("%s: mesg list is full\n", __func__); + 
kfree(evt_list); + spin_unlock(&ttcan->lock); + return -1; + } + + list_add_tail(&evt_list->txevt_list, evt_q); + + if (++ttcan->evt_mem >= TTCAN_MAX_LIST_MEMBERS) + ttcan->list_status |= TX_EVT; + + spin_unlock(&ttcan->lock); + + return 0; +} diff --git a/drivers/net/can/mttcan/hal/m_ttcan_ram.c b/drivers/net/can/mttcan/hal/m_ttcan_ram.c new file mode 100644 index 00000000..ba8843aa --- /dev/null +++ b/drivers/net/can/mttcan/hal/m_ttcan_ram.c @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include "../include/m_ttcan.h" + +int ttcan_read_txevt_ram(struct ttcan_controller *ttcan, u32 read_addr, + struct mttcan_tx_evt_element *txevt) +{ + void __iomem *msg_addr = read_addr + ttcan->mram_vbase; + if (!txevt) + return -1; + + txevt->f0 = readl(msg_addr); + txevt->f1 = readl(msg_addr + CAN_WORD_IN_BYTES); + + return 0; +} + +int ttcan_read_rx_msg_ram(struct ttcan_controller *ttcan, u64 read_addrs, + struct ttcanfd_frame *ttcanfd) +{ + u32 i = 0, byte_index; + u32 msg_data; + void __iomem *addr_in_msg_ram = read_addrs + ttcan->mram_vbase; + int bytes_to_read = CANFD_MAX_DLEN; + + if (!ttcanfd) + return -1; + + ttcanfd->flags |= CAN_DIR_RX; + + while (bytes_to_read) { + msg_data = + readl(addr_in_msg_ram + (i * CAN_WORD_IN_BYTES)); + + switch (i) { + case 0: + ttcanfd->can_id = 0; + ttcanfd->flags = 0; + + if (msg_data & RX_BUF_XTD) { + /* Extended Frame */ + ttcanfd->can_id = CAN_FMT | + ((msg_data & RX_BUF_EXTID_MASK) & + CAN_EXT_ID_MASK); + } else { + ttcanfd->can_id = + ((msg_data & RX_BUF_STDID_MASK) >> + RX_BUF_STDID_SHIFT) & CAN_STD_ID_MASK; + } + + if (msg_data & RX_BUF_RTR) + ttcanfd->can_id |= CAN_RTR; + + if (msg_data & RX_BUF_ESI) + ttcanfd->flags |= CAN_ESI_FLAG; + break; + case 1: + ttcanfd->d_len = + ttcan_dlc2len((msg_data & RX_BUF_DLC_MASK) + >> RX_BUF_DLC_SHIFT); + bytes_to_read = ttcanfd->d_len; + + if (msg_data & RX_BUF_FDF) + ttcanfd->flags |= 
CAN_FD_FLAG; + + if (msg_data & RX_BUF_BRS) + ttcanfd->flags |= CAN_BRS_FLAG; + ttcanfd->tstamp = msg_data & RX_BUF_RXTS_MASK; + break; + default: + byte_index = (i - 2) * CAN_WORD_IN_BYTES; + switch (bytes_to_read) { + default: + case 4: + ttcanfd->data[byte_index + 3] = + (msg_data >> 24) & 0xFF; + case 3: + ttcanfd->data[byte_index + 2] = + (msg_data >> 16) & 0xFF; + case 2: + ttcanfd->data[byte_index + 1] = + (msg_data >> 8) & 0xFF; + case 1: + ttcanfd->data[byte_index + 0] = + msg_data & 0xFF; + } + + if (bytes_to_read >= 4) + bytes_to_read -= 4; + else + bytes_to_read = 0; + pr_debug("%s addr %p received 0x%x\n", __func__, + addr_in_msg_ram + + (i * CAN_WORD_IN_BYTES), msg_data); + break; + } + i++; + } + pr_debug("%s:received ID(0x%x) %s %s(%s)\n", __func__, + (ttcanfd->can_id & CAN_FMT) ? + (ttcanfd->can_id & CAN_EXT_ID_MASK) : + (ttcanfd->can_id & CAN_STD_ID_MASK), + (ttcanfd->can_id & CAN_FMT) ? "XTD" : "STD", + (ttcanfd->flags & CAN_FD_FLAG) ? "FD" : "NON-FD", + (ttcanfd->flags & CAN_BRS_FLAG) ? 
"BRS" : "NOBRS"); + + return i; +} + +int ttcan_write_tx_msg_ram(struct ttcan_controller *ttcan, u32 write_addrs, + struct ttcanfd_frame *ttcanfd, u8 index) +{ + u32 msg_data, idx; + int bytes_to_write; + void __iomem *addr_in_msg_ram = write_addrs + ttcan->mram_vbase; + + /* T0 */ + if (ttcanfd->can_id & CAN_FMT) + msg_data = (ttcanfd->can_id & CAN_EXT_ID_MASK) | TX_BUF_XTD; + else + msg_data = + (ttcanfd->can_id & CAN_STD_ID_MASK) << TX_BUF_STDID_SHIFT; + + if (ttcanfd->can_id & CAN_RTR) + msg_data |= TX_BUF_RTR; + + /* This flag is ORed with error passive flag while sending */ + if (ttcanfd->flags & CAN_ESI_FLAG) + msg_data |= TX_BUF_ESI; + + pr_debug("T0: addr %p msg %x\n", addr_in_msg_ram, msg_data); + writel(msg_data, addr_in_msg_ram); + + /* T1 */ + msg_data = + (ttcan_len2dlc(ttcanfd->d_len) << TX_BUF_DLC_SHIFT) & + TX_BUF_DLC_MASK; + + if (ttcan->tx_config.evt_q_num) + msg_data |= TX_BUF_EFC; + + if (ttcanfd->flags & CAN_FD_FLAG) + msg_data |= TX_BUF_FDF; + + if (ttcanfd->flags & CAN_BRS_FLAG) + msg_data |= TX_BUF_BRS; + + msg_data |= index << TX_BUF_MM_SHIFT; + + pr_debug("%s:buf_id(%d):- %s(%s)\n", __func__, index, + (ttcanfd->flags & CAN_FD_FLAG) ? "FD" : "NON-FD", + (ttcanfd->flags & CAN_BRS_FLAG) ? 
"BRS" : "NOBRS"); + + pr_debug("T1: addr %p msg %x\n", (addr_in_msg_ram+CAN_WORD_IN_BYTES), + msg_data); + writel(msg_data, (addr_in_msg_ram + CAN_WORD_IN_BYTES)); + + bytes_to_write = ttcanfd->d_len; + + idx = 0; + + while (bytes_to_write > 0) { + msg_data = 0; + switch (bytes_to_write) { + default: + case 4: + msg_data = ttcanfd->data[idx + 3] << 24; + case 3: + msg_data += ttcanfd->data[idx + 2] << 16; + case 2: + msg_data += ttcanfd->data[idx + 1] << 8; + case 1: + msg_data += ttcanfd->data[idx + 0] << 0; + } + + pr_debug("T2: addr %p msg %x\n", (addr_in_msg_ram + + (((idx >> 2) + 2) * CAN_WORD_IN_BYTES)), + msg_data); + writel(msg_data, (addr_in_msg_ram + + (((idx >> 2) + 2) * CAN_WORD_IN_BYTES))); + idx += 4; + bytes_to_write -= 4; + } + + return idx; +} + +void ttcan_set_std_id_filter(struct ttcan_controller *ttcan, void *std_shadow, + int filter_index, u8 sft, u8 sfec, u32 sfid1, + u32 sfid2) +{ + u32 filter_elem = 0; + void __iomem *filter_addr = + ttcan->mram_vbase + ttcan->mram_cfg[MRAM_SIDF].off; + u32 filter_offset = + (filter_index * SIDF_ELEM_SIZE); + + filter_elem = (sfid2 << MTT_STD_FLTR_SFID2_SHIFT) & + MTT_STD_FLTR_SFID2_MASK; + filter_elem |= (sfid1 << MTT_STD_FLTR_SFID1_SHIFT) & + MTT_STD_FLTR_SFID1_MASK; + filter_elem |= (sfec << MTT_STD_FLTR_SFEC_SHIFT) & + MTT_STD_FLTR_SFEC_MASK; + filter_elem |= (sft << MTT_STD_FLTR_SFT_SHIFT) & MTT_STD_FLTR_SFT_MASK; + + pr_debug("%s %p\n", __func__, (filter_addr + filter_offset)); + memcpy((char *)std_shadow + filter_offset, &filter_elem, + SIDF_ELEM_SIZE); + writel(filter_elem, (void __iomem *)(filter_addr + filter_offset)); +} + +void ttcan_prog_std_id_fltrs(struct ttcan_controller *ttcan, void *std_shadow) +{ + int idx; + u32 list_size = ttcan->mram_cfg[MRAM_SIDF].num; + void __iomem *filter_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_SIDF].off; + for (idx = 0; idx < list_size; idx++) { + u32 offset = idx * SIDF_ELEM_SIZE; + writel(*(u32 *)((u8 *)std_shadow + offset), + (void __iomem 
*)(filter_addr + offset)); + } +} + +u32 ttcan_get_std_id_filter(struct ttcan_controller *ttcan, int idx) +{ + void __iomem *filter_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_SIDF].off; + + u32 filter_offset = idx * SIDF_ELEM_SIZE; + return readl(filter_addr + filter_offset); +} + +void ttcan_prog_xtd_id_fltrs(struct ttcan_controller *ttcan, void *xtd_shadow) +{ + int idx; + u32 list_size = ttcan->mram_cfg[MRAM_XIDF].num; + void __iomem *filter_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_XIDF].off; + + for (idx = 0; idx < list_size; idx++) { + u32 offset = idx * XIDF_ELEM_SIZE; + + writel(*(u32 *)((u8 *)xtd_shadow + offset), + (void __iomem *)(filter_addr + offset)); + writel(*(u32 *)((u8 *)xtd_shadow + offset + CAN_WORD_IN_BYTES), + (void __iomem *)(filter_addr + offset + + CAN_WORD_IN_BYTES)); + } +} + +void ttcan_set_xtd_id_filter(struct ttcan_controller *ttcan, void *xtd_shadow, + int filter_index, u8 eft, u8 efec, u32 efid1, + u32 efid2) +{ + struct mttcan_xtd_id_filt_element xfilter_elem; + void __iomem *filter_addr = + ttcan->mram_vbase + ttcan->mram_cfg[MRAM_XIDF].off; + u32 filter_offset = filter_index * XIDF_ELEM_SIZE; + + xfilter_elem.f0 = (efec << MTT_XTD_FLTR_F0_EFEC_SHIFT) & + MTT_XTD_FLTR_F0_EFEC_MASK; + xfilter_elem.f0 |= (efid1 << MTT_XTD_FLTR_F0_EFID1_SHIFT) & + MTT_XTD_FLTR_F0_EFID1_MASK; + xfilter_elem.f1 = (eft << MTT_XTD_FLTR_F1_EFT_SHIFT) & + MTT_XTD_FLTR_F1_EFT_MASK; + xfilter_elem.f1 |= (efid2 << MTT_XTD_FLTR_F1_EFID2_SHIFT) & + MTT_XTD_FLTR_F1_EFID2_MASK; + + memcpy((char *)xtd_shadow + filter_offset, &xfilter_elem, + XIDF_ELEM_SIZE); + writel(xfilter_elem.f0, + (void __iomem *)(filter_addr + filter_offset)); + writel(xfilter_elem.f1, + (void __iomem *)(filter_addr + filter_offset + + CAN_WORD_IN_BYTES)); + pr_debug("%s %x %p\n", __func__, xfilter_elem.f0, + (filter_addr + filter_offset)); + pr_debug("%s %x %p\n", __func__, xfilter_elem.f1, + (filter_addr + filter_offset + CAN_WORD_IN_BYTES)); +} + +u64 
ttcan_get_xtd_id_filter(struct ttcan_controller *ttcan, int idx) +{ + u64 xtd; + void __iomem *filter_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_XIDF].off; + u32 offset = idx * XIDF_ELEM_SIZE; + + xtd = ((u64) readl(filter_addr + offset + CAN_WORD_IN_BYTES)) << 32; + xtd |= readl(filter_addr + offset); + return xtd; +} + +u64 ttcan_get_trigger_mem(struct ttcan_controller *ttcan, int idx) +{ + u64 trig; + void __iomem *addr = ttcan->mram_vbase + ttcan->mram_cfg[MRAM_TMC].off; + u32 offset = idx * TRIG_ELEM_SIZE; + + trig = ((u64) readl(addr + offset + CAN_WORD_IN_BYTES)) << 32; + trig |= readl(addr + offset); + return trig; +} + +void ttcan_prog_trigger_mem(struct ttcan_controller *ttcan, void *tmc_shadow) +{ + int idx; + void __iomem *filter_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_TMC].off; + u32 list_size = ttcan->mram_cfg[MRAM_TMC].num; + + for (idx = 0; idx < list_size; idx++) { + u32 offset = idx * TRIG_ELEM_SIZE; + + writel(*(u32 *)((char *)tmc_shadow + idx), + (void __iomem *)(filter_addr + offset)); + writel(*(u32 *)((char *)tmc_shadow + idx), + (void __iomem *)(filter_addr + offset + + CAN_WORD_IN_BYTES)); + } +} + +int ttcan_set_trigger_mem(struct ttcan_controller *ttcan, void *tmc_shadow, + int trig_index, u16 time_mark, u16 cycle_code, + u8 tmin, u8 tmex, u16 trig_type, u8 filter_type, + u8 mesg_num) +{ + struct mttcan_trig_mem_element trig_elem; + void __iomem *trig_mem_addr = ttcan->mram_vbase + + ttcan->mram_cfg[MRAM_TMC].off; + u32 idx = trig_index * TRIG_ELEM_SIZE; + + if (trig_index > 63) { + pr_err("%s: Incorrect Index\n", __func__); + return -1; + } + + if (cycle_code > 127) { + pr_err("%s: Invalid cycle code\n", __func__); + return -1; + } + + /* TBD: ASC is disabled - Hardcoded */ + /* Mesg. 
Status Count is set 0 */ + trig_elem.f0 = (time_mark << MTT_TRIG_ELE_F0_TM_SHIFT) & + MTT_TRIG_ELE_F0_TM_MASK; + trig_elem.f0 |= cycle_code << MTT_TRIG_ELE_F0_CC_SHIFT & + MTT_TRIG_ELE_F0_CC_MASK; + /* ASC = 0 */; + trig_elem.f0 |= (tmin << MTT_TRIG_ELE_F0_TMIN_SHIFT) & + MTT_TRIG_ELE_F0_TMIN_MASK; + trig_elem.f0 |= (tmex << MTT_TRIG_ELE_F0_TMEX_SHIFT) & + MTT_TRIG_ELE_F0_TMEX_MASK; + trig_elem.f0 |= (trig_type << MTT_TRIG_ELE_F0_TYPE_SHIFT) & + MTT_TRIG_ELE_F0_TYPE_MASK; + trig_elem.f1 = (filter_type << MTT_TRIG_ELE_F1_FTYPE_SHIFT) & + MTT_TRIG_ELE_F1_FTYPE_MASK; + trig_elem.f1 |= (mesg_num << MTT_TRIG_ELE_F1_MNR_SHIFT) & + MTT_TRIG_ELE_F1_MNR_MASK; + + memcpy((char *)tmc_shadow + idx, &trig_elem, TRIG_ELEM_SIZE); + writel(trig_elem.f0, (void __iomem *)(trig_mem_addr + idx)); + writel(trig_elem.f1, (void __iomem *)(trig_mem_addr + idx + + CAN_WORD_IN_BYTES)); + + return 0; +} + +int ttcan_mesg_ram_config(struct ttcan_controller *ttcan, + u32 *arr, u32 *tx_conf, u32 *rx_conf) +{ + + ttcan->mram_cfg[MRAM_SIDF].off = arr[0]; + ttcan->mram_cfg[MRAM_SIDF].num = arr[1]; + + ttcan->mram_cfg[MRAM_XIDF].off = ttcan->mram_cfg[MRAM_SIDF].off + + ttcan->mram_cfg[MRAM_SIDF].num * SIDF_ELEM_SIZE; + ttcan->mram_cfg[MRAM_XIDF].num = arr[2]; + + ttcan->mram_cfg[MRAM_RXF0].off = ttcan->mram_cfg[MRAM_XIDF].off + + ttcan->mram_cfg[MRAM_XIDF].num * XIDF_ELEM_SIZE; + ttcan->mram_cfg[MRAM_RXF0].num = arr[3]; + + ttcan->mram_cfg[MRAM_RXF1].off = ttcan->mram_cfg[MRAM_RXF0].off + + ttcan->mram_cfg[MRAM_RXF0].num * MAX_RXB_ELEM_SIZE; + ttcan->mram_cfg[MRAM_RXF1].num = arr[4]; + + ttcan->mram_cfg[MRAM_RXB].off = ttcan->mram_cfg[MRAM_RXF1].off + + ttcan->mram_cfg[MRAM_RXF1].num * MAX_RXB_ELEM_SIZE; + ttcan->mram_cfg[MRAM_RXB].num = arr[5]; + + ttcan->mram_cfg[MRAM_TXE].off = ttcan->mram_cfg[MRAM_RXB].off + + ttcan->mram_cfg[MRAM_RXB].num * MAX_RXB_ELEM_SIZE; + ttcan->mram_cfg[MRAM_TXE].num = arr[6]; + + ttcan->mram_cfg[MRAM_TXB].off = ttcan->mram_cfg[MRAM_TXE].off + + 
ttcan->mram_cfg[MRAM_TXE].num * TX_EVENT_FIFO_ELEM_SIZE; + ttcan->mram_cfg[MRAM_TXB].num = arr[7]; + + ttcan->mram_cfg[MRAM_TMC].off = ttcan->mram_cfg[MRAM_TXB].off + + ttcan->mram_cfg[MRAM_TXB].num * MAX_TXB_ELEM_SIZE; + ttcan->mram_cfg[MRAM_TMC].num = arr[8]; + + if ((ttcan->mram_size <= + ttcan->mram_cfg[MRAM_TMC].off + ttcan->mram_cfg[MRAM_TMC].num * + TRIG_ELEM_SIZE - ttcan->mram_cfg[MRAM_SIDF].off)) { + pr_err("%s: Incorrect config for Message RAM\n", __func__); + return -EINVAL; + } + + if ((tx_conf[0] + tx_conf[1] > ttcan->mram_cfg[MRAM_TXB].num)) { + pr_err("%s: Incorrect tx-config in dt.\n", __func__); + return -EINVAL; + } + + if ((tx_conf[0] != 0) && (tx_conf[1] != 0)) { + pr_err("%s: Incorrect tx-config in dt.\n", __func__); + pr_err("Using both Tx buf and Fifo not allowed (Errata 21)\n"); + return -EINVAL; + } + + ttcan->tx_config.ded_buff_num = tx_conf[TX_CONF_TXB]; + ttcan->tx_config.fifo_q_num = tx_conf[TX_CONF_TXQ]; + ttcan->tx_config.flags = tx_conf[TX_CONF_QMODE]; + ttcan->e_size.tx_buffer = TXB_ELEM_HEADER_SIZE + tx_conf[TX_CONF_BSIZE]; + ttcan->tx_config.dfs = get_dfs(tx_conf[TX_CONF_BSIZE]); + ttcan->rx_config.rxb_dsize = rx_conf[RX_CONF_RXB]; + ttcan->rx_config.rxq0_dsize = rx_conf[RX_CONF_RXF0]; + ttcan->rx_config.rxq1_dsize = rx_conf[RX_CONF_RXF1]; + + pr_info("\t Message RAM Configuration\n" + "\t| base addr |0x%08lx|\n\t| sidfc_flssa |0x%08x|\n\t| xidfc_flesa |0x%08x|\n" + "\t| rxf0c_f0sa |0x%08x|\n\t| rxf1c_f1sa |0x%08x|\n\t| rxbc_rbsa |0x%08x|\n" + "\t| txefc_efsa |0x%08x|\n\t| txbc_tbsa |0x%08x|\n\t| tmc_tmsa |0x%08x|\n" + "\t| mram size |0x%08x|\n", + ttcan->mram_base, ttcan->mram_cfg[MRAM_SIDF].off, + ttcan->mram_cfg[MRAM_XIDF].off, ttcan->mram_cfg[MRAM_RXF0].off, + ttcan->mram_cfg[MRAM_RXF1].off, ttcan->mram_cfg[MRAM_RXB].off, + ttcan->mram_cfg[MRAM_TXE].off, ttcan->mram_cfg[MRAM_TXB].off, + ttcan->mram_cfg[MRAM_TMC].off, ttcan->mram_size); + return 0; +} + +void ttcan_mesg_ram_init(struct ttcan_controller *ttcan) +{ + u32 offset; 
+ + for (offset = 0; offset < ttcan->mram_size; + offset += CAN_WORD_IN_BYTES) { + writel(0, ttcan->mram_vbase + offset); + } +} diff --git a/drivers/net/can/mttcan/hal/m_ttcan_tt.c b/drivers/net/can/mttcan/hal/m_ttcan_tt.c new file mode 100644 index 00000000..4c13b1ff --- /dev/null +++ b/drivers/net/can/mttcan/hal/m_ttcan_tt.c @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include "../include/m_ttcan.h" + +/* TT Reference Message Config */ +#define TTRMC_REF_PAYLOAD (0x1 << 31) +#define TTRMC_EXT_ID (0x1 << 30) +#define TTRMC_STD_ID_SHIFT 0x12 + +/* TT Operation Config */ + +#define TTOCF_MODE_0 0x3 +#define TTOCF_MODE_1 0x2 +#define TTCOF_MODE_2 0x1 +#define TTOCF_EVT_TRIG 0x0 + +#define TTOCF_GAP 0x4 +#define TTOCF_POTENTIAL_TM 0x8 +#define TTOCF_LDSDL_SHIFT 0x5 +#define TTOCF_IRTO_SHIFT 0x8 + +#define TTOCF_EXT_CLK_SYNC 0x8000 +#define TTOCF_AWL_SHIFT 0x10 +#define TTOCF_GBL_TIME_FLT (0x1 << 24) +#define TTOCF_CLK_CALIB (0x1 << 25) +#define TTOCF_EVT_POLARITY (0x1 << 26) + +enum time_master_info { + TM_MASTER = 0, + TM_POTENTIAL, + TM_SLAVE, +}; + +inline void ttcan_clean_tt_intr(struct ttcan_controller *ttcan) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TTIR, 0xFFFFFFFF); +} + +void ttcan_set_ref_mesg(struct ttcan_controller *ttcan, + u32 id, u32 rmps, u32 xtd) +{ + u32 rid; + u32 ttrmc = (xtd << MTT_TTRMC_XTD_SHIFT) & + MTT_TTRMC_XTD_MASK; + ttrmc |= (rmps << MTT_TTRMC_RMPS_SHIFT) & + MTT_TTRMC_RMPS_MASK; + if (xtd) + rid = (id & 0x1FFFFFFF); + else + rid = ((id & 0x7FF) << TTRMC_STD_ID_SHIFT); + ttrmc |= (rid << MTT_TTRMC_RID_SHIFT) & MTT_TTRMC_RID_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_TTRMC, ttrmc); +} + +void ttcan_set_tt_config(struct ttcan_controller *ttcan, u32 evtp, + u32 ecc, u32 egtf, u32 awl, u32 eecs, + u32 irto, u32 ldsdl, u32 tm, u32 gen, u32 om) +{ + u32 ttocf = 0; + + ttocf = (evtp << MTT_TTOCF_EVTP_SHIFT) & MTT_TTOCF_EVTP_MASK; + ttocf |= 
(ecc << MTT_TTOCF_ECC_SHIFT) & MTT_TTOCF_ECC_MASK; + ttocf |= (egtf << MTT_TTOCF_EGTF_SHIFT) & MTT_TTOCF_EGTF_MASK; + ttocf |= (awl << MTT_TTOCF_AWL_SHIFT) & MTT_TTOCF_AWL_MASK; + ttocf |= (eecs << MTT_TTOCF_EECS_SHIFT) & MTT_TTOCF_EECS_MASK; + ttocf |= (irto << MTT_TTOCF_IRTO_SHIFT) & MTT_TTOCF_IRTO_MASK; + ttocf |= (ldsdl << MTT_TTOCF_LDSDL_SHIFT) & + MTT_TTOCF_LDSDL_MASK; + ttocf |= (tm << MTT_TTOCF_TM_SHIFT) & MTT_TTOCF_TM_MASK; + ttocf |= (gen << MTT_TTOCF_GEN_SHIFT) & MTT_TTOCF_GEN_MASK; + ttocf |= (om << MTT_TTOCF_OM_SHIFT) & MTT_TTOCF_OM_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_TTOCF, ttocf); +} + +inline u32 ttcan_get_ttocf(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTOCF); +} + +inline void ttcan_set_tttmc(struct ttcan_controller *ttcan, u32 value) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TTTMC, value); +} + +inline u32 ttcan_get_tttmc(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTTMC); +} + +inline void ttcan_set_txbar(struct ttcan_controller *ttcan, u32 value) +{ + ttcan_write32(ttcan, ADR_MTTCAN_TXBAR, value); +} + +inline u32 ttcan_get_ttmlm(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTMLM); +} + +inline u32 ttcan_get_cccr(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_CCCR); +} + +inline u32 ttcan_get_ttost(struct ttcan_controller *ttcan) +{ + return ttcan_read32(ttcan, ADR_MTTCAN_TTOST); +} + +static bool validate_matrix_cyc_cnt(u32 cyc_num) +{ + if (cyc_num && ((cyc_num + 1) & cyc_num)) + return false; + return true; +} + +int ttcan_set_matrix_limits(struct ttcan_controller *ttcan, + u32 entt, u32 txew, u32 css, u32 ccm) +{ + u32 ttmlm = 0; + + if (!validate_matrix_cyc_cnt(ccm) || + (txew > 15) || (css > 2) || (entt > 4095)) { + pr_err("%s: Invalid parameters\n", __func__); + return -1; + } + ttmlm = (entt << MTT_TTMLM_ENTT_SHIFT) & MTT_TTMLM_ENTT_MASK; + ttmlm |= (txew << MTT_TTMLM_TXEW_SHIFT) & MTT_TTMLM_TXEW_MASK; + ttmlm |= 
(css << MTT_TTMLM_CSS_SHIFT) & MTT_TTMLM_CSS_MASK; + ttmlm |= (ccm << MTT_TTMLM_CCM_SHIFT) & MTT_TTMLM_CCM_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_TTMLM, ttmlm); + + return 0; +} + +#define MTTCAN_TT_TIMEOUT 1000 +int ttcan_set_tur_config(struct ttcan_controller *ttcan, u16 denominator, + u16 numerator, int local_timing_enable) +{ + int timeout = MTTCAN_TT_TIMEOUT; + u32 turcf = ttcan_read32(ttcan, ADR_MTTCAN_TURCF); + + if (!denominator && local_timing_enable) { + pr_err("%s: Invalid Denominator value\n", __func__); + return -1; + } + + if (turcf & MTT_TURCF_ELT_MASK) { + turcf &= ~(MTT_TURCF_ELT_MASK); + ttcan_write32(ttcan, ADR_MTTCAN_TURCF, turcf); + } + + if (!local_timing_enable) + return 0; + + if (turcf & MTT_TURCF_ELT_MASK) { + while ((turcf & MTT_TURCF_ELT_MASK) && timeout) { + udelay(1); + timeout--; + turcf = ttcan_read32(ttcan, ADR_MTTCAN_TURCF); + } + if (!timeout) { + pr_err("%s: TURCF access is locked\n", __func__); + return -1; + } + } + + /* TBD: Take care when the DEN or NUM is programmed + * outside TT Conf mode + */ + + turcf = (numerator << MTT_TURCF_NCL_SHIFT) & MTT_TURCF_NCL_MASK; + turcf |= (denominator << MTT_TURCF_DC_SHIFT) & MTT_TURCF_DC_MASK; + turcf |= MTT_TURCF_ELT_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_TURCF, turcf); + + return 0; +} + +void ttcan_set_trigger_mem_conf(struct ttcan_controller *ttcan) +{ + u32 tttmc = 0; + u32 rel_start_addr = ttcan->mram_cfg[MRAM_TMC].off >> 2; + u8 elem_num = ttcan->mram_cfg[MRAM_TMC].num; + + if (elem_num > 64) + elem_num = 64; + + tttmc = (elem_num << MTT_TTTMC_TME_SHIFT) & MTT_TTTMC_TME_MASK; + tttmc |= (rel_start_addr << MTT_TTTMC_TMSA_SHIFT) & + MTT_TTTMC_TMSA_MASK; + + ttcan_write32(ttcan, ADR_MTTCAN_TTTMC, tttmc); +} diff --git a/drivers/net/can/mttcan/include/m_ttcan.h b/drivers/net/can/mttcan/include/m_ttcan.h new file mode 100644 index 00000000..ab878a17 --- /dev/null +++ b/drivers/net/can/mttcan/include/m_ttcan.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * 
Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef __M_TTCAN_DEF +#define __M_TTCAN_DEF + +#include + +#include "m_ttcan_regdef.h" +#include "m_ttcan_linux.h" + +/* Message RAM config */ +/* +-------------------+ + * | 11 bit filter | + * +-------------------+ + * | 29 bit filter | + * +-------------------+ + * | RX FIFO 0 | + * +-------------------+ + * | RX FIFO 1 | + * +-------------------+ + * | RX BUFFERS | + * +-------------------+ + * | TX EVENT FIFO | + * +-------------------+ + * | TX BUFFERS | + * +-------------------+ + * | MEM TRIGG ELMTS | + * +-------------------+ + */ + +#define MAX_RXB_ELEM_SIZE 72 +#define MAX_TXB_ELEM_SIZE 72 +#define TX_EVENT_FIFO_ELEM_SIZE 8 +#define SIDF_ELEM_SIZE 4 +#define XIDF_ELEM_SIZE 8 +#define TXB_ELEM_HEADER_SIZE 8 +#define RXB_ELEM_HEADER_SIZE 8 +#define TRIG_ELEM_SIZE 8 + + +#define CAN_STD_ID 11 +#define CAN_EXT_ID 29 + +#define CAN_STD_ID_MASK 0x000007FFU +#define CAN_EXT_ID_MASK 0x1FFFFFFFU +#define CAN_ERR_MASK 0x1FFFFFFFU + +#define CAN_FMT 0x80000000U /*1= EXT/ 0 = STD */ +#define CAN_RTR 0x40000000U /* RTR */ +#define CAN_ERR 0x20000000U /* ERR/Data message frame */ + +#define CAN_BRS_MASK 0xFE +#define CAN_ESI_MASK 0xFD +#define CAN_FD_MASK 0xFB +#define CAN_DIR_MASK 0xF7 + +#define CAN_BRS_FLAG 0x01 +#define CAN_ESI_FLAG 0x02 +#define CAN_FD_FLAG 0x04 +#define CAN_DIR_RX 0x08 +#define CAN_FD_NON_ISO_FLAG 0x10 + +#define MTTCAN_RAM_SIZE 4096 +#define CAN_WORD_IN_BYTES 4 + +/* ISO 11898-1 */ +#define CAN_MAX_DLC 8 +#define CAN_MAX_DLEN 8 + +/* ISO 11898-7 */ +#define CANFD_MAX_DLC 15 +#define CANFD_MAX_DLEN 64 + +#define MAX_DATA_LEN 64 +#define MAX_RX_ENTRIES 64 +#define MAX_LEC 8 + +#define NUM_CAN_CONTROLLERS 2 + +/* Global Filter Confugration */ +#define GFC_ANFS_RXFIFO_0 0U +#define GFC_ANFS_RXFIFO_1 1U +#define GFC_ANFS_REJECT 3U + +#define GFC_ANFE_RXFIFO_0 0U +#define GFC_ANFE_RXFIFO_1 1U +#define GFC_ANFE_REJECT 3U + +#define GFC_RRFS_REJECT 1U +#define 
GFC_RRFE_REJECT 1U + +/* Filter Element Configuration */ +#define FEC_RXFIFO_0 1U +#define FEC_RXFIFO_1 2U +#define FEC_RXFIFO_0_PRIO 5U +#define FEC_RXFIFO_1_PRIO 6U +#define FEC_RXBUF 7U + +/* Last Error Code */ +enum ttcan_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR = 1, + LEC_FORM_ERROR = 2, + LEC_ACK_ERROR = 3, + LEC_BIT1_ERROR = 4, + LEC_BIT0_ERROR = 5, + LEC_CRC_ERROR = 6, + LEC_NO_CHANGE = 7, +}; + +/*Size of data in an element */ +enum ttcan_data_field_size { + BYTE8 = 0, + BYTE12 = 1, + BYTE16 = 2, + BYTE20 = 3, + BYTE24 = 4, + BYTE32 = 5, + BYTE48 = 6, + BYTE64 = 7 +}; + +enum ttcan_timestamp_source { + TS_DISABLE = 0, + TS_INTERNAL = 1, + TS_EXTERNAL = 2, + TS_DISABLE2 = 3 +}; + +enum ttcan_rx_type { + BUFFER = 1, + FIFO_0 = 2, + FIFO_1 = 4, + TX_EVT = 8 +}; + + +enum ttcan_mram_item { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_TMC, + MRAM_ELEMS +}; + +enum ttcan_tx_conf { + TX_CONF_TXB = 0, + TX_CONF_TXQ, + TX_CONF_QMODE, + TX_CONF_BSIZE, + TX_CONF_MAX +}; + +enum ttcan_rx_conf { + RX_CONF_RXB = 0, + RX_CONF_RXF0, + RX_CONF_RXF1, + RX_CONF_MAX +}; + + +struct ttcan_mram_elem { + u16 off; + u16 num; +}; +/* bit 0 - 28 : CAN identifier + * bit 29 : type of frame (0 = data, 1 = error) + * bit 30 : RTR + * bit 31 : frame format type (0 = std, 1 = ext) + */ + +/* bit 0 : 1 = BRS; 0 = Normal + * bit 1 : 1 = Error Passive; 0 = Error Active + * bit 2 : 1 = CAN FD; 0 = Normal + * bit 4 : 1 = RX; 0 = Tx Direction + */ + + +struct ttcanfd_frame { + u32 can_id; /* FMT/RTR/ERR/ID */ + u8 d_len; /* data length */ + u8 flags; /* FD flags */ + u8 resv0; + u8 resv1; + u8 data[MAX_RX_ENTRIES] __aligned(8); + /* Any new structure entries should be placed below the comment */ + u32 tstamp; +}; + +struct __attribute__((__packed__)) ivc_ttcanfd_frame { + u16 cmdid; + u16 ext_cmdid; + union { + struct ttcanfd_frame frame; + u32 data[19]; + } payload; +}; + +struct ttcan_element_size { + u16 rx_fifo0; + u16 
rx_fifo1; + u16 rx_buffer; + u16 tx_buffer; + u16 tx_fifo; +}; + +struct ttcan_bittiming { + u32 bitrate; /* Bit-rate in bits/second */ + u32 sampling_point; /* Sampling point in one-tenth of a percent */ + u32 tq; /* Time quanta in nenoseconds */ + u32 prop_seg; /* Propogation segment in TQs */ + u32 phase_seg1; /* Phase buffer segment 1 in TQ */ + u32 phase_seg2; /* Phase buffer segment 2 in TQs */ + u32 sjw; /* (re) synchronization jump width in TQs */ + u32 brp; /* bit rate prescalar */ + u32 tdc; /* transceiver delay comp. (1 is enable) */ + u32 tdc_offset; /* transceiver delay comp. offset */ + u32 tdc_filter_window; /* transceiver delay comp. filter window */ +}; + +struct can_bittiming_const_fd { + u8 name[16]; /* Name of the CAN controller hardware */ + u32 tseg1_min; /* Time segement 1 = prop_seg + phase_seg1 */ + u32 tseg1_max; + u32 tseg2_min; /* Time segement 2 = phase_seg2 */ + u32 tseg2_max; + u32 sjw_max; /* Synchronisation jump width */ + u32 brp_min; /* Bit-rate prescaler */ + u32 brp_max; + u32 brp_inc; +}; + +struct ttcan_bittiming_fd { + struct ttcan_bittiming nominal; /* Arb phase bit timing */ + struct ttcan_bittiming data; /* Data phase bit timing */ + u32 fd_flags; /* bit 0: FD; bit 1: BRS */ +}; + +struct ttcan_txbuff_config { + u32 fifo_q_num; + u32 ded_buff_num; + u32 evt_q_num; + enum ttcan_data_field_size dfs; + u32 flags; /* bit 0: 0=Fifo, 1=Queue */ +}; + +struct ttcan_rxbuff_config { + u32 rxq0_dsize; + u32 rxq1_dsize; + u32 rxb_dsize; + u64 rxq0_bmsk; + u64 rxq1_bmsk; + u64 rxb_bmsk; +}; + +struct ttcan_filter_config { + u32 std_fltr_size; + u32 xtd_fltr_size; +}; + +struct ttcan_rx_msg_list { + struct ttcanfd_frame msg; + struct list_head recv_list; +}; + +struct ttcan_txevt_msg_list { + struct mttcan_tx_evt_element txevt; + struct list_head txevt_list; +}; + +struct ttcan_controller { + spinlock_t lock; + struct ttcan_element_size e_size; + struct ttcan_bittiming_fd bt_config; + struct ttcan_txbuff_config tx_config; + struct 
ttcan_rxbuff_config rx_config; + struct ttcan_filter_config fltr_config; + struct ttcan_mram_elem mram_cfg[MRAM_ELEMS]; + struct list_head rx_q0; + struct list_head rx_q1; + struct list_head rx_b; + struct list_head tx_evt; + void __iomem *base; /* controller regs space should be remapped. */ + void __iomem *xbase; /* extra registers are mapped */ + void __iomem *mram_vbase; + size_t mram_base; + u32 mram_size; + u8 tx_buf_dlc[32]; + u32 id; + u32 proto_state; + u32 intr_enable_reg; + u32 intr_tt_enable_reg; + u32 ts_prescalar; + u32 tt_mem_elements; + u32 tdc; + u32 tdc_offset; + unsigned long tx_object; + unsigned long tx_obj_cancelled; + int rxq0_mem; + int rxq1_mem; + int rxb_mem; + int evt_mem; + u16 list_status; /* bit 0: 1=Full; */ + u16 resv0; +}; + +struct ttcan_ivc_msg { + int length; + void *data; +}; + +static inline u8 ttcan_dlc2len(u8 dlc) +{ + return can_fd_dlc2len(dlc); +} + +static inline u8 ttcan_len2dlc(u8 len) +{ + if (len > 64) + return 0xF; + return can_fd_len2dlc(len); +} + +static inline enum ttcan_data_field_size +get_dfs(u32 bytes) +{ + switch (bytes) { + case 8: + return BYTE8; + case 12: + return BYTE12; + case 16: + return BYTE16; + case 20: + return BYTE20; + case 24: + return BYTE24; + case 32: + return BYTE32; + case 48: + return BYTE48; + case 64: + return BYTE64; + default: + return 0; + } +} +static inline int data_in_element( + enum ttcan_data_field_size dfs) +{ + switch (dfs) { + case BYTE8: + return 8; + case BYTE12: + return 12; + case BYTE16: + return 16; + case BYTE20: + return 20; + case BYTE24: + return 24; + case BYTE32: + return 32; + case BYTE48: + return 48; + case BYTE64: + return 64; + default: + return 0; + } +} +static inline u32 ttcan_xread32(struct ttcan_controller *ttcan, int reg) +{ + return (u32) readl(ttcan->xbase + reg); +} + +static inline u32 ttcan_read32(struct ttcan_controller *ttcan, int reg) +{ + return (u32) readl(ttcan->base + reg); +} + +static inline void ttcan_xwrite32(struct ttcan_controller 
*ttcan, + int reg, u32 val) +{ + writel(val, ttcan->xbase + reg); +} + +static inline void ttcan_write32(struct ttcan_controller *ttcan, + int reg, u32 val) +{ + writel(val, ttcan->base + reg); +} + +static inline int ttcan_protected(u32 cccr_reg) +{ + if ((cccr_reg & 0x3) != 0x3) { + pr_err("%s: protected\n", __func__); + return -EPERM; + } + return 0; +} + +void ttcan_print_version(struct ttcan_controller *ttcan); +int ttcan_write32_check(struct ttcan_controller *ttcan, + int offset, u32 val, u32 mask); +void ttcan_set_ok(struct ttcan_controller *ttcan); +int ttcan_set_init(struct ttcan_controller *ttcan); +int ttcan_reset_init(struct ttcan_controller *ttcan); +void ttcan_bus_off_seq(struct ttcan_controller *ttcan); +int ttcan_set_power(struct ttcan_controller *ttcan, int value); +int ttcan_set_config_change_enable(struct ttcan_controller *ttcan); +void ttcan_reset_config_change_enable(struct ttcan_controller *ttcan); +int ttcan_set_baudrate(struct ttcan_controller *ttcan, int fdflags); + +int ttcan_read_txevt_ram(struct ttcan_controller *ttcan, + u32 read_addr, struct mttcan_tx_evt_element *txevt); +int ttcan_read_rx_msg_ram(struct ttcan_controller *ttcan, + u64 addr_in_msg_ram, + struct ttcanfd_frame *ttcanfd); +int ttcan_write_tx_msg_ram(struct ttcan_controller *ttcan, + u32 addr_in_msg_ram, + struct ttcanfd_frame *ttcanfd, + u8 index); + +unsigned int ttcan_read_txevt_fifo(struct ttcan_controller *ttcan); + +unsigned int ttcan_read_rx_fifo0(struct ttcan_controller *ttcan); +unsigned int ttcan_read_rx_fifo1(struct ttcan_controller *ttcan); +unsigned int ttcan_read_rx_fifo(struct ttcan_controller *ttcan); +unsigned int ttcan_read_hp_mesgs(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd); + +void ttcan_set_rx_buffers_elements(struct ttcan_controller *ttcan); + +int ttcan_set_tx_buffer_addr(struct ttcan_controller *ttcan); +int ttcan_tx_fifo_full(struct ttcan_controller *ttcan); +bool ttcan_tx_buffers_full(struct ttcan_controller *ttcan); + +int 
ttcan_tx_fifo_queue_msg(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd); +int ttcan_tx_fifo_get_free_element(struct ttcan_controller *ttcan); + +int ttcan_tx_buf_req_pending(struct ttcan_controller *ttcan, u8 index); +void ttcan_tx_ded_msg_write(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd, + u8 index); +void ttcan_tx_trigger_msg_transmit(struct ttcan_controller *ttcan, u8 index); +int ttcan_tx_msg_buffer_write(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd); + +void ttcan_prog_std_id_fltrs(struct ttcan_controller *ttcan, void *std_shadow); +void ttcan_set_std_id_filter(struct ttcan_controller *ttcan, void *std_shadow, + int filter_index, u8 sft, u8 sfec, u32 sfid1, + u32 sfid2); +u32 ttcan_get_std_id_filter(struct ttcan_controller *ttcan, int idx); + +void ttcan_prog_xtd_id_fltrs(struct ttcan_controller *ttcan, void *xtd_shadow); +void ttcan_set_xtd_id_filter(struct ttcan_controller *ttcan, void *xtd_shadow, + int filter_index, u8 eft, u8 efec, u32 efid1, + u32 efid2); +u64 ttcan_get_xtd_id_filter(struct ttcan_controller *ttcan, int idx); + +void ttcan_set_std_id_filter_addr(struct ttcan_controller *ttcan); +void ttcan_set_xtd_id_filter_addr(struct ttcan_controller *ttcan); +int ttcan_set_gfc(struct ttcan_controller *ttcan, u32 regval); +u32 ttcan_get_gfc(struct ttcan_controller *ttcan); + +int ttcan_set_xidam(struct ttcan_controller *ttcan, u32 regval); +u32 ttcan_get_xidam(struct ttcan_controller *ttcan); + +void ttcan_set_timestamp_offset_sel(struct ttcan_controller *ttcan); +void ttcan_set_time_stamp_conf(struct ttcan_controller *ttcan, + u16 timer_prescalar, + enum ttcan_timestamp_source time_type); +void ttcan_set_txevt_fifo_conf(struct ttcan_controller *ttcan); +void ttcan_set_xtd_mask_add(struct ttcan_controller *ttcan, int extid_mask); +/* Mesg RAM partition */ +void ttcan_mesg_ram_init(struct ttcan_controller *ttcan); +int ttcan_mesg_ram_config(struct ttcan_controller *ttcan, + u32 *arr, u32 *tx_conf , 
u32 *rx_conf); +int ttcan_controller_init(struct ttcan_controller *ttcan, u32 irq_flag, + u32 tt_irq_flag); + +u32 ttcan_read_ecr(struct ttcan_controller *ttcan); +u32 ttcan_read_tx_complete_reg(struct ttcan_controller *ttcan); +void ttcan_set_tx_cancel_request(struct ttcan_controller *ttcan, u32 txbcr); +u32 ttcan_read_tx_cancelled_reg(struct ttcan_controller *ttcan); +u32 ttcan_read_psr(struct ttcan_controller *ttcan); +int ttcan_read_rx_buffer(struct ttcan_controller *ttcan); +int ttcan_set_bitrate(struct ttcan_controller *ttcan); +int ttcan_tx_req_pending(struct ttcan_controller *ttcan); +int ttcan_tx_buff_req_pending(struct ttcan_controller *ttcan, u8 index); + +void ttcan_disable_auto_retransmission( + struct ttcan_controller *ttcan, + bool enable); +int ttcan_set_bus_monitoring_mode(struct ttcan_controller *ttcan, bool enable); +int ttcan_set_loopback(struct ttcan_controller *ttcan); +int ttcan_set_normal_mode(struct ttcan_controller *ttcan); + +/* Interrupt APIs */ +void ttcan_clear_intr(struct ttcan_controller *ttcan); +void ttcan_clear_tt_intr(struct ttcan_controller *ttcan); +void ttcan_ir_write(struct ttcan_controller *ttcan, u32 value); +void ttcan_ttir_write(struct ttcan_controller *ttcan, u32 value); +u32 ttcan_read_ir(struct ttcan_controller *ttcan); +u32 ttcan_read_ttir(struct ttcan_controller *ttcan); +void ttcan_ier_write(struct ttcan_controller *ttcan, u32 val); +void ttcan_ttier_write(struct ttcan_controller *ttcan, u32 val); +void ttcan_set_intrpts(struct ttcan_controller *ttcan, int enable); + +/* TTCAN APIS */ +void ttcan_set_trigger_mem_conf(struct ttcan_controller *ttcan); +int ttcan_set_ttrmc(struct ttcan_controller *ttcan, u32 regval); +u32 ttcan_get_ttrmc(struct ttcan_controller *ttcan); + +void ttcan_set_tt_config(struct ttcan_controller *ttcan, u32 evtp, + u32 ecc, u32 egtf, u32 awl, u32 eecs, + u32 irto, u32 ldsdl, u32 tm, u32 gen, u32 om); +void ttcan_set_ttocf(struct ttcan_controller *ttcan, u32 value); +u32 ttcan_get_ttocf(struct 
ttcan_controller *ttcan); +void ttcan_set_ttmlm(struct ttcan_controller *ttcan, u32 value); +u32 ttcan_get_ttmlm(struct ttcan_controller *ttcan); +void ttcan_set_tttmc(struct ttcan_controller *ttcan, u32 value); +u32 ttcan_get_tttmc(struct ttcan_controller *ttcan); +u32 ttcan_get_cccr(struct ttcan_controller *ttcan); +void ttcan_set_txbar(struct ttcan_controller *ttcan, u32 value); +u32 ttcan_get_ttost(struct ttcan_controller *ttcan); +int ttcan_set_trigger_mem(struct ttcan_controller *ttcan, void *tmc_shadow, + int trig_index, u16 time_mark, u16 cycle_code, u8 tmin, u8 tmex, + u16 trig_type, u8 filter_type, u8 mesg_num); +u64 ttcan_get_trigger_mem(struct ttcan_controller *ttcan, int idx); + + +void ttcan_set_ref_mesg(struct ttcan_controller *ttcan, u32 id, + u32 rmps, u32 xtd); + +int ttcan_set_matrix_limits(struct ttcan_controller *ttcan, + u32 entt, u32 txew, u32 css, u32 ccm); + +int ttcan_set_tur_config(struct ttcan_controller *ttcan, u16 denominator, + u16 numerator, int local_timing_enable); + +void ttcan_prog_trigger_mem(struct ttcan_controller *ttcan, void *tmc_shadow); + +/* list APIs */ +int add_msg_controller_list(struct ttcan_controller *ttcan, + struct ttcanfd_frame *ttcanfd, struct list_head *rx_q, + enum ttcan_rx_type rxtype); + +int add_event_controller_list(struct ttcan_controller *ttcan, + struct mttcan_tx_evt_element *txevt, + struct list_head *evt_q); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +u64 ttcan_read_ts_cntr(const struct cyclecounter *ccnt); +#else +cycle_t ttcan_read_ts_cntr(const struct cyclecounter *ccnt); +#endif +#endif diff --git a/drivers/net/can/mttcan/include/m_ttcan_ivc.h b/drivers/net/can/mttcan/include/m_ttcan_ivc.h new file mode 100644 index 00000000..31a422bb --- /dev/null +++ b/drivers/net/can/mttcan/include/m_ttcan_ivc.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef _M_TTCAN_IVC_H +#define _M_TTCAN_IVC_H + +/*Size of data in an element */ +enum m_ttcan_ivc_msgid { + MTTCAN_MSG_TX = 1, + MTTCAN_MSG_RX = 2, + MTTCAN_MSG_TX_COMPL = 3, + MTTCAN_MSG_STAT_CHG = 4, + MTTCAN_MSG_BERR_CHG = 5, + MTTCAN_MSG_RX_LOST_FRAME = 6, + MTTCAN_MSG_TXEVT = 7, + MTTCAN_CMD_CAN_ENABLE = 8, + MTTCAN_MSG_LAST +}; + +#endif diff --git a/drivers/net/can/mttcan/include/m_ttcan_linux.h b/drivers/net/can/mttcan/include/m_ttcan_linux.h new file mode 100644 index 00000000..adc9cab8 --- /dev/null +++ b/drivers/net/can/mttcan/include/m_ttcan_linux.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef _M_TTCAN_LINUX_H +#define _M_TTCAN_LINUX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_CLK_SRC_TEGRA18_US_TIMER +#include +#endif + +#include +#include "m_ttcan_ivc.h" + +#define MTTCAN_RX_FIFO_INTR (0xFF) +#define MTTCAN_RX_HP_INTR (0x1 << 8) +#define MTTCAN_TX_EV_FIFO_INTR (0xF << 12) + +#define MTTCAN_ERR_INTR (0x1FF9 << 17) +#define MTTCAN_BUS_OFF (1 << 25) +#define MTTCAN_ERR_WARN (1 << 24) +#define MTTCAN_ERR_PASS (1 << 23) + +#define MTT_CAN_NAPI_WEIGHT 64 +#define MTT_CAN_TX_OBJ_NUM 32 +#define MTT_CAN_MAX_MRAM_ELEMS 9 +#define MTT_MAX_TX_CONF 4 +#define MTT_MAX_RX_CONF 3 + +#define MTTCAN_POLL_TIME 50 +#define MTTCAN_HWTS_ROLLOVER 250 +/* block period in ms */ +#define TX_BLOCK_PERIOD 200 +#define TSC_REF_CLK_RATE 31250000 + +#define MTTCAN_TSC_SIZE 16U +#define MTTCAN_TSC_MASK 0xFFFFULL +#define TSC_REF_CLK_SHIFT 9U + +struct tegra_mttcan_soc_info { + bool set_can_core_clk; + unsigned long can_core_clk_rate; + unsigned long can_clk_rate; + bool use_external_timer; +}; + +struct can_gpio { + 
int gpio; + int active_low; +}; + +struct mttcan_priv { + struct can_priv can; + struct ttcan_controller *ttcan; + const struct tegra_mttcan_soc_info *sinfo; + struct delayed_work can_work; + struct delayed_work drv_restart_work; + struct napi_struct napi; + struct net_device *dev; + struct device *device; + struct clk *can_clk, *host_clk, *core_clk; + struct can_gpio gpio_can_en; + struct can_gpio gpio_can_stb; + struct timer_list timer; + struct cyclecounter cc; + struct timecounter tc; + struct hwtstamp_config hwtstamp_config; + struct mbox_client cl; + struct completion xfer_completion; + struct mbox_chan *mbox; + raw_spinlock_t tc_lock; /* lock to protect timecounter infra */ + spinlock_t tslock; /* lock to protect ioctl */ + spinlock_t tx_lock; /* lock to protect transmit path */ + void __iomem *regs; + void __iomem *mres; + void *std_shadow; + void *xtd_shadow; + void *tmc_shadow; + u32 gfc_reg; + u32 xidam_reg; + u32 irq_flags; + u32 irq_ttflags; + u32 irqstatus; + u32 tt_irqstatus; + u32 instance; + int tt_intrs; + int tt_param[2]; + u32 mram_param[MTT_CAN_MAX_MRAM_ELEMS]; + u32 tx_conf[MTT_MAX_TX_CONF]; /**/ + u32 rx_conf[MTT_MAX_RX_CONF]; /**/ + bool poll; + bool hwts_rx_en; + u32 resp; +}; + +int mttcan_create_sys_files(struct device *dev); +void mttcan_delete_sys_files(struct device *dev); +#endif diff --git a/drivers/net/can/mttcan/include/m_ttcan_regdef.h b/drivers/net/can/mttcan/include/m_ttcan_regdef.h new file mode 100644 index 00000000..51a7b4b3 --- /dev/null +++ b/drivers/net/can/mttcan/include/m_ttcan_regdef.h @@ -0,0 +1,872 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef M_TTCAN_REGDEF_H_ +#define M_TTCAN_REGDEF_H_ + +#define ADR_MTTCAN_CREL 0x00 + +#define ADR_MTTCAN_ENDN 0x004 +#define DEF_MTTCAN_ENDN 0x87654321 + +#define ADR_MTTCAN_DBTP 0x00C +#define DEF_MTTCAN_DBTP 0x00000A33 +#define MTTCAN_DBTP_MSK 0x009F1FFF + +#define ADR_MTTCAN_TEST 0x010 +#define DEF_MTTCAN_TEST 0x00000080 +#define MTTCAN_TEST_MSK 0xFFFF8000 + +#define ADR_MTTCAN_RWD 0x014 +#define DEF_MTTCAN_RWD 0x00000000 + +#define ADR_MTTCAN_CCCR 0x018 +#define DEF_MTTCAN_CCCR 0x00000001 +#define MTTCAN_CCCR_MSK 0X0000F3FF + +#define ADR_MTTCAN_NBTP 0x01C +#define DEF_MTTCAN_NBTP 0x00000A33 +#define MTTCAN_NBTP_MSK 0x03FF3FFF + +#define ADR_MTTCAN_TSCC 0x020 +#define DEF_MTTCAN_TSCC 0x00000000 + +#define ADR_MTTCAN_TSCV 0x024 +#define DEF_MTTCAN_TSCV 0x00000000 + +#define ADR_MTTCAN_TOCC 0x028 +#define DEF_MTTCAN_TOCC 0xFFFF0000 + +#define ADR_MTTCAN_TOCV 0x02C +#define DEF_MTTCAN_TOCV 0x0000FFFF + +#define ADR_MTTCAN_ECR 0x040 +#define DEF_MTTCAN_ECR 0x00000000 + +#define ADR_MTTCAN_PSR 0x044 +#define DEF_MTTCAN_PSR 0x00000707 + +#define ADR_MTTCAN_TDCR 0x048 +#define DEF_MTTCAN_TDCR 0x00000000 +#define MTTCAN_TDCR_MSK 0x00007F7F + +#define ADR_MTTCAN_IR 0x050 +#define DEF_MTTCAN_IR 0x00000000 + +#define ADR_MTTCAN_IE 0x054 +#define DEF_MTTCAN_IE 0x00000000 + +#define ADR_MTTCAN_ILS 0x058 +#define DEF_MTTCAN_ILS 0x00000000 + +#define ADR_MTTCAN_ILE 0x05C +#define DEF_MTTCAN_ILE 0x00000000 + +#define ADR_MTTCAN_GFC 0x080 +#define DEF_MTTCAN_GFC 0x00000000 +#define MTTCAN_GFC_MSK 0x0000003F + +#define ADR_MTTCAN_SIDFC 0x084 +#define DEF_MTTCAN_SIDFC 0x00000000 + +#define ADR_MTTCAN_XIDFC 0x088 +#define DEF_MTTCAN_XIDFC 0x00000000 + +#define ADR_MTTCAN_XIDAM 0x090 +#define DEF_MTTCAN_XIDAM 0x1FFFFFFF +#define MTTCAN_XIDAM_MSK 0x1FFFFFFF + +#define ADR_MTTCAN_HPMS 0x094 +#define DEF_MTTCAN_HPMS 0x00000000 + +#define ADR_MTTCAN_NDAT1 0x098 +#define DEF_MTTCAN_NDAT1 0x00000000 + +#define ADR_MTTCAN_NDAT2 0x09C +#define DEF_MTTCAN_NDAT2 0x00000000 + 
+#define ADR_MTTCAN_RXF0C 0x0A0 +#define DEF_MTTCAN_RXF0C 0x00000000 + +#define ADR_MTTCAN_RXF0S 0x0A4 +#define DEF_MTTCAN_RXF0S 0x00000000 + +#define ADR_MTTCAN_RXF0A 0x0A8 +#define DEF_MTTCAN_RXF0A 0x00000000 + +#define ADR_MTTCAN_RXBC 0x0AC +#define DEF_MTTCAN_RXBC 0x00000000 + +#define ADR_MTTCAN_RXF1C 0x0B0 +#define DEF_MTTCAN_RXF1C 0x00000000 + +#define ADR_MTTCAN_RXF1S 0x0B4 +#define DEF_MTTCAN_RXF1S 0x00000000 + +#define ADR_MTTCAN_RXF1A 0x0B8 +#define DEF_MTTCAN_RXF1A 0x00000000 + +#define ADR_MTTCAN_RXESC 0x0BC +#define DEF_MTTCAN_RXESC 0x00000000 + +#define ADR_MTTCAN_TXBC 0x0C0 +#define DEF_MTTCAN_TXBC 0x00000000 +#define MTTCAN_TXBC_MSK 0x7F3FFFFC + +#define ADR_MTTCAN_TXFQS 0x0C4 +#define DEF_MTTCAN_TXFQS 0x00000000 + +#define ADR_MTTCAN_TXESC 0x0C8 +#define DEF_MTTCAN_TXESC 0x00000000 +#define MTTCAN_TXESC_MSK 0x00000007 + +#define ADR_MTTCAN_TXBRP 0x0CC +#define DEF_MTTCAN_TXBRP 0x00000000 + +#define ADR_MTTCAN_TXBAR 0x0D0 +#define DEF_MTTCAN_TXBAR 0x00000000 + +#define ADR_MTTCAN_TXBCR 0x0D4 +#define DEF_MTTCAN_TXBCR 0x00000000 + +#define ADR_MTTCAN_TXBTO 0x0D8 +#define DEF_MTTCAN_TXBTO 0x00000000 + +#define ADR_MTTCAN_TXBCF 0x0DC +#define DEF_MTTCAN_TXBCF 0x00000000 + +#define ADR_MTTCAN_TXBTIE 0x0E0 +#define DEF_MTTCAN_TXBTIE 0x00000000 + +#define ADR_MTTCAN_TXBCIE 0x0E4 +#define DEF_MTTCAN_TXBCIE 0x00000000 + +#define ADR_MTTCAN_TXEFC 0x0F0 +#define DEF_MTTCAN_TXEFC 0x00000000 + +#define ADR_MTTCAN_TXEFS 0x0F4 +#define DEF_MTTCAN_TXEFS 0x00000000 + +#define ADR_MTTCAN_TXEFA 0x0F8 +#define DEF_MTTCAN_TXEFA 0x00000000 + +#define ADR_MTTCAN_TTTMC 0x100 +#define DEF_MTTCAN_TTTMC 0x00000000 + +#define ADR_MTTCAN_TTRMC 0x104 +#define DEF_MTTCAN_TTRMC 0x00000000 +#define MTTCAN_TTRMC_MSK 0xDFFFFFFF + +#define ADR_MTTCAN_TTOCF 0x108 +#define DEF_MTTCAN_TTOCF 0x00010000 + +#define ADR_MTTCAN_TTMLM 0x10C +#define DEF_MTTCAN_TTMLM 0x00000000 + +#define ADR_MTTCAN_TURCF 0x110 +#define DEF_MTTCAN_TURCF 0x10000000 + +#define ADR_MTTCAN_TTOCN 0x114 +#define 
DEF_MTTCAN_TTOCN 0x00000000 + +#define ADR_MTTCAN_TTGTP 0x118 +#define DEF_MTTCAN_TTGTP 0x00000000 + +#define ADR_MTTCAN_TTTMK 0x11C +#define DEF_MTTCAN_TTTMK 0x00000000 + +#define ADR_MTTCAN_TTIR 0x120 +#define DEF_MTTCAN_TTIR 0x00000000 + +#define ADR_MTTCAN_TTIE 0x124 +#define DEF_MTTCAN_TTIE 0x00000000 + +#define ADR_MTTCAN_TTILS 0x128 +#define DEF_MTTCAN_TTILS 0x00000000 + +#define ADR_MTTCAN_TTOST 0x12C +#define DEF_MTTCAN_TTOST 0x00000080 + +#define ADR_MTTCAN_TURNA 0x130 +#define DEF_MTTCAN_TURNA 0x00010000 + +#define ADR_MTTCAN_TTLGT 0x134 +#define DEF_MTTCAN_TTLGT 0x00000000 + +#define ADR_MTTCAN_TTCTC 0x138 +#define DEF_MTTCAN_TTCTC 0x003F0000 + +#define ADR_MTTCAN_TTCPT 0x13C +#define DEF_MTTCAN_TTCPT 0x00000000 + +#define ADR_MTTCAN_TTCSM 0x140 +#define DEF_MTTCAN_TTCSM 0x00000000 + +#define MTT_CREL_DAY_SHIFT 0 +#define MTT_CREL_DAY_MASK (((1<<8)-1) << MTT_CREL_DAY_SHIFT) +#define MTT_CREL_MON_SHIFT 8 +#define MTT_CREL_MON_MASK (((1<<8)-1) << MTT_CREL_MON_SHIFT) +#define MTT_CREL_YEAR_SHIFT 16 +#define MTT_CREL_YEAR_MASK (((1<<4)-1) << MTT_CREL_YEAR_SHIFT) +#define MTT_CREL_SUBS_SHIFT 20 +#define MTT_CREL_SUBS_MASK (((1<<4)-1) << MTT_CREL_SUBS_SHIFT) +#define MTT_CREL_STEP_SHIFT 24 +#define MTT_CREL_STEP_MASK (((1<<4)-1) << MTT_CREL_STEP_SHIFT) +#define MTT_CREL_REL_SHIFT 28 +#define MTT_CREL_REL_MASK (((1<<4)-1) << MTT_CREL_REL_SHIFT) + +#define MTT_DBTP_DSJW_SHIFT 0 +#define MTT_DBTP_DSJW_MASK (((1<<4)-1) << MTT_DBTP_DSJW_SHIFT) +#define MTT_DBTP_DTSEG2_SHIFT 4 +#define MTT_DBTP_DTSEG2_MASK (((1<<4)-1) << MTT_DBTP_DTSEG2_SHIFT) +#define MTT_DBTP_DTSEG1_SHIFT 8 +#define MTT_DBTP_DTSEG1_MASK (((1<<5)-1) << MTT_DBTP_DTSEG1_SHIFT) +#define MTT_DBTP_DBRP_SHIFT 16 +#define MTT_DBTP_DBRP_MASK (((1<<5)-1) << MTT_DBTP_DBRP_SHIFT) +#define MTT_DBTP_TDC_SHIFT 23 +#define MTT_DBTP_TDC_MASK (((1<<1)-1) << MTT_DBTP_TDC_SHIFT) + +#define MTT_TEST_TAM_SHIFT 0 +#define MTT_TEST_TAM_MASK (((1<<1)-1) << MTT_TEST_TAM_SHIFT) +#define MTT_TEST_TAT_SHIFT 1 +#define 
MTT_TEST_TAT_MASK (((1<<1)-1) << MTT_TEST_TAT_SHIFT) +#define MTT_TEST_CAM_SHIFT 2 +#define MTT_TEST_CAM_MASK (((1<<1)-1) << MTT_TEST_CAM_SHIFT) +#define MTT_TEST_CAT_SHIFT 3 +#define MTT_TEST_CAT_MASK (((1<<1)-1) << MTT_TEST_CAT_SHIFT) +#define MTT_TEST_LBCK_SHIFT 4 +#define MTT_TEST_LBCK_MASK (((1<<1)-1) << MTT_TEST_LBCK_SHIFT) +#define MTT_TEST_TX_SHIFT 5 +#define MTT_TEST_TX_MASK (((1<<2)-1) << MTT_TEST_TX_SHIFT) +#define MTT_TEST_RX_SHIFT 7 +#define MTT_TEST_RX_MASK (((1<<1)-1) << MTT_TEST_RX_SHIFT) + +#define MTT_CCCR_INIT_SHIFT 0 +#define MTT_CCCR_INIT_MASK (((1<<1)-1) << MTT_CCCR_INIT_SHIFT) +#define MTT_CCCR_CCE_SHIFT 1 +#define MTT_CCCR_CCE_MASK (((1<<1)-1) << MTT_CCCR_CCE_SHIFT) +#define MTT_CCCR_ASM_SHIFT 2 +#define MTT_CCCR_ASM_MASK (((1<<1)-1) << MTT_CCCR_ASM_SHIFT) +#define MTT_CCCR_CSA_SHIFT 3 +#define MTT_CCCR_CSA_MASK (((1<<1)-1) << MTT_CCCR_CSA_SHIFT) +#define MTT_CCCR_CSR_SHIFT 4 +#define MTT_CCCR_CSR_MASK (((1<<1)-1) << MTT_CCCR_CSR_SHIFT) +#define MTT_CCCR_MON_SHIFT 5 +#define MTT_CCCR_MON_MASK (((1<<1)-1) << MTT_CCCR_MON_SHIFT) +#define MTT_CCCR_DAR_SHIFT 6 +#define MTT_CCCR_DAR_MASK (((1<<1)-1) << MTT_CCCR_DAR_SHIFT) +#define MTT_CCCR_TEST_SHIFT 7 +#define MTT_CCCR_TEST_MASK (((1<<1)-1) << MTT_CCCR_TEST_SHIFT) +#define MTT_CCCR_FDOE_SHIFT 8 +#define MTT_CCCR_FDOE_MASK (((1<<1)-1) << MTT_CCCR_FDOE_SHIFT) +#define MTT_CCCR_BRSE_SHIFT 9 +#define MTT_CCCR_BRSE_MASK (((1<<1)-1) << MTT_CCCR_BRSE_SHIFT) +#define MTT_CCCR_PXHD_SHIFT 12 +#define MTT_CCCR_PXHD_MASK (((1<<1)-1) << MTT_CCCR_PXHD_SHIFT) +#define MTT_CCCR_EFB1_SHIFT 13 +#define MTT_CCCR_EFB1_MASK (((1<<1)-1) << MTT_CCCR_EFB1_SHIFT) +#define MTT_CCCR_TXP_SHIFT 14 +#define MTT_CCCR_TXP_MASK (((1<<1)-1) << MTT_CCCR_TXP_SHIFT) +#define MTT_CCCR_NISO_SHIFT 15 +#define MTT_CCCR_NISO_MASK (((1<<1)-1) << MTT_CCCR_NISO_SHIFT) + +#define MTT_NBTP_NTSEG2_SHIFT 0 +#define MTT_NBTP_NTSEG2_MASK (((1<<7)-1) << MTT_NBTP_NTSEG2_SHIFT) +#define MTT_NBTP_NTSEG1_SHIFT 8 +#define MTT_NBTP_NTSEG1_MASK 
(((1<<8)-1) << MTT_NBTP_NTSEG1_SHIFT) +#define MTT_NBTP_NBRP_SHIFT 16 +#define MTT_NBTP_NBRP_MASK (((1<<9)-1) << MTT_NBTP_NBRP_SHIFT) +#define MTT_NBTP_NSJW_SHIFT 25 +#define MTT_NBTP_NSJW_MASK (((1<<7)-1) << MTT_NBTP_NSJW_SHIFT) + +#define MTT_TSCC_TSS_SHIFT 0 +#define MTT_TSCC_TSS_MASK (((1<<2)-1) << MTT_TSCC_TSS_SHIFT) +#define MTT_TSCC_TCP_SHIFT 16 +#define MTT_TSCC_TCP_MASK (((1<<4)-1) << MTT_TSCC_TCP_SHIFT) + +#define MTT_ECR_TEC_SHIFT 0 +#define MTT_ECR_TEC_MASK (((1<<8)-1) << MTT_ECR_TEC_SHIFT) +#define MTT_ECR_REC_SHIFT 8 +#define MTT_ECR_REC_MASK (((1<<7)-1) << MTT_ECR_REC_SHIFT) +#define MTT_ECR_RP_SHIFT 15 +#define MTT_ECR_RP_MASK (((1<<1)-1) << MTT_ECR_RP_SHIFT) +#define MTT_ECR_CEL_SHIFT 16 +#define MTT_ECR_CEL_MASK (((1<<8)-1) << MTT_ECR_CEL_SHIFT) + +#define MTT_PSR_LEC_SHIFT 0 +#define MTT_PSR_LEC_MASK (((1<<3)-1) << MTT_PSR_LEC_SHIFT) +#define MTT_PSR_ACT_SHIFT 3 +#define MTT_PSR_ACT_MASK (((1<<2)-1) << MTT_PSR_ACT_SHIFT) +#define MTT_PSR_EP_SHIFT 5 +#define MTT_PSR_EP_MASK (((1<<1)-1) << MTT_PSR_EP_SHIFT) +#define MTT_PSR_EW_SHIFT 6 +#define MTT_PSR_EW_MASK (((1<<1)-1) << MTT_PSR_EW_SHIFT) +#define MTT_PSR_BO_SHIFT 7 +#define MTT_PSR_BO_MASK (((1<<1)-1) << MTT_PSR_BO_SHIFT) +#define MTT_PSR_DLEC_SHIFT 8 +#define MTT_PSR_DLEC_MASK (((1<<3)-1) << MTT_PSR_DLEC_SHIFT) +#define MTT_PSR_RESI_SHIFT 11 +#define MTT_PSR_RESI_MASK (((1<<1)-1) << MTT_PSR_RESI_SHIFT) +#define MTT_PSR_RBRS_SHIFT 12 +#define MTT_PSR_RBRS_MASK (((1<<1)-1) << MTT_PSR_RBRS_SHIFT) +#define MTT_PSR_RFDF_SHIFT 13 +#define MTT_PSR_RFDF_MASK (((1<<1)-1) << MTT_PSR_RFDF_SHIFT) +#define MTT_PSR_PXE_SHIFT 14 +#define MTT_PSR_PXE_MASK (((1<<1)-1) << MTT_PSR_PXE_SHIFT) +#define MTT_PSR_TDCV_SHIFT 16 +#define MTT_PSR_TDCV_MASK (((1<<7)-1) << MTT_PSR_TDCV_SHIFT) + +#define MTT_TDCR_TDCF_SHIFT 0 +#define MTT_TDCR_TDCF_MASK (((1<<7)-1) << MTT_TDCR_TDCF_SHIFT) +#define MTT_TDCR_TDCO_SHIFT 8 +#define MTT_TDCR_TDCO_MASK (((1<<7)-1) << MTT_TDCR_TDCO_SHIFT) + +#define MTT_IR_RF0N_SHIFT 0 +#define 
MTT_IR_RF0N_MASK (((1<<1)-1) << MTT_IR_RF0N_SHIFT) +#define MTT_IR_RF0W_SHIFT 1 +#define MTT_IR_RF0W_MASK (((1<<1)-1) << MTT_IR_RF0W_SHIFT) +#define MTT_IR_RF0F_SHIFT 2 +#define MTT_IR_RF0F_MASK (((1<<1)-1) << MTT_IR_RF0F_SHIFT) +#define MTT_IR_RF0L_SHIFT 3 +#define MTT_IR_RF0L_MASK (((1<<1)-1) << MTT_IR_RF0L_SHIFT) +#define MTT_IR_RF1N_SHIFT 4 +#define MTT_IR_RF1N_MASK (((1<<1)-1) << MTT_IR_RF1N_SHIFT) +#define MTT_IR_RF1W_SHIFT 5 +#define MTT_IR_RF1W_MASK (((1<<1)-1) << MTT_IR_RF1W_SHIFT) +#define MTT_IR_RF1F_SHIFT 6 +#define MTT_IR_RF1F_MASK (((1<<1)-1) << MTT_IR_RF1F_SHIFT) +#define MTT_IR_RF1L_SHIFT 7 +#define MTT_IR_RF1L_MASK (((1<<1)-1) << MTT_IR_RF1L_SHIFT) +#define MTT_IR_HPM_SHIFT 8 +#define MTT_IR_HPM_MASK (((1<<1)-1) << MTT_IR_HPM_SHIFT) +#define MTT_IR_TC_SHIFT 9 +#define MTT_IR_TC_MASK (((1<<1)-1) << MTT_IR_TC_SHIFT) +#define MTT_IR_TCF_SHIFT 10 +#define MTT_IR_TCF_MASK (((1<<1)-1) << MTT_IR_TCF_SHIFT) +#define MTT_IR_TFE_SHIFT 11 +#define MTT_IR_TFE_MASK (((1<<1)-1) << MTT_IR_TFE_SHIFT) +#define MTT_IR_TEFN_SHIFT 12 +#define MTT_IR_TEFN_MASK (((1<<1)-1) << MTT_IR_TEFN_SHIFT) +#define MTT_IR_TEFW_SHIFT 13 +#define MTT_IR_TEFW_MASK (((1<<1)-1) << MTT_IR_TEFW_SHIFT) +#define MTT_IR_TEFF_SHIFT 14 +#define MTT_IR_TEFF_MASK (((1<<1)-1) << MTT_IR_TEFF_SHIFT) +#define MTT_IR_TEFL_SHIFT 15 +#define MTT_IR_TEFL_MASK (((1<<1)-1) << MTT_IR_TEFL_SHIFT) +#define MTT_IR_TSW_SHIFT 16 +#define MTT_IR_TSW_MASK (((1<<1)-1) << MTT_IR_TSW_SHIFT) +#define MTT_IR_MRAF_SHIFT 17 +#define MTT_IR_MRAF_MASK (((1<<1)-1) << MTT_IR_MRAF_SHIFT) +#define MTT_IR_TOO_SHIFT 18 +#define MTT_IR_TOO_MASK (((1<<1)-1) << MTT_IR_TOO_SHIFT) +#define MTT_IR_DRX_SHIFT 19 +#define MTT_IR_DRX_MASK (((1<<1)-1) << MTT_IR_DRX_SHIFT) +#define MTT_IR_BEC_SHIFT 20 +#define MTT_IR_BEC_MASK (((1<<1)-1) << MTT_IR_BEC_SHIFT) +#define MTT_IR_BEU_SHIFT 21 +#define MTT_IR_BEU_MASK (((1<<1)-1) << MTT_IR_BEU_SHIFT) +#define MTT_IR_ELO_SHIFT 22 +#define MTT_IR_ELO_MASK (((1<<1)-1) << MTT_IR_ELO_SHIFT) +#define 
MTT_IR_EP_SHIFT 23 +#define MTT_IR_EP_MASK (((1<<1)-1) << MTT_IR_EP_SHIFT) +#define MTT_IR_EW_SHIFT 24 +#define MTT_IR_EW_MASK (((1<<1)-1) << MTT_IR_EW_SHIFT) +#define MTT_IR_BO_SHIFT 25 +#define MTT_IR_BO_MASK (((1<<1)-1) << MTT_IR_BO_SHIFT) +#define MTT_IR_WDI_SHIFT 26 +#define MTT_IR_WDI_MASK (((1<<1)-1) << MTT_IR_WDI_SHIFT) +#define MTT_IR_PEA_SHIFT 27 +#define MTT_IR_PEA_MASK (((1<<1)-1) << MTT_IR_PEA_SHIFT) +#define MTT_IR_PED_SHIFT 28 +#define MTT_IR_PED_MASK (((1<<1)-1) << MTT_IR_PED_SHIFT) +#define MTT_IR_ARA_SHIFT 29 +#define MTT_IR_ARA_MASK (((1<<1)-1) << MTT_IR_ARA_SHIFT) + +#define MTT_IE_RF0NE_SHIFT 0 +#define MTT_IE_RF0NE_MASK (((1<<1)-1) << MTT_IE_RF0NE_SHIFT) +#define MTT_IE_RF0WE_SHIFT 1 +#define MTT_IE_RF0WE_MASK (((1<<1)-1) << MTT_IE_RF0WE_SHIFT) +#define MTT_IE_RF0FE_SHIFT 2 +#define MTT_IE_RF0FE_MASK (((1<<1)-1) << MTT_IE_RF0FE_SHIFT) +#define MTT_IE_RF0LE_SHIFT 3 +#define MTT_IE_RF0LE_MASK (((1<<1)-1) << MTT_IE_RF0LE_SHIFT) +#define MTT_IE_RF1NE_SHIFT 4 +#define MTT_IE_RF1NE_MASK (((1<<1)-1) << MTT_IE_RF1NE_SHIFT) +#define MTT_IE_RF1WE_SHIFT 5 +#define MTT_IE_RF1WE_MASK (((1<<1)-1) << MTT_IE_RF1WE_SHIFT) +#define MTT_IE_RF1FE_SHIFT 6 +#define MTT_IE_RF1FE_MASK (((1<<1)-1) << MTT_IE_RF1FE_SHIFT) +#define MTT_IE_RF1LE_SHIFT 7 +#define MTT_IE_RF1LE_MASK (((1<<1)-1) << MTT_IE_RF1LE_SHIFT) +#define MTT_IE_HPME_SHIFT 8 +#define MTT_IE_HPME_MASK (((1<<1)-1) << MTT_IE_HPME_SHIFT) +#define MTT_IE_TCE_SHIFT 9 +#define MTT_IE_TCE_MASK (((1<<1)-1) << MTT_IE_TCE_SHIFT) +#define MTT_IE_TCFE_SHIFT 10 +#define MTT_IE_TCFE_MASK (((1<<1)-1) << MTT_IE_TCFE_SHIFT) +#define MTT_IE_TFEE_SHIFT 11 +#define MTT_IE_TFEE_MASK (((1<<1)-1) << MTT_IE_TFEE_SHIFT) +#define MTT_IE_TEFNE_SHIFT 12 +#define MTT_IE_TEFNE_MASK (((1<<1)-1) << MTT_IE_TEFNE_SHIFT) +#define MTT_IE_TEFWE_SHIFT 13 +#define MTT_IE_TEFWE_MASK (((1<<1)-1) << MTT_IE_TEFWE_SHIFT) +#define MTT_IE_TEFFE_SHIFT 14 +#define MTT_IE_TEFFE_MASK (((1<<1)-1) << MTT_IE_TEFFE_SHIFT) +#define MTT_IE_TEFLE_SHIFT 15 
+#define MTT_IE_TEFLE_MASK (((1<<1)-1) << MTT_IE_TEFLE_SHIFT) +#define MTT_IE_TESWE_SHIFT 16 +#define MTT_IE_TESWE_MASK (((1<<1)-1) << MTT_IE_TESWE_SHIFT) +#define MTT_IE_MRAFE_SHIFT 17 +#define MTT_IE_MRAFE_MASK (((1<<1)-1) << MTT_IE_MRAFE_SHIFT) +#define MTT_IE_TOOE_SHIFT 18 +#define MTT_IE_TOOE_MASK (((1<<1)-1) << MTT_IE_TOOE_SHIFT) +#define MTT_IE_DRXE_SHIFT 19 +#define MTT_IE_DRXE_MASK (((1<<1)-1) << MTT_IE_DRXE_SHIFT) +#define MTT_IE_BECE_SHIFT 20 +#define MTT_IE_BECE_MASK (((1<<1)-1) << MTT_IE_BECE_SHIFT) +#define MTT_IE_BEUE_SHIFT 21 +#define MTT_IE_BEUE_MASK (((1<<1)-1) << MTT_IE_BEUE_SHIFT) +#define MTT_IE_ELOE_SHIFT 22 +#define MTT_IE_ELOE_MASK (((1<<1)-1) << MTT_IE_ELOE_SHIFT) +#define MTT_IE_EPE_SHIFT 23 +#define MTT_IE_EPE_MASK (((1<<1)-1) << MTT_IE_EPE_SHIFT) +#define MTT_IE_EWE_SHIFT 24 +#define MTT_IE_EWE_MASK (((1<<1)-1) << MTT_IE_EWE_SHIFT) +#define MTT_IE_BOE_SHIFT 25 +#define MTT_IE_BOE_MASK (((1<<1)-1) << MTT_IE_BOE_SHIFT) +#define MTT_IE_WDIE_SHIFT 26 +#define MTT_IE_WDIE_MASK (((1<<1)-1) << MTT_IE_WDIE_SHIFT) +#define MTT_IE_PEAE_SHIFT 27 +#define MTT_IE_PEAE_MASK (((1<<1)-1) << MTT_IE_PEAE_SHIFT) +#define MTT_IE_PEDE_SHIFT 28 +#define MTT_IE_PEDE_MASK (((1<<1)-1) << MTT_IE_PEDE_SHIFT) +#define MTT_IE_ARAE_SHIFT 29 +#define MTT_IE_ARAE_MASK (((1<<1)-1) << MTT_IE_ARAE_SHIFT) + +#define MTT_GFC_RRFE_SHIFT 0 +#define MTT_GFC_RRFE_MASK (((1<<1)-1) << MTT_GFC_RRFE_SHIFT) +#define MTT_GFC_RRFS_SHIFT 1 +#define MTT_GFC_RRFS_MASK (((1<<1)-1) << MTT_GFC_RRFS_SHIFT) +#define MTT_GFC_ANFE_SHIFT 2 +#define MTT_GFC_ANFE_MASK (((1<<2)-1) << MTT_GFC_ANFE_SHIFT) +#define MTT_GFC_ANFS_SHIFT 4 +#define MTT_GFC_ANFS_MASK (((1<<2)-1) << MTT_GFC_ANFS_SHIFT) + +#define MTT_SIDFC_FLSSA_SHIFT 2 +#define MTT_SIDFC_FLSSA_MASK (((1<<14)-1) << MTT_SIDFC_FLSSA_SHIFT) +#define MTT_SIDFC_LSS_SHIFT 16 +#define MTT_SIDFC_LSS_MASK (((1<<8)-1) << MTT_SIDFC_LSS_SHIFT) + +#define MTT_XIDFC_FLESA_SHIFT 2 +#define MTT_XIDFC_FLESA_MASK (((1<<14)-1) << MTT_XIDFC_FLESA_SHIFT) 
+#define MTT_XIDFC_LSE_SHIFT 16 +#define MTT_XIDFC_LSE_MASK (((1<<7)-1) << MTT_XIDFC_LSE_SHIFT) + +#define MTT_XIDAM_EIDM_SHIFT 0 +#define MTT_XIDAM_EIDM_MASK (((1<<29)-1) << MTT_XIDAM_EIDM_SHIFT) + +#define MTT_HPMS_BIDX_SHIFT 0 +#define MTT_HPMS_BIDX_MASK (((1<<6)-1) << MTT_HPMS_BIDX_SHIFT) +#define MTT_HPMS_MSI_SHIFT 6 +#define MTT_HPMS_MSI_MASK (((1<<2)-1) << MTT_HPMS_MSI_SHIFT) +#define MTT_HPMS_FIDX_SHIFT 8 +#define MTT_HPMS_FIDX_MASK (((1<<7)-1) << MTT_HPMS_FIDX_SHIFT) +#define MTT_HPMS_FLST_SHIFT 15 +#define MTT_HPMS_FLST_MASK (((1<<1)-1) << MTT_HPMS_FLST_SHIFT) + +#define MTT_RXF0C_F0SA_SHIFT 2 +#define MTT_RXF0C_F0SA_MASK (((1<<14)-1) << MTT_RXF0C_F0SA_SHIFT) +#define MTT_RXF0C_F0S_SHIFT 16 +#define MTT_RXF0C_F0S_MASK (((1<<7)-1) << MTT_RXF0C_F0S_SHIFT) +#define MTT_RXF0C_F0WM_SHIFT 24 +#define MTT_RXF0C_F0WM_MASK (((1<<7)-1) << MTT_RXF0C_F0WM_SHIFT) +#define MTT_RXF0C_F0OM_SHIFT 31 +#define MTT_RXF0C_F0OM_MASK (((1<<1)-1) << MTT_RXF0C_F0OM_SHIFT) + +#define MTT_RXF0S_F0FL_SHIFT 0 +#define MTT_RXF0S_F0FL_MASK (((1<<7)-1) << MTT_RXF0S_F0FL_SHIFT) +#define MTT_RXF0S_F0GI_SHIFT 8 +#define MTT_RXF0S_F0GI_MASK (((1<<6)-1) << MTT_RXF0S_F0GI_SHIFT) +#define MTT_RXF0S_F0PI_SHIFT 16 +#define MTT_RXF0S_F0PI_MASK (((1<<6)-1) << MTT_RXF0S_F0PI_SHIFT) +#define MTT_RXF0S_F0F_SHIFT 24 +#define MTT_RXF0S_F0F_MASK (((1<<1)-1) << MTT_RXF0S_F0F_SHIFT) +#define MTT_RXF0S_RF0L_SHIFT 25 +#define MTT_RXF0S_RF0L_MASK (((1<<1)-1) << MTT_RXF0S_RF0L_SHIFT) + +#define MTT_RXF0A_F0AI_SHIFT 0 +#define MTT_RXF0A_F0AI_MASK (((1<<6)-1) << MTT_RXF0A_F0AI_SHIFT) + +#define MTT_RXBC_RBSA_SHIFT 2 +#define MTT_RXBC_RBSA_MASK (((1<<14)-1) << MTT_RXBC_RBSA_SHIFT) + +#define MTT_RXF1C_F1SA_SHIFT 2 +#define MTT_RXF1C_F1SA_MASK (((1<<14)-1) << MTT_RXF1C_F1SA_SHIFT) +#define MTT_RXF1C_F1S_SHIFT 16 +#define MTT_RXF1C_F1S_MASK (((1<<7)-1) << MTT_RXF1C_F1S_SHIFT) +#define MTT_RXF1C_F1WM_SHIFT 24 +#define MTT_RXF1C_F1WM_MASK (((1<<7)-1) << MTT_RXF1C_F1WM_SHIFT) +#define MTT_RXF1C_F1OM_SHIFT 31 +#define 
MTT_RXF1C_F1OM_MASK (((1<<1)-1) << MTT_RXF1C_F1OM_SHIFT) + +#define MTT_RXF1S_F1FL_SHIFT 0 +#define MTT_RXF1S_F1FL_MASK (((1<<7)-1) << MTT_RXF1S_F1FL_SHIFT) +#define MTT_RXF1S_F1GI_SHIFT 8 +#define MTT_RXF1S_F1GI_MASK (((1<<6)-1) << MTT_RXF1S_F1GI_SHIFT) +#define MTT_RXF1S_F1PI_SHIFT 16 +#define MTT_RXF1S_F1PI_MASK (((1<<6)-1) << MTT_RXF1S_F1PI_SHIFT) +#define MTT_RXF1S_F1F_SHIFT 24 +#define MTT_RXF1S_F1F_MASK (((1<<1)-1) << MTT_RXF1S_F1F_SHIFT) +#define MTT_RXF1S_RF1L_SHIFT 25 +#define MTT_RXF1S_RF1L_MASK (((1<<1)-1) << MTT_RXF1S_RF1L_SHIFT) +#define MTT_RXF1S_DMS_SHIFT 30 +#define MTT_RXF1S_DMS_MASK (((1<<2)-1) << MTT_RXF1S_DMS_SHIFT) + +#define MTT_RXF1A_F1AI_SHIFT 0 +#define MTT_RXF1A_F1AI_MASK (((1<<6)-1) << MTT_RXF1A_F1AI_SHIFT) + +#define MTT_RXESC_F0DS_SHIFT 0 +#define MTT_RXESC_F0DS_MASK (((1<<3)-1) << MTT_RXESC_F0DS_SHIFT) +#define MTT_RXESC_F1DS_SHIFT 4 +#define MTT_RXESC_F1DS_MASK (((1<<3)-1) << MTT_RXESC_F1DS_SHIFT) +#define MTT_RXESC_RBDS_SHIFT 8 +#define MTT_RXESC_RBDS_MASK (((1<<3)-1) << MTT_RXESC_RBDS_SHIFT) + +#define MTT_TXESC_TBDS_SHIFT 0 +#define MTT_TXESC_TBDS_MASK (((1<<3)-1) << MTT_TXESC_TBDS_SHIFT) + +#define MTT_TXBC_TBSA_SHIFT 2 +#define MTT_TXBC_TBSA_MASK (((1<<14)-1) << MTT_TXBC_TBSA_SHIFT) +#define MTT_TXBC_NDTB_SHIFT 16 +#define MTT_TXBC_NDTB_MASK (((1<<6)-1) << MTT_TXBC_NDTB_SHIFT) +#define MTT_TXBC_TFQS_SHIFT 24 +#define MTT_TXBC_TFQS_MASK (((1<<6)-1) << MTT_TXBC_TFQS_SHIFT) +#define MTT_TXBC_TFQM_SHIFT 30 +#define MTT_TXBC_TFQM_MASK (((1<<1)-1) << MTT_TXBC_TFQM_SHIFT) + +#define MTT_TXFQS_TFFL_SHIFT 0 +#define MTT_TXFQS_TFFL_MASK (((1<<6)-1) << MTT_TXFQS_TFFL_SHIFT) +#define MTT_TXFQS_TFGI_SHIFT 8 +#define MTT_TXFQS_TFGI_MASK (((1<<5)-1) << MTT_TXFQS_TFGI_SHIFT) +#define MTT_TXFQS_TFQPI_SHIFT 16 +#define MTT_TXFQS_TFQPI_MASK (((1<<5)-1) << MTT_TXFQS_TFQPI_SHIFT) +#define MTT_TXFQS_TFQF_SHIFT 21 +#define MTT_TXFQS_TFQF_MASK (((1<<1)-1) << MTT_TXFQS_TFQF_SHIFT) + +#define MTT_TXEFC_EFSA_SHIFT 2 +#define MTT_TXEFC_EFSA_MASK 
(((1<<14)-1) << MTT_TXEFC_EFSA_SHIFT) +#define MTT_TXEFC_EFS_SHIFT 16 +#define MTT_TXEFC_EFS_MASK (((1<<6)-1) << MTT_TXEFC_EFS_SHIFT) +#define MTT_TXEFC_EFWM_SHIFT 24 +#define MTT_TXEFC_EFWM_MASK (((1<<6)-1) << MTT_TXEFC_EFWM_SHIFT) + +#define MTT_TXEFS_EFFL_SHIFT 0 +#define MTT_TXEFS_EFFL_MASK (((1<<6)-1) << MTT_TXEFS_EFFL_SHIFT) +#define MTT_TXEFS_EFGI_SHIFT 8 +#define MTT_TXEFS_EFGI_MASK (((1<<5)-1) << MTT_TXEFS_EFGI_SHIFT) +#define MTT_TXEFS_EFPI_SHIFT 16 +#define MTT_TXEFS_EFPI_MASK (((1<<5)-1) << MTT_TXEFS_EFPI_SHIFT) +#define MTT_TXEFS_EFF_SHIFT 24 +#define MTT_TXEFS_EFF_MASK (((1<<1)-1) << MTT_TXEFS_EFF_SHIFT) +#define MTT_TXEFS_TFFL_SHIFT 25 +#define MTT_TXEFS_TFFL_MASK (((1<<1)-1) << MTT_TXEFS_TFFL_SHIFT) + +#define MTT_TXEFA_EFAI_SHIFT 0 +#define MTT_TXEFA_EFAI_MASK (((1<<5)-1) << MTT_TXEFA_EFAI_SHIFT) + +#define MTT_TTTMC_TMSA_SHIFT 2 +#define MTT_TTTMC_TMSA_MASK (((1<<14)-1) << MTT_TTTMC_TMSA_SHIFT) +#define MTT_TTTMC_TME_SHIFT 16 +#define MTT_TTTMC_TME_MASK (((1<<7)-1) << MTT_TTTMC_TME_SHIFT) + +#define MTT_TTRMC_RID_SHIFT 0 +#define MTT_TTRMC_RID_MASK (((1<<29)-1) << MTT_TTRMC_RID_SHIFT) +#define MTT_TTRMC_XTD_SHIFT 30 +#define MTT_TTRMC_XTD_MASK (((1<<1)-1) << MTT_TTRMC_XTD_SHIFT) +#define MTT_TTRMC_RMPS_SHIFT 31 +#define MTT_TTRMC_RMPS_MASK (((1<<1)-1) << MTT_TTRMC_RMPS_SHIFT) + +#define MTT_TTOCF_OM_SHIFT 0 +#define MTT_TTOCF_OM_MASK (((1<<2)-1) << MTT_TTOCF_OM_SHIFT) +#define MTT_TTOCF_GEN_SHIFT 3 +#define MTT_TTOCF_GEN_MASK (((1<<1)-1) << MTT_TTOCF_GEN_SHIFT) +#define MTT_TTOCF_TM_SHIFT 4 +#define MTT_TTOCF_TM_MASK (((1<<1)-1) << MTT_TTOCF_TM_SHIFT) +#define MTT_TTOCF_LDSDL_SHIFT 5 +#define MTT_TTOCF_LDSDL_MASK (((1<<3)-1) << MTT_TTOCF_LDSDL_SHIFT) +#define MTT_TTOCF_IRTO_SHIFT 8 +#define MTT_TTOCF_IRTO_MASK (((1<<7)-1) << MTT_TTOCF_IRTO_SHIFT) +#define MTT_TTOCF_EECS_SHIFT 15 +#define MTT_TTOCF_EECS_MASK (((1<<1)-1) << MTT_TTOCF_EECS_SHIFT) +#define MTT_TTOCF_AWL_SHIFT 16 +#define MTT_TTOCF_AWL_MASK (((1<<8)-1) << MTT_TTOCF_AWL_SHIFT) +#define 
MTT_TTOCF_EGTF_SHIFT 24 +#define MTT_TTOCF_EGTF_MASK (((1<<1)-1) << MTT_TTOCF_EGTF_SHIFT) +#define MTT_TTOCF_ECC_SHIFT 25 +#define MTT_TTOCF_ECC_MASK (((1<<1)-1) << MTT_TTOCF_ECC_SHIFT) +#define MTT_TTOCF_EVTP_SHIFT 26 +#define MTT_TTOCF_EVTP_MASK (((1<<1)-1) << MTT_TTOCF_EVTP_SHIFT) + +#define MTT_TTMLM_CCM_SHIFT 0 +#define MTT_TTMLM_CCM_MASK (((1<<6)-1) << MTT_TTMLM_CCM_SHIFT) +#define MTT_TTMLM_CSS_SHIFT 6 +#define MTT_TTMLM_CSS_MASK (((1<<2)-1) << MTT_TTMLM_CSS_SHIFT) +#define MTT_TTMLM_TXEW_SHIFT 8 +#define MTT_TTMLM_TXEW_MASK (((1<<4)-1) << MTT_TTMLM_TXEW_SHIFT) +#define MTT_TTMLM_ENTT_SHIFT 16 +#define MTT_TTMLM_ENTT_MASK (((1<<12)-1) << MTT_TTMLM_ENTT_SHIFT) + +#define MTT_TURCF_NCL_SHIFT 0 +#define MTT_TURCF_NCL_MASK (((1<<16)-1) << MTT_TURCF_NCL_SHIFT) +#define MTT_TURCF_DC_SHIFT 16 +#define MTT_TURCF_DC_MASK (((1<<14)-1) << MTT_TURCF_DC_SHIFT) +#define MTT_TURCF_ELT_SHIFT 31 +#define MTT_TURCF_ELT_MASK (((1<<1)-1) << MTT_TURCF_ELT_SHIFT) + +#define MTT_TTOCN_SGT_SHIFT 0 +#define MTT_TTOCN_SGT_MASK (((1<<1)-1) << MTT_TTOCN_SGT_SHIFT) +#define MTT_TTOCN_ECS_SHIFT 1 +#define MTT_TTOCN_ECS_MASK (((1<<1)-1) << MTT_TTOCN_ECS_SHIFT) +#define MTT_TTOCN_SWP_SHIFT 2 +#define MTT_TTOCN_SWP_MASK (((1<<1)-1) << MTT_TTOCN_SWP_SHIFT) +#define MTT_TTOCN_SWS_SHIFT 3 +#define MTT_TTOCN_SWS_MASK (((1<<2)-1) << MTT_TTOCN_SWS_SHIFT) +#define MTT_TTOCN_RTIE_SHIFT 5 +#define MTT_TTOCN_RTIE_MASK (((1<<1)-1) << MTT_TTOCN_RTIE_SHIFT) +#define MTT_TTOCN_TMC_SHIFT 6 +#define MTT_TTOCN_TMC_MASK (((1<<2)-1) << MTT_TTOCN_TMC_SHIFT) +#define MTT_TTOCN_TTIE_SHIFT 8 +#define MTT_TTOCN_TTIE_MASK (((1<<1)-1) << MTT_TTOCN_TTIE_SHIFT) +#define MTT_TTOCN_GCS_SHIFT 9 +#define MTT_TTOCN_GCS_MASK (((1<<1)-1) << MTT_TTOCN_GCS_SHIFT) +#define MTT_TTOCN_FGP_SHIFT 10 +#define MTT_TTOCN_FGP_MASK (((1<<1)-1) << MTT_TTOCN_FGP_SHIFT) +#define MTT_TTOCN_TMG_SHIFT 11 +#define MTT_TTOCN_TMG_MASK (((1<<1)-1) << MTT_TTOCN_TMG_SHIFT) +#define MTT_TTOCN_NIG_SHIFT 12 +#define MTT_TTOCN_NIG_MASK (((1<<1)-1) << 
MTT_TTOCN_NIG_SHIFT) +#define MTT_TTOCN_ESCN_SHIFT 13 +#define MTT_TTOCN_ESCN_MASK (((1<<1)-1) << MTT_TTOCN_ESCN_SHIFT) +#define MTT_TTOCN_LCKC_SHIFT 15 +#define MTT_TTOCN_LCKC_MASK (((1<<1)-1) << MTT_TTOCN_LCKC_SHIFT) + +#define MTT_TTGTP_TP_SHIFT 0 +#define MTT_TTGTP_TP_MASK (((1<<16)-1) << MTT_TTGTP_TP_SHIFT) +#define MTT_TTGTP_16_SHIFT 16 +#define MTT_TTGTP_16_MASK (((1<<16)-1) << MTT_TTGTP_16_SHIFT) + +#define MTT_TTTMK_TM_SHIFT 0 +#define MTT_TTTMK_TM_MASK (((1<<16)-1) << MTT_TTTMK_TM_SHIFT) +#define MTT_TTTMK_TICC_SHIFT 16 +#define MTT_TTTMK_TICC_MASK (((1<<7)-1) << MTT_TTTMK_TICC_SHIFT) +#define MTT_TTTMK_LCKM_SHIFT 31 +#define MTT_TTTMK_LCKM_MASK (((1<<1)-1) << MTT_TTTMK_LCKM_SHIFT) + +#define MTT_TTIR_SBC_SHIFT 0 +#define MTT_TTIR_SBC_MASK (((1<<1)-1) << MTT_TTIR_SBC_SHIFT) +#define MTT_TTIR_SMC_SHIFT 1 +#define MTT_TTIR_SMC_MASK (((1<<1)-1) << MTT_TTIR_SMC_SHIFT) +#define MTT_TTIR_CSM_SHIFT 2 +#define MTT_TTIR_CSM_MASK (((1<<1)-1) << MTT_TTIR_CSM_SHIFT) +#define MTT_TTIR_SOG_SHIFT 3 +#define MTT_TTIR_SOG_MASK (((1<<1)-1) << MTT_TTIR_SOG_SHIFT) +#define MTT_TTIR_RTMI_SHIFT 4 +#define MTT_TTIR_RTMI_MASK (((1<<1)-1) << MTT_TTIR_RTMI_SHIFT) +#define MTT_TTIR_TTMI_SHIFT 5 +#define MTT_TTIR_TTMI_MASK (((1<<1)-1) << MTT_TTIR_TTMI_SHIFT) +#define MTT_TTIR_SWE_SHIFT 6 +#define MTT_TTIR_SWE_MASK (((1<<1)-1) << MTT_TTIR_SWE_SHIFT) +#define MTT_TTIR_GTW_SHIFT 7 +#define MTT_TTIR_GTW_MASK (((1<<1)-1) << MTT_TTIR_GTW_SHIFT) +#define MTT_TTIR_GTD_SHIFT 8 +#define MTT_TTIR_GTD_MASK (((1<<1)-1) << MTT_TTIR_GTD_SHIFT) +#define MTT_TTIR_GTE_SHIFT 9 +#define MTT_TTIR_GTE_MASK (((1<<1)-1) << MTT_TTIR_GTE_SHIFT) +#define MTT_TTIR_TXU_SHIFT 10 +#define MTT_TTIR_TXU_MASK (((1<<1)-1) << MTT_TTIR_TXU_SHIFT) +#define MTT_TTIR_TXO_SHIFT 11 +#define MTT_TTIR_TXO_MASK (((1<<1)-1) << MTT_TTIR_TXO_SHIFT) +#define MTT_TTIR_SE1_SHIFT 12 +#define MTT_TTIR_SE1_MASK (((1<<1)-1) << MTT_TTIR_SE1_SHIFT) +#define MTT_TTIR_SE2_SHIFT 13 +#define MTT_TTIR_SE2_MASK (((1<<1)-1) << MTT_TTIR_SE2_SHIFT)
+#define MTT_TTIR_ELC_SHIFT 14 +#define MTT_TTIR_ELC_MASK (((1<<1)-1) << MTT_TTIR_ELC_SHIFT) +#define MTT_TTIR_IWT_SHIFT 15 +#define MTT_TTIR_IWT_MASK (((1<<1)-1) << MTT_TTIR_IWT_SHIFT) +#define MTT_TTIR_WT_SHIFT 16 +#define MTT_TTIR_WT_MASK (((1<<1)-1) << MTT_TTIR_WT_SHIFT) +#define MTT_TTIR_AW_SHIFT 17 +#define MTT_TTIR_AW_MASK (((1<<1)-1) << MTT_TTIR_AW_SHIFT) +#define MTT_TTIR_CER_SHIFT 18 +#define MTT_TTIR_CER_MASK (((1<<1)-1) << MTT_TTIR_CER_SHIFT) + +#define MTT_TTIRE_SBCE_SHIFT 0 +#define MTT_TTIRE_SBCE_MASK (((1<<1)-1) << MTT_TTIRE_SBCE_SHIFT) +#define MTT_TTIRE_SMCE_SHIFT 1 +#define MTT_TTIRE_SMCE_MASK (((1<<1)-1) << MTT_TTIRE_SMCE_SHIFT) +#define MTT_TTIRE_CSME_SHIFT 2 +#define MTT_TTIRE_CSME_MASK (((1<<1)-1) << MTT_TTIRE_CSME_SHIFT) +#define MTT_TTIRE_SOGE_SHIFT 3 +#define MTT_TTIRE_SOGE_MASK (((1<<1)-1) << MTT_TTIRE_SOGE_SHIFT) +#define MTT_TTIRE_RTMIE_SHIFT 4 +#define MTT_TTIRE_RTMIE_MASK (((1<<1)-1) << MTT_TTIRE_RTMIE_SHIFT) +#define MTT_TTIRE_TTMIE_SHIFT 5 +#define MTT_TTIRE_TTMIE_MASK (((1<<1)-1) << MTT_TTIRE_TTMIE_SHIFT) +#define MTT_TTIRE_SWEE_SHIFT 6 +#define MTT_TTIRE_SWEE_MASK (((1<<1)-1) << MTT_TTIRE_SWEE_SHIFT) +#define MTT_TTIRE_GTWE_SHIFT 7 +#define MTT_TTIRE_GTWE_MASK (((1<<1)-1) << MTT_TTIRE_GTWE_SHIFT) +#define MTT_TTIRE_GTDE_SHIFT 8 +#define MTT_TTIRE_GTDE_MASK (((1<<1)-1) << MTT_TTIRE_GTDE_SHIFT) +#define MTT_TTIRE_GTEE_SHIFT 9 +#define MTT_TTIRE_GTEE_MASK (((1<<1)-1) << MTT_TTIRE_GTEE_SHIFT) +#define MTT_TTIRE_TXUE_SHIFT 10 +#define MTT_TTIRE_TXUE_MASK (((1<<1)-1) << MTT_TTIRE_TXUE_SHIFT) +#define MTT_TTIRE_TXOE_SHIFT 11 +#define MTT_TTIRE_TXOE_MASK (((1<<1)-1) << MTT_TTIRE_TXOE_SHIFT) +#define MTT_TTIRE_SE1E_SHIFT 12 +#define MTT_TTIRE_SE1E_MASK (((1<<1)-1) << MTT_TTIRE_SE1E_SHIFT) +#define MTT_TTIRE_SE2E_SHIFT 13 +#define MTT_TTIRE_SE2E_MASK (((1<<1)-1) << MTT_TTIRE_SE2E_SHIFT) +#define MTT_TTIRE_ELCE_SHIFT 14 +#define MTT_TTIRE_ELCE_MASK (((1<<1)-1) << MTT_TTIRE_ELCE_SHIFT) +#define MTT_TTIRE_IWTE_SHIFT 15 +#define 
MTT_TTIRE_IWTE_MASK (((1<<1)-1) << MTT_TTIRE_IWTE_SHIFT) +#define MTT_TTIRE_WTE_SHIFT 16 +#define MTT_TTIRE_WTE_MASK (((1<<1)-1) << MTT_TTIRE_WTE_SHIFT) +#define MTT_TTIRE_AWE_SHIFT 17 +#define MTT_TTIRE_AWE_MASK (((1<<1)-1) << MTT_TTIRE_AWE_SHIFT) +#define MTT_TTIRE_CERE_SHIFT 18 +#define MTT_TTIRE_CERE_MASK (((1<<1)-1) << MTT_TTIRE_CERE_SHIFT) + +#define MTT_STD_FLTR_SFID2_SHIFT 0 +#define MTT_STD_FLTR_SFID2_MASK (((1<<11)-1) << MTT_STD_FLTR_SFID2_SHIFT) +#define MTT_STD_FLTR_SFID1_SHIFT 16 +#define MTT_STD_FLTR_SFID1_MASK (((1<<11)-1) << MTT_STD_FLTR_SFID1_SHIFT) +#define MTT_STD_FLTR_SFEC_SHIFT 27 +#define MTT_STD_FLTR_SFEC_MASK (((1<<3)-1) << MTT_STD_FLTR_SFEC_SHIFT) +#define MTT_STD_FLTR_SFT_SHIFT 30 +#define MTT_STD_FLTR_SFT_MASK (((1<<2)-1) << MTT_STD_FLTR_SFT_SHIFT) + +#define MTT_XTD_FLTR_F1_EFID2_SHIFT 0 +#define MTT_XTD_FLTR_F1_EFID2_MASK (((1<<29)-1) << MTT_XTD_FLTR_F1_EFID2_SHIFT) +#define MTT_XTD_FLTR_F1_EFT_SHIFT 30 +#define MTT_XTD_FLTR_F1_EFT_MASK (((1<<2)-1) << MTT_XTD_FLTR_F1_EFT_SHIFT) +#define MTT_XTD_FLTR_F0_EFID1_SHIFT 0 +#define MTT_XTD_FLTR_F0_EFID1_MASK (((1<<29)-1) << MTT_XTD_FLTR_F0_EFID1_SHIFT) +#define MTT_XTD_FLTR_F0_EFEC_SHIFT 29 +#define MTT_XTD_FLTR_F0_EFEC_MASK (((1<<3)-1) << MTT_XTD_FLTR_F0_EFEC_SHIFT) + +#define MTT_TXEVT_ELE_F1_TXTS_SHIFT 0 +#define MTT_TXEVT_ELE_F1_TXTS_MASK (((1<<16)-1) << MTT_TXEVT_ELE_F1_TXTS_SHIFT) +#define MTT_TXEVT_ELE_F1_DLC_SHIFT 16 +#define MTT_TXEVT_ELE_F1_DLC_MASK (((1<<4)-1) << MTT_TXEVT_ELE_F1_DLC_SHIFT) +#define MTT_TXEVT_ELE_F1_BRS_SHIFT 20 +#define MTT_TXEVT_ELE_F1_BRS_MASK (((1<<1)-1) << MTT_TXEVT_ELE_F1_BRS_SHIFT) +#define MTT_TXEVT_ELE_F1_FDF_SHIFT 21 +#define MTT_TXEVT_ELE_F1_FDF_MASK (((1<<1)-1) << MTT_TXEVT_ELE_F1_FDF_SHIFT) +#define MTT_TXEVT_ELE_F1_ET_SHIFT 22 +#define MTT_TXEVT_ELE_F1_ET_MASK (((1<<2)-1) << MTT_TXEVT_ELE_F1_ET_SHIFT) +#define MTT_TXEVT_ELE_F1_MM_SHIFT 24 +#define MTT_TXEVT_ELE_F1_MM_MASK (((1<<8)-1) << MTT_TXEVT_ELE_F1_MM_SHIFT) +#define MTT_TXEVT_ELE_F0_ID_SHIFT 0 
+#define MTT_TXEVT_ELE_F0_ID_MASK (((1<<29)-1) << MTT_TXEVT_ELE_F0_ID_SHIFT) +#define MTT_TXEVT_ELE_F0_RTR_SHIFT 29 +#define MTT_TXEVT_ELE_F0_RTR_MASK (((1<<1)-1) << MTT_TXEVT_ELE_F0_RTR_SHIFT) +#define MTT_TXEVT_ELE_F0_XTD_SHIFT 30 +#define MTT_TXEVT_ELE_F0_XTD_MASK (((1<<1)-1) << MTT_TXEVT_ELE_F0_XTD_SHIFT) +#define MTT_TXEVT_ELE_F0_ESI_SHIFT 31 +#define MTT_TXEVT_ELE_F0_ESI_MASK (((1<<1)-1) << MTT_TXEVT_ELE_F0_ESI_SHIFT) + +#define MTT_TRIG_ELE_F1_MSC_SHIFT 0 +#define MTT_TRIG_ELE_F1_MSC_MASK (((1<<3)-1) << MTT_TRIG_ELE_F1_MSC_SHIFT) +#define MTT_TRIG_ELE_F1_MNR_SHIFT 16 +#define MTT_TRIG_ELE_F1_MNR_MASK (((1<<7)-1) << MTT_TRIG_ELE_F1_MNR_SHIFT) +#define MTT_TRIG_ELE_F1_FTYPE_SHIFT 23 +#define MTT_TRIG_ELE_F1_FTYPE_MASK (((1<<1)-1) << MTT_TRIG_ELE_F1_FTYPE_SHIFT) +#define MTT_TRIG_ELE_F0_TYPE_SHIFT 0 +#define MTT_TRIG_ELE_F0_TYPE_MASK (((1<<4)-1) << MTT_TRIG_ELE_F0_TYPE_SHIFT) +#define MTT_TRIG_ELE_F0_TMEX_SHIFT 4 +#define MTT_TRIG_ELE_F0_TMEX_MASK (((1<<1)-1) << MTT_TRIG_ELE_F0_TMEX_SHIFT) +#define MTT_TRIG_ELE_F0_TMIN_SHIFT 5 +#define MTT_TRIG_ELE_F0_TMIN_MASK (((1<<1)-1) << MTT_TRIG_ELE_F0_TMIN_SHIFT) +#define MTT_TRIG_ELE_F0_ASC_SHIFT 6 +#define MTT_TRIG_ELE_F0_ASC_MASK (((1<<2)-1) << MTT_TRIG_ELE_F0_ASC_SHIFT) +#define MTT_TRIG_ELE_F0_CC_SHIFT 8 +#define MTT_TRIG_ELE_F0_CC_MASK (((1<<7)-1) << MTT_TRIG_ELE_F0_CC_SHIFT) +#define MTT_TRIG_ELE_F0_TM_SHIFT 16 +#define MTT_TRIG_ELE_F0_TM_MASK (((1<<16)-1) << MTT_TRIG_ELE_F0_TM_SHIFT) + +/* Extended Message ID Filter */ +struct mttcan_xtd_id_filt_element { + unsigned int f1; + unsigned int f0; +}; + +/* Tx Event FIFO Element */ +struct mttcan_tx_evt_element { + unsigned int f1; + unsigned int f0; +}; + +struct mttcan_trig_mem_element { + unsigned int f1; + unsigned int f0; +}; + +/* Rx Buffer */ +#define RX_BUF_ESI BIT(31) +#define RX_BUF_XTD BIT(30) +#define RX_BUF_RTR BIT(29) +#define RX_BUF_STDID_SHIFT 18 +#define RX_BUF_STDID_MASK (((1<<11)-1) << RX_BUF_STDID_SHIFT) +#define RX_BUF_EXTID_MASK ((1<<29)-1) + 
+#define RX_BUF_ANMF BIT(31) +#define RX_BUF_FIDX_SHIFT 24 +#define RX_BUF_FIDX_MASK (((1<<7)-1) << RX_BUF_FIDX_SHIFT) +#define RX_BUF_FDF BIT(21) +#define RX_BUF_BRS BIT(20) +#define RX_BUF_DLC_SHIFT 16 +#define RX_BUF_DLC_MASK (((1<<4)-1) << RX_BUF_DLC_SHIFT) +#define RX_BUF_RXTS_SHIFT 0 +#define RX_BUF_RXTS_MASK (((1<<16)-1) << RX_BUF_RXTS_SHIFT) + +/* Tx Buffer */ +#define TX_BUF_ESI BIT(31) +#define TX_BUF_XTD BIT(30) +#define TX_BUF_RTR BIT(29) +#define TX_BUF_STDID_SHIFT 18 +#define TX_BUF_STDID_MASK (((1<<11)-1) << TX_BUF_STDID_SHIFT) +#define TX_BUF_EXTID_MASK ((1<<29)-1) + +#define TX_BUF_MM_SHIFT 24 +#define TX_BUF_MM_MASK (0xFF << TX_BUF_MM_SHIFT) +#define TX_BUF_EFC BIT(23) +#define TX_BUF_FDF BIT(21) +#define TX_BUF_BRS BIT(20) +#define TX_BUF_DLC_SHIFT 16 +#define TX_BUF_DLC_MASK (0xF << TX_BUF_DLC_SHIFT) + +/* Glue logic apperature */ +#define ADDR_M_TTCAN_IR 0x00 +#define ADDR_M_TTCAN_TTIR 0x04 +#define ADDR_M_TTCAN_TXBRP 0x08 +#define ADDR_M_TTCAN_FD_DATA 0x0C +#define ADDR_M_TTCAN_STATUS_REG 0x10 +#define ADDR_M_TTCAN_CNTRL_REG 0x14 +#define ADDR_M_TTCAN_DMA_INTF0 0x18 +#define ADDR_M_TTCAN_CLK_STOP 0x1C +#define ADDR_M_TTCAN_HSM_MASK0 0x20 +#define ADDR_M_TTCAN_HSM_MASK1 0x24 +#define ADDR_M_TTCAN_EXT_SYC_SLT 0x28 +#define ADDR_M_TTCAN_HSM_SW_OVRD 0x2C +#define ADDR_M_TTCAN_TIME_STAMP 0x30 + +#define M_TTCAN_CNTRL_REG_COK (1<<3) +#define M_TTCAN_TIME_STAMP_OFFSET_SEL 4 + +#endif /* M_TTCAN_REGDEF_H_ */ diff --git a/drivers/net/can/mttcan/native/m_ttcan_linux.c b/drivers/net/can/mttcan/native/m_ttcan_linux.c new file mode 100644 index 00000000..1deabdd8 --- /dev/null +++ b/drivers/net/can/mttcan/native/m_ttcan_linux.c @@ -0,0 +1,1972 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include "../include/m_ttcan.h" +#include +#include + +static void mttcan_start(struct net_device *dev); + +/* We are reading cntvct_el0 for TSC time. 
An ISB is not issued before
+ * reading the counter: by the time the CAN IRQ arrives and the CAN
+ * softirq runs, plenty of instructions have already executed, and we
+ * only need the counter to be read after the CAN HW has captured the
+ * timestamp, not before it.
+ */
+static inline u64 _arch_counter_get_cntvct(void)
+{
+	u64 cval;
+
+	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+
+	return cval;
+}
+
+/* Reconstruct the full 64-bit TSC time of a CAN HW capture.
+ * @captured: 16-bit timestamp latched by the CAN HW
+ * @tsc: current TSC value, read after the capture happened
+ * @shift: left-shift aligning CAN timestamp ticks to TSC ticks
+ */
+static u64 mttcan_extend_timestamp(u16 captured, u64 tsc, u32 shift)
+{
+	u64 aligned_capture;
+	u64 masked_tsc;
+	u64 top_tsc;
+
+	aligned_capture = ((u64)captured) << shift;
+	masked_tsc = tsc & (MTTCAN_TSC_MASK << shift);
+	top_tsc = (tsc >> (MTTCAN_TSC_SIZE + shift)) <<
+		(MTTCAN_TSC_SIZE + shift);
+
+	/* The capture is assumed to be in the past.  If the 16 bits did
+	 * not roll over since the capture, masked_tsc >= aligned_capture
+	 * and top_tsc can be used as is.  If they did roll over,
+	 * masked_tsc < aligned_capture and top_tsc must be decreased by
+	 * one rollover period of the CAN timestamp.
+	 */
+	if (masked_tsc < aligned_capture)
+		top_tsc = top_tsc - (0x1ULL << (MTTCAN_TSC_SIZE + shift));
+
+	return (top_tsc | aligned_capture);
+}
+
+/* First part of one-time controller bring-up: interrupt-enable masks,
+ * message RAM configuration and clearing, XIDAM reset and Rx buffer /
+ * filter addressing.  Returns 0 on success or a non-zero ttcan HAL
+ * error code.
+ */
+static int mttcan_hw_init(struct mttcan_priv *priv)
+{
+	int err = 0;
+	u32 ie = 0, ttie = 0;
+	struct ttcan_controller *ttcan = priv->ttcan;
+
+	ttcan_set_ok(ttcan);
+
+	/* interrupt-enable masks used in IRQ mode, all zero when polling.
+	 * NOTE(review): magic values — presumably the relevant IR/TTIR
+	 * bits; verify against ttcan_controller_init().
+	 */
+	if (!priv->poll) {
+		ie = 0x3BBEF7FF;
+		ttie = 0x50C03;
+	}
+	err = ttcan_controller_init(ttcan, ie, ttie);
+	if (err)
+		return err;
+
+	err = ttcan_mesg_ram_config(ttcan, (u32 *)priv->mram_param,
+		(u32 *)priv->tx_conf, (u32 *)priv->rx_conf);
+	if (err)
+		return err;
+
+	/* initialize mttcan message RAM with 0s */
+	ttcan_mesg_ram_init(ttcan);
+
+	err = ttcan_set_config_change_enable(ttcan);
+	if (err)
+		return err;
+
+	/* Reset XIDAM to default */
+	priv->xidam_reg = DEF_MTTCAN_XIDAM;
+	ttcan_set_xidam(ttcan, DEF_MTTCAN_XIDAM);
+
+	/* Rx buffers set */
+	ttcan_set_rx_buffers_elements(ttcan);
+
+	ttcan_set_std_id_filter_addr(ttcan);
+
ttcan_set_xtd_id_filter_addr(ttcan); + + if (priv->sinfo->use_external_timer) + ttcan_set_time_stamp_conf(ttcan, 9, TS_EXTERNAL); + else + ttcan_set_time_stamp_conf(ttcan, 9, TS_INTERNAL); + + ttcan_set_txevt_fifo_conf(ttcan); + ttcan_set_tx_buffer_addr(ttcan); + + if (priv->tt_param[0]) { + dev_info(priv->device, "TTCAN Enabled\n"); + ttcan_disable_auto_retransmission(ttcan, true); + ttcan_set_trigger_mem_conf(ttcan); + ttcan_set_tur_config(ttcan, 0x0800, 0x0000, 1); + } + + if (ttcan->mram_cfg[MRAM_SIDF].num) { + priv->std_shadow = devm_kzalloc(priv->device, + (ttcan->mram_cfg[MRAM_SIDF].num * SIDF_ELEM_SIZE), + GFP_KERNEL); + if (!priv->std_shadow) + return -ENOMEM; + ttcan_prog_std_id_fltrs(ttcan, priv->std_shadow); + } + if (ttcan->mram_cfg[MRAM_XIDF].num) { + priv->xtd_shadow = devm_kzalloc(priv->device, + (ttcan->mram_cfg[MRAM_XIDF].num * XIDF_ELEM_SIZE), + GFP_KERNEL); + if (!priv->xtd_shadow) + return -ENOMEM; + ttcan_prog_xtd_id_fltrs(ttcan, priv->xtd_shadow); + } + if (ttcan->mram_cfg[MRAM_TMC].num) { + priv->tmc_shadow = devm_kzalloc(priv->device, + (ttcan->mram_cfg[MRAM_TMC].num * TRIG_ELEM_SIZE), + GFP_KERNEL); + if (!priv->tmc_shadow) + return -ENOMEM; + ttcan_prog_trigger_mem(ttcan, priv->tmc_shadow); + } + + ttcan_print_version(ttcan); + + raw_spin_lock_init(&priv->tc_lock); + spin_lock_init(&priv->tslock); + spin_lock_init(&priv->tx_lock); + + return err; +} + +static inline void mttcan_hw_deinit(const struct mttcan_priv *priv) +{ + struct ttcan_controller *ttcan = priv->ttcan; + ttcan_set_init(ttcan); +} + +static int mttcan_hw_reinit(const struct mttcan_priv *priv) +{ + int err = 0; + + struct ttcan_controller *ttcan = priv->ttcan; + + ttcan_set_ok(ttcan); + + err = ttcan_set_config_change_enable(ttcan); + if (err) + return err; + + /* Reset XIDAM to default */ + ttcan_set_xidam(ttcan, priv->xidam_reg); + + /* Rx buffers set */ + ttcan_set_rx_buffers_elements(ttcan); + + ttcan_set_std_id_filter_addr(ttcan); + ttcan_set_xtd_id_filter_addr(ttcan); 
+ ttcan_set_time_stamp_conf(ttcan, 9, TS_INTERNAL); + ttcan_set_txevt_fifo_conf(ttcan); + + ttcan_set_tx_buffer_addr(ttcan); + + if (priv->tt_param[0]) { + dev_info(priv->device, "TTCAN Enabled\n"); + ttcan_disable_auto_retransmission(ttcan, true); + ttcan_set_trigger_mem_conf(ttcan); + ttcan_set_tur_config(ttcan, 0x0800, 0x0000, 1); + } + + if (ttcan->mram_cfg[MRAM_SIDF].num) + ttcan_prog_std_id_fltrs(ttcan, priv->std_shadow); + if (ttcan->mram_cfg[MRAM_XIDF].num) + ttcan_prog_xtd_id_fltrs(ttcan, priv->xtd_shadow); + if (ttcan->mram_cfg[MRAM_TMC].num) + ttcan_prog_trigger_mem(ttcan, priv->tmc_shadow); + + return err; +} + +static const struct can_bittiming_const mttcan_normal_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 255, + .tseg2_min = 0, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 127, + .sjw_max = 127, + .brp_min = 1, + .brp_max = 511, + .brp_inc = 1, +}; + +static const struct can_bittiming_const mttcan_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 31, + .tseg2_min = 0, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 15, + .sjw_max = 15, + .brp_min = 1, + .brp_max = 15, + .brp_inc = 1, +}; + +static const struct tegra_mttcan_soc_info t186_mttcan_sinfo = { + .set_can_core_clk = false, + .can_core_clk_rate = 40000000, + .can_clk_rate = 40000000, + .use_external_timer = false, +}; + +static const struct tegra_mttcan_soc_info t194_mttcan_sinfo = { + .set_can_core_clk = true, + .can_core_clk_rate = 50000000, + .can_clk_rate = 200000000, + .use_external_timer = true, +}; + +static const struct of_device_id mttcan_of_table[] = { + { .compatible = "nvidia,tegra186-mttcan", .data = &t186_mttcan_sinfo}, + { .compatible = "nvidia,tegra194-mttcan", .data = &t194_mttcan_sinfo}, + {}, +}; + +MODULE_DEVICE_TABLE(of, mttcan_of_table); + +static inline void mttcan_pm_runtime_enable(const struct mttcan_priv 
*priv) +{ + if (priv->device) + pm_runtime_enable(priv->device); +} + +static inline void mttcan_pm_runtime_disable(const struct mttcan_priv *priv) +{ + if (priv->device) + pm_runtime_disable(priv->device); +} + +static inline void mttcan_pm_runtime_get_sync(const struct mttcan_priv *priv) +{ + if (priv->device) + pm_runtime_get_sync(priv->device); +} + +static inline void mttcan_pm_runtime_put_sync(const struct mttcan_priv *priv) +{ + if (priv->device) + pm_runtime_put_sync(priv->device); +} + +static void mttcan_handle_lost_frame(struct net_device *dev, int fifo_num) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + u32 ack_ir; + struct sk_buff *skb; + struct can_frame *frame; + + if (fifo_num) + ack_ir = MTT_IR_RF1L_MASK; + else + ack_ir = MTT_IR_RF0L_MASK; + ttcan_ir_write(priv->ttcan, ack_ir); + + skb = alloc_can_err_skb(dev, &frame); + if (unlikely(!skb)) + return; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_errors++; + stats->rx_over_errors++; + netif_receive_skb(skb); +} + +static void mttcan_rx_hwtstamp(struct mttcan_priv *priv, + struct sk_buff *skb, struct ttcanfd_frame *msg) +{ + u64 ns; + u64 tsc, extended_tsc; + unsigned long flags; + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + + if (priv->sinfo->use_external_timer) { + /* Read the current TSC and calculate the MSB of captured + * CAN TSC timestamp. Finally convert it to nsec. 
+ */ + tsc = _arch_counter_get_cntvct(); + extended_tsc = mttcan_extend_timestamp(msg->tstamp, tsc, + TSC_REF_CLK_SHIFT); + ns = extended_tsc << 5; + } else { + raw_spin_lock_irqsave(&priv->tc_lock, flags); + ns = timecounter_cyc2time(&priv->tc, msg->tstamp); + raw_spin_unlock_irqrestore(&priv->tc_lock, flags); + } + + memset(hwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static int mttcan_hpm_do_receive(struct net_device *dev, + struct ttcanfd_frame *msg) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct canfd_frame *fd_frame; + struct can_frame *frame; + + if (msg->flags & CAN_FD_FLAG) { + skb = alloc_canfd_skb(dev, &fd_frame); + if (!skb) { + stats->rx_dropped++; + return 0; + } + memcpy(fd_frame, msg, sizeof(struct canfd_frame)); + stats->rx_bytes += fd_frame->len; + } else { + skb = alloc_can_skb(dev, &frame); + if (!skb) { + stats->rx_dropped++; + return 0; + } + frame->can_id = msg->can_id; + frame->can_dlc = msg->d_len; + memcpy(frame->data, &msg->data, frame->can_dlc); + stats->rx_bytes += frame->can_dlc; + } + + if (priv->hwts_rx_en) + mttcan_rx_hwtstamp(priv, skb, msg); + + netif_receive_skb(skb); + stats->rx_packets++; + + return 1; +} + +static int mttcan_read_rcv_list(struct net_device *dev, + struct list_head *rcv, + enum ttcan_rx_type rx_type, + int rec_msgs, int quota) +{ + unsigned int pushed; + unsigned long flags; + struct mttcan_priv *priv = netdev_priv(dev); + struct ttcan_rx_msg_list *rx; + struct net_device_stats *stats = &dev->stats; + struct list_head *cur, *next, rx_q; + + if (list_empty(rcv)) + return 0; + + INIT_LIST_HEAD(&rx_q); + + spin_lock_irqsave(&priv->ttcan->lock, flags); + switch (rx_type) { + case BUFFER: + priv->ttcan->rxb_mem = 0; + priv->ttcan->list_status &= ~(BUFFER & 0xFF); + break; + case FIFO_0: + priv->ttcan->rxq0_mem = 0; + priv->ttcan->list_status &= ~(FIFO_0 & 0xFF); + break; + case 
FIFO_1: + priv->ttcan->rxq1_mem = 0; + priv->ttcan->list_status &= ~(FIFO_1 & 0xFF); + default: + break; + } + list_splice_init(rcv, &rx_q); + spin_unlock_irqrestore(&priv->ttcan->lock, flags); + + pushed = rec_msgs; + list_for_each_safe(cur, next, &rx_q) { + struct sk_buff *skb; + struct canfd_frame *fd_frame; + struct can_frame *frame; + if (!quota--) + break; + list_del_init(cur); + + rx = list_entry(cur, struct ttcan_rx_msg_list, recv_list); + if (rx->msg.flags & CAN_FD_FLAG) { + skb = alloc_canfd_skb(dev, &fd_frame); + if (!skb) { + stats->rx_dropped += pushed; + return 0; + } + memcpy(fd_frame, &rx->msg, sizeof(struct canfd_frame)); + stats->rx_bytes += fd_frame->len; + } else { + skb = alloc_can_skb(dev, &frame); + if (!skb) { + stats->rx_dropped += pushed; + return 0; + } + frame->can_id = rx->msg.can_id; + if (rx->msg.d_len > CAN_MAX_DLEN) { + netdev_warn(dev, "invalid datalen %d\n", + rx->msg.d_len); + frame->can_dlc = CAN_MAX_DLEN; + } else { + frame->can_dlc = rx->msg.d_len; + } + memcpy(frame->data, &rx->msg.data, frame->can_dlc); + stats->rx_bytes += frame->can_dlc; + } + + if (priv->hwts_rx_en) + mttcan_rx_hwtstamp(priv, skb, &rx->msg); + kfree(rx); + netif_receive_skb(skb); + stats->rx_packets++; + pushed--; + } + return rec_msgs - pushed; +} + +static int mttcan_state_change(struct net_device *dev, + enum can_state error_type) +{ + u32 ecr; + struct mttcan_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct can_berr_counter bec; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + ecr = ttcan_read_ecr(priv->ttcan); + bec.rxerr = (ecr & MTT_ECR_REC_MASK) >> MTT_ECR_REC_SHIFT; + bec.txerr = (ecr & MTT_ECR_TEC_MASK) >> MTT_ECR_TEC_SHIFT; + + switch (error_type) { + case CAN_STATE_ERROR_WARNING: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = 
CAN_STATE_ERROR_WARNING; + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + priv->can.can_stats.error_passive++; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + cf->can_id |= CAN_ERR_CRTL; + if (ecr & MTT_ECR_RP_MASK) + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (bec.txerr > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + priv->can.state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + /* + * disable all interrupts in bus-off mode to ensure that + * the CPU is not hogged down + */ + ttcan_set_intrpts(priv->ttcan, 0); + priv->can.can_stats.bus_off++; + + netif_carrier_off(dev); + + if (priv->can.restart_ms) + schedule_delayed_work(&priv->drv_restart_work, + msecs_to_jiffies(priv->can.restart_ms)); + + break; + default: + break; + } + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + return 1; +} + +static int mttcan_handle_bus_err(struct net_device *dev, + enum ttcan_lec_type lec_type) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + if (lec_type == LEC_NO_ERROR) + return 0; + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + /* common for all type of bus errors */ + priv->can.can_stats.bus_error++; + stats->rx_errors++; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + + switch (lec_type) { + case LEC_STUFF_ERROR: + netdev_err(dev, "Stuff Error Detected\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case LEC_FORM_ERROR: + netdev_err(dev, "Format Error Detected\n"); + cf->data[2] |= 
CAN_ERR_PROT_FORM; + break; + case LEC_ACK_ERROR: + if (printk_ratelimit()) + netdev_err(dev, "Acknowledgement Error Detected\n"); + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | + CAN_ERR_PROT_LOC_ACK_DEL); + break; + case LEC_BIT1_ERROR: + netdev_err(dev, "Bit1 Error Detected\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + case LEC_BIT0_ERROR: + netdev_err(dev, "Bit0 Error Detected\n"); + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + case LEC_CRC_ERROR: + netdev_err(dev, "CRC Error Detected\n"); + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + CAN_ERR_PROT_LOC_CRC_DEL); + break; + default: + break; + } + + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + return 1; +} + +static void mttcan_tx_event(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct ttcan_txevt_msg_list *evt; + struct list_head *cur, *next, evt_q; + struct mttcan_tx_evt_element txevt; + u32 xtd, id; + unsigned long flags; + + INIT_LIST_HEAD(&evt_q); + + spin_lock_irqsave(&priv->ttcan->lock, flags); + + if (list_empty(&priv->ttcan->tx_evt)) { + spin_unlock_irqrestore(&priv->ttcan->lock, flags); + return; + } + + priv->ttcan->evt_mem = 0; + priv->ttcan->list_status &= ~(TX_EVT & 0xFF); + list_splice_init(&priv->ttcan->tx_evt, &evt_q); + spin_unlock_irqrestore(&priv->ttcan->lock, flags); + + list_for_each_safe(cur, next, &evt_q) { + list_del_init(cur); + + evt = list_entry(cur, struct ttcan_txevt_msg_list, txevt_list); + memcpy(&txevt, &evt->txevt, + sizeof(struct mttcan_tx_evt_element)); + kfree(evt); + xtd = (txevt.f0 & MTT_TXEVT_ELE_F0_XTD_MASK) >> + MTT_TXEVT_ELE_F0_XTD_SHIFT; + id = (txevt.f0 & MTT_TXEVT_ELE_F0_ID_MASK) >> + MTT_TXEVT_ELE_F0_ID_SHIFT; + + pr_debug("%s:(index %u) ID %x(%s %s %s) Evt_Type %02d\n", + __func__, (txevt.f1 & MTT_TXEVT_ELE_F1_MM_MASK) >> + MTT_TXEVT_ELE_F1_MM_SHIFT, + xtd ? id : id >> 18, xtd ? "XTD" : "STD", + txevt.f1 & MTT_TXEVT_ELE_F1_FDF_MASK ? "FD" : "NON-FD", + txevt.f1 & MTT_TXEVT_ELE_F1_BRS_MASK ? 
"BRS" : "NOBRS", + (txevt.f1 & MTT_TXEVT_ELE_F1_ET_MASK) + >> MTT_TXEVT_ELE_F1_ET_SHIFT); + } +} + +static void mttcan_tx_complete(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct ttcan_controller *ttcan = priv->ttcan; + struct net_device_stats *stats = &dev->stats; + u32 msg_no; + u32 completed_tx; + + spin_lock(&priv->tx_lock); + completed_tx = ttcan_read_tx_complete_reg(ttcan); + + /* apply mask to consider only active CAN Tx transactions */ + completed_tx &= ttcan->tx_object; + + while (completed_tx) { + msg_no = ffs(completed_tx) - 1; + can_get_echo_skb(dev, msg_no, NULL); + can_led_event(dev, CAN_LED_EVENT_TX); + clear_bit(msg_no, &ttcan->tx_object); + stats->tx_packets++; + stats->tx_bytes += ttcan->tx_buf_dlc[msg_no]; + completed_tx &= ~(1U << msg_no); + } + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + spin_unlock(&priv->tx_lock); +} + +static void mttcan_tx_cancelled(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct ttcan_controller *ttcan = priv->ttcan; + struct net_device_stats *stats = &dev->stats; + u32 buff_bit, cancelled_reg, cancelled_msg, msg_no; + + spin_lock(&priv->tx_lock); + cancelled_reg = ttcan_read_tx_cancelled_reg(ttcan); + + /* cancelled_msg are newly cancelled message for current interrupt */ + cancelled_msg = (ttcan->tx_obj_cancelled ^ cancelled_reg) & + ~(ttcan->tx_obj_cancelled); + ttcan->tx_obj_cancelled = cancelled_reg; + + if (cancelled_msg && netif_queue_stopped(dev)) + netif_wake_queue(dev); + + while (cancelled_msg) { + msg_no = ffs(cancelled_msg) - 1; + buff_bit = 1U << msg_no; + if (ttcan->tx_object & buff_bit) { + can_free_echo_skb(dev, msg_no, NULL); + clear_bit(msg_no, &ttcan->tx_object); + cancelled_msg &= ~(buff_bit); + stats->tx_aborted_errors++; + } else { + pr_debug("%s TCF %x ttcan->tx_object %lx\n", __func__, + cancelled_msg, ttcan->tx_object); + break; + } + } + spin_unlock(&priv->tx_lock); +} + +static int mttcan_poll_ir(struct 
napi_struct *napi, int quota) +{ + int work_done = 0; + int rec_msgs = 0; + struct net_device *dev = napi->dev; + struct mttcan_priv *priv = netdev_priv(dev); + u32 ir, ack, ttir, ttack, psr; + + ir = priv->irqstatus; + ttir = priv->tt_irqstatus; + + netdev_dbg(dev, "IR %x\n", ir); + if (!ir && !ttir) + goto end; + + if (ir) { + if (ir & MTTCAN_ERR_INTR) { + psr = priv->ttcan->proto_state; + ack = ir & MTTCAN_ERR_INTR; + ttcan_ir_write(priv->ttcan, ack); + if ((ir & MTT_IR_EW_MASK) && (psr & MTT_PSR_EW_MASK)) { + work_done += mttcan_state_change(dev, + CAN_STATE_ERROR_WARNING); + netdev_warn(dev, + "entered error warning state\n"); + } + if ((ir & MTT_IR_EP_MASK) && (psr & MTT_PSR_EP_MASK)) { + work_done += mttcan_state_change(dev, + CAN_STATE_ERROR_PASSIVE); + netdev_err(dev, + "entered error passive state\n"); + } + if ((ir & MTT_IR_BO_MASK) && (psr & MTT_PSR_BO_MASK)) { + work_done += + mttcan_state_change(dev, CAN_STATE_BUS_OFF); + netdev_err(dev, "entered bus off state\n"); + } + if (((ir & MTT_IR_EP_MASK) && !(psr & MTT_PSR_EP_MASK)) + || ((ir & MTT_IR_EW_MASK) && + !(psr & MTT_PSR_EW_MASK))) { + if (ir & MTT_IR_EP_MASK) { + netdev_dbg(dev, + "left error passive state\n"); + priv->can.state = + CAN_STATE_ERROR_WARNING; + } else { + netdev_dbg(dev, + "left error warning state\n"); + priv->can.state = + CAN_STATE_ERROR_ACTIVE; + } + } + + /* Handle Bus error change */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + if ((ir & MTT_IR_PED_MASK) || + (ir & MTT_IR_PEA_MASK)) { + enum ttcan_lec_type lec; + + if (ir & MTT_IR_PEA_MASK) + lec = (psr & MTT_PSR_LEC_MASK) + >> MTT_PSR_LEC_SHIFT; + else + lec = (psr & MTT_PSR_DLEC_MASK) + >> MTT_PSR_DLEC_SHIFT; + work_done += + mttcan_handle_bus_err(dev, lec); + + if (printk_ratelimit()) + netdev_err(dev, + "IR %#x PSR %#x\n", + ir, psr); + } + } + if (ir & MTT_IR_WDI_MASK) + netdev_warn(dev, + "Message RAM watchdog not handled\n"); + + if (ir & MTT_IR_BEC_MASK) + netdev_warn(dev, "mram Bit error detected" + 
"and corrected\n"); + + if (ir & MTT_IR_BEU_MASK) + netdev_warn(dev, "mram Bit error detected" + "and uncorrected\n"); + } + + if (ir & MTT_IR_TOO_MASK) { + ack = MTT_IR_TOO_MASK; + ttcan_ir_write(priv->ttcan, ack); + netdev_warn(dev, "Rx timeout not handled\n"); + } + + /* High Priority Message */ + if (ir & MTTCAN_RX_HP_INTR) { + struct ttcanfd_frame ttcanfd; + ack = MTT_IR_HPM_MASK; + ttcan_ir_write(priv->ttcan, ack); + if (ttcan_read_hp_mesgs(priv->ttcan, &ttcanfd)) + work_done += mttcan_hpm_do_receive(dev, + &ttcanfd); + pr_debug("%s: hp mesg received\n", __func__); + } + + /* Handle dedicated buffer */ + if (ir & MTT_IR_DRX_MASK) { + ack = MTT_IR_DRX_MASK; + ttcan_ir_write(priv->ttcan, ack); + rec_msgs = ttcan_read_rx_buffer(priv->ttcan); + work_done += + mttcan_read_rcv_list(dev, &priv->ttcan->rx_b, + BUFFER, rec_msgs, + quota - work_done); + pr_debug("%s: buffer mesg received\n", __func__); + + } + + /* Handle RX Fifo interrupt */ + if (ir & MTTCAN_RX_FIFO_INTR) { + if (ir & MTT_IR_RF1L_MASK) { + netdev_warn(dev, "%s: some msgs lost on in Q1\n", + __func__); + ack = MTT_IR_RF1L_MASK; + ttcan_ir_write(priv->ttcan, ack); + mttcan_handle_lost_frame(dev, 1); + work_done++; + } + if (ir & MTT_IR_RF0L_MASK) { + netdev_warn(dev, "%s: some msgs lost on in Q0\n", + __func__); + ack = MTT_IR_RF0L_MASK; + ttcan_ir_write(priv->ttcan, ack); + mttcan_handle_lost_frame(dev, 0); + work_done++; + } + + if (ir & (MTT_IR_RF1F_MASK | MTT_IR_RF1W_MASK | + MTT_IR_RF1N_MASK)) { + ack = ir & (MTT_IR_RF1F_MASK | + MTT_IR_RF1W_MASK | + MTT_IR_RF1N_MASK); + ttcan_ir_write(priv->ttcan, ack); + + rec_msgs = ttcan_read_rx_fifo1(priv->ttcan); + work_done += + mttcan_read_rcv_list(dev, + &priv->ttcan->rx_q1, + FIFO_1, rec_msgs, + quota - work_done); + pr_debug("%s: msg received in Q1\n", __func__); + } + if (ir & (MTT_IR_RF0F_MASK | MTT_IR_RF0W_MASK | + MTT_IR_RF0N_MASK)) { + ack = ir & (MTT_IR_RF0F_MASK | + MTT_IR_RF0W_MASK | + MTT_IR_RF0N_MASK); + ttcan_ir_write(priv->ttcan, ack); + 
rec_msgs = ttcan_read_rx_fifo0(priv->ttcan); + work_done += + mttcan_read_rcv_list(dev, + &priv->ttcan->rx_q0, + FIFO_0, rec_msgs, + quota - work_done); + pr_debug("%s: msg received in Q0\n", __func__); + } + } + + /* Handle Timer wrap around */ + if (ir & MTT_IR_TSW_MASK) { + ack = MTT_IR_TSW_MASK; + ttcan_ir_write(priv->ttcan, ack); + } + + /* Handle Transmission cancellation finished + * TCF interrupt is set when transmission cancelled is request + * by TXBCR register but in case wherer DAR (one-shot) is set + * the Tx buffers which transmission is not complete due to some + * reason are not retransmitted and for those buffers + * corresponding bit in TXBCF is set. Handle them to release + * Tx queue lockup in software. + */ + if ((ir & MTT_IR_TCF_MASK) || (priv->can.ctrlmode & + CAN_CTRLMODE_ONE_SHOT)) { + if (ir & MTT_IR_TCF_MASK) { + ack = MTT_IR_TCF_MASK; + ttcan_ir_write(priv->ttcan, ack); + } + mttcan_tx_cancelled(dev); + } + + if (ir & MTT_IR_TC_MASK) { + ack = MTT_IR_TC_MASK; + ttcan_ir_write(priv->ttcan, ack); + mttcan_tx_complete(dev); + } + + if (ir & MTT_IR_TFE_MASK) { + /* + * netdev_info(dev, "Tx Fifo Empty %x\n", ir); + */ + ack = MTT_IR_TFE_MASK; + ttcan_ir_write(priv->ttcan, ack); + } + + /* Handle Tx Event */ + if (ir & MTTCAN_TX_EV_FIFO_INTR) { + /* New Tx Event */ + if ((ir & MTT_IR_TEFN_MASK) || + (ir & MTT_IR_TEFW_MASK)) { + ttcan_read_txevt_fifo(priv->ttcan); + mttcan_tx_event(dev); + } + + if ((ir & MTT_IR_TEFL_MASK) && + priv->ttcan->tx_config.evt_q_num) + if (printk_ratelimit()) + netdev_warn(dev, "Tx event lost\n"); + + ack = MTTCAN_TX_EV_FIFO_INTR; + ttcan_ir_write(priv->ttcan, ack); + } + + } + + if (ttir) { + /* Handle CAN TT interrupts */ + unsigned int tt_err = 0; + unsigned int ttost = 0; + + if (ttir & 0x7B100) { + tt_err = 1; + ttost = ttcan_get_ttost(priv->ttcan); + } + if (ttir & MTT_TTIR_CER_MASK) + netdev_warn(dev, "TT Configuration Error\n"); + if (ttir & MTT_TTIR_AW_MASK) + netdev_warn(dev, "TT Application wdt 
triggered\n"); + if (ttir & MTT_TTIR_WT_MASK) + netdev_warn(dev, "TT Referrence Mesg missing\n"); + if (ttir & MTT_TTIR_IWT_MASK) + netdev_warn(dev, "TT Initialization Watch Triggered\n"); + if (ttir & MTT_TTIR_SE2_MASK || ttir & MTT_TTIR_SE1_MASK) + netdev_warn(dev, "TT Scheduling error SE%d\n", + (ttir & MTT_TTIR_SE1_MASK) ? 1 : 2); + if (ttir & MTT_TTIR_TXO_MASK) + netdev_warn(dev, "TT Tx count overflow\n"); + if (ttir & MTT_TTIR_TXU_MASK) + netdev_warn(dev, "TT Tx count underflow\n"); + if (ttir & MTT_TTIR_GTE_MASK) + netdev_warn(dev, "TT Global timer error\n"); + if (ttir & MTT_TTIR_GTD_MASK) + netdev_warn(dev, "TT Global time discontinuity\n"); + if (ttir & MTT_TTIR_GTW_MASK) + netdev_info(dev, "TT Global time wrapped\n"); + if (ttir & MTT_TTIR_SWE_MASK) + netdev_info(dev, "TT Stop watch event\n"); + if (ttir & MTT_TTIR_TTMI_MASK) + netdev_warn(dev, "TT TMI event (int)\n"); + if (ttir & MTT_TTIR_RTMI_MASK) + netdev_warn(dev, "TT Register TMI\n"); + if (ttir & MTT_TTIR_SOG_MASK) + netdev_info(dev, "TT Start of Gap\n"); + if (ttir & MTT_TTIR_SMC_MASK) + netdev_info(dev, "TT Start of Matrix Cycle\n"); + if (ttir & MTT_TTIR_SBC_MASK) + netdev_info(dev, "TT Start of Basic Cycle\n"); + if (tt_err) + netdev_err(dev, "TTOST 0x%x\n", ttost); + ttack = 0xFFFFFFFF; + ttcan_ttir_write(priv->ttcan, ttack); + } +end: + if (work_done < quota) { + napi_complete(napi); + + if (priv->can.state != CAN_STATE_BUS_OFF) + ttcan_set_intrpts(priv->ttcan, 1); + } + + return work_done; +} + +static int mttcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct mttcan_priv *priv = netdev_priv(dev); + u32 ecr; + + mttcan_pm_runtime_get_sync(priv); + + ecr = ttcan_read_ecr(priv->ttcan); + bec->rxerr = (ecr & MTT_ECR_REC_MASK) >> MTT_ECR_REC_SHIFT; + bec->txerr = (ecr & MTT_ECR_TEC_MASK) >> MTT_ECR_TEC_SHIFT; + + mttcan_pm_runtime_put_sync(priv); + + return 0; +} + +static int mttcan_do_set_bittiming(struct net_device *dev) +{ + int err = 0; + struct 
mttcan_priv *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; + + memcpy(&priv->ttcan->bt_config.nominal, bt, + sizeof(struct can_bittiming)); + memcpy(&priv->ttcan->bt_config.data, dbt, + sizeof(struct can_bittiming)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + priv->ttcan->bt_config.fd_flags = CAN_FD_FLAG | CAN_BRS_FLAG; + else + priv->ttcan->bt_config.fd_flags = 0; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + priv->ttcan->bt_config.fd_flags |= CAN_FD_NON_ISO_FLAG; + + err = ttcan_set_bitrate(priv->ttcan); + if (err) { + netdev_err(priv->dev, "Unable to set bitrate\n"); + return err; + } + + netdev_info(priv->dev, "Bitrate set\n"); + return 0; +} + +static void mttcan_controller_config(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + + /* set CCCR.INIT and then CCCR.CCE */ + ttcan_set_config_change_enable(priv->ttcan); + + pr_info("%s: ctrlmode %x\n", __func__, priv->can.ctrlmode); + /* enable automatic retransmission */ + if ((priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) || + priv->tt_param[0]) + ttcan_disable_auto_retransmission(priv->ttcan, true); + else + ttcan_disable_auto_retransmission(priv->ttcan, false); + + if ((priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) && + (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) { + /* internal loopback mode : useful for self-test function */ + ttcan_set_bus_monitoring_mode(priv->ttcan, true); + ttcan_set_loopback(priv->ttcan); + + } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + /* external loopback mode : useful for self-test function */ + ttcan_set_bus_monitoring_mode(priv->ttcan, false); + ttcan_set_loopback(priv->ttcan); + + } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { + /* silent mode : bus-monitoring mode */ + ttcan_set_bus_monitoring_mode(priv->ttcan, true); + } else + /* clear bus montor or external loopback mode */ + ttcan_set_normal_mode(priv->ttcan); + + 
/* set bit timing and start controller */ + mttcan_do_set_bittiming(dev); +} + +/* Adjust the timer by resetting the timecounter structure periodically */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(4,15,0) +static void mttcan_timer_cb(struct timer_list *timer) +#else +static void mttcan_timer_cb(unsigned long data) +#endif +{ + unsigned long flags; + u64 tref; + int ret = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(4,15,0) + struct mttcan_priv *priv = container_of(timer, struct mttcan_priv, timer); +#else + struct mttcan_priv *priv = (struct mttcan_priv *)data; +#endif + + raw_spin_lock_irqsave(&priv->tc_lock, flags); + ret = nvpps_get_ptp_ts(&tref); + if (ret != 0) { + tref = ktime_to_ns(ktime_get()); + } + timecounter_init(&priv->tc, &priv->cc, tref); + raw_spin_unlock_irqrestore(&priv->tc_lock, flags); + mod_timer(&priv->timer, + jiffies + (msecs_to_jiffies(MTTCAN_HWTS_ROLLOVER))); +} + +static void mttcan_bus_off_restart(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mttcan_priv *priv = container_of(dwork, struct mttcan_priv, + drv_restart_work); + struct net_device *dev = priv->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *cf; + + /* send restart message upstream */ + skb = alloc_can_err_skb(dev, &cf); + if (!skb) { + netdev_err(dev, "error skb allocation failed\n"); + goto restart; + } + cf->can_id |= CAN_ERR_RESTARTED; + + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + +restart: + netdev_dbg(dev, "restarted\n"); + priv->can.can_stats.restarts++; + + mttcan_start(dev); + netif_carrier_on(dev); +} + +static void mttcan_start(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + struct ttcan_controller *ttcan = priv->ttcan; + u32 psr = 0; + + if (ttcan->proto_state) { + psr = ttcan->proto_state; + ttcan->proto_state = 0; + } else { + psr = ttcan_read_psr(ttcan); + } + + if (psr & MTT_PSR_BO_MASK) { + /* Set state as 
Error Active after restart from BUS OFF */ + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } else if (psr & MTT_PSR_EP_MASK) { + /* Error Passive */ + priv->can.state = CAN_STATE_ERROR_PASSIVE; + } else if (psr & MTT_PSR_EW_MASK) { + /* Error Warning */ + priv->can.state = CAN_STATE_ERROR_WARNING; + } else { + /* Error Active */ + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + mttcan_controller_config(dev); + + ttcan_clear_intr(ttcan); + ttcan_clear_tt_intr(ttcan); + + /* start Tx/Rx and enable protected mode */ + if (!priv->tt_param[0]) { + ttcan_reset_init(ttcan); + + if (psr & MTT_PSR_BO_MASK) { + netdev_info(dev, "wait for bus off seq\n"); + ttcan_bus_off_seq(ttcan); + } + } + + ttcan_set_intrpts(priv->ttcan, 1); + + if (priv->poll) + schedule_delayed_work(&priv->can_work, + msecs_to_jiffies(MTTCAN_POLL_TIME)); +} + +static void mttcan_stop(struct mttcan_priv *priv) +{ + ttcan_set_intrpts(priv->ttcan, 0); + + priv->can.state = CAN_STATE_STOPPED; + priv->ttcan->proto_state = 0; + + ttcan_set_config_change_enable(priv->ttcan); +} + +static int mttcan_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + mttcan_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static struct net_device *alloc_mttcan_dev(void) +{ + struct net_device *dev; + struct mttcan_priv *priv; + + dev = alloc_candev(sizeof(struct mttcan_priv), MTT_CAN_TX_OBJ_NUM); + if (!dev) + return NULL; + + /* TODO:- check if we need to disable local loopback */ + dev->flags = (IFF_NOARP | IFF_ECHO); + + priv = netdev_priv(dev); + + priv->dev = dev; + priv->can.bittiming_const = &mttcan_normal_bittiming_const; + priv->can.data_bittiming_const = &mttcan_data_bittiming_const; + priv->can.do_set_bittiming = mttcan_do_set_bittiming; + priv->can.do_set_mode = mttcan_set_mode; + priv->can.do_get_berr_counter = mttcan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY 
| CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO + | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_ONE_SHOT; + + netif_napi_add(dev, &priv->napi, mttcan_poll_ir, MTT_CAN_NAPI_WEIGHT); + + return dev; +} + +static irqreturn_t mttcan_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct mttcan_priv *priv = netdev_priv(dev); + + priv->irqstatus = ttcan_read_ir(priv->ttcan); + priv->tt_irqstatus = ttcan_read_ttir(priv->ttcan); + + if (!priv->irqstatus && !priv->tt_irqstatus) + return IRQ_NONE; + + /* if there is error, read the PSR register now */ + if (priv->irqstatus & MTTCAN_ERR_INTR) + priv->ttcan->proto_state = ttcan_read_psr(priv->ttcan); + + /* If tt_stop > 0, then stop when TT interrupt count > tt_stop */ + if (priv->tt_param[1] && priv->tt_irqstatus) + if (priv->tt_intrs++ > priv->tt_param[1]) + ttcan_set_config_change_enable(priv->ttcan); + + /* disable and clear all interrupts */ + ttcan_set_intrpts(priv->ttcan, 0); + + /* schedule the NAPI */ + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +static void mttcan_work(struct work_struct *work) +{ + struct mttcan_priv *priv = container_of(to_delayed_work(work), + struct mttcan_priv, can_work); + + priv->irqstatus = ttcan_read_ir(priv->ttcan); + priv->tt_irqstatus = ttcan_read_ttir(priv->ttcan); + + if (priv->irqstatus || priv->tt_irqstatus) { + /* disable and clear all interrupts */ + ttcan_set_intrpts(priv->ttcan, 0); + + /* schedule the NAPI */ + napi_schedule(&priv->napi); + } + schedule_delayed_work(&priv->can_work, + msecs_to_jiffies(MTTCAN_POLL_TIME)); +} + +static int mttcan_power_up(struct mttcan_priv *priv) +{ + int level; + mttcan_pm_runtime_get_sync(priv); + + if (gpio_is_valid(priv->gpio_can_stb.gpio)) { + level = !priv->gpio_can_stb.active_low; + gpio_direction_output(priv->gpio_can_stb.gpio, level); + } + + if (gpio_is_valid(priv->gpio_can_en.gpio)) { + level = !priv->gpio_can_en.active_low; + gpio_direction_output(priv->gpio_can_en.gpio, level); + } + + 
return ttcan_set_power(priv->ttcan, 1); +} + +static int mttcan_power_down(struct net_device *dev) +{ + int level; + struct mttcan_priv *priv = netdev_priv(dev); + + if (ttcan_set_power(priv->ttcan, 0)) + return -ETIMEDOUT; + + if (gpio_is_valid(priv->gpio_can_stb.gpio)) { + level = priv->gpio_can_stb.active_low; + gpio_direction_output(priv->gpio_can_stb.gpio, level); + } + + if (gpio_is_valid(priv->gpio_can_en.gpio)) { + level = priv->gpio_can_en.active_low; + gpio_direction_output(priv->gpio_can_en.gpio, level); + } + + mttcan_pm_runtime_put_sync(priv); + + return 0; +} + +static int mttcan_open(struct net_device *dev) +{ + int err; + struct mttcan_priv *priv = netdev_priv(dev); + + mttcan_pm_runtime_get_sync(priv); + + err = mttcan_power_up(priv); + if (err) { + netdev_err(dev, "unable to power on\n"); + goto exit_open_fail; + } + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_open_fail; + } + + err = request_irq(dev->irq, mttcan_isr, 0, dev->name, dev); + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto fail; + } + + napi_enable(&priv->napi); + can_led_event(dev, CAN_LED_EVENT_OPEN); + + mttcan_start(dev); + netif_start_queue(dev); + + return 0; + +fail: + close_candev(dev); +exit_open_fail: + mttcan_pm_runtime_put_sync(priv); + return err; +} + +static int mttcan_close(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + mttcan_stop(priv); + free_irq(dev->irq, dev); + priv->hwts_rx_en = false; + close_candev(dev); + mttcan_power_down(dev); + mttcan_pm_runtime_put_sync(priv); + + can_led_event(dev, CAN_LED_EVENT_STOP); + return 0; +} + +static netdev_tx_t mttcan_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + int msg_no = -1; + struct mttcan_priv *priv = netdev_priv(dev); + struct canfd_frame *frame = (struct canfd_frame *)skb->data; + + if (can_dropped_invalid_skb(dev, skb)) + return 
NETDEV_TX_OK; + + if (can_is_canfd_skb(skb)) + frame->flags |= CAN_FD_FLAG; + + spin_lock_bh(&priv->tx_lock); + + /* Write Tx message to controller */ + msg_no = ttcan_tx_msg_buffer_write(priv->ttcan, + (struct ttcanfd_frame *)frame); + if (msg_no < 0) + msg_no = ttcan_tx_fifo_queue_msg(priv->ttcan, + (struct ttcanfd_frame *)frame); + + if (msg_no < 0) { + netif_stop_queue(dev); + spin_unlock_bh(&priv->tx_lock); + return NETDEV_TX_BUSY; + } + can_put_echo_skb(skb, dev, msg_no, NULL); + + /* Set go bit for non-TTCAN messages */ + if (!priv->tt_param[0]) + ttcan_tx_trigger_msg_transmit(priv->ttcan, msg_no); + + /* State management for Tx complete/cancel processing */ + if (test_and_set_bit(msg_no, &priv->ttcan->tx_object) && + printk_ratelimit()) + netdev_err(dev, "Writing to occupied echo_skb buffer\n"); + clear_bit(msg_no, &priv->ttcan->tx_obj_cancelled); + + spin_unlock_bh(&priv->tx_lock); + + return NETDEV_TX_OK; +} + +static int mttcan_change_mtu(struct net_device *dev, int new_mtu) +{ + if (dev->flags & IFF_UP) + return -EBUSY; + + if (new_mtu != CANFD_MTU) + dev->mtu = new_mtu; + return 0; +} + +static void mttcan_init_cyclecounter(struct mttcan_priv *priv) +{ + priv->cc.read = ttcan_read_ts_cntr; + priv->cc.mask = CLOCKSOURCE_MASK(16); + priv->cc.shift = 0; + + if (priv->sinfo->use_external_timer) { + /* external timer is driven by TSC_REF_CLK and uses + * bit [5:20] of that 64 bit timer by default. 
By + * selecting OFFSET_SEL as 4, we are now using bit + * [9:24] and thats why multiplication by 512 (2^9) + */ + priv->cc.mult = ((u64)NSEC_PER_SEC * 512) / + TSC_REF_CLK_RATE; + } else { + priv->cc.mult = ((u64)NSEC_PER_SEC * + priv->ttcan->ts_prescalar) / + priv->ttcan->bt_config.nominal.bitrate; + } +} + +static int mttcan_handle_hwtstamp_set(struct mttcan_priv *priv, + struct ifreq *ifr) +{ + struct hwtstamp_config config; + unsigned long flags; + u64 tref; + bool rx_config_chg = false; + int ret = 0; + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + /* time stamp no incoming packet at all */ + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + if (priv->hwts_rx_en == true) + rx_config_chg = true; + priv->hwts_rx_en = false; + break; + /* time stamp any incoming packet */ + case HWTSTAMP_FILTER_ALL: + if ((!priv->sinfo->use_external_timer) && + (priv->can.ctrlmode & CAN_CTRLMODE_FD)) { + netdev_err(priv->dev, + "HW Timestamp not supported in FD mode\n"); + return -ERANGE; + } + + config.rx_filter = HWTSTAMP_FILTER_ALL; + if (priv->hwts_rx_en == false) + rx_config_chg = true; + break; + default: + return -ERANGE; + } + + priv->hwtstamp_config = config; + /* Setup hardware time stamping cyclecounter */ + if (rx_config_chg && (config.rx_filter == HWTSTAMP_FILTER_ALL)) { + mttcan_init_cyclecounter(priv); + + /* we use TSC as base time for T194 and PTP for T186. 
*/ + if (priv->sinfo->use_external_timer) { + raw_spin_lock_irqsave(&priv->tc_lock, flags); + priv->hwts_rx_en = true; + raw_spin_unlock_irqrestore(&priv->tc_lock, flags); + } else { + raw_spin_lock_irqsave(&priv->tc_lock, flags); + ret = nvpps_get_ptp_ts(&tref); + if (ret != 0) { + dev_err(priv->device, "HW PTP not running\n"); + tref = ktime_to_ns(ktime_get()); + } + timecounter_init(&priv->tc, &priv->cc, tref); + priv->hwts_rx_en = true; + raw_spin_unlock_irqrestore(&priv->tc_lock, flags); + + mod_timer(&priv->timer, jiffies + + (msecs_to_jiffies(MTTCAN_HWTS_ROLLOVER))); + } + } + + return (copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config))) ? -EFAULT : 0; +} + +static int mttcan_handle_hwtstamp_get(struct mttcan_priv *priv, + struct ifreq *ifr) +{ + return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, + sizeof(struct hwtstamp_config)) ? -EFAULT : 0; +} + +static int mttcan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mttcan_priv *priv = netdev_priv(dev); + int ret = 0; + + spin_lock(&priv->tslock); + switch (cmd) { + case SIOCSHWTSTAMP: + ret = mttcan_handle_hwtstamp_set(priv, ifr); + break; + case SIOCGHWTSTAMP: + ret = mttcan_handle_hwtstamp_get(priv, ifr); + break; + default: + ret = -EOPNOTSUPP; + } + spin_unlock(&priv->tslock); + + return ret; +} + +static const struct net_device_ops mttcan_netdev_ops = { + .ndo_open = mttcan_open, + .ndo_stop = mttcan_close, + .ndo_start_xmit = mttcan_start_xmit, + .ndo_change_mtu = mttcan_change_mtu, + .ndo_do_ioctl = mttcan_ioctl, +}; + +static int register_mttcan_dev(struct net_device *dev) +{ + int err; + + dev->netdev_ops = &mttcan_netdev_ops; + err = register_candev(dev); + if (!err) + devm_can_led_init(dev); + + return err; +} + +static int mttcan_prepare_clock(struct mttcan_priv *priv) +{ + int err; + + mttcan_pm_runtime_enable(priv); + + err = clk_prepare_enable(priv->can_clk); + if (err) { + dev_err(priv->device, "CAN clk enable failed\n"); + return err; + } + + 
err = clk_prepare_enable(priv->host_clk); + if (err) { + dev_err(priv->device, "CAN_HOST clk enable failed\n"); + clk_disable_unprepare(priv->can_clk); + } + + if (priv->sinfo->set_can_core_clk) { + err = clk_prepare_enable(priv->core_clk); + if (err) { + dev_err(priv->device, "CAN_CORE clk enable failed\n"); + clk_disable_unprepare(priv->host_clk); + clk_disable_unprepare(priv->can_clk); + } + } + + return err; +} + +static void mttcan_unprepare_clock(struct mttcan_priv *priv) +{ + if (priv->sinfo->set_can_core_clk) + clk_disable_unprepare(priv->core_clk); + + clk_disable_unprepare(priv->host_clk); + clk_disable_unprepare(priv->can_clk); +} + +static void unregister_mttcan_dev(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + + unregister_candev(dev); + mttcan_pm_runtime_disable(priv); +} + +static void free_mttcan_dev(struct net_device *dev) +{ + struct mttcan_priv *priv = netdev_priv(dev); + + netif_napi_del(&priv->napi); + free_candev(dev); +} + +static int set_can_clk_src_and_rate(struct mttcan_priv *priv) +{ + int ret = 0; + unsigned long rate = priv->sinfo->can_clk_rate; + unsigned long new_rate = 0; + struct clk *host_clk = NULL, *can_clk = NULL, *core_clk = NULL; + struct clk *pclk = NULL; + const char *pclk_name; + + /* get the appropriate clk */ + host_clk = devm_clk_get(priv->device, "can_host"); + can_clk = devm_clk_get(priv->device, "can"); + if (IS_ERR(host_clk) || IS_ERR(can_clk)) { + dev_err(priv->device, "no CAN clock defined\n"); + return -ENODEV; + } + + if (priv->sinfo->set_can_core_clk) { + core_clk = devm_clk_get(priv->device, "can_core"); + if (IS_ERR(core_clk)) { + dev_err(priv->device, "no CAN_CORE clock defined\n"); + return -ENODEV; + } + } + + ret = of_property_read_string(priv->device->of_node, + "pll_source", &pclk_name); + if (ret) { + dev_warn(priv->device, "pll source not defined\n"); + return -ENODEV; + } + + pclk = clk_get(priv->device, pclk_name); + if (IS_ERR(pclk)) { + dev_warn(priv->device, "%s clock 
not defined\n", pclk_name); + return -ENODEV; + } + + ret = clk_set_parent(can_clk, pclk); + if (ret) { + dev_warn(priv->device, "unable to set CAN_CLK parent\n"); + return -ENODEV; + } + + new_rate = clk_round_rate(can_clk, rate); + if (!new_rate) + dev_warn(priv->device, "incorrect CAN clock rate\n"); + + ret = clk_set_rate(can_clk, new_rate > 0 ? new_rate : rate); + if (ret) { + dev_warn(priv->device, "unable to set CAN clock rate\n"); + return -EINVAL; + } + + ret = clk_set_rate(host_clk, new_rate > 0 ? new_rate : rate); + if (ret) { + dev_warn(priv->device, "unable to set CAN_HOST clock rate\n"); + return -EINVAL; + } + + if (priv->sinfo->set_can_core_clk) { + rate = priv->sinfo->can_core_clk_rate; + new_rate = clk_round_rate(core_clk, rate); + if (!new_rate) + dev_warn(priv->device, "incorrect CAN_CORE clock rate\n"); + + ret = clk_set_rate(core_clk, new_rate > 0 ? new_rate : rate); + if (ret) { + dev_warn(priv->device, "unable to set CAN_CORE clock rate\n"); + return -EINVAL; + } + } + + priv->can_clk = can_clk; + priv->host_clk = host_clk; + + if (priv->sinfo->set_can_core_clk) { + priv->core_clk = core_clk; + priv->can.clock.freq = clk_get_rate(core_clk); + } else { + priv->can.clock.freq = clk_get_rate(can_clk); + } + + return 0; +} + +static int mttcan_probe(struct platform_device *pdev) +{ + int ret = 0; + int irq = 0; + enum of_gpio_flags flags; + void __iomem *regs = NULL, *xregs = NULL; + void __iomem *mram_addr = NULL; + struct net_device *dev; + struct mttcan_priv *priv; + struct resource *ext_res; + struct reset_control *rstc; + struct resource *mesg_ram, *ctrl_res; + const struct tegra_mttcan_soc_info *sinfo; + struct device_node *np; + + sinfo = of_device_get_match_data(&pdev->dev); + if (!sinfo) { + dev_err(&pdev->dev, "No device match found\n"); + return -EINVAL; + } + + np = pdev->dev.of_node; + if (!np) { + dev_err(&pdev->dev, "No valid device node, probe failed\n"); + return -EINVAL; + } + + /* get the platform data */ + irq = 
platform_get_irq(pdev, 0); + if (irq <= 0) { + ret = -ENODEV; + dev_err(&pdev->dev, "IRQ not defined\n"); + goto exit; + } + + dev = alloc_mttcan_dev(); + if (!dev) { + ret = -ENOMEM; + dev_err(&pdev->dev, "CAN device allocation failed\n"); + goto exit; + } + + priv = netdev_priv(dev); + priv->sinfo = sinfo; + + /* mem0 Controller Register Space + * mem1 Controller Extra Registers Space + * mem2 Controller Message RAM Space + */ + ctrl_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ext_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + mesg_ram = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!ctrl_res || !ext_res || !mesg_ram) { + ret = -ENODEV; + dev_err(&pdev->dev, "Resource allocation failed\n"); + goto exit_free_can; + } + + rstc = devm_reset_control_get(&pdev->dev, "can"); + if (IS_ERR(rstc)) { + dev_err(&pdev->dev, "Missing controller reset\n"); + ret = PTR_ERR(rstc); + goto exit_free_can; + } + reset_control_reset(rstc); + + regs = devm_ioremap_resource(&pdev->dev, ctrl_res); + xregs = devm_ioremap_resource(&pdev->dev, ext_res); + mram_addr = devm_ioremap_resource(&pdev->dev, mesg_ram); + + if (!mram_addr || !xregs || !regs) { + dev_err(&pdev->dev, "failed to map can port\n"); + ret = -ENOMEM; + goto exit; + } + + /* allocate the mttcan device */ + + dev->irq = irq; + priv->device = &pdev->dev; + + if (set_can_clk_src_and_rate(priv)) + goto exit_free_device; + + /* set device-tree properties */ + priv->gpio_can_en.gpio = of_get_named_gpio_flags(np, + "gpio_can_en", 0, &flags); + priv->gpio_can_en.active_low = flags & OF_GPIO_ACTIVE_LOW; + priv->gpio_can_stb.gpio = of_get_named_gpio_flags(np, + "gpio_can_stb", 0, &flags); + priv->gpio_can_stb.active_low = flags & OF_GPIO_ACTIVE_LOW; + priv->instance = of_alias_get_id(np, "mttcan"); + priv->poll = of_property_read_bool(np, "use-polling"); + of_property_read_u32_array(np, "tt-param", priv->tt_param, 2); + if (of_property_read_u32_array(np, "tx-config", + priv->tx_conf, TX_CONF_MAX)) { + 
dev_err(priv->device, "tx-config missing\n"); + goto exit_free_device; + } + if (of_property_read_u32_array(np, "rx-config", + priv->rx_conf, RX_CONF_MAX)) { + dev_err(priv->device, "rx-config missing\n"); + goto exit_free_device; + } + if (of_property_read_u32_array(np, "mram-params", + priv->mram_param, MTT_CAN_MAX_MRAM_ELEMS)) { + dev_err(priv->device, "mram-params missing\n"); + goto exit_free_device; + } + + if (gpio_is_valid(priv->gpio_can_stb.gpio)) { + if (devm_gpio_request(priv->device, priv->gpio_can_stb.gpio, + "gpio_can_stb") < 0) { + dev_err(priv->device, "stb gpio request failed\n"); + goto exit_free_device; + } + } + if (gpio_is_valid(priv->gpio_can_en.gpio)) { + if (devm_gpio_request(priv->device, priv->gpio_can_en.gpio, + "gpio_can_en") < 0) { + dev_err(priv->device, "en gpio request failed\n"); + goto exit_free_device; + } + } + + + /* allocate controller struct memory and set fields */ + priv->ttcan = + devm_kzalloc(priv->device, sizeof(struct ttcan_controller), + GFP_KERNEL); + if (!priv->ttcan) { + dev_err(priv->device, + "cannot allocate memory for ttcan_controller\n"); + goto exit_free_device; + } + memset(priv->ttcan, 0, sizeof(struct ttcan_controller)); + priv->ttcan->base = regs; + priv->ttcan->xbase = xregs; + priv->ttcan->mram_base = mesg_ram->start; + priv->ttcan->mram_size = mesg_ram->end - mesg_ram->start + 1; + priv->ttcan->id = priv->instance; + priv->ttcan->mram_vbase = mram_addr; + INIT_LIST_HEAD(&priv->ttcan->rx_q0); + INIT_LIST_HEAD(&priv->ttcan->rx_q1); + INIT_LIST_HEAD(&priv->ttcan->rx_b); + INIT_LIST_HEAD(&priv->ttcan->tx_evt); + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + if (priv->poll) { + dev_info(&pdev->dev, "Polling Mode enabled\n"); + INIT_DELAYED_WORK(&priv->can_work, mttcan_work); + } + INIT_DELAYED_WORK(&priv->drv_restart_work, mttcan_bus_off_restart); + + ret = mttcan_prepare_clock(priv); + if (ret) + goto exit_free_device; + + ret = mttcan_hw_init(priv); + if (ret) + goto 
exit_free_device; + + ret = register_mttcan_dev(dev); + if (ret) { + dev_err(&pdev->dev, "registering %s failed (err=%d)\n", + KBUILD_MODNAME, ret); + goto exit_hw_deinit; + } + + ret = mttcan_create_sys_files(&dev->dev); + if (ret) + goto exit_unreg_candev; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(4,15,0) + timer_setup(&priv->timer, mttcan_timer_cb, 0); +#else + setup_timer(&priv->timer, mttcan_timer_cb, (unsigned long)priv); +#endif + + dev_info(&dev->dev, "%s device registered (regs=%p, irq=%d)\n", + KBUILD_MODNAME, priv->ttcan->base, dev->irq); + + return 0; + +exit_unreg_candev: + unregister_mttcan_dev(dev); +exit_hw_deinit: + mttcan_hw_deinit(priv); + mttcan_unprepare_clock(priv); +exit_free_device: + platform_set_drvdata(pdev, NULL); +exit_free_can: + free_mttcan_dev(dev); +exit: + dev_err(&pdev->dev, "probe failed\n"); + return ret; +} + +static int mttcan_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct mttcan_priv *priv = netdev_priv(dev); + + if (priv->poll) + cancel_delayed_work_sync(&priv->can_work); + + dev_info(&dev->dev, "%s\n", __func__); + + del_timer_sync(&priv->timer); + mttcan_delete_sys_files(&dev->dev); + unregister_mttcan_dev(dev); + mttcan_unprepare_clock(priv); + platform_set_drvdata(pdev, NULL); + free_mttcan_dev(dev); + + return 0; +} + +#ifdef CONFIG_PM +static int mttcan_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct net_device *ndev = platform_get_drvdata(pdev); + struct mttcan_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + if (ndev->flags & IFF_UP) { + mttcan_stop(priv); + ret = mttcan_power_down(ndev); + if (ret) { + netdev_err(ndev, "failed to enter power down mode\n"); + return ret; + } + } + + priv->can.state = CAN_STATE_SLEEPING; + return 0; +} + +static int mttcan_resume(struct platform_device *pdev) +{ + int ret; + struct net_device *ndev = 
platform_get_drvdata(pdev); + struct mttcan_priv *priv = netdev_priv(ndev); + + if (ndev->flags & IFF_UP) { + ret = mttcan_power_up(priv); + if (ret) + return ret; + } + + if (priv->hwts_rx_en) + mod_timer(&priv->timer, + jiffies + (msecs_to_jiffies(MTTCAN_HWTS_ROLLOVER))); + + ret = mttcan_hw_reinit(priv); + if (ret) + return ret; + + if (ndev->flags & IFF_UP) + mttcan_start(ndev); + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + return 0; +} +#endif + +static struct platform_driver mttcan_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mttcan_of_table), + }, + .probe = mttcan_probe, + .remove = mttcan_remove, +#ifdef CONFIG_PM + .suspend = mttcan_suspend, + .resume = mttcan_resume, +#endif +}; + +module_platform_driver(mttcan_plat_driver); +MODULE_AUTHOR("Manoj Chourasia "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Platform CAN bus driver for Bosch M_TTCAN controller"); diff --git a/drivers/net/can/mttcan/native/m_ttcan_sys.c b/drivers/net/can/mttcan/native/m_ttcan_sys.c new file mode 100644 index 00000000..f2d10780 --- /dev/null +++ b/drivers/net/can/mttcan/native/m_ttcan_sys.c @@ -0,0 +1,741 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#include "../include/m_ttcan.h" + +#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ + SB_BARRIER_INSN"nop\n", \ + ARM64_HAS_SB)) + +static int mttcan_check_fec_validity(struct mttcan_priv *priv, + unsigned int fec); + +static ssize_t show_std_fltr(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + int cur_filter_size = priv->ttcan->fltr_config.std_fltr_size; + ssize_t ret, total = 0; + int i = 0; + + ret = sprintf(buf, "%s\n", "Standard Filters"); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + while (cur_filter_size--) { + ret = sprintf(buf+total, "%d. 0x%x\n", i, + ttcan_get_std_id_filter(priv->ttcan, i)); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + i++; + } + return total; +} + +static ssize_t show_xtd_fltr(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + int cur_filter_size = priv->ttcan->fltr_config.xtd_fltr_size; + ssize_t ret, total = 0; + int i = 0; + + ret = sprintf(buf, "%s\n", "Extended Filters"); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + while (cur_filter_size--) { + ret = sprintf(buf+total, "%d. 
0x%llx\n", i, + ttcan_get_xtd_id_filter(priv->ttcan, i)); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + i++; + } + return total; +} + +static ssize_t show_gfc_fltr(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "Global filter", + ttcan_get_gfc(priv->ttcan)); +} + +static ssize_t store_gfc_fltr(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + struct ttcan_controller *ttcan = priv->ttcan; + unsigned int anfs, anfe; + unsigned int rrfs, rrfe; + u32 gfc; + int ret; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "GFC cannot be configured as device is running\n"); + return -EBUSY; + } + + ret = sscanf(buf, "anfs=%u anfe=%u rrfs=%u rrfe=%u", &anfs, + &anfe, &rrfs, &rrfe); + if ((ret < 4) || ((anfs | anfe) > 3) || ((rrfs | rrfe) > 1)) { + dev_err(dev, "Invalid Global filter\n"); + pr_err("usage:anfs=0..3 anfe=0..3 rrfs=0/1 rrfe=0/1\n"); + return -EINVAL; + } + + if (((anfs == GFC_ANFS_RXFIFO_0) || (anfe == GFC_ANFE_RXFIFO_0)) + && (ttcan->mram_cfg[MRAM_RXF0].num == 0U)) { + dev_err(priv->device, "RX FIFO 0 is not used currently."); + dev_err(priv->device, " Change it via DT\n"); + return -EINVAL; + } + + if (((anfs == GFC_ANFS_RXFIFO_1) || (anfe == GFC_ANFE_RXFIFO_1)) + && (ttcan->mram_cfg[MRAM_RXF1].num == 0U)) { + dev_err(priv->device, "RX FIFO 1 is not used currently."); + dev_err(priv->device, " Change it via DT\n"); + return -EINVAL; + } + + gfc = 0; + gfc = (anfs << MTT_GFC_ANFS_SHIFT) & MTT_GFC_ANFS_MASK; + gfc |= (anfe << MTT_GFC_ANFE_SHIFT) & MTT_GFC_ANFE_MASK; + gfc |= (rrfs << MTT_GFC_RRFS_SHIFT) & MTT_GFC_RRFS_MASK; + gfc |= (rrfe << MTT_GFC_RRFE_SHIFT) & MTT_GFC_RRFE_MASK; + + priv->gfc_reg = gfc; + ttcan_set_gfc(priv->ttcan, gfc); + + return 
count; +} + +static ssize_t show_xidam(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "XIDAM", + ttcan_get_xidam(priv->ttcan)); +} + +static ssize_t store_xidam(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + unsigned int xidam; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "XIDAM is protected, device is running\n"); + return -EBUSY; + } + /* usage: xidam=MASK */ + if (sscanf(buf, "xidam=%u", &xidam) != 1) { + dev_err(dev, "Invalid XIDAM MASK\n"); + pr_err("usage: xidam=MASK\n"); + return -EINVAL; + } + + priv->xidam_reg = xidam; + ttcan_set_xidam(priv->ttcan, xidam); + + return count; +} + +static int mttcan_check_fec_validity(struct mttcan_priv *priv, + unsigned int fec) +{ + struct ttcan_controller *ttcan = priv->ttcan; + + if (fec > FEC_RXBUF) { + dev_err(priv->device, "sfec/efec should be in range 0-7\n"); + return -EINVAL; + } + + if (((fec == FEC_RXFIFO_0) || (fec == FEC_RXFIFO_0_PRIO)) && + (ttcan->mram_cfg[MRAM_RXF0].num == 0U)) { + dev_err(priv->device, "RX FIFO 0 is not used currently."); + dev_err(priv->device, " Change it via DT\n"); + return -EINVAL; + } + + if (((fec == FEC_RXFIFO_1) || (fec == FEC_RXFIFO_1_PRIO)) && + (ttcan->mram_cfg[MRAM_RXF1].num == 0U)) { + dev_err(priv->device, "RX FIFO 1 is not used currently."); + dev_err(priv->device, " Change it via DT\n"); + return -EINVAL; + } + + if ((fec == FEC_RXBUF) && (ttcan->mram_cfg[MRAM_RXB].num == 0U)) { + dev_err(priv->device, "RX Buffer is not used currently."); + dev_err(priv->device, " Change it via DT\n"); + return -EINVAL; + } + + return 0; +} + +static ssize_t store_std_fltr(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + struct 
net_device *ndev = to_net_dev(dev); + unsigned int sft, sfec; + unsigned int sfid1, sfid2; + int idx = -1, cur_filter_size; + int items; + int ret; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "device is running\n"); + return -EBUSY; + } + /* usage: sft="0/1/2/3" sfec=1...7 sfid1="ID1" sfid2="ID2" idx=%u + */ + ret = sscanf(buf, "sft=%u sfec=%u sfid1=%X sfid2=%X idx=%u", &sft, + &sfec, &sfid1, &sfid2, &idx); + if (ret < 4) { + /* Not passing index is allowed */ + dev_err(dev, "Invalid std filter\n"); + pr_err("usage:sft=0..3 sfec=0..7 sfid1=ID1h sfid2=ID2h idx=i\n"); + return -EINVAL; + } + items = ret; + + cur_filter_size = priv->ttcan->fltr_config.std_fltr_size; + + if ((idx > cur_filter_size) || (idx == -1)) { + if (cur_filter_size >= priv->ttcan->mram_cfg[MRAM_SIDF].num) { + dev_err(dev, "Max Invalid std filter Index\n"); + return -ENOSPC; + } + } + + ret = mttcan_check_fec_validity(priv, sfec); + if (ret < 0) { + dev_err(dev, "Invalid sfec value\n"); + return -EINVAL; + } + + if (items == 5) { + if (idx > cur_filter_size) { + dev_err(dev, "Invalid std filter Index\n"); + return -EINVAL; + } + + /* array access based on user provided index/data */ + spec_bar(); + ttcan_set_std_id_filter(priv->ttcan, priv->std_shadow, + idx, (u8)sft, (u8)sfec, sfid1, sfid2); + if (idx == cur_filter_size) + priv->ttcan->fltr_config.std_fltr_size++; + } else { + /* array access based on user provided index/data */ + spec_bar(); + ttcan_set_std_id_filter(priv->ttcan, priv->std_shadow, + cur_filter_size, (u8) sft, (u8)sfec, sfid1, sfid2); + priv->ttcan->fltr_config.std_fltr_size++; + } + return count; +} + +static ssize_t store_xtd_fltr(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + struct net_device *ndev = to_net_dev(dev); + unsigned int eft, efec; + unsigned int efid1, efid2; + int idx = -1, cur_filter_size; + int items; + int ret; + + if (ndev->flags & IFF_UP) { + 
dev_err(dev, "device is running\n"); + return -EBUSY; + } + /* usage: eft="0/1/2/3" efec=1...7 efid1="ID1h" efid2="ID2h" idx=%u + */ + ret = sscanf(buf, "eft=%u efec=%u efid1=%X efid2=%X idx=%u", &eft, + &efec, &efid1, &efid2, &idx); + if (ret < 4) { + /* Not passing index is allowed */ + dev_err(dev, "Invalid xtd filter\n"); + pr_err("usage:eft=0..3 efec=0..7 efid1=ID1h efid2=ID2h idx=i\n"); + return -EINVAL; + } + items = ret; + + cur_filter_size = priv->ttcan->fltr_config.xtd_fltr_size; + + if ((idx > cur_filter_size) || (idx == -1)) { + if (cur_filter_size >= priv->ttcan->mram_cfg[MRAM_XIDF].num) { + dev_err(dev, "Max Invalid xtd filter Index\n"); + return -ENOSPC; + } + } + + ret = mttcan_check_fec_validity(priv, efec); + if (ret < 0) { + dev_err(dev, "Invalid efec value\n"); + return -EINVAL; + } + + if (items == 5) { + if (idx > cur_filter_size) { + dev_err(dev, "Invalid xtd filter Index\n"); + return -EINVAL; + } + + /* array access based on user provided index/data */ + spec_bar(); + ttcan_set_xtd_id_filter(priv->ttcan, priv->xtd_shadow, + idx, (u8) eft, (u8) efec, efid1, efid2); + if (idx == cur_filter_size) + priv->ttcan->fltr_config.xtd_fltr_size++; + } else { + /* array access based on user provided index/data */ + spec_bar(); + ttcan_set_xtd_id_filter(priv->ttcan, priv->xtd_shadow, + cur_filter_size, (u8) eft, (u8) efec, efid1, efid2); + priv->ttcan->fltr_config.xtd_fltr_size++; + } + return count; +} + +static ssize_t show_tx_cancel(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "TXBCF", + ttcan_read_tx_cancelled_reg(priv->ttcan)); +} + +static ssize_t store_tx_cancel(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + unsigned int txcbr; + + /* usage: txbcr=bit_mask for buffer */ + if (sscanf(buf, 
"txbcr=%X", &txcbr) != 1) { + dev_err(dev, "Invalid TXBCR value\n"); + pr_err("%s usage: txbcr=bit_mask to cancel\n", buf); + return -EINVAL; + } + + ttcan_set_tx_cancel_request(priv->ttcan, txcbr); + + return count; +} + +static ssize_t show_ttrmc(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "TTRMC", + ttcan_get_ttrmc(priv->ttcan)); +} + +static ssize_t store_ttrmc(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + unsigned int rmps, xtd, rid; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "TTRMC is protected, device is running\n"); + return -EBUSY; + } + /* usage: rmps=0/1 xtd=0/1 rid=ReferenceID */ + if (sscanf(buf, "rmps=%u xtd=%u rid=%X", &rmps, &xtd, &rid) != 3) { + dev_err(dev, "Invalid TTRMC\n"); + pr_err("usage: rmps=0/1 xtd=0/1 rid=ReferenceID in hex\n"); + return -EINVAL; + } + + ttcan_set_ref_mesg(priv->ttcan, rid, rmps, xtd); + + return count; +} + +static ssize_t show_ttocf(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "TTOCF", + ttcan_get_ttocf(priv->ttcan)); +} + +static ssize_t store_ttocf(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + unsigned int evtp, ecc, egtf, awl, eecs, irto, ldsdl, tm, gen, om; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "TTCOF is protected, device is running\n"); + return -EBUSY; + } + /* usage: evtp=0/1 ecc=0/1 egtf=0/1 awl=0..255 eecs=0 irto=0..127 + * ldsdl=0..7 tm=0/1 gen=0/1 om=0..4 */ + if (sscanf(buf, + "evtp=%u ecc=%u egtf=%u awl=%u eecs=%u irto=%u ldsdl=%u tm=%u gen=%u om=%u", + &evtp, &ecc, &egtf, 
&awl, &eecs, &irto, &ldsdl, + &tm, &gen, &om) != 10) { + dev_err(dev, "Invalid TTOCF\n"); + pr_err("usage: evtp=0/1 ecc=0/1 egtf=0/1 awl=0..255 eecs=0 irto=0..127 ldsdl=0..7 tm=0/1 gen=0/1 om=0..4\n"); + return -EINVAL; + } + + ttcan_set_tt_config(priv->ttcan, evtp, ecc, egtf, + awl, eecs, irto, ldsdl, tm, gen, om); + + return count; +} + +static ssize_t show_ttmlm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "TTMLM", + ttcan_get_ttmlm(priv->ttcan)); +} + +static ssize_t store_ttmlm(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + unsigned int entt, txew, css, ccm; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "TTMLM is protected, device is running\n"); + return -EBUSY; + } + /* usage: entt=0...4095 txew=0..15 css=0..2 ccm=0..63(2*pow(n)-1) */ + if (sscanf(buf, "entt=%u txew=%u css=%u ccm=%u", + &entt, &txew, &css, &ccm) != 4) { + dev_err(dev, "Invalid TTMLM\n"); + pr_err("usage: entt=0...4095 txew=0..15 css=0..2 ccm=0..63(2*pow(n)-1)\n"); + return -EINVAL; + } + + ttcan_set_matrix_limits(priv->ttcan, entt, txew, css, ccm); + + return count; +} + +static ssize_t show_tttmc(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n0x%x\n", "TTTMC", + ttcan_get_tttmc(priv->ttcan)); +} + +static ssize_t store_tttmc(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + u32 tme, tttmc; + + if (ndev->flags & IFF_UP) { + dev_err(dev, "TTTMC is protected, device is running\n"); + return -EBUSY; + } + /* usage: tme=number of elements */ + if (sscanf(buf, "tme=%u", &tme) != 1) { + 
dev_err(dev, "Invalid TTTMC\n"); + pr_err("usage: tme=0..64 (Num Elements)\n"); + return -EINVAL; + } + + if (tme > 64) + tme = 64; + + tttmc = ttcan_get_tttmc(priv->ttcan); + tttmc &= ~MTT_TTTMC_TME_MASK; + tttmc |= (tme << MTT_TTTMC_TME_SHIFT) & + MTT_TTTMC_TME_MASK; + + ttcan_set_tttmc(priv->ttcan, tttmc); + + return count; +} + +static ssize_t show_cccr_txbar(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + unsigned int init = 0; + + init = ttcan_get_cccr(priv->ttcan) & 0x1; + return sprintf(buf, "CCCR.INIT %s\n", init ? "set" : "reset"); +} + +static ssize_t store_cccr_txbar(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + char str[32]; + int txbar = 0; + + if ((sscanf(buf, "%s txbar=%d", str, &txbar) != 2) || (txbar > 32)) { + dev_err(dev, "Invalid String or txbar\n"); + pr_err("usage: reset/set txbar=0..32 (Num Elements)\n"); + return -EINVAL; + } + + /* usage: set/reset */ + if (strcmp("set", str) == 0) + ttcan_set_config_change_enable(priv->ttcan); + else if (strcmp("reset", str) == 0) + ttcan_reset_config_change_enable(priv->ttcan); + else { + dev_err(dev, "Invalid String\n"); + pr_err("valid strings: set/reset\n"); + return -EINVAL; + } + + txbar = (1UL << txbar) - 1; + ttcan_set_txbar(priv->ttcan, txbar); + + return count; +} + +static ssize_t show_txbar(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "Not implemented\n"); +} + +static ssize_t store_txbar(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + unsigned int txbar = 0; + + /* usage: txbar=1...32*/ + if ((sscanf(buf, "txbar=%u", &txbar) != 1) || (txbar > 32)) { + dev_err(dev, "Invalid TXBAR\n"); + pr_err("usage: txbar=1..32\n"); + return -EINVAL; + } + + txbar = (1UL 
<< txbar) - 1; + ttcan_set_txbar(priv->ttcan, txbar); + + return count; +} + +static ssize_t show_trigger_mem(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + int cur = priv->ttcan->tt_mem_elements; + ssize_t ret, total = 0; + int i = 0; + + ret = sprintf(buf, "%s\n", "Trigger Memory Elements"); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + while (cur--) { + ret = sprintf(buf+total, "%d. 0x%llx\n", i, + ttcan_get_trigger_mem(priv->ttcan, i)); + if (ret < 0) { + pr_err("sprintf() failed at line %d\n", __LINE__); + return -ENOMEM; + } + + total += ret; + i++; + } + return total; +} + +static ssize_t store_trigger_mem(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned int tm, cc, tmin, tmex, type; + unsigned int ftype, mnr; + int idx = -1, cur; + int ret; + + struct net_device *ndev = to_net_dev(dev); + struct mttcan_priv *priv = netdev_priv(ndev); + + if (ndev->flags & IFF_UP) { + dev_err(dev, "Trigger Mem is protected, device is running\n"); + return -EBUSY; + } + /* usage: tm=0..FFFF cc=0..127 tmin=0/1 tmex=0/1 type=0..10 + ftype=0/1 mnr=0..31 idx=%u */ + ret = sscanf(buf, + "tm=%X cc=%u tmin=%u tmex=%u type=%u ftype=%u mnr=%u idx=%u", + &tm, &cc, &tmin, &tmex, &type, &ftype, &mnr, &idx); + if (ret < 7) { + /* Not passing index is allowed */ + dev_err(dev, "Invalid Trigger Element\n"); + pr_err("tm=0..0xFFFF cc=0..127 tmin=0/1 tmex=0/1 type=0..10 ftype=0/1 mnr=0..31 idx=i\n"); + return -EINVAL; + } + + cur = priv->ttcan->tt_mem_elements; + + if ((idx > cur) || (idx == -1)) + if (cur >= priv->ttcan->mram_cfg[MRAM_TMC].num) { + dev_err(dev, "Max Invalid Trigger mem Index\n"); + return -ENOSPC; + } + + if (ret == 8) { + if (idx > cur) { + dev_err(dev, "Invalid Trigger Mem Index\n"); + return -EINVAL; + } + + /* array access based on user provided index/data */ + 
spec_bar(); + ttcan_set_trigger_mem(priv->ttcan, priv->tmc_shadow, idx, tm, + cc, tmin, tmex, type, ftype, mnr); + + if (idx == cur) + priv->ttcan->tt_mem_elements++; + } else { + /* array access based on user provided index/data */ + spec_bar(); + ttcan_set_trigger_mem(priv->ttcan, priv->tmc_shadow, cur, tm, + cc, tmin, tmex, type, ftype, mnr); + priv->ttcan->tt_mem_elements++; + } + return count; +} + +static ssize_t show_tdc_offset(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "tdc_offset=0x%x, DBTP.tdc=%d\n", + priv->ttcan->tdc_offset, priv->ttcan->tdc); +} + +static ssize_t store_tdc_offset(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct mttcan_priv *priv = netdev_priv(to_net_dev(dev)); + unsigned int tdc_offset = 0; + + if ((sscanf(buf, "%X", &tdc_offset) != 1)) { + dev_err(dev, "wrong tdc_offset\n"); + return -EINVAL; + } + + if (tdc_offset != 0) + priv->ttcan->tdc = 1; + else + priv->ttcan->tdc = 0; + + priv->ttcan->tdc_offset = tdc_offset; + + return count; +} + +static DEVICE_ATTR(std_filter, S_IRUGO | S_IWUSR, show_std_fltr, + store_std_fltr); +static DEVICE_ATTR(xtd_filter, S_IRUGO | S_IWUSR, show_xtd_fltr, + store_xtd_fltr); +static DEVICE_ATTR(gfc_filter, S_IRUGO | S_IWUSR, show_gfc_fltr, + store_gfc_fltr); +static DEVICE_ATTR(xidam, S_IRUGO | S_IWUSR, show_xidam, store_xidam); +static DEVICE_ATTR(tx_cancel, S_IRUGO | S_IWUSR, show_tx_cancel, + store_tx_cancel); +static DEVICE_ATTR(ttrmc, S_IRUGO | S_IWUSR, show_ttrmc, store_ttrmc); +static DEVICE_ATTR(ttocf, S_IRUGO | S_IWUSR, show_ttocf, store_ttocf); +static DEVICE_ATTR(ttmlm, S_IRUGO | S_IWUSR, show_ttmlm, store_ttmlm); +static DEVICE_ATTR(tttmc, S_IRUGO | S_IWUSR, show_tttmc, store_tttmc); +static DEVICE_ATTR(txbar, S_IRUGO | S_IWUSR, show_txbar, store_txbar); +static DEVICE_ATTR(cccr_init_txbar, S_IRUGO | S_IWUSR, show_cccr_txbar, + 
store_cccr_txbar); +static DEVICE_ATTR(trigger_mem, S_IRUGO | S_IWUSR, show_trigger_mem, + store_trigger_mem); +static DEVICE_ATTR(tdc_offset, S_IRUGO | S_IWUSR, show_tdc_offset, + store_tdc_offset); + +static struct attribute *mttcan_attr[] = { + &dev_attr_std_filter.attr, + &dev_attr_xtd_filter.attr, + &dev_attr_gfc_filter.attr, + &dev_attr_xidam.attr, + &dev_attr_tx_cancel.attr, + &dev_attr_ttrmc.attr, + &dev_attr_ttocf.attr, + &dev_attr_ttmlm.attr, + &dev_attr_tttmc.attr, + &dev_attr_txbar.attr, + &dev_attr_cccr_init_txbar.attr, + &dev_attr_trigger_mem.attr, + &dev_attr_tdc_offset.attr, + NULL +}; + +static const struct attribute_group mttcan_attr_group = { + .attrs = mttcan_attr, +}; + +int mttcan_create_sys_files(struct device *dev) +{ + return sysfs_create_group(&dev->kobj, &mttcan_attr_group); +} + +void mttcan_delete_sys_files(struct device *dev) +{ + sysfs_remove_group(&dev->kobj, &mttcan_attr_group); +}