cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cvmx-ipd.h (10740B)


/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 * Interface to the hardware Input Packet Data unit.
 */

#ifndef __CVMX_IPD_H__
#define __CVMX_IPD_H__

#include <asm/octeon/octeon-feature.h>

#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-pip-defs.h>

enum cvmx_ipd_mode {
   CVMX_IPD_OPC_MODE_STT = 0LL,	  /* All blocks DRAM, not cached in L2 */
   CVMX_IPD_OPC_MODE_STF = 1LL,	  /* All blocks into L2 */
   CVMX_IPD_OPC_MODE_STF1_STT = 2LL,   /* 1st block L2, rest DRAM */
   CVMX_IPD_OPC_MODE_STF2_STT = 3LL    /* 1st, 2nd blocks L2, rest DRAM */
};

#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif

/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;

typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;

/**
 * Configure IPD
 *
 * @mbuff_size: Packet buffer size in 8-byte words
 * @first_mbuff_skip:
 *		     Number of 8-byte words to skip in the first buffer
 * @not_first_mbuff_skip:
 *		     Number of 8-byte words to skip in each following buffer
 * @first_back: Must be the same as first_mbuff_skip / 128
 * @second_back:
 *		     Must be the same as not_first_mbuff_skip / 128
 * @wqe_fpa_pool:
 *		     FPA pool to get work entries from
 * @cache_mode:
 *		     How packet data is written to L2/DRAM (enum cvmx_ipd_mode)
 * @back_pres_enable_flag:
 *		     Enable or disable port back pressure
 */
static inline void cvmx_ipd_config(uint64_t mbuff_size,
				   uint64_t first_mbuff_skip,
				   uint64_t not_first_mbuff_skip,
				   uint64_t first_back,
				   uint64_t second_back,
				   uint64_t wqe_fpa_pool,
				   enum cvmx_ipd_mode cache_mode,
				   uint64_t back_pres_enable_flag)
{
	cvmx_ipd_mbuff_first_skip_t first_skip;
	cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
	union cvmx_ipd_packet_mbuff_size size;
	cvmx_ipd_first_next_ptr_back_t first_back_struct;
	cvmx_ipd_second_next_ptr_back_t second_back_struct;
	union cvmx_ipd_wqe_fpa_queue wqe_pool;
	union cvmx_ipd_ctl_status ipd_ctl_reg;

	first_skip.u64 = 0;
	first_skip.s.skip_sz = first_mbuff_skip;
	cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);

	not_first_skip.u64 = 0;
	not_first_skip.s.skip_sz = not_first_mbuff_skip;
	cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);

	size.u64 = 0;
	size.s.mb_size = mbuff_size;
	cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);

	first_back_struct.u64 = 0;
	first_back_struct.s.back = first_back;
	cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);

	second_back_struct.u64 = 0;
	second_back_struct.s.back = second_back;
	cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);

	wqe_pool.u64 = 0;
	wqe_pool.s.wqe_pool = wqe_fpa_pool;
	cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);

	ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	ipd_ctl_reg.s.opc_mode = cache_mode;
	ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);

	/* Note: the example RED code that used to be here has been moved to
	   cvmx_helper_setup_red */
}
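
/*
 * Illustrative usage sketch (not part of the original SDK header): one way
 * cvmx_ipd_config() might be called.  Every value below is a placeholder --
 * real code derives buffer sizes, skips and the WQE pool number from its
 * FPA and board configuration -- but it shows the units: sizes and skips
 * are given in 8-byte words, and the two "back" arguments follow the /128
 * rule from the comment block above.
 */
#if 0
	uint64_t first_skip = 184 / 8;		/* placeholder skip, in 8-byte words */
	uint64_t not_first_skip = 0;

	cvmx_ipd_config(2048 / 8,		/* 2048-byte packet buffers, in 8-byte words */
			first_skip,
			not_first_skip,
			first_skip / 128,	/* first_back */
			not_first_skip / 128,	/* second_back */
			1,			/* placeholder WQE FPA pool number */
			CVMX_IPD_OPC_MODE_STT,	/* packet data kept out of L2 */
			1);			/* enable port back pressure */
#endif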

/**
 * Enable IPD
 */
static inline void cvmx_ipd_enable(void)
{
	union cvmx_ipd_ctl_status ipd_reg;
	ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	if (ipd_reg.s.ipd_en) {
		cvmx_dprintf("Warning: Enabling IPD when IPD already enabled.\n");
	}
	ipd_reg.s.ipd_en = 1;
#if CVMX_ENABLE_LEN_M8_FIX
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		ipd_reg.s.len_m8 = TRUE;
#endif
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}

/**
 * Disable IPD
 */
static inline void cvmx_ipd_disable(void)
{
	union cvmx_ipd_ctl_status ipd_reg;
	ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	ipd_reg.s.ipd_en = 0;
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
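
/*
 * Illustrative sketch (not part of the original header): the calls above are
 * normally used in this order -- configure the IPD while it is still
 * disabled, enable it to start accepting packets, and disable it again
 * before reconfiguring or tearing the unit down.
 */
#if 0
	/* cvmx_ipd_config(...);  -- as in the sketch above, before enabling */
	cvmx_ipd_enable();
	/* ... packet reception runs ... */
	cvmx_ipd_disable();
#endif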

/**
 * Helper for cvmx_fpa_shutdown_pool: returns the packet and work queue
 * entry pointers still held inside the IPD to their FPA pools, then
 * resets the IPD and PIP.
 */
static inline void cvmx_ipd_free_ptr(void)
{
	/* Only CN38XXp{1,2} cannot read pointer out of the IPD */
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
	    && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		int no_wptr = 0;
		union cvmx_ipd_ptr_count ipd_ptr_count;
		ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

		/* Handle Work Queue Entry in cn56xx and cn52xx */
		if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			if (ipd_ctl_status.s.no_wptr)
				no_wptr = 1;
		}

		/* Free the prefetched WQE */
		if (ipd_ptr_count.s.wqev_cnt) {
			union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
			ipd_wqe_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
			if (no_wptr)
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_wqe_ptr_valid.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			else
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_wqe_ptr_valid.s.
					       ptr << 7), CVMX_FPA_WQE_POOL, 0);
		}

		/* Free all WQE in the fifo */
		if (ipd_ptr_count.s.wqe_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    ipd_pwp_ptr_fifo_ctl.s.max_cnts +
				    (ipd_pwp_ptr_fifo_ctl.s.wraddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				if (no_wptr)
					cvmx_fpa_free(cvmx_phys_to_ptr
						      ((uint64_t)
						       ipd_pwp_ptr_fifo_ctl.s.
						       ptr << 7),
						      CVMX_FPA_PACKET_POOL, 0);
				else
					cvmx_fpa_free(cvmx_phys_to_ptr
						      ((uint64_t)
						       ipd_pwp_ptr_fifo_ctl.s.
						       ptr << 7),
						      CVMX_FPA_WQE_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Free the prefetched packet */
		if (ipd_ptr_count.s.pktv_cnt) {
			union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
			ipd_pkt_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
			cvmx_fpa_free(cvmx_phys_to_ptr
				      (ipd_pkt_ptr_valid.s.ptr << 7),
				      CVMX_FPA_PACKET_POOL, 0);
		}

		/* Free the per port prefetched packets */
		if (1) {
			int i;
			union cvmx_ipd_prc_port_ptr_fifo_ctl
			    ipd_prc_port_ptr_fifo_ctl;
			ipd_prc_port_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

			for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
			     i++) {
				ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_port_ptr_fifo_ctl.s.raddr =
				    i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
					       ipd_prc_port_ptr_fifo_ctl.u64);
				ipd_prc_port_ptr_fifo_ctl.u64 =
				    cvmx_read_csr
				    (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t)
					       ipd_prc_port_ptr_fifo_ctl.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			}
			ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
				       ipd_prc_port_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the holding fifo */
		if (ipd_ptr_count.s.pfif_cnt) {
			int i;
			union cvmx_ipd_prc_hold_ptr_fifo_ctl
			    ipd_prc_hold_ptr_fifo_ctl;

			ipd_prc_hold_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
				ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_hold_ptr_fifo_ctl.s.raddr =
				    (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
				     i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
					       ipd_prc_hold_ptr_fifo_ctl.u64);
				ipd_prc_hold_ptr_fifo_ctl.u64 =
				    cvmx_read_csr
				    (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t)
					       ipd_prc_hold_ptr_fifo_ctl.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			}
			ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
				       ipd_prc_hold_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the fifo */
		if (ipd_ptr_count.s.pkt_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    (ipd_pwp_ptr_fifo_ctl.s.praddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_pwp_ptr_fifo_ctl.
					       s.ptr << 7),
					      CVMX_FPA_PACKET_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Reset the IPD to get all buffers out of it */
		{
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			ipd_ctl_status.s.reset = 1;
			cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
		}

		/* Reset the PIP */
		{
			union cvmx_pip_sft_rst pip_sft_rst;
			pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
			pip_sft_rst.s.rst = 1;
			cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
		}
	}
}
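
/*
 * Illustrative teardown sketch (not part of the original header): per the
 * comment above, cvmx_ipd_free_ptr() supports FPA pool shutdown.  A
 * plausible order is to stop the IPD first and then drain the pointers it
 * still holds, so the packet and WQE pools can be reclaimed afterwards
 * without leaking the prefetched buffers.
 */
#if 0
	cvmx_ipd_disable();	/* stop accepting new packets */
	cvmx_ipd_free_ptr();	/* return buffers/WQEs still cached in the IPD */
	/* ... FPA packet and WQE pools may now be shut down ... */
#endif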

#endif /*  __CVMX_IPD_H__ */