cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ar9003_wow.c (12956B)


/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath9k.h"
#include "reg.h"
#include "reg_wow.h"
#include "hw-ops.h"

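/*
 * Enable the station power-save bit in AR_STA_ID1. When MCI (BT/WLAN
 * coexistence) is enabled, this is only done while MCI reports that
 * its WLAN power-save state is disabled.
 */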
static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
{
	if (!ath9k_hw_mci_is_enabled(ah))
		goto set;
	/*
	 * If MCI is being used, set PWR_SAV only when MCI's
	 * PS state is disabled.
	 */
	if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
		return;
set:
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
}

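/*
 * Put the MAC into its WoW sleep state: enable station power save,
 * stop Rx DMA (bailing out if it does not go idle within the timeout),
 * disable the TSF2 generic timer on chips where it is not in use,
 * keep the RTC awake when MCI is enabled, and arm wake-on-interrupt.
 */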
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ath9k_hw_set_sta_powersave(ah);

	/* set rx disable bit */
	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
		ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
		return;
	}

	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
		if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
			REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
	} else if (AR_SREV_9485(ah)) {
		if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
		      AR_GEN_TIMERS2_MODE_ENABLE_MASK))
			REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
	}

	if (ath9k_hw_mci_is_enabled(ah))
		REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);

	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}

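/*
 * Program the keep-alive frame that the hardware transmits on its own
 * while the host sleeps: a minimal TX descriptor plus the frame data
 * words (frame control, duration and the current AP/STA MAC addresses)
 * are written into the WoW keep-alive transmit buffer registers.
 */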
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
	u32 ctl[13] = {0};
	u32 data_word[KAL_NUM_DATA_WORDS];
	u8 i;
	u32 wow_ka_data_word0;

	memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
	memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);

	/* set the transmit buffer */
	ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
	ctl[1] = 0;
	ctl[4] = 0;
	ctl[7] = (ah->txchainmask) << 2;
	ctl[2] = 0xf << 16; /* tx_tries 0 */

	if (IS_CHAN_2GHZ(ah->curchan))
		ctl[3] = 0x1b;	/* CCK_1M */
	else
		ctl[3] = 0xb;	/* OFDM_6M */

	for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);

	data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
		       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
	data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
	data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
		       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
	data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
		       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
	data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
	data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);

	if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
		/*
		 * AR9462 2.0 and AR9565 have an extra descriptor word
		 * (time based discard) compared to other chips.
		 */
		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
		wow_ka_data_word0 = AR_WOW_TXBUF(13);
	} else {
		wow_ka_data_word0 = AR_WOW_TXBUF(12);
	}

	for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
		REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
}

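/*
 * Install one wake-on-wireless pattern/mask pair. Patterns below
 * MAX_NUM_PATTERN_LEGACY are enabled through AR_WOW_PATTERN, the rest
 * through AR_MAC_PCU_WOW4, and the pattern length is stored in the
 * matching AR_WOW_LENGTH1..4 register. Returns 0 on success or
 * -ENOSPC when no pattern slot is left.
 *
 * Illustrative use (hypothetical values, not taken from this file):
 * a caller would typically loop over the patterns handed down from
 * userspace and invoke
 *
 *	ath9k_hw_wow_apply_pattern(ah, pattern, mask, i, pattern_len);
 *
 * once per pattern index i.
 */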
int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
			       u8 *user_mask, int pattern_count,
			       int pattern_len)
{
	int i;
	u32 pattern_val, mask_val;
	u32 set, clr;

	if (pattern_count >= ah->wow.max_patterns)
		return -ENOSPC;

	if (pattern_count < MAX_NUM_PATTERN_LEGACY)
		REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
	else
		REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));

	for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
		memcpy(&pattern_val, user_pattern, 4);
		REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
			  pattern_val);
		user_pattern += 4;
	}

	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
		memcpy(&mask_val, user_mask, 4);
		REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
		user_mask += 4;
	}

	if (pattern_count < MAX_NUM_PATTERN_LEGACY)
		ah->wow.wow_event_mask |=
			BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
	else
		ah->wow.wow_event_mask2 |=
			BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);

	if (pattern_count < 4) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		       AR_WOW_LEN1_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH1_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
	} else if (pattern_count < 8) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		       AR_WOW_LEN2_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH2_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
	} else if (pattern_count < 12) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		       AR_WOW_LEN3_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH3_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
	} else if (pattern_count < MAX_NUM_PATTERN) {
		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
		       AR_WOW_LEN4_SHIFT(pattern_count);
		clr = AR_WOW_LENGTH4_MASK(pattern_count);
		REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);

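/*
 * Handle a WoW wakeup: decode the wakeup reason from AR_WOW_PATTERN
 * and AR_MAC_PCU_WOW4, clear the event registers, restore the beacon
 * RSSI threshold and the PCI-E reset wiring, and return a bitmap of
 * AH_WOW_* wakeup reasons restricted to the events that were enabled.
 */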
u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
{
	u32 wow_status = 0;
	u32 val = 0, rval;

	/*
	 * Read the WoW status register to determine
	 * the wakeup reason.
	 */
	rval = REG_READ(ah, AR_WOW_PATTERN);
	val = AR_WOW_STATUS(rval);

	/*
	 * Mask only the WoW events that we have enabled. Sometimes
	 * we have spurious WoW events from the AR_WOW_PATTERN
	 * register. This mask filters them out.
	 */
	val &= ah->wow.wow_event_mask;

	if (val) {
		if (val & AR_WOW_MAGIC_PAT_FOUND)
			wow_status |= AH_WOW_MAGIC_PATTERN_EN;
		if (AR_WOW_PATTERN_FOUND(val))
			wow_status |= AH_WOW_USER_PATTERN_EN;
		if (val & AR_WOW_KEEP_ALIVE_FAIL)
			wow_status |= AH_WOW_LINK_CHANGE;
		if (val & AR_WOW_BEACON_FAIL)
			wow_status |= AH_WOW_BEACON_MISS;
	}

	rval = REG_READ(ah, AR_MAC_PCU_WOW4);
	val = AR_WOW_STATUS2(rval);
	val &= ah->wow.wow_event_mask2;

	if (val) {
		if (AR_WOW2_PATTERN_FOUND(val))
			wow_status |= AH_WOW_USER_PATTERN_EN;
	}

	/*
	 * Set and clear WOW_PME_CLEAR for the chip to generate the
	 * next WoW signal.
	 * Should D3 be disabled before accessing other registers?
	 */

	/* Do we need to check the bit value 0x01000000 (7-10)? */
	REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
		AR_PMCTRL_PWR_STATE_D1D3);

	/*
	 * Clear all events.
	 */
	REG_WRITE(ah, AR_WOW_PATTERN,
		  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
	REG_WRITE(ah, AR_MAC_PCU_WOW4,
		  AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));

	/*
	 * Restore the beacon threshold to its initial value.
	 */
	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

	/*
	 * Restore the way the PCI-E reset, Power-On-Reset and external
	 * PCIE_POR_SHORT pins are tied to their original values.
	 * Just before WoW sleep, we untie the PCI-E reset from the
	 * chip's Power-On-Reset so that a PCI-E reset from the bus
	 * will not reset the chip.
	 */
	if (ah->is_pciexpress)
		ath9k_hw_configpcipowersave(ah, false);

	if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
		u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);

		if (!(dc & AR_DC_TSF2_ENABLE))
			ath9k_hw_gen_timer_start_tsf2(ah);
	}

	ah->wow.wow_event_mask = 0;
	ah->wow.wow_event_mask2 = 0;

	return wow_status;
}
EXPORT_SYMBOL(ath9k_hw_wow_wakeup);

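/*
 * Prepare the reset workaround register (AR_WA) for WoW sleep; this
 * only applies to PCI Express devices.
 */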
static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
{
	u32 wa_reg;

	if (!ah->is_pciexpress)
		return;

	/*
	 * We need to untie the internal POR (power-on-reset)
	 * from the external PCI-E reset. We also need to tie
	 * the PCI-E Phy reset to the PCI-E reset.
	 */
	wa_reg = REG_READ(ah, AR_WA);
	wa_reg &= ~AR_WA_UNTIE_RESET_EN;
	wa_reg |= AR_WA_RESET_EN;
	wa_reg |= AR_WA_POR_SHORT;

	REG_WRITE(ah, AR_WA, wa_reg);
}

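/*
 * Arm the hardware for wake-on-wireless and put it to sleep. The
 * pattern_enable bitmap of AH_WOW_* flags selects the wakeup sources
 * (magic packet, user patterns, beacon miss, keep-alive failure); the
 * corresponding AR_WOW_* status bits are accumulated in
 * ah->wow.wow_event_mask for ath9k_hw_wow_wakeup() to check later.
 */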
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
{
	u32 wow_event_mask;
	u32 keep_alive, magic_pattern, host_pm_ctrl;

	wow_event_mask = ah->wow.wow_event_mask;

	/*
	 * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
	 *                         space and allow MAC to generate WoW anyway.
	 *
	 * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
	 *
	 * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
	 *                         needs to be set for WoW in PCI mode.
	 *
	 * AR_PMCTRL_WOW_PME_CLR - WoW Clear Signal going to the MAC.
	 *
	 * Set the power states appropriately and enable PME.
	 *
	 * Set and clear WOW_PME_CLEAR for the chip
	 * to generate next wow signal.
	 */
	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
					 AR_PMCTRL_PWR_PM_CTRL_ENA |
					 AR_PMCTRL_AUX_PWR_DET |
					 AR_PMCTRL_WOW_PME_CLR);
	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);

	/*
	 * Random Backoff.
	 *
	 * 31:28 in AR_WOW_PATTERN: Indicates the number of bits used in the
	 *                          contention window. For value N,
	 *                          the random backoff will be selected between
	 *                          0 and (2 ^ N) - 1.
	 */
	REG_SET_BIT(ah, AR_WOW_PATTERN,
		    AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));

	/*
	 * AIFS time, Slot time, Keep Alive count.
	 */
	REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
				      AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
				      AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
	/*
	 * Beacon timeout.
	 */
	if (pattern_enable & AH_WOW_BEACON_MISS)
		REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
	else
		REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);

	/*
	 * Keep alive timeout in ms.
	 */
	if (!pattern_enable)
		REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
	else
		REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);

	/*
	 * Keep alive delay in us.
	 */
	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);

	/*
	 * Create keep alive pattern to respond to beacons.
	 */
	ath9k_wow_create_keep_alive_pattern(ah);

	/*
	 * Configure keep alive register.
	 */
	keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);

	/* Send keep alive timeouts anyway */
	keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;

	if (pattern_enable & AH_WOW_LINK_CHANGE) {
		keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
		wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
	} else {
		keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
	}

	REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);

	/*
	 * We are relying on a bmiss failure, ensure we have
	 * enough threshold to prevent false positives.
	 */
	REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
		      AR_WOW_BMISSTHRESHOLD);

	if (pattern_enable & AH_WOW_BEACON_MISS) {
		wow_event_mask |= AR_WOW_BEACON_FAIL;
		REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
	} else {
		REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
	}

	/*
	 * Enable the magic packet registers.
	 */
	magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
	magic_pattern |= AR_WOW_MAC_INTR_EN;

	if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
		magic_pattern |= AR_WOW_MAGIC_EN;
		wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
	} else {
		magic_pattern &= ~AR_WOW_MAGIC_EN;
	}

	REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);

	/*
	 * Enable pattern matching for packets which are less
	 * than 256 bytes.
	 */
	REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
		  AR_WOW_PATTERN_SUPPORTED);

	/*
	 * Set the power states appropriately and enable PME.
	 */
	host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
	host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
			AR_PMCTRL_HOST_PME_EN |
			AR_PMCTRL_PWR_PM_CTRL_ENA;
	host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;

	if (AR_SREV_9462(ah)) {
		/*
		 * This is needed to prevent the chip waking up
		 * the host within 3-4 seconds with certain
		 * platform/BIOS.
		 */
		host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
		host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
	}

	REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);

	/*
	 * Enable sequence number generation when asleep.
	 */
	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

	/* To bring down WOW power low margin */
	REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));

	ath9k_hw_wow_set_arwr_reg(ah);

	if (ath9k_hw_mci_is_enabled(ah))
		REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);

	/* HW WoW */
	REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));

	ath9k_hw_set_powermode_wow_sleep(ah);
	ah->wow.wow_event_mask = wow_event_mask;
}
EXPORT_SYMBOL(ath9k_hw_wow_enable);