offloading.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/bitops.h>
#include "mvm.h"

void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
                                struct iwl_wowlan_config_cmd *cmd)
{
        int i;

        /*
         * For QoS counters, we store the one to use next, so subtract 0x10
         * since the uCode will add 0x10 *before* using the value while we
         * increment after using the value (i.e. store the next value to use).
         */
        for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_ap_sta->tid_data[i].seq_number;
                seq -= 0x10;
                cmd->qos_seq[i] = cpu_to_le16(seq);
        }
}

int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               bool disable_offloading,
                               bool offload_ns,
                               u32 cmd_flags)
{
        union {
                struct iwl_proto_offload_cmd_v1 v1;
                struct iwl_proto_offload_cmd_v2 v2;
                struct iwl_proto_offload_cmd_v3_small v3s;
                struct iwl_proto_offload_cmd_v4 v4;
        } cmd = {};
        struct iwl_host_cmd hcmd = {
                .id = PROT_OFFLOAD_CONFIG_CMD,
                .flags = cmd_flags,
                .data[0] = &cmd,
                .dataflags[0] = IWL_HCMD_DFL_DUP,
        };
        struct iwl_proto_offload_cmd_common *common;
        u32 enabled = 0, size;
        u32 capa_flags = mvm->fw->ucode_capa.flags;
        int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);

#if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int i;
        /*
         * Skip tentative address when ns offload is enabled to avoid
         * violating RFC4862.
         * Keep tentative address when ns offload is disabled so the NS packets
         * will not be filtered out and will wake up the host.
         */
        bool skip_tentative = offload_ns;

        if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
            capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
                struct iwl_ns_config *nsc;
                struct iwl_targ_addr *addrs;
                int n_nsc, n_addrs;
                int c;
                int num_skipped = 0;

                if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
                        nsc = cmd.v3s.ns_config;
                        n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
                        addrs = cmd.v3s.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
                } else {
                        nsc = cmd.v4.ns_config;
                        n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
                        addrs = cmd.v4.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
                }

                /*
                 * For each address we have (and that will fit) fill a target
                 * address struct and combine for NS offload structs with the
                 * solicited node addresses.
                 */
                for (i = 0, c = 0;
                     i < mvmvif->num_target_ipv6_addrs &&
                     i < n_addrs && c < n_nsc; i++) {
                        struct in6_addr solicited_addr;
                        int j;

                        if (skip_tentative &&
                            test_bit(i, mvmvif->tentative_addrs)) {
                                num_skipped++;
                                continue;
                        }

                        addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
                                                  &solicited_addr);
                        for (j = 0; j < c; j++)
                                if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
                                                  &solicited_addr) == 0)
                                        break;
                        if (j == c)
                                c++;
                        addrs[i].addr = mvmvif->target_ipv6_addrs[i];
                        addrs[i].config_num = cpu_to_le32(j);
                        nsc[j].dest_ipv6_addr = solicited_addr;
                        memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
                }

                if (mvmvif->num_target_ipv6_addrs - num_skipped)
                        enabled |= IWL_D3_PROTO_IPV6_VALID;

                if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
                        cmd.v3s.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
                else
                        cmd.v4.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                bool found = false;

                BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
                             sizeof(mvmvif->target_ipv6_addrs[0]));

                for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
                                    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
                        if (skip_tentative &&
                            test_bit(i, mvmvif->tentative_addrs))
                                continue;

                        memcpy(cmd.v2.target_ipv6_addr[i],
                               &mvmvif->target_ipv6_addrs[i],
                               sizeof(cmd.v2.target_ipv6_addr[i]));

                        found = true;
                }
                if (found) {
                        enabled |= IWL_D3_PROTO_IPV6_VALID;
                        memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
                }
        } else {
                bool found = false;
                BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
                             sizeof(mvmvif->target_ipv6_addrs[0]));

                for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
                                    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
                        if (skip_tentative &&
                            test_bit(i, mvmvif->tentative_addrs))
                                continue;

                        memcpy(cmd.v1.target_ipv6_addr[i],
                               &mvmvif->target_ipv6_addrs[i],
                               sizeof(cmd.v1.target_ipv6_addr[i]));

                        found = true;
                }

                if (found) {
                        enabled |= IWL_D3_PROTO_IPV6_VALID;
                        memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
                }
        }

        if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
                enabled |= IWL_D3_PROTO_OFFLOAD_NS;
#endif
        if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
                common = &cmd.v3s.common;
                size = sizeof(cmd.v3s);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
                common = &cmd.v4.common;
                size = sizeof(cmd.v4);
                if (ver < 4) {
                        /*
                         * This basically uses iwl_proto_offload_cmd_v3_large
                         * which doesn't have the sta_id parameter before the
                         * common part.
                         */
                        size -= sizeof(cmd.v4.sta_id);
                        hcmd.data[0] = common;
                }
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                common = &cmd.v2.common;
                size = sizeof(cmd.v2);
        } else {
                common = &cmd.v1.common;
                size = sizeof(cmd.v1);
        }

        if (vif->bss_conf.arp_addr_cnt) {
                enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
                common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
                memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
        }

        if (!disable_offloading)
                common->enabled = cpu_to_le32(enabled);

        hcmd.len[0] = size;
        return iwl_mvm_send_cmd(mvm, &hcmd);
}
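Note on the 0x10 adjustment in iwl_mvm_set_wowlan_qos_seq(): the 802.11 sequence-control field carries the sequence number in bits 4..15, so one sequence-number step is 0x10. Because the driver stores the *next* value to use while the uCode adds 0x10 *before* using what it is given, handing over seq_number - 0x10 makes the firmware land exactly on the value the driver had reserved. The following is a minimal user-space sketch of that arithmetic only, not driver code; the variable names are invented for illustration.

/* Illustrative user-space sketch, not part of the driver. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Driver convention: tid_data[i].seq_number holds the *next*
         * sequence-control value to use, with the sequence number in
         * bits 4..15 and the fragment number in bits 0..3. */
        uint16_t next_to_use = 0x123 << 4;          /* SN 0x123 -> 0x1230 */
        uint16_t handed_to_fw = next_to_use - 0x10; /* one SN step back */

        /* The uCode adds 0x10 before transmitting, so it uses exactly
         * the sequence number the driver had reserved as "next". */
        printf("stored 0x%04x, firmware uses 0x%04x\n",
               handed_to_fw, (uint16_t)(handed_to_fw + 0x10));
        return 0;
}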
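Note on the NS-offload loop: addrconf_addr_solict_mult() maps a unicast IPv6 address to its solicited-node multicast address (ff02::1:ffXX:XXXX, keeping only the low 24 bits of the unicast address, per RFC 4291). That is why several target addresses can share a single ns_config slot and why the loop deduplicates on the computed solicited address before bumping c. The sketch below reproduces just that mapping in plain user-space C under that assumption; solicited_node_mcast() is a stand-in for illustration, not the kernel helper.

/* Illustrative user-space sketch, not part of the driver. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void solicited_node_mcast(const struct in6_addr *addr,
                                 struct in6_addr *solicited)
{
        memset(solicited, 0, sizeof(*solicited));
        solicited->s6_addr[0] = 0xff;   /* ff02:: link-local scope multicast */
        solicited->s6_addr[1] = 0x02;
        solicited->s6_addr[11] = 0x01;  /* ff02::1:ff00:0/104 prefix */
        solicited->s6_addr[12] = 0xff;
        /* keep the low 24 bits of the unicast address */
        memcpy(&solicited->s6_addr[13], &addr->s6_addr[13], 3);
}

int main(void)
{
        const char *targets[] = { "2001:db8::0123:4567", "fe80::0223:4567" };
        char buf[INET6_ADDRSTRLEN];

        for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
                struct in6_addr uc, sol;

                inet_pton(AF_INET6, targets[i], &uc);
                solicited_node_mcast(&uc, &sol);
                inet_ntop(AF_INET6, &sol, buf, sizeof(buf));
                /* both targets map to ff02::1:ff23:4567, so in the driver's
                 * loop they would share one ns_config entry */
                printf("%s -> %s\n", targets[i], buf);
        }
        return 0;
}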