sorg94.c (8588B)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "ior.h"

#include <subdev/timer.h>

void
g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
}

void
g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
		     u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 |
						       VTUf << 16 |
						       VTUi <<  8);
}

void
g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
}

void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp.lanes[ln] * 8;
	u32 data[3];

	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);
	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);
}

void
g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	u32 data;

	switch (pattern) {
	case 0: data = 0x00001000; break;
	case 1: data = 0x01000000; break;
	case 2: data = 0x02000000; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
}

void
g94_sor_dp_power(struct nvkm_ior *sor, int nr)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 mask = 0, i;

	for (i = 0; i < nr; i++)
		mask |= 1 << sor->func->dp.lanes[i];

	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
			break;
	);
}

int
g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 dpctrl = 0x00000000;
	u32 clksor = 0x00000000;

	dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
	if (sor->dp.ef)
		dpctrl |= 0x00004000;
	if (sor->dp.bw > 0x06)
		clksor |= 0x00040000;

	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
	return 0;
}

/* The war_2/war_3 workarounds are only needed when the SOR is driving TMDS. */
static bool
g94_sor_war_needed(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	if (sor->asy.proto == TMDS) {
		switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
		case 0x00000000:
		case 0x00030000:
			return true;
		default:
			break;
		}
	}
	return false;
}

static void
g94_sor_war_update_sppll1(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_ior *ior;
	bool used = false;
	u32 clksor;

	list_for_each_entry(ior, &disp->ior, head) {
		if (ior->type != SOR)
			continue;

		clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
		switch (clksor & 0x03000000) {
		case 0x02000000:
		case 0x03000000:
			used = true;
			break;
		default:
			break;
		}
	}

	if (used)
		return;

	/* No SOR is using SPPLL1 any longer, power it back down. */
	nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
}

static void
g94_sor_war_3(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 sorpwr;

	if (!g94_sor_war_needed(sor))
		return;

	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pd_pc = (seqctl & 0x00000f00) >> 8;
		u32 pu_pc =  seqctl & 0x0000000f;

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	if (sorpwr & 0x00000001) {
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
	}

	g94_sor_war_update_sppll1(sor->disp);
}

static void
g94_sor_war_2(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	if (!g94_sor_war_needed(sor))
		return;

	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}

void
g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610794 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break;
	case 8: state->proto =   DP; state->link = 1; break;
	case 9: state->proto =   DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x00000003;
	nv50_pior_depth(sor, state, ctrl);
}

static const struct nvkm_ior_func
g94_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
	.war_2 = g94_sor_war_2,
	.war_3 = g94_sor_war_3,
	.dp = {
		.lanes = { 2, 1, 0, 3 },
		.links = g94_sor_dp_links,
		.power = g94_sor_dp_power,
		.pattern = g94_sor_dp_pattern,
		.drive = g94_sor_dp_drive,
		.audio_sym = g94_sor_dp_audio_sym,
		.activesym = g94_sor_dp_activesym,
		.watermark = g94_sor_dp_watermark,
	},
};

int
g94_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&g94_sor, disp, SOR, id);
}

int
g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
	return 4;
}
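
/*
 * A minimal sketch of how the cnt/new pair exported above is typically
 * consumed: a chipset's disp implementation points its SOR hooks at these
 * entry points, roughly along the lines of
 *
 *	.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
 *
 * inside its nvkm disp function table.  The surrounding table layout is
 * assumed here for illustration and varies between kernel versions.
 */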