clk-rcg2.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
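
/*
 * Illustrative sketch (added commentary, not in the original driver):
 * every configuration change below follows the same handshake. First
 * program CFG (and M/N/D where applicable), then pulse CMD_UPDATE via
 * update_config() and poll until the hardware clears it:
 *
 *	regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
 *	ret = update_config(rcg);	<-- latches CFG into the running RCG
 *
 * update_config() gives the RCG at most ~500 us to latch the new setting
 * before warning and returning -EBUSY.
 */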

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div) {
		rate *= 2;
		rate /= hid_div + 1;
	}

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}

static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
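
/*
 * Worked example (added commentary; table values are hypothetical): with
 * a frequency table of { 19.2 MHz, 100 MHz, 200 MHz }, a 150 MHz request
 * resolves to 200 MHz under CEIL (qcom_find_freq() returns the lowest
 * entry at or above the request) and to 100 MHz under FLOOR
 * (qcom_find_freq_floor() returns the highest entry at or below it).
 */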

static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
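
/*
 * Worked example of the decode above (added commentary; register values
 * are hypothetical): with mnd_width = 8 (mask = 0xff), M = 1 and N = 4,
 * the N register holds ~(N - M) = ~3 = 0xfc, so n = (~0xfc + 1) & 0xff
 * = 4. A D register programmed with not2d = ~4 & 0xff = 0xfb decodes to
 * 2d = 4, i.e. d = DIV_ROUND_CLOSEST(4, 2) = 2, and the reported duty
 * cycle is d/n = 2/4 = 50 %.
 */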

static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/* Check bit widths of 2d. If D is too big reduce duty cycle. */
	if (d > mask)
		d = mask;

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = {	/* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};

static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
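
/*
 * Worked example (added commentary): with a 675 MHz source (270M link
 * rate) and a requested 268.5 MHz pixel clock, the { 107, 269 } entry
 * gives request = 268500000 * 269 / 107 ~= 675014019, which is within
 * the +/-100 kHz delta of src_rate, so the RCG is programmed with
 * f.m = 107 and f.n = 269.
 */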

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
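
/*
 * Worked example (added commentary; rates are hypothetical): for a
 * 750 MHz parent and a requested 187.5 MHz byte clock,
 * DIV_ROUND_UP(2 * 750 MHz, 187.5 MHz) - 1 = 7, and the stored field
 * value 7 encodes a divide-by-4 ((7 + 1) / 2), so
 * calc_rate(750 MHz, 0, 0, 0, 7) = 750 MHz * 2 / 8 = 187.5 MHz.
 */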

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
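
/*
 * Worked example (added commentary; rates are hypothetical): for a
 * requested 200 MHz pixel clock, the { 2, 3 } entry asks the parent for
 * 200 MHz * 3 / 2 = 300 MHz. If clk_hw_round_rate() can deliver within
 * the +/-100 kHz delta, the final rate is src_rate * 2 / 3.
 */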

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
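
/*
 * Added commentary on the ping-pong scheme implemented below: the GFX3D
 * RCG alternates between two reprogrammable PLLs (p1/p2) and one fixed
 * PLL (p0), so a new rate is always programmed into the PLL that is
 * currently idle before the mux switches over; the PLL feeding the GPU
 * is never reprogrammed while in use.
 */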

static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
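
/*
 * Added commentary: the helpers above force the root on via CMD_ROOT_EN
 * around reconfiguration. For the shared RCGs below this ensures the
 * CMD_UPDATE handshake can complete even when all downstream branch
 * clocks are gated; the force-enable bit is dropped again afterwards.
 */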

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the M, N and D registers, cache
	 * the CFG value in parked_cfg and don't hit the update bit of CMD
	 * register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
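
/*
 * Lifecycle sketch for shared RCGs (added commentary): while enabled,
 * set_rate reprograms the hardware through the force-enable helpers.
 * After clk_rcg2_shared_disable() below parks the RCG on safe_src_index,
 * parked_cfg shadows the CFG register, so set_rate/set_parent only touch
 * the cache; the next clk_rcg2_shared_enable() writes parked_cfg back
 * and latches it with update_config().
 */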
1072 */ 1073 clk_rcg2_set_force_enable(hw); 1074 1075 regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, 1076 rcg->safe_src_index << CFG_SRC_SEL_SHIFT); 1077 1078 update_config(rcg); 1079 1080 clk_rcg2_clear_force_enable(hw); 1081} 1082 1083static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw) 1084{ 1085 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1086 1087 /* If the shared rcg is parked use the cached cfg instead */ 1088 if (!clk_hw_is_enabled(hw)) 1089 return __clk_rcg2_get_parent(hw, rcg->parked_cfg); 1090 1091 return clk_rcg2_get_parent(hw); 1092} 1093 1094static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index) 1095{ 1096 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1097 1098 /* If the shared rcg is parked only update the cached cfg */ 1099 if (!clk_hw_is_enabled(hw)) { 1100 rcg->parked_cfg &= ~CFG_SRC_SEL_MASK; 1101 rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; 1102 1103 return 0; 1104 } 1105 1106 return clk_rcg2_set_parent(hw, index); 1107} 1108 1109static unsigned long 1110clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) 1111{ 1112 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1113 1114 /* If the shared rcg is parked use the cached cfg instead */ 1115 if (!clk_hw_is_enabled(hw)) 1116 return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg); 1117 1118 return clk_rcg2_recalc_rate(hw, parent_rate); 1119} 1120 1121const struct clk_ops clk_rcg2_shared_ops = { 1122 .enable = clk_rcg2_shared_enable, 1123 .disable = clk_rcg2_shared_disable, 1124 .get_parent = clk_rcg2_shared_get_parent, 1125 .set_parent = clk_rcg2_shared_set_parent, 1126 .recalc_rate = clk_rcg2_shared_recalc_rate, 1127 .determine_rate = clk_rcg2_determine_rate, 1128 .set_rate = clk_rcg2_shared_set_rate, 1129 .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, 1130}; 1131EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); 1132 1133/* Common APIs to be used for DFS based RCGR */ 1134static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, 1135 struct freq_tbl *f) 1136{ 1137 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1138 struct clk_hw *p; 1139 unsigned long prate = 0; 1140 u32 val, mask, cfg, mode, src; 1141 int i, num_parents; 1142 1143 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); 1144 1145 mask = BIT(rcg->hid_width) - 1; 1146 f->pre_div = 1; 1147 if (cfg & mask) 1148 f->pre_div = cfg & mask; 1149 1150 src = cfg & CFG_SRC_SEL_MASK; 1151 src >>= CFG_SRC_SEL_SHIFT; 1152 1153 num_parents = clk_hw_get_num_parents(hw); 1154 for (i = 0; i < num_parents; i++) { 1155 if (src == rcg->parent_map[i].cfg) { 1156 f->src = rcg->parent_map[i].src; 1157 p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); 1158 prate = clk_hw_get_rate(p); 1159 } 1160 } 1161 1162 mode = cfg & CFG_MODE_MASK; 1163 mode >>= CFG_MODE_SHIFT; 1164 if (mode) { 1165 mask = BIT(rcg->mnd_width) - 1; 1166 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l), 1167 &val); 1168 val &= mask; 1169 f->m = val; 1170 1171 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l), 1172 &val); 1173 val = ~val; 1174 val &= mask; 1175 val += f->m; 1176 f->n = val; 1177 } 1178 1179 f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div); 1180} 1181 1182static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg) 1183{ 1184 struct freq_tbl *freq_tbl; 1185 int i; 1186 1187 /* Allocate space for 1 extra since table is NULL terminated */ 1188 freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL); 1189 if (!freq_tbl) 1190 return -ENOMEM; 1191 rcg->freq_tbl = 

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
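
/*
 * Worked example (added commentary): for the DP ops below, an 810 MHz
 * link clock and a requested 148.5 MHz pixel clock make
 * rational_best_approximation() return den = 60, num = 11, since
 * 810 MHz * 11 / 60 = 148.5 MHz; the RCG is then programmed with
 * f.m = 11, f.n = 60 (matching the { 11, 60 } entry in frac_table_810m).
 */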

static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}

const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);