/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/clk.h
 *
 * Copyright (C) 2004 ARM Limited.
 * Written by Deep Blue Solutions Limited.
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 * to indicate that the rate change will proceed. Drivers must
 * immediately terminate any operations that will be affected by the
 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 * NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 * after PRE_RATE_CHANGE. In this case, all registered notifiers on
 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
 * always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 *
 */
#define PRE_RATE_CHANGE		BIT(0)
#define POST_RATE_CHANGE	BIT(1)
#define ABORT_RATE_CHANGE	BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk. Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk *clk;
	struct srcu_notifier_head notifier_head;
	struct list_head node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future. For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk *clk;
	unsigned long old_rate;
	unsigned long new_rate;
};
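
/*
 * Example (illustrative sketch, not part of the original header; the
 * "foo" names are hypothetical): a rate-change notifier callback receives
 * one of the events above together with a struct clk_notifier_data:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 * The callback is hooked up with clk_notifier_register(clk, &foo_nb),
 * where foo_nb is a struct notifier_block whose .notifier_call is
 * foo_clk_notify().
 */
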
/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks. This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char *id;
	struct clk *clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
			       struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 * for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
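
/*
 * Example (illustrative sketch, not part of the original header): a
 * consumer requesting a 50% duty cycle and reading it back as a
 * percentage. The "clk" variable is assumed to have been obtained with
 * clk_get() or a related call.
 *
 *	int ret = clk_set_duty_cycle(clk, 1, 2);
 *
 *	if (!ret)
 *		pr_debug("duty cycle: %d%%\n",
 *			 clk_get_scaled_duty_cycle(clk, 100));
 */
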
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

#ifdef CONFIG_HAVE_CLK_PREPARE
/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to the power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}

static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return false;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock. The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif
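
/*
 * Example (illustrative sketch, not part of the original header; the
 * "priv" structure is hypothetical). clk_prepare()/clk_unprepare() may
 * sleep while clk_enable()/clk_disable() may not, so a driver typically
 * prepares the clock from process context (e.g. probe) and only toggles
 * enable/disable from its atomic paths:
 *
 *	ret = clk_prepare(priv->clk);
 *	if (ret)
 *		return ret;
 *
 * and later, from an IRQ handler or under a spinlock:
 *
 *	if (!clk_enable(priv->clk)) {
 *		...
 *		clk_disable(priv->clk);
 *	}
 *
 * with a matching clk_unprepare(priv->clk) on the teardown path.
 */
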
#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
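
/*
 * Example (illustrative sketch, not part of the original header; the
 * clock IDs are hypothetical): acquiring two clocks in one call with a
 * clk_bulk_data table, and releasing them together:
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 */
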
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 * producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management: the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer. In this case, instead of returning -ENOENT, the function returns
 * NULL for the given clk. It is assumed all clocks in clk_bulk_data are
 * optional.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or if no clock provider was available for a clk; otherwise
 * returns a negative errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management: the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 * clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 * clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 * producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 * producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * got from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
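
/*
 * Example (illustrative sketch, not part of the original header; the
 * rate value is hypothetical): pinning a producer's rate while a
 * rate-sensitive transfer is in flight. clk_set_rate() is declared
 * further below in this header.
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 19200000);
 *	...
 *	clk_rate_exclusive_put(clk);
 */
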
/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock can not be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 * longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 * This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way. In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);
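
/*
 * Example (illustrative sketch, not part of the original header; the
 * target rate is hypothetical): probing what rate the hardware would
 * actually deliver before committing to it.
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && clk_set_rate(clk, rounded) == 0)
 *		dev_dbg(dev, "running at %lu Hz\n", clk_get_rate(clk));
 */
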
/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 * clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);
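
/*
 * Example (illustrative sketch, not part of the original header; the
 * "pll" clock is hypothetical): verifying a candidate parent before
 * reparenting a mux clock.
 *
 *	if (clk_has_parent(clk, pll)) {
 *		ret = clk_set_parent(clk, pll);
 *		if (ret)
 *			return ret;
 *	}
 */
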
/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer. In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
						     int num_clks,
						     struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev,
						 int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
							   int num_clks,
							   struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
						  struct device_node *np,
						  const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}


static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}


static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}
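
/*
 * Example (illustrative sketch, not part of the original header; the
 * "foo" driver and its private structure are hypothetical): the common
 * probe/remove pattern built on the helpers above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv = ...;
 *		int ret;
 *
 *		priv->clk = devm_clk_get(&pdev->dev, "core");
 *		if (IS_ERR(priv->clk))
 *			return PTR_ERR(priv->clk);
 *
 *		ret = clk_prepare_enable(priv->clk);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 * with clk_disable_unprepare(priv->clk) in the corresponding remove
 * (or error) path.
 */
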
/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 * producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer. In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif