ipmi_msghandler.c (144723B)
1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * ipmi_msghandler.c 4 * 5 * Incoming and outgoing message routing for an IPMI interface. 6 * 7 * Author: MontaVista Software, Inc. 8 * Corey Minyard <minyard@mvista.com> 9 * source@mvista.com 10 * 11 * Copyright 2002 MontaVista Software Inc. 12 */ 13 14#define pr_fmt(fmt) "IPMI message handler: " fmt 15#define dev_fmt(fmt) pr_fmt(fmt) 16 17#include <linux/module.h> 18#include <linux/errno.h> 19#include <linux/panic_notifier.h> 20#include <linux/poll.h> 21#include <linux/sched.h> 22#include <linux/seq_file.h> 23#include <linux/spinlock.h> 24#include <linux/mutex.h> 25#include <linux/slab.h> 26#include <linux/ipmi.h> 27#include <linux/ipmi_smi.h> 28#include <linux/notifier.h> 29#include <linux/init.h> 30#include <linux/proc_fs.h> 31#include <linux/rcupdate.h> 32#include <linux/interrupt.h> 33#include <linux/moduleparam.h> 34#include <linux/workqueue.h> 35#include <linux/uuid.h> 36#include <linux/nospec.h> 37#include <linux/vmalloc.h> 38#include <linux/delay.h> 39 40#define IPMI_DRIVER_VERSION "39.2" 41 42static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 43static int ipmi_init_msghandler(void); 44static void smi_recv_tasklet(struct tasklet_struct *t); 45static void handle_new_recv_msgs(struct ipmi_smi *intf); 46static void need_waiter(struct ipmi_smi *intf); 47static int handle_one_recv_msg(struct ipmi_smi *intf, 48 struct ipmi_smi_msg *msg); 49 50static bool initialized; 51static bool drvregistered; 52 53/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ 54enum ipmi_panic_event_op { 55 IPMI_SEND_PANIC_EVENT_NONE, 56 IPMI_SEND_PANIC_EVENT, 57 IPMI_SEND_PANIC_EVENT_STRING, 58 IPMI_SEND_PANIC_EVENT_MAX 59}; 60 61/* Indices in this array should be mapped to enum ipmi_panic_event_op */ 62static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL }; 63 64#ifdef CONFIG_IPMI_PANIC_STRING 65#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING 66#elif defined(CONFIG_IPMI_PANIC_EVENT) 67#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT 68#else 69#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE 70#endif 71 72static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT; 73 74static int panic_op_write_handler(const char *val, 75 const struct kernel_param *kp) 76{ 77 char valcp[16]; 78 int e; 79 80 strscpy(valcp, val, sizeof(valcp)); 81 e = match_string(ipmi_panic_event_str, -1, strstrip(valcp)); 82 if (e < 0) 83 return e; 84 85 ipmi_send_panic_event = e; 86 return 0; 87} 88 89static int panic_op_read_handler(char *buffer, const struct kernel_param *kp) 90{ 91 const char *event_str; 92 93 if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX) 94 event_str = "???"; 95 else 96 event_str = ipmi_panic_event_str[ipmi_send_panic_event]; 97 98 return sprintf(buffer, "%s\n", event_str); 99} 100 101static const struct kernel_param_ops panic_op_ops = { 102 .set = panic_op_write_handler, 103 .get = panic_op_read_handler 104}; 105module_param_cb(panic_op, &panic_op_ops, NULL, 0600); 106MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events."); 107 108 109#define MAX_EVENTS_IN_QUEUE 25 110 111/* Remain in auto-maintenance mode for this amount of time (in ms). 
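 * This is exposed read-write through
 * /sys/module/ipmi_msghandler/parameters/maintenance_mode_timeout_ms
 * (see the module_param() below), so it can be tuned at runtime.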
*/ 112static unsigned long maintenance_mode_timeout_ms = 30000; 113module_param(maintenance_mode_timeout_ms, ulong, 0644); 114MODULE_PARM_DESC(maintenance_mode_timeout_ms, 115 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode."); 116 117/* 118 * Don't let a message sit in a queue forever, always time it with at least 119 * the max message timer. This is in milliseconds. 120 */ 121#define MAX_MSG_TIMEOUT 60000 122 123/* 124 * Timeout times below are in milliseconds, and are done off a 1 125 * second timer. So setting the value to 1000 would mean anything 126 * between 0 and 1000ms. So really the only reasonable minimum 127 * setting is 2000ms, which is between 1 and 2 seconds. 128 */ 129 130/* The default timeout for message retries. */ 131static unsigned long default_retry_ms = 2000; 132module_param(default_retry_ms, ulong, 0644); 133MODULE_PARM_DESC(default_retry_ms, 134 "The time (milliseconds) between retry sends"); 135 136/* The default timeout for maintenance mode message retries. */ 137static unsigned long default_maintenance_retry_ms = 3000; 138module_param(default_maintenance_retry_ms, ulong, 0644); 139MODULE_PARM_DESC(default_maintenance_retry_ms, 140 "The time (milliseconds) between retry sends in maintenance mode"); 141 142/* The default maximum number of retries */ 143static unsigned int default_max_retries = 4; 144module_param(default_max_retries, uint, 0644); 145MODULE_PARM_DESC(default_max_retries, 146 "The default maximum number of retries for a message"); 147 148/* The default maximum number of users that may register. */ 149static unsigned int max_users = 30; 150module_param(max_users, uint, 0644); 151MODULE_PARM_DESC(max_users, 152 "The most users that may use the IPMI stack at one time."); 153 154/* The default maximum number of messages a user may have outstanding. */ 155static unsigned int max_msgs_per_user = 100; 156module_param(max_msgs_per_user, uint, 0644); 157MODULE_PARM_DESC(max_msgs_per_user, 158 "The most messages a user may have outstanding."); 159 160/* Call every ~1000 ms. */ 161#define IPMI_TIMEOUT_TIME 1000 162 163/* How many jiffies does it take to get to the timeout time. */ 164#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 165 166/* 167 * Request events from the queue every second (this is the number of 168 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the 169 * future, IPMI will add a way to know immediately if an event is in 170 * the queue and this silliness can go away. 171 */ 172#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) 173 174/* How long should we cache dynamic device IDs? */ 175#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ) 176 177/* 178 * The main "user" data structure. 179 */ 180struct ipmi_user { 181 struct list_head link; 182 183 /* 184 * Set to NULL when the user is destroyed, a pointer to myself 185 * so srcu_dereference can be used on it. 186 */ 187 struct ipmi_user *self; 188 struct srcu_struct release_barrier; 189 190 struct kref refcount; 191 192 /* The upper layer that handles receive messages. */ 193 const struct ipmi_user_hndl *handler; 194 void *handler_data; 195 196 /* The interface this user is bound to. */ 197 struct ipmi_smi *intf; 198 199 /* Does this user receive IPMI events? */ 200 bool gets_events; 201 202 atomic_t nr_msgs; 203 204 /* Free must run in process context for RCU cleanup.
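 * free_user() queues remove_work on remove_work_wq, and
 * free_user_work() then runs cleanup_srcu_struct() and vfree()
 * from that worker.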
*/ 205 struct work_struct remove_work; 206}; 207 208static struct workqueue_struct *remove_work_wq; 209 210static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) 211 __acquires(user->release_barrier) 212{ 213 struct ipmi_user *ruser; 214 215 *index = srcu_read_lock(&user->release_barrier); 216 ruser = srcu_dereference(user->self, &user->release_barrier); 217 if (!ruser) 218 srcu_read_unlock(&user->release_barrier, *index); 219 return ruser; 220} 221 222static void release_ipmi_user(struct ipmi_user *user, int index) 223{ 224 srcu_read_unlock(&user->release_barrier, index); 225} 226 227struct cmd_rcvr { 228 struct list_head link; 229 230 struct ipmi_user *user; 231 unsigned char netfn; 232 unsigned char cmd; 233 unsigned int chans; 234 235 /* 236 * This is used to form a linked list during mass deletion. 237 * Since this is in an RCU list, we cannot use the link above 238 * or change any data until the RCU period completes. So we 239 * use this next variable during mass deletion so we can have 240 * a list and don't have to wait and restart the search on 241 * every individual deletion of a command. 242 */ 243 struct cmd_rcvr *next; 244}; 245 246struct seq_table { 247 unsigned int inuse : 1; 248 unsigned int broadcast : 1; 249 250 unsigned long timeout; 251 unsigned long orig_timeout; 252 unsigned int retries_left; 253 254 /* 255 * To verify on an incoming send message response that this is 256 * the message that the response is for, we keep a sequence id 257 * and increment it every time we send a message. 258 */ 259 long seqid; 260 261 /* 262 * This is held so we can properly respond to the message on a 263 * timeout, and it is used to hold the temporary data for 264 * retransmission, too. 265 */ 266 struct ipmi_recv_msg *recv_msg; 267}; 268 269/* 270 * Store the information in a msgid (long) to allow us to find a 271 * sequence table entry from the msgid. 272 */ 273#define STORE_SEQ_IN_MSGID(seq, seqid) \ 274 ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff)) 275 276#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ 277 do { \ 278 seq = (((msgid) >> 26) & 0x3f); \ 279 seqid = ((msgid) & 0x3ffffff); \ 280 } while (0) 281 282#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff) 283 284#define IPMI_MAX_CHANNELS 16 285struct ipmi_channel { 286 unsigned char medium; 287 unsigned char protocol; 288}; 289 290struct ipmi_channel_set { 291 struct ipmi_channel c[IPMI_MAX_CHANNELS]; 292}; 293 294struct ipmi_my_addrinfo { 295 /* 296 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, 297 * but may be changed by the user. 298 */ 299 unsigned char address; 300 301 /* 302 * My LUN. This should generally stay the SMS LUN, but just in 303 * case... 304 */ 305 unsigned char lun; 306}; 307 308/* 309 * Note that the product id, manufacturer id, guid, and device id are 310 * immutable in this structure, so dyn_mutex is not required for 311 * accessing those. If those change on a BMC, a new BMC is allocated. 312 */ 313struct bmc_device { 314 struct platform_device pdev; 315 struct list_head intfs; /* Interfaces on this BMC.
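 * Protected by dyn_mutex, like the dynamic id fields below.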
*/ 316 struct ipmi_device_id id; 317 struct ipmi_device_id fetch_id; 318 int dyn_id_set; 319 unsigned long dyn_id_expiry; 320 struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */ 321 guid_t guid; 322 guid_t fetch_guid; 323 int dyn_guid_set; 324 struct kref usecount; 325 struct work_struct remove_work; 326 unsigned char cc; /* completion code */ 327}; 328#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) 329 330static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 331 struct ipmi_device_id *id, 332 bool *guid_set, guid_t *guid); 333 334/* 335 * Various statistics for IPMI, these index stats[] in the ipmi_smi 336 * structure. 337 */ 338enum ipmi_stat_indexes { 339 /* Commands we got from the user that were invalid. */ 340 IPMI_STAT_sent_invalid_commands = 0, 341 342 /* Commands we sent to the MC. */ 343 IPMI_STAT_sent_local_commands, 344 345 /* Responses from the MC that were delivered to a user. */ 346 IPMI_STAT_handled_local_responses, 347 348 /* Responses from the MC that were not delivered to a user. */ 349 IPMI_STAT_unhandled_local_responses, 350 351 /* Commands we sent out to the IPMB bus. */ 352 IPMI_STAT_sent_ipmb_commands, 353 354 /* Commands sent on the IPMB that had errors on the SEND CMD */ 355 IPMI_STAT_sent_ipmb_command_errs, 356 357 /* Each retransmit increments this count. */ 358 IPMI_STAT_retransmitted_ipmb_commands, 359 360 /* 361 * When a message times out (runs out of retransmits) this is 362 * incremented. 363 */ 364 IPMI_STAT_timed_out_ipmb_commands, 365 366 /* 367 * This is like above, but for broadcasts. Broadcasts are 368 * *not* included in the above count (they are expected to 369 * time out). 370 */ 371 IPMI_STAT_timed_out_ipmb_broadcasts, 372 373 /* Responses I have sent to the IPMB bus. */ 374 IPMI_STAT_sent_ipmb_responses, 375 376 /* The response was delivered to the user. */ 377 IPMI_STAT_handled_ipmb_responses, 378 379 /* The response had invalid data in it. */ 380 IPMI_STAT_invalid_ipmb_responses, 381 382 /* The response didn't have anyone waiting for it. */ 383 IPMI_STAT_unhandled_ipmb_responses, 384 385 /* Commands we sent out to the LAN. */ 386 IPMI_STAT_sent_lan_commands, 387 388 /* Commands sent on the LAN that had errors on the SEND CMD */ 389 IPMI_STAT_sent_lan_command_errs, 390 391 /* Each retransmit increments this count. */ 392 IPMI_STAT_retransmitted_lan_commands, 393 394 /* 395 * When a message times out (runs out of retransmits) this is 396 * incremented. 397 */ 398 IPMI_STAT_timed_out_lan_commands, 399 400 /* Responses I have sent to the LAN. */ 401 IPMI_STAT_sent_lan_responses, 402 403 /* The response was delivered to the user. */ 404 IPMI_STAT_handled_lan_responses, 405 406 /* The response had invalid data in it. */ 407 IPMI_STAT_invalid_lan_responses, 408 409 /* The response didn't have anyone waiting for it. */ 410 IPMI_STAT_unhandled_lan_responses, 411 412 /* The command was delivered to the user. */ 413 IPMI_STAT_handled_commands, 414 415 /* The command had invalid data in it. */ 416 IPMI_STAT_invalid_commands, 417 418 /* The command didn't have anyone waiting for it. */ 419 IPMI_STAT_unhandled_commands, 420 421 /* Invalid data in an event. */ 422 IPMI_STAT_invalid_events, 423 424 /* Events that were received with the proper format. */ 425 IPMI_STAT_events, 426 427 /* Retransmissions on IPMB that failed. */ 428 IPMI_STAT_dropped_rexmit_ipmb_commands, 429 430 /* Retransmissions on LAN that failed.
*/ 431 IPMI_STAT_dropped_rexmit_lan_commands, 432 433 /* This *must* remain last, add new values above this. */ 434 IPMI_NUM_STATS 435}; 436 437 438#define IPMI_IPMB_NUM_SEQ 64 439struct ipmi_smi { 440 struct module *owner; 441 442 /* What interface number are we? */ 443 int intf_num; 444 445 struct kref refcount; 446 447 /* Set when the interface is being unregistered. */ 448 bool in_shutdown; 449 450 /* Used for a list of interfaces. */ 451 struct list_head link; 452 453 /* 454 * The list of upper layers that are using me. seq_lock write 455 * protects this. Read protection is with srcu. 456 */ 457 struct list_head users; 458 struct srcu_struct users_srcu; 459 atomic_t nr_users; 460 struct device_attribute nr_users_devattr; 461 struct device_attribute nr_msgs_devattr; 462 463 464 /* Used for wake ups at startup. */ 465 wait_queue_head_t waitq; 466 467 /* 468 * Prevents the interface from being unregistered when the 469 * interface is used by being looked up through the BMC 470 * structure. 471 */ 472 struct mutex bmc_reg_mutex; 473 474 struct bmc_device tmp_bmc; 475 struct bmc_device *bmc; 476 bool bmc_registered; 477 struct list_head bmc_link; 478 char *my_dev_name; 479 bool in_bmc_register; /* Handle recursive situations. Yuck. */ 480 struct work_struct bmc_reg_work; 481 482 const struct ipmi_smi_handlers *handlers; 483 void *send_info; 484 485 /* Driver-model device for the system interface. */ 486 struct device *si_dev; 487 488 /* 489 * A table of sequence numbers for this interface. We use the 490 * sequence numbers for IPMB messages that go out of the 491 * interface to match them up with their responses. A routine 492 * is called periodically to time the items in this list. 493 */ 494 spinlock_t seq_lock; 495 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; 496 int curr_seq; 497 498 /* 499 * Messages queued for delivery. If delivery fails (out of memory 500 * for instance), they will stay in here to be processed later in a 501 * periodic timer interrupt. The tasklet is for handling received 502 * messages directly from the handler. 503 */ 504 spinlock_t waiting_rcv_msgs_lock; 505 struct list_head waiting_rcv_msgs; 506 atomic_t watchdog_pretimeouts_to_deliver; 507 struct tasklet_struct recv_tasklet; 508 509 spinlock_t xmit_msgs_lock; 510 struct list_head xmit_msgs; 511 struct ipmi_smi_msg *curr_msg; 512 struct list_head hp_xmit_msgs; 513 514 /* 515 * The list of command receivers that are registered for commands 516 * on this interface. 517 */ 518 struct mutex cmd_rcvrs_mutex; 519 struct list_head cmd_rcvrs; 520 521 /* 522 * Events that were queued because no one was there to receive 523 * them. 524 */ 525 spinlock_t events_lock; /* For dealing with event stuff. */ 526 struct list_head waiting_events; 527 unsigned int waiting_events_count; /* How many events in queue? */ 528 char delivering_events; 529 char event_msg_printed; 530 531 /* How many users are waiting for events? */ 532 atomic_t event_waiters; 533 unsigned int ticks_to_req_ev; 534 535 spinlock_t watch_lock; /* For dealing with watch stuff below. */ 536 537 /* How many users are waiting for commands? */ 538 unsigned int command_waiters; 539 540 /* How many users are waiting for watchdogs? */ 541 unsigned int watchdog_waiters; 542 543 /* How many users are waiting for message responses? */ 544 unsigned int response_waiters; 545 546 /* 547 * Tells what the lower layer has last been asked to watch for, 548 * messages and/or watchdogs. Protected by watch_lock.
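 * smi_add_watch() and smi_remove_watch() keep the waiter counts
 * above and only call set_need_watch() when this mask changes.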
549 */ 550 unsigned int last_watch_mask; 551 552 /* 553 * The event receiver for my BMC, only really used at panic 554 * shutdown as a place to store this. 555 */ 556 unsigned char event_receiver; 557 unsigned char event_receiver_lun; 558 unsigned char local_sel_device; 559 unsigned char local_event_generator; 560 561 /* For handling of maintenance mode. */ 562 int maintenance_mode; 563 bool maintenance_mode_enable; 564 int auto_maintenance_timeout; 565 spinlock_t maintenance_mode_lock; /* Used in a timer... */ 566 567 /* 568 * If we are doing maintenance on something on IPMB, extend 569 * the timeout time to avoid timeouts writing firmware and 570 * such. 571 */ 572 int ipmb_maintenance_mode_timeout; 573 574 /* 575 * A cheap hack, if this is non-null and a message to an 576 * interface comes in with a NULL user, call this routine with 577 * it. Note that the message will still be freed by the 578 * caller. This only works on the system interface. 579 * 580 * Protected by bmc_reg_mutex. 581 */ 582 void (*null_user_handler)(struct ipmi_smi *intf, 583 struct ipmi_recv_msg *msg); 584 585 /* 586 * When we are scanning the channels for an SMI, this will 587 * tell which channel we are scanning. 588 */ 589 int curr_channel; 590 591 /* Channel information */ 592 struct ipmi_channel_set *channel_list; 593 unsigned int curr_working_cset; /* First index into the following. */ 594 struct ipmi_channel_set wchannels[2]; 595 struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS]; 596 bool channels_ready; 597 598 atomic_t stats[IPMI_NUM_STATS]; 599 600 /* 601 * run_to_completion duplicate of smb_info, smi_info 602 * and ipmi_serial_info structures. Used to decrease numbers of 603 * parameters passed by "low" level IPMI code. 604 */ 605 int run_to_completion; 606}; 607#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) 608 609static void __get_guid(struct ipmi_smi *intf); 610static void __ipmi_bmc_unregister(struct ipmi_smi *intf); 611static int __ipmi_bmc_register(struct ipmi_smi *intf, 612 struct ipmi_device_id *id, 613 bool guid_set, guid_t *guid, int intf_num); 614static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); 615 616 617/** 618 * The driver model view of the IPMI messaging driver. 619 */ 620static struct platform_driver ipmidriver = { 621 .driver = { 622 .name = "ipmi", 623 .bus = &platform_bus_type 624 } 625}; 626/* 627 * This mutex keeps us from adding the same BMC twice. 628 */ 629static DEFINE_MUTEX(ipmidriver_mutex); 630 631static LIST_HEAD(ipmi_interfaces); 632static DEFINE_MUTEX(ipmi_interfaces_mutex); 633#define ipmi_interfaces_mutex_held() \ 634 lockdep_is_held(&ipmi_interfaces_mutex) 635static struct srcu_struct ipmi_interfaces_srcu; 636 637/* 638 * List of watchers that want to know when smi's are added and deleted. 
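 * Protected by smi_watchers_mutex.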
639 */ 640static LIST_HEAD(smi_watchers); 641static DEFINE_MUTEX(smi_watchers_mutex); 642 643#define ipmi_inc_stat(intf, stat) \ 644 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) 645#define ipmi_get_stat(intf, stat) \ 646 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) 647 648static const char * const addr_src_to_str[] = { 649 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", 650 "device-tree", "platform" 651}; 652 653const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) 654{ 655 if (src >= SI_LAST) 656 src = 0; /* Invalid */ 657 return addr_src_to_str[src]; 658} 659EXPORT_SYMBOL(ipmi_addr_src_to_str); 660 661static int is_lan_addr(struct ipmi_addr *addr) 662{ 663 return addr->addr_type == IPMI_LAN_ADDR_TYPE; 664} 665 666static int is_ipmb_addr(struct ipmi_addr *addr) 667{ 668 return addr->addr_type == IPMI_IPMB_ADDR_TYPE; 669} 670 671static int is_ipmb_bcast_addr(struct ipmi_addr *addr) 672{ 673 return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; 674} 675 676static int is_ipmb_direct_addr(struct ipmi_addr *addr) 677{ 678 return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE; 679} 680 681static void free_recv_msg_list(struct list_head *q) 682{ 683 struct ipmi_recv_msg *msg, *msg2; 684 685 list_for_each_entry_safe(msg, msg2, q, link) { 686 list_del(&msg->link); 687 ipmi_free_recv_msg(msg); 688 } 689} 690 691static void free_smi_msg_list(struct list_head *q) 692{ 693 struct ipmi_smi_msg *msg, *msg2; 694 695 list_for_each_entry_safe(msg, msg2, q, link) { 696 list_del(&msg->link); 697 ipmi_free_smi_msg(msg); 698 } 699} 700 701static void clean_up_interface_data(struct ipmi_smi *intf) 702{ 703 int i; 704 struct cmd_rcvr *rcvr, *rcvr2; 705 struct list_head list; 706 707 tasklet_kill(&intf->recv_tasklet); 708 709 free_smi_msg_list(&intf->waiting_rcv_msgs); 710 free_recv_msg_list(&intf->waiting_events); 711 712 /* 713 * Wholesale remove all the entries from the list in the 714 * interface and wait for RCU to know that none are in use. 715 */ 716 mutex_lock(&intf->cmd_rcvrs_mutex); 717 INIT_LIST_HEAD(&list); 718 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); 719 mutex_unlock(&intf->cmd_rcvrs_mutex); 720 721 list_for_each_entry_safe(rcvr, rcvr2, &list, link) 722 kfree(rcvr); 723 724 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 725 if ((intf->seq_table[i].inuse) 726 && (intf->seq_table[i].recv_msg)) 727 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 728 } 729} 730 731static void intf_free(struct kref *ref) 732{ 733 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); 734 735 clean_up_interface_data(intf); 736 kfree(intf); 737} 738 739struct watcher_entry { 740 int intf_num; 741 struct ipmi_smi *intf; 742 struct list_head link; 743}; 744 745int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 746{ 747 struct ipmi_smi *intf; 748 int index, rv; 749 750 /* 751 * Make sure the driver is actually initialized, this handles 752 * problems with initialization order. 
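 * ipmi_init_msghandler() only does the real setup on its first
 * call and is safe to call again after that, so callers need not
 * order themselves against module init.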
753 */ 754 rv = ipmi_init_msghandler(); 755 if (rv) 756 return rv; 757 758 mutex_lock(&smi_watchers_mutex); 759 760 list_add(&watcher->link, &smi_watchers); 761 762 index = srcu_read_lock(&ipmi_interfaces_srcu); 763 list_for_each_entry_rcu(intf, &ipmi_interfaces, link, 764 lockdep_is_held(&smi_watchers_mutex)) { 765 int intf_num = READ_ONCE(intf->intf_num); 766 767 if (intf_num == -1) 768 continue; 769 watcher->new_smi(intf_num, intf->si_dev); 770 } 771 srcu_read_unlock(&ipmi_interfaces_srcu, index); 772 773 mutex_unlock(&smi_watchers_mutex); 774 775 return 0; 776} 777EXPORT_SYMBOL(ipmi_smi_watcher_register); 778 779int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 780{ 781 mutex_lock(&smi_watchers_mutex); 782 list_del(&watcher->link); 783 mutex_unlock(&smi_watchers_mutex); 784 return 0; 785} 786EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 787 788/* 789 * Takes and releases smi_watchers_mutex itself, so it must not be 790 * called with that mutex held. 791 */ 792static void 793call_smi_watchers(int i, struct device *dev) 794{ 795 struct ipmi_smi_watcher *w; 796 797 mutex_lock(&smi_watchers_mutex); 798 list_for_each_entry(w, &smi_watchers, link) { 799 if (try_module_get(w->owner)) { 800 w->new_smi(i, dev); 801 module_put(w->owner); 802 } 803 } 804 mutex_unlock(&smi_watchers_mutex); 805} 806 807static int 808ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 809{ 810 if (addr1->addr_type != addr2->addr_type) 811 return 0; 812 813 if (addr1->channel != addr2->channel) 814 return 0; 815 816 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 817 struct ipmi_system_interface_addr *smi_addr1 818 = (struct ipmi_system_interface_addr *) addr1; 819 struct ipmi_system_interface_addr *smi_addr2 820 = (struct ipmi_system_interface_addr *) addr2; 821 return (smi_addr1->lun == smi_addr2->lun); 822 } 823 824 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { 825 struct ipmi_ipmb_addr *ipmb_addr1 826 = (struct ipmi_ipmb_addr *) addr1; 827 struct ipmi_ipmb_addr *ipmb_addr2 828 = (struct ipmi_ipmb_addr *) addr2; 829 830 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 831 && (ipmb_addr1->lun == ipmb_addr2->lun)); 832 } 833 834 if (is_ipmb_direct_addr(addr1)) { 835 struct ipmi_ipmb_direct_addr *daddr1 836 = (struct ipmi_ipmb_direct_addr *) addr1; 837 struct ipmi_ipmb_direct_addr *daddr2 838 = (struct ipmi_ipmb_direct_addr *) addr2; 839 840 return daddr1->slave_addr == daddr2->slave_addr && 841 daddr1->rq_lun == daddr2->rq_lun && 842 daddr1->rs_lun == daddr2->rs_lun; 843 } 844 845 if (is_lan_addr(addr1)) { 846 struct ipmi_lan_addr *lan_addr1 847 = (struct ipmi_lan_addr *) addr1; 848 struct ipmi_lan_addr *lan_addr2 849 = (struct ipmi_lan_addr *) addr2; 850 851 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 852 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 853 && (lan_addr1->session_handle 854 == lan_addr2->session_handle) 855 && (lan_addr1->lun == lan_addr2->lun)); 856 } 857 858 return 1; 859} 860 861int ipmi_validate_addr(struct ipmi_addr *addr, int len) 862{ 863 if (len < sizeof(struct ipmi_system_interface_addr)) 864 return -EINVAL; 865 866 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 867 if (addr->channel != IPMI_BMC_CHANNEL) 868 return -EINVAL; 869 return 0; 870 } 871 872 if ((addr->channel == IPMI_BMC_CHANNEL) 873 || (addr->channel >= IPMI_MAX_CHANNELS) 874 || (addr->channel < 0)) 875 return -EINVAL; 876 877 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 878 if (len < sizeof(struct ipmi_ipmb_addr)) 879 return -EINVAL; 880 return 0; 881 } 882 883 if
(is_ipmb_direct_addr(addr)) { 883 struct ipmi_ipmb_direct_addr *daddr = (void *) addr; 884 885 if (addr->channel != 0) 886 return -EINVAL; 887 if (len < sizeof(struct ipmi_ipmb_direct_addr)) 888 return -EINVAL; 889 890 if (daddr->slave_addr & 0x01) 891 return -EINVAL; 892 if (daddr->rq_lun >= 4) 893 return -EINVAL; 894 if (daddr->rs_lun >= 4) 895 return -EINVAL; 896 return 0; 897 } 898 899 if (is_lan_addr(addr)) { 900 if (len < sizeof(struct ipmi_lan_addr)) 901 return -EINVAL; 902 return 0; 903 } 904 905 return -EINVAL; 906} 907EXPORT_SYMBOL(ipmi_validate_addr); 908 909unsigned int ipmi_addr_length(int addr_type) 910{ 911 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 912 return sizeof(struct ipmi_system_interface_addr); 913 914 if ((addr_type == IPMI_IPMB_ADDR_TYPE) 915 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 916 return sizeof(struct ipmi_ipmb_addr); 917 918 if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE) 919 return sizeof(struct ipmi_ipmb_direct_addr); 920 921 if (addr_type == IPMI_LAN_ADDR_TYPE) 922 return sizeof(struct ipmi_lan_addr); 923 924 return 0; 925} 926EXPORT_SYMBOL(ipmi_addr_length); 927 928static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 929{ 930 int rv = 0; 931 932 if (!msg->user) { 933 /* Special handling for NULL users. */ 934 if (intf->null_user_handler) { 935 intf->null_user_handler(intf, msg); 936 } else { 937 /* No handler, so give up. */ 938 rv = -EINVAL; 939 } 940 ipmi_free_recv_msg(msg); 941 } else if (oops_in_progress) { 942 /* 943 * If we are running in the panic context, calling the 944 * receive handler doesn't have much meaning and has a deadlock 945 * risk. At this moment, simply skip it in that case. 946 */ 947 atomic_dec(&msg->user->nr_msgs); 948 ipmi_free_recv_msg(msg); 949 } else { 950 int index; 951 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); 952 953 if (user) { 954 atomic_dec(&user->nr_msgs); 955 user->handler->ipmi_recv_hndl(msg, user->handler_data); 956 release_ipmi_user(user, index); 957 } else { 958 /* User went away, give up. */ 959 ipmi_free_recv_msg(msg); 960 rv = -EINVAL; 961 } 962 } 963 964 return rv; 965} 966 967static void deliver_local_response(struct ipmi_smi *intf, 968 struct ipmi_recv_msg *msg) 969{ 970 if (deliver_response(intf, msg)) 971 ipmi_inc_stat(intf, unhandled_local_responses); 972 else 973 ipmi_inc_stat(intf, handled_local_responses); 974} 975 976static void deliver_err_response(struct ipmi_smi *intf, 977 struct ipmi_recv_msg *msg, int err) 978{ 979 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 980 msg->msg_data[0] = err; 981 msg->msg.netfn |= 1; /* Convert to a response.
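 * (An IPMI response netfn is the request netfn with the low
 * bit set, so setting bit 0 turns this into a response.)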
*/ 982 msg->msg.data_len = 1; 983 msg->msg.data = msg->msg_data; 984 deliver_local_response(intf, msg); 985} 986 987static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) 988{ 989 unsigned long iflags; 990 991 if (!intf->handlers->set_need_watch) 992 return; 993 994 spin_lock_irqsave(&intf->watch_lock, iflags); 995 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 996 intf->response_waiters++; 997 998 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 999 intf->watchdog_waiters++; 1000 1001 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 1002 intf->command_waiters++; 1003 1004 if ((intf->last_watch_mask & flags) != flags) { 1005 intf->last_watch_mask |= flags; 1006 intf->handlers->set_need_watch(intf->send_info, 1007 intf->last_watch_mask); 1008 } 1009 spin_unlock_irqrestore(&intf->watch_lock, iflags); 1010} 1011 1012static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) 1013{ 1014 unsigned long iflags; 1015 1016 if (!intf->handlers->set_need_watch) 1017 return; 1018 1019 spin_lock_irqsave(&intf->watch_lock, iflags); 1020 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 1021 intf->response_waiters--; 1022 1023 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 1024 intf->watchdog_waiters--; 1025 1026 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 1027 intf->command_waiters--; 1028 1029 flags = 0; 1030 if (intf->response_waiters) 1031 flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; 1032 if (intf->watchdog_waiters) 1033 flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; 1034 if (intf->command_waiters) 1035 flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; 1036 1037 if (intf->last_watch_mask != flags) { 1038 intf->last_watch_mask = flags; 1039 intf->handlers->set_need_watch(intf->send_info, 1040 intf->last_watch_mask); 1041 } 1042 spin_unlock_irqrestore(&intf->watch_lock, iflags); 1043} 1044 1045/* 1046 * Find the next sequence number not being used and add the given 1047 * message with the given timeout to the sequence table. This must be 1048 * called with the interface's seq_lock held. 1049 */ 1050static int intf_next_seq(struct ipmi_smi *intf, 1051 struct ipmi_recv_msg *recv_msg, 1052 unsigned long timeout, 1053 int retries, 1054 int broadcast, 1055 unsigned char *seq, 1056 long *seqid) 1057{ 1058 int rv = 0; 1059 unsigned int i; 1060 1061 if (timeout == 0) 1062 timeout = default_retry_ms; 1063 if (retries < 0) 1064 retries = default_max_retries; 1065 1066 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 1067 i = (i+1)%IPMI_IPMB_NUM_SEQ) { 1068 if (!intf->seq_table[i].inuse) 1069 break; 1070 } 1071 1072 if (!intf->seq_table[i].inuse) { 1073 intf->seq_table[i].recv_msg = recv_msg; 1074 1075 /* 1076 * Start with the maximum timeout, when the send response 1077 * comes in we will start the real timer. 1078 */ 1079 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; 1080 intf->seq_table[i].orig_timeout = timeout; 1081 intf->seq_table[i].retries_left = retries; 1082 intf->seq_table[i].broadcast = broadcast; 1083 intf->seq_table[i].inuse = 1; 1084 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); 1085 *seq = i; 1086 *seqid = intf->seq_table[i].seqid; 1087 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; 1088 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1089 need_waiter(intf); 1090 } else { 1091 rv = -EAGAIN; 1092 } 1093 1094 return rv; 1095} 1096 1097/* 1098 * Return the receive message for the given sequence number and 1099 * release the sequence number so it can be reused. 
Some other data 1100 * is passed in to be sure the message matches up correctly (to help 1101 * guard against messages coming in after their timeout and the 1102 * sequence number being reused). 1103 */ 1104static int intf_find_seq(struct ipmi_smi *intf, 1105 unsigned char seq, 1106 short channel, 1107 unsigned char cmd, 1108 unsigned char netfn, 1109 struct ipmi_addr *addr, 1110 struct ipmi_recv_msg **recv_msg) 1111{ 1112 int rv = -ENODEV; 1113 unsigned long flags; 1114 1115 if (seq >= IPMI_IPMB_NUM_SEQ) 1116 return -EINVAL; 1117 1118 spin_lock_irqsave(&intf->seq_lock, flags); 1119 if (intf->seq_table[seq].inuse) { 1120 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 1121 1122 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) 1123 && (msg->msg.netfn == netfn) 1124 && (ipmi_addr_equal(addr, &msg->addr))) { 1125 *recv_msg = msg; 1126 intf->seq_table[seq].inuse = 0; 1127 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1128 rv = 0; 1129 } 1130 } 1131 spin_unlock_irqrestore(&intf->seq_lock, flags); 1132 1133 return rv; 1134} 1135 1136 1137/* Start the timer for a specific sequence table entry. */ 1138static int intf_start_seq_timer(struct ipmi_smi *intf, 1139 long msgid) 1140{ 1141 int rv = -ENODEV; 1142 unsigned long flags; 1143 unsigned char seq; 1144 unsigned long seqid; 1145 1146 1147 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1148 1149 spin_lock_irqsave(&intf->seq_lock, flags); 1150 /* 1151 * We do this verification because the user can be deleted 1152 * while a message is outstanding. 1153 */ 1154 if ((intf->seq_table[seq].inuse) 1155 && (intf->seq_table[seq].seqid == seqid)) { 1156 struct seq_table *ent = &intf->seq_table[seq]; 1157 ent->timeout = ent->orig_timeout; 1158 rv = 0; 1159 } 1160 spin_unlock_irqrestore(&intf->seq_lock, flags); 1161 1162 return rv; 1163} 1164 1165/* Got an error for the send message for a specific sequence number. */ 1166static int intf_err_seq(struct ipmi_smi *intf, 1167 long msgid, 1168 unsigned int err) 1169{ 1170 int rv = -ENODEV; 1171 unsigned long flags; 1172 unsigned char seq; 1173 unsigned long seqid; 1174 struct ipmi_recv_msg *msg = NULL; 1175 1176 1177 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1178 1179 spin_lock_irqsave(&intf->seq_lock, flags); 1180 /* 1181 * We do this verification because the user can be deleted 1182 * while a message is outstanding. 1183 */ 1184 if ((intf->seq_table[seq].inuse) 1185 && (intf->seq_table[seq].seqid == seqid)) { 1186 struct seq_table *ent = &intf->seq_table[seq]; 1187 1188 ent->inuse = 0; 1189 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1190 msg = ent->recv_msg; 1191 rv = 0; 1192 } 1193 spin_unlock_irqrestore(&intf->seq_lock, flags); 1194 1195 if (msg) 1196 deliver_err_response(intf, msg, err); 1197 1198 return rv; 1199} 1200 1201static void free_user_work(struct work_struct *work) 1202{ 1203 struct ipmi_user *user = container_of(work, struct ipmi_user, 1204 remove_work); 1205 1206 cleanup_srcu_struct(&user->release_barrier); 1207 vfree(user); 1208} 1209 1210int ipmi_create_user(unsigned int if_num, 1211 const struct ipmi_user_hndl *handler, 1212 void *handler_data, 1213 struct ipmi_user **user) 1214{ 1215 unsigned long flags; 1216 struct ipmi_user *new_user; 1217 int rv, index; 1218 struct ipmi_smi *intf; 1219 1220 /* 1221 * There is no module usecount here, because it's not 1222 * required. Since this can only be used by and called from 1223 * other modules, they will implicitly use this module, and 1224 * thus this can't be removed unless the other modules are 1225 * removed.
1226 */ 1227 1228 if (handler == NULL) 1229 return -EINVAL; 1230 1231 /* 1232 * Make sure the driver is actually initialized, this handles 1233 * problems with initialization order. 1234 */ 1235 rv = ipmi_init_msghandler(); 1236 if (rv) 1237 return rv; 1238 1239 new_user = vzalloc(sizeof(*new_user)); 1240 if (!new_user) 1241 return -ENOMEM; 1242 1243 index = srcu_read_lock(&ipmi_interfaces_srcu); 1244 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 1245 if (intf->intf_num == if_num) 1246 goto found; 1247 } 1248 /* Not found, return an error */ 1249 rv = -EINVAL; 1250 goto out_kfree; 1251 1252 found: 1253 if (atomic_add_return(1, &intf->nr_users) > max_users) { 1254 rv = -EBUSY; 1255 goto out_kfree; 1256 } 1257 1258 INIT_WORK(&new_user->remove_work, free_user_work); 1259 1260 rv = init_srcu_struct(&new_user->release_barrier); 1261 if (rv) 1262 goto out_kfree; 1263 1264 if (!try_module_get(intf->owner)) { 1265 rv = -ENODEV; 1266 goto out_kfree; 1267 } 1268 1269 /* Note that each existing user holds a refcount to the interface. */ 1270 kref_get(&intf->refcount); 1271 1272 atomic_set(&new_user->nr_msgs, 0); 1273 kref_init(&new_user->refcount); 1274 new_user->handler = handler; 1275 new_user->handler_data = handler_data; 1276 new_user->intf = intf; 1277 new_user->gets_events = false; 1278 1279 rcu_assign_pointer(new_user->self, new_user); 1280 spin_lock_irqsave(&intf->seq_lock, flags); 1281 list_add_rcu(&new_user->link, &intf->users); 1282 spin_unlock_irqrestore(&intf->seq_lock, flags); 1283 if (handler->ipmi_watchdog_pretimeout) 1284 /* User wants pretimeouts, so make sure to watch for them. */ 1285 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1286 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1287 *user = new_user; 1288 return 0; 1289 1290out_kfree: 1291 atomic_dec(&intf->nr_users); 1292 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1293 vfree(new_user); 1294 return rv; 1295} 1296EXPORT_SYMBOL(ipmi_create_user); 1297 1298int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) 1299{ 1300 int rv, index; 1301 struct ipmi_smi *intf; 1302 1303 index = srcu_read_lock(&ipmi_interfaces_srcu); 1304 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 1305 if (intf->intf_num == if_num) 1306 goto found; 1307 } 1308 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1309 1310 /* Not found, return an error */ 1311 return -EINVAL; 1312 1313found: 1314 if (!intf->handlers->get_smi_info) 1315 rv = -ENOTTY; 1316 else 1317 rv = intf->handlers->get_smi_info(intf->send_info, data); 1318 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1319 1320 return rv; 1321} 1322EXPORT_SYMBOL(ipmi_get_smi_info); 1323 1324static void free_user(struct kref *ref) 1325{ 1326 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); 1327 1328 /* SRCU cleanup must happen in task context. */ 1329 queue_work(remove_work_wq, &user->remove_work); 1330} 1331 1332static void _ipmi_destroy_user(struct ipmi_user *user) 1333{ 1334 struct ipmi_smi *intf = user->intf; 1335 int i; 1336 unsigned long flags; 1337 struct cmd_rcvr *rcvr; 1338 struct cmd_rcvr *rcvrs = NULL; 1339 1340 if (!acquire_ipmi_user(user, &i)) { 1341 /* 1342 * The user has already been cleaned up, just make sure 1343 * nothing is using it and return. 
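 * The synchronize_srcu() below waits out any readers that
 * still hold a reference from acquire_ipmi_user().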
1344 */ 1345 synchronize_srcu(&user->release_barrier); 1346 return; 1347 } 1348 1349 rcu_assign_pointer(user->self, NULL); 1350 release_ipmi_user(user, i); 1351 1352 synchronize_srcu(&user->release_barrier); 1353 1354 if (user->handler->shutdown) 1355 user->handler->shutdown(user->handler_data); 1356 1357 if (user->handler->ipmi_watchdog_pretimeout) 1358 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1359 1360 if (user->gets_events) 1361 atomic_dec(&intf->event_waiters); 1362 1363 /* Remove the user from the interface's sequence table. */ 1364 spin_lock_irqsave(&intf->seq_lock, flags); 1365 list_del_rcu(&user->link); 1366 atomic_dec(&intf->nr_users); 1367 1368 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 1369 if (intf->seq_table[i].inuse 1370 && (intf->seq_table[i].recv_msg->user == user)) { 1371 intf->seq_table[i].inuse = 0; 1372 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1373 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 1374 } 1375 } 1376 spin_unlock_irqrestore(&intf->seq_lock, flags); 1377 1378 /* 1379 * Remove the user from the command receiver's table. First 1380 * we build a list of everything (not using the standard link, 1381 * since other things may be using it till we do 1382 * synchronize_srcu()) then free everything in that list. 1383 */ 1384 mutex_lock(&intf->cmd_rcvrs_mutex); 1385 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1386 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1387 if (rcvr->user == user) { 1388 list_del_rcu(&rcvr->link); 1389 rcvr->next = rcvrs; 1390 rcvrs = rcvr; 1391 } 1392 } 1393 mutex_unlock(&intf->cmd_rcvrs_mutex); 1394 synchronize_rcu(); 1395 while (rcvrs) { 1396 rcvr = rcvrs; 1397 rcvrs = rcvr->next; 1398 kfree(rcvr); 1399 } 1400 1401 kref_put(&intf->refcount, intf_free); 1402 module_put(intf->owner); 1403} 1404 1405int ipmi_destroy_user(struct ipmi_user *user) 1406{ 1407 _ipmi_destroy_user(user); 1408 1409 kref_put(&user->refcount, free_user); 1410 1411 return 0; 1412} 1413EXPORT_SYMBOL(ipmi_destroy_user); 1414 1415int ipmi_get_version(struct ipmi_user *user, 1416 unsigned char *major, 1417 unsigned char *minor) 1418{ 1419 struct ipmi_device_id id; 1420 int rv, index; 1421 1422 user = acquire_ipmi_user(user, &index); 1423 if (!user) 1424 return -ENODEV; 1425 1426 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); 1427 if (!rv) { 1428 *major = ipmi_version_major(&id); 1429 *minor = ipmi_version_minor(&id); 1430 } 1431 release_ipmi_user(user, index); 1432 1433 return rv; 1434} 1435EXPORT_SYMBOL(ipmi_get_version); 1436 1437int ipmi_set_my_address(struct ipmi_user *user, 1438 unsigned int channel, 1439 unsigned char address) 1440{ 1441 int index, rv = 0; 1442 1443 user = acquire_ipmi_user(user, &index); 1444 if (!user) 1445 return -ENODEV; 1446 1447 if (channel >= IPMI_MAX_CHANNELS) { 1448 rv = -EINVAL; 1449 } else { 1450 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1451 user->intf->addrinfo[channel].address = address; 1452 } 1453 release_ipmi_user(user, index); 1454 1455 return rv; 1456} 1457EXPORT_SYMBOL(ipmi_set_my_address); 1458 1459int ipmi_get_my_address(struct ipmi_user *user, 1460 unsigned int channel, 1461 unsigned char *address) 1462{ 1463 int index, rv = 0; 1464 1465 user = acquire_ipmi_user(user, &index); 1466 if (!user) 1467 return -ENODEV; 1468 1469 if (channel >= IPMI_MAX_CHANNELS) { 1470 rv = -EINVAL; 1471 } else { 1472 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1473 *address = user->intf->addrinfo[channel].address; 1474 } 1475 release_ipmi_user(user, index); 1476 1477 return 
rv; 1478} 1479EXPORT_SYMBOL(ipmi_get_my_address); 1480 1481int ipmi_set_my_LUN(struct ipmi_user *user, 1482 unsigned int channel, 1483 unsigned char LUN) 1484{ 1485 int index, rv = 0; 1486 1487 user = acquire_ipmi_user(user, &index); 1488 if (!user) 1489 return -ENODEV; 1490 1491 if (channel >= IPMI_MAX_CHANNELS) { 1492 rv = -EINVAL; 1493 } else { 1494 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1495 user->intf->addrinfo[channel].lun = LUN & 0x3; 1496 } 1497 release_ipmi_user(user, index); 1498 1499 return rv; 1500} 1501EXPORT_SYMBOL(ipmi_set_my_LUN); 1502 1503int ipmi_get_my_LUN(struct ipmi_user *user, 1504 unsigned int channel, 1505 unsigned char *address) 1506{ 1507 int index, rv = 0; 1508 1509 user = acquire_ipmi_user(user, &index); 1510 if (!user) 1511 return -ENODEV; 1512 1513 if (channel >= IPMI_MAX_CHANNELS) { 1514 rv = -EINVAL; 1515 } else { 1516 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1517 *address = user->intf->addrinfo[channel].lun; 1518 } 1519 release_ipmi_user(user, index); 1520 1521 return rv; 1522} 1523EXPORT_SYMBOL(ipmi_get_my_LUN); 1524 1525int ipmi_get_maintenance_mode(struct ipmi_user *user) 1526{ 1527 int mode, index; 1528 unsigned long flags; 1529 1530 user = acquire_ipmi_user(user, &index); 1531 if (!user) 1532 return -ENODEV; 1533 1534 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); 1535 mode = user->intf->maintenance_mode; 1536 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); 1537 release_ipmi_user(user, index); 1538 1539 return mode; 1540} 1541EXPORT_SYMBOL(ipmi_get_maintenance_mode); 1542 1543static void maintenance_mode_update(struct ipmi_smi *intf) 1544{ 1545 if (intf->handlers->set_maintenance_mode) 1546 intf->handlers->set_maintenance_mode( 1547 intf->send_info, intf->maintenance_mode_enable); 1548} 1549 1550int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) 1551{ 1552 int rv = 0, index; 1553 unsigned long flags; 1554 struct ipmi_smi *intf = user->intf; 1555 1556 user = acquire_ipmi_user(user, &index); 1557 if (!user) 1558 return -ENODEV; 1559 1560 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1561 if (intf->maintenance_mode != mode) { 1562 switch (mode) { 1563 case IPMI_MAINTENANCE_MODE_AUTO: 1564 intf->maintenance_mode_enable 1565 = (intf->auto_maintenance_timeout > 0); 1566 break; 1567 1568 case IPMI_MAINTENANCE_MODE_OFF: 1569 intf->maintenance_mode_enable = false; 1570 break; 1571 1572 case IPMI_MAINTENANCE_MODE_ON: 1573 intf->maintenance_mode_enable = true; 1574 break; 1575 1576 default: 1577 rv = -EINVAL; 1578 goto out_unlock; 1579 } 1580 intf->maintenance_mode = mode; 1581 1582 maintenance_mode_update(intf); 1583 } 1584 out_unlock: 1585 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); 1586 release_ipmi_user(user, index); 1587 1588 return rv; 1589} 1590EXPORT_SYMBOL(ipmi_set_maintenance_mode); 1591 1592int ipmi_set_gets_events(struct ipmi_user *user, bool val) 1593{ 1594 unsigned long flags; 1595 struct ipmi_smi *intf = user->intf; 1596 struct ipmi_recv_msg *msg, *msg2; 1597 struct list_head msgs; 1598 int index; 1599 1600 user = acquire_ipmi_user(user, &index); 1601 if (!user) 1602 return -ENODEV; 1603 1604 INIT_LIST_HEAD(&msgs); 1605 1606 spin_lock_irqsave(&intf->events_lock, flags); 1607 if (user->gets_events == val) 1608 goto out; 1609 1610 user->gets_events = val; 1611 1612 if (val) { 1613 if (atomic_inc_return(&intf->event_waiters) == 1) 1614 need_waiter(intf); 1615 } else { 1616 atomic_dec(&intf->event_waiters); 1617 } 1618 1619 if 
(intf->delivering_events) 1620 /* 1621 * Another thread is delivering events for this, so 1622 * let it handle any new events. 1623 */ 1624 goto out; 1625 1626 /* Deliver any queued events. */ 1627 while (user->gets_events && !list_empty(&intf->waiting_events)) { 1628 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) 1629 list_move_tail(&msg->link, &msgs); 1630 intf->waiting_events_count = 0; 1631 if (intf->event_msg_printed) { 1632 dev_warn(intf->si_dev, "Event queue no longer full\n"); 1633 intf->event_msg_printed = 0; 1634 } 1635 1636 intf->delivering_events = 1; 1637 spin_unlock_irqrestore(&intf->events_lock, flags); 1638 1639 list_for_each_entry_safe(msg, msg2, &msgs, link) { 1640 msg->user = user; 1641 kref_get(&user->refcount); 1642 deliver_local_response(intf, msg); 1643 } 1644 1645 spin_lock_irqsave(&intf->events_lock, flags); 1646 intf->delivering_events = 0; 1647 } 1648 1649 out: 1650 spin_unlock_irqrestore(&intf->events_lock, flags); 1651 release_ipmi_user(user, index); 1652 1653 return 0; 1654} 1655EXPORT_SYMBOL(ipmi_set_gets_events); 1656 1657static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, 1658 unsigned char netfn, 1659 unsigned char cmd, 1660 unsigned char chan) 1661{ 1662 struct cmd_rcvr *rcvr; 1663 1664 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1665 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1666 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1667 && (rcvr->chans & (1 << chan))) 1668 return rcvr; 1669 } 1670 return NULL; 1671} 1672 1673static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf, 1674 unsigned char netfn, 1675 unsigned char cmd, 1676 unsigned int chans) 1677{ 1678 struct cmd_rcvr *rcvr; 1679 1680 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1681 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1682 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1683 && (rcvr->chans & chans)) 1684 return 0; 1685 } 1686 return 1; 1687} 1688 1689int ipmi_register_for_cmd(struct ipmi_user *user, 1690 unsigned char netfn, 1691 unsigned char cmd, 1692 unsigned int chans) 1693{ 1694 struct ipmi_smi *intf = user->intf; 1695 struct cmd_rcvr *rcvr; 1696 int rv = 0, index; 1697 1698 user = acquire_ipmi_user(user, &index); 1699 if (!user) 1700 return -ENODEV; 1701 1702 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); 1703 if (!rcvr) { 1704 rv = -ENOMEM; 1705 goto out_release; 1706 } 1707 rcvr->cmd = cmd; 1708 rcvr->netfn = netfn; 1709 rcvr->chans = chans; 1710 rcvr->user = user; 1711 1712 mutex_lock(&intf->cmd_rcvrs_mutex); 1713 /* Make sure the command/netfn is not already registered. 
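 * Only one user may receive a given netfn/cmd pair on any
 * given channel; is_cmd_rcvr_exclusive() enforces this.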
*/ 1714 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) { 1715 rv = -EBUSY; 1716 goto out_unlock; 1717 } 1718 1719 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); 1720 1721 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); 1722 1723out_unlock: 1724 mutex_unlock(&intf->cmd_rcvrs_mutex); 1725 if (rv) 1726 kfree(rcvr); 1727out_release: 1728 release_ipmi_user(user, index); 1729 1730 return rv; 1731} 1732EXPORT_SYMBOL(ipmi_register_for_cmd); 1733 1734int ipmi_unregister_for_cmd(struct ipmi_user *user, 1735 unsigned char netfn, 1736 unsigned char cmd, 1737 unsigned int chans) 1738{ 1739 struct ipmi_smi *intf = user->intf; 1740 struct cmd_rcvr *rcvr; 1741 struct cmd_rcvr *rcvrs = NULL; 1742 int i, rv = -ENOENT, index; 1743 1744 user = acquire_ipmi_user(user, &index); 1745 if (!user) 1746 return -ENODEV; 1747 1748 mutex_lock(&intf->cmd_rcvrs_mutex); 1749 for (i = 0; i < IPMI_NUM_CHANNELS; i++) { 1750 if (((1 << i) & chans) == 0) 1751 continue; 1752 rcvr = find_cmd_rcvr(intf, netfn, cmd, i); 1753 if (rcvr == NULL) 1754 continue; 1755 if (rcvr->user == user) { 1756 rv = 0; 1757 rcvr->chans &= ~chans; 1758 if (rcvr->chans == 0) { 1759 list_del_rcu(&rcvr->link); 1760 rcvr->next = rcvrs; 1761 rcvrs = rcvr; 1762 } 1763 } 1764 } 1765 mutex_unlock(&intf->cmd_rcvrs_mutex); 1766 synchronize_rcu(); 1767 release_ipmi_user(user, index); 1768 while (rcvrs) { 1769 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); 1770 rcvr = rcvrs; 1771 rcvrs = rcvr->next; 1772 kfree(rcvr); 1773 } 1774 1775 return rv; 1776} 1777EXPORT_SYMBOL(ipmi_unregister_for_cmd); 1778 1779unsigned char 1780ipmb_checksum(unsigned char *data, int size) 1781{ 1782 unsigned char csum = 0; 1783 1784 for (; size > 0; size--, data++) 1785 csum += *data; 1786 1787 return -csum; 1788} 1789EXPORT_SYMBOL(ipmb_checksum); 1790 1791static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, 1792 struct kernel_ipmi_msg *msg, 1793 struct ipmi_ipmb_addr *ipmb_addr, 1794 long msgid, 1795 unsigned char ipmb_seq, 1796 int broadcast, 1797 unsigned char source_address, 1798 unsigned char source_lun) 1799{ 1800 int i = broadcast; 1801 1802 /* Format the IPMB header data. */ 1803 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1804 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1805 smi_msg->data[2] = ipmb_addr->channel; 1806 if (broadcast) 1807 smi_msg->data[3] = 0; 1808 smi_msg->data[i+3] = ipmb_addr->slave_addr; 1809 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); 1810 smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2); 1811 smi_msg->data[i+6] = source_address; 1812 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; 1813 smi_msg->data[i+8] = msg->cmd; 1814 1815 /* Now tack on the data to the message. */ 1816 if (msg->data_len > 0) 1817 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len); 1818 smi_msg->data_size = msg->data_len + 9; 1819 1820 /* Now calculate the checksum and tack it on. */ 1821 smi_msg->data[i+smi_msg->data_size] 1822 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6); 1823 1824 /* 1825 * Add on the checksum size and the offset from the 1826 * broadcast. 1827 */ 1828 smi_msg->data_size += 1 + i; 1829 1830 smi_msg->msgid = msgid; 1831} 1832 1833static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, 1834 struct kernel_ipmi_msg *msg, 1835 struct ipmi_lan_addr *lan_addr, 1836 long msgid, 1837 unsigned char ipmb_seq, 1838 unsigned char source_lun) 1839{ 1840 /* Format the IPMB header data. 
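 * As with IPMB, the message is wrapped in a Send Message command
 * to the BMC, but with the session handle and SWIDs in place of
 * the IPMB slave addresses.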
*/ 1841 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1842 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1843 smi_msg->data[2] = lan_addr->channel; 1844 smi_msg->data[3] = lan_addr->session_handle; 1845 smi_msg->data[4] = lan_addr->remote_SWID; 1846 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); 1847 smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2); 1848 smi_msg->data[7] = lan_addr->local_SWID; 1849 smi_msg->data[8] = (ipmb_seq << 2) | source_lun; 1850 smi_msg->data[9] = msg->cmd; 1851 1852 /* Now tack on the data to the message. */ 1853 if (msg->data_len > 0) 1854 memcpy(&smi_msg->data[10], msg->data, msg->data_len); 1855 smi_msg->data_size = msg->data_len + 10; 1856 1857 /* Now calculate the checksum and tack it on. */ 1858 smi_msg->data[smi_msg->data_size] 1859 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7); 1860 1861 /* 1862 * Add on the checksum size and the offset from the 1863 * broadcast. 1864 */ 1865 smi_msg->data_size += 1; 1866 1867 smi_msg->msgid = msgid; 1868} 1869 1870static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf, 1871 struct ipmi_smi_msg *smi_msg, 1872 int priority) 1873{ 1874 if (intf->curr_msg) { 1875 if (priority > 0) 1876 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); 1877 else 1878 list_add_tail(&smi_msg->link, &intf->xmit_msgs); 1879 smi_msg = NULL; 1880 } else { 1881 intf->curr_msg = smi_msg; 1882 } 1883 1884 return smi_msg; 1885} 1886 1887static void smi_send(struct ipmi_smi *intf, 1888 const struct ipmi_smi_handlers *handlers, 1889 struct ipmi_smi_msg *smi_msg, int priority) 1890{ 1891 int run_to_completion = intf->run_to_completion; 1892 unsigned long flags = 0; 1893 1894 if (!run_to_completion) 1895 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 1896 smi_msg = smi_add_send_msg(intf, smi_msg, priority); 1897 1898 if (!run_to_completion) 1899 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 1900 1901 if (smi_msg) 1902 handlers->sender(intf->send_info, smi_msg); 1903} 1904 1905static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg) 1906{ 1907 return (((msg->netfn == IPMI_NETFN_APP_REQUEST) 1908 && ((msg->cmd == IPMI_COLD_RESET_CMD) 1909 || (msg->cmd == IPMI_WARM_RESET_CMD))) 1910 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)); 1911} 1912 1913static int i_ipmi_req_sysintf(struct ipmi_smi *intf, 1914 struct ipmi_addr *addr, 1915 long msgid, 1916 struct kernel_ipmi_msg *msg, 1917 struct ipmi_smi_msg *smi_msg, 1918 struct ipmi_recv_msg *recv_msg, 1919 int retries, 1920 unsigned int retry_time_ms) 1921{ 1922 struct ipmi_system_interface_addr *smi_addr; 1923 1924 if (msg->netfn & 1) 1925 /* Responses are not allowed to the SMI. */ 1926 return -EINVAL; 1927 1928 smi_addr = (struct ipmi_system_interface_addr *) addr; 1929 if (smi_addr->lun > 3) { 1930 ipmi_inc_stat(intf, sent_invalid_commands); 1931 return -EINVAL; 1932 } 1933 1934 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); 1935 1936 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) 1937 && ((msg->cmd == IPMI_SEND_MSG_CMD) 1938 || (msg->cmd == IPMI_GET_MSG_CMD) 1939 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { 1940 /* 1941 * We don't let the user do these, since we manage 1942 * the sequence numbers. 
1943 */ 1944 ipmi_inc_stat(intf, sent_invalid_commands); 1945 return -EINVAL; 1946 } 1947 1948 if (is_maintenance_mode_cmd(msg)) { 1949 unsigned long flags; 1950 1951 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1952 intf->auto_maintenance_timeout 1953 = maintenance_mode_timeout_ms; 1954 if (!intf->maintenance_mode 1955 && !intf->maintenance_mode_enable) { 1956 intf->maintenance_mode_enable = true; 1957 maintenance_mode_update(intf); 1958 } 1959 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 1960 flags); 1961 } 1962 1963 if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) { 1964 ipmi_inc_stat(intf, sent_invalid_commands); 1965 return -EMSGSIZE; 1966 } 1967 1968 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); 1969 smi_msg->data[1] = msg->cmd; 1970 smi_msg->msgid = msgid; 1971 smi_msg->user_data = recv_msg; 1972 if (msg->data_len > 0) 1973 memcpy(&smi_msg->data[2], msg->data, msg->data_len); 1974 smi_msg->data_size = msg->data_len + 2; 1975 ipmi_inc_stat(intf, sent_local_commands); 1976 1977 return 0; 1978} 1979 1980static int i_ipmi_req_ipmb(struct ipmi_smi *intf, 1981 struct ipmi_addr *addr, 1982 long msgid, 1983 struct kernel_ipmi_msg *msg, 1984 struct ipmi_smi_msg *smi_msg, 1985 struct ipmi_recv_msg *recv_msg, 1986 unsigned char source_address, 1987 unsigned char source_lun, 1988 int retries, 1989 unsigned int retry_time_ms) 1990{ 1991 struct ipmi_ipmb_addr *ipmb_addr; 1992 unsigned char ipmb_seq; 1993 long seqid; 1994 int broadcast = 0; 1995 struct ipmi_channel *chans; 1996 int rv = 0; 1997 1998 if (addr->channel >= IPMI_MAX_CHANNELS) { 1999 ipmi_inc_stat(intf, sent_invalid_commands); 2000 return -EINVAL; 2001 } 2002 2003 chans = READ_ONCE(intf->channel_list)->c; 2004 2005 if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { 2006 ipmi_inc_stat(intf, sent_invalid_commands); 2007 return -EINVAL; 2008 } 2009 2010 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 2011 /* 2012 * Broadcasts add a zero at the beginning of the 2013 * message, but are otherwise the same as an IPMB 2014 * address. 2015 */ 2016 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 2017 broadcast = 1; 2018 retries = 0; /* Don't retry broadcasts. */ 2019 } 2020 2021 /* 2022 * 9 for the header and 1 for the checksum, plus 2023 * possibly one for the broadcast. 2024 */ 2025 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 2026 ipmi_inc_stat(intf, sent_invalid_commands); 2027 return -EMSGSIZE; 2028 } 2029 2030 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 2031 if (ipmb_addr->lun > 3) { 2032 ipmi_inc_stat(intf, sent_invalid_commands); 2033 return -EINVAL; 2034 } 2035 2036 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 2037 2038 if (recv_msg->msg.netfn & 0x1) { 2039 /* 2040 * It's a response, so use the user's sequence 2041 * from msgid. 2042 */ 2043 ipmi_inc_stat(intf, sent_ipmb_responses); 2044 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 2045 msgid, broadcast, 2046 source_address, source_lun); 2047 2048 /* 2049 * Save the receive message so we can use it 2050 * to deliver the response. 2051 */ 2052 smi_msg->user_data = recv_msg; 2053 } else { 2054 /* It's a command, so get a sequence for it.
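 * The seq/seqid pair is packed into the msgid with
 * STORE_SEQ_IN_MSGID() (msgid = ((seq & 0x3f) << 26) |
 * (seqid & 0x3ffffff)) so the response can be matched back
 * to this sequence table entry.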
*/ 2055 unsigned long flags; 2056 2057 spin_lock_irqsave(&intf->seq_lock, flags); 2058 2059 if (is_maintenance_mode_cmd(msg)) 2060 intf->ipmb_maintenance_mode_timeout = 2061 maintenance_mode_timeout_ms; 2062 2063 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2064 /* Different default in maintenance mode */ 2065 retry_time_ms = default_maintenance_retry_ms; 2066 2067 /* 2068 * Create a sequence number with a 1 second 2069 * timeout and 4 retries. 2070 */ 2071 rv = intf_next_seq(intf, 2072 recv_msg, 2073 retry_time_ms, 2074 retries, 2075 broadcast, 2076 &ipmb_seq, 2077 &seqid); 2078 if (rv) 2079 /* 2080 * We have used up all the sequence numbers, 2081 * probably, so abort. 2082 */ 2083 goto out_err; 2084 2085 ipmi_inc_stat(intf, sent_ipmb_commands); 2086 2087 /* 2088 * Store the sequence number in the message, 2089 * so that when the send message response 2090 * comes back we can start the timer. 2091 */ 2092 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2093 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2094 ipmb_seq, broadcast, 2095 source_address, source_lun); 2096 2097 /* 2098 * Copy the message into the recv message data, so we 2099 * can retransmit it later if necessary. 2100 */ 2101 memcpy(recv_msg->msg_data, smi_msg->data, 2102 smi_msg->data_size); 2103 recv_msg->msg.data = recv_msg->msg_data; 2104 recv_msg->msg.data_len = smi_msg->data_size; 2105 2106 /* 2107 * We don't unlock until here, because we need 2108 * to copy the completed message into the 2109 * recv_msg before we release the lock. 2110 * Otherwise, race conditions may bite us. I 2111 * know that's pretty paranoid, but I prefer 2112 * to be correct. 2113 */ 2114out_err: 2115 spin_unlock_irqrestore(&intf->seq_lock, flags); 2116 } 2117 2118 return rv; 2119} 2120 2121static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2122 struct ipmi_addr *addr, 2123 long msgid, 2124 struct kernel_ipmi_msg *msg, 2125 struct ipmi_smi_msg *smi_msg, 2126 struct ipmi_recv_msg *recv_msg, 2127 unsigned char source_lun) 2128{ 2129 struct ipmi_ipmb_direct_addr *daddr; 2130 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2131 2132 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2133 return -EAFNOSUPPORT; 2134 2135 /* Responses must have a completion code. 
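 *
 * For reference, the direct-message header assembled below is
 * (a summary of the code that follows, byte by byte):
 *
 *      data[0]: netfn << 2 | LUN (rs_lun for commands,
 *               rq_lun for responses)
 *      data[1]: slave address
 *      data[2]: msgid (rqSeq) << 2 | the other LUN
 *      data[3]: command
 *      data[4+]: payload, msg->data_len bytes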
*/ 2136 if (!is_cmd && msg->data_len < 1) { 2137 ipmi_inc_stat(intf, sent_invalid_commands); 2138 return -EINVAL; 2139 } 2140 2141 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2142 ipmi_inc_stat(intf, sent_invalid_commands); 2143 return -EMSGSIZE; 2144 } 2145 2146 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2147 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2148 ipmi_inc_stat(intf, sent_invalid_commands); 2149 return -EINVAL; 2150 } 2151 2152 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2153 smi_msg->msgid = msgid; 2154 2155 if (is_cmd) { 2156 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2157 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2158 } else { 2159 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2160 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2161 } 2162 smi_msg->data[1] = daddr->slave_addr; 2163 smi_msg->data[3] = msg->cmd; 2164 2165 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2166 smi_msg->data_size = msg->data_len + 4; 2167 2168 smi_msg->user_data = recv_msg; 2169 2170 return 0; 2171} 2172 2173static int i_ipmi_req_lan(struct ipmi_smi *intf, 2174 struct ipmi_addr *addr, 2175 long msgid, 2176 struct kernel_ipmi_msg *msg, 2177 struct ipmi_smi_msg *smi_msg, 2178 struct ipmi_recv_msg *recv_msg, 2179 unsigned char source_lun, 2180 int retries, 2181 unsigned int retry_time_ms) 2182{ 2183 struct ipmi_lan_addr *lan_addr; 2184 unsigned char ipmb_seq; 2185 long seqid; 2186 struct ipmi_channel *chans; 2187 int rv = 0; 2188 2189 if (addr->channel >= IPMI_MAX_CHANNELS) { 2190 ipmi_inc_stat(intf, sent_invalid_commands); 2191 return -EINVAL; 2192 } 2193 2194 chans = READ_ONCE(intf->channel_list)->c; 2195 2196 if ((chans[addr->channel].medium 2197 != IPMI_CHANNEL_MEDIUM_8023LAN) 2198 && (chans[addr->channel].medium 2199 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2200 ipmi_inc_stat(intf, sent_invalid_commands); 2201 return -EINVAL; 2202 } 2203 2204 /* 11 for the header and 1 for the checksum. */ 2205 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2206 ipmi_inc_stat(intf, sent_invalid_commands); 2207 return -EMSGSIZE; 2208 } 2209 2210 lan_addr = (struct ipmi_lan_addr *) addr; 2211 if (lan_addr->lun > 3) { 2212 ipmi_inc_stat(intf, sent_invalid_commands); 2213 return -EINVAL; 2214 } 2215 2216 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2217 2218 if (recv_msg->msg.netfn & 0x1) { 2219 /* 2220 * It's a response, so use the user's sequence 2221 * from msgid. 2222 */ 2223 ipmi_inc_stat(intf, sent_lan_responses); 2224 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2225 msgid, source_lun); 2226 2227 /* 2228 * Save the receive message so we can use it 2229 * to deliver the response. 2230 */ 2231 smi_msg->user_data = recv_msg; 2232 } else { 2233 /* It's a command, so get a sequence for it. */ 2234 unsigned long flags; 2235 2236 spin_lock_irqsave(&intf->seq_lock, flags); 2237 2238 /* 2239 * Create a sequence number with a 1 second 2240 * timeout and 4 retries. 2241 */ 2242 rv = intf_next_seq(intf, 2243 recv_msg, 2244 retry_time_ms, 2245 retries, 2246 0, 2247 &ipmb_seq, 2248 &seqid); 2249 if (rv) 2250 /* 2251 * We have used up all the sequence numbers, 2252 * probably, so abort. 2253 */ 2254 goto out_err; 2255 2256 ipmi_inc_stat(intf, sent_lan_commands); 2257 2258 /* 2259 * Store the sequence number in the message, 2260 * so that when the send message response 2261 * comes back we can start the timer. 
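 *
 * The sequence number also travels in the rqSeq field of the
 * LAN header itself (data[8] in format_lan_msg() above); when
 * the remote answer arrives, handle_lan_get_msg_rsp() uses
 * intf_find_seq() to match it back to this request.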
2262 */ 2263 format_lan_msg(smi_msg, msg, lan_addr, 2264 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2265 ipmb_seq, source_lun); 2266 2267 /* 2268 * Copy the message into the recv message data, so we 2269 * can retransmit it later if necessary. 2270 */ 2271 memcpy(recv_msg->msg_data, smi_msg->data, 2272 smi_msg->data_size); 2273 recv_msg->msg.data = recv_msg->msg_data; 2274 recv_msg->msg.data_len = smi_msg->data_size; 2275 2276 /* 2277 * We don't unlock until here, because we need 2278 * to copy the completed message into the 2279 * recv_msg before we release the lock. 2280 * Otherwise, race conditions may bite us. I 2281 * know that's pretty paranoid, but I prefer 2282 * to be correct. 2283 */ 2284out_err: 2285 spin_unlock_irqrestore(&intf->seq_lock, flags); 2286 } 2287 2288 return rv; 2289} 2290 2291/* 2292 * Separate from ipmi_request so that the user does not have to be 2293 * supplied in certain circumstances (mainly at panic time). If 2294 * messages are supplied, they will be freed, even if an error 2295 * occurs. 2296 */ 2297static int i_ipmi_request(struct ipmi_user *user, 2298 struct ipmi_smi *intf, 2299 struct ipmi_addr *addr, 2300 long msgid, 2301 struct kernel_ipmi_msg *msg, 2302 void *user_msg_data, 2303 void *supplied_smi, 2304 struct ipmi_recv_msg *supplied_recv, 2305 int priority, 2306 unsigned char source_address, 2307 unsigned char source_lun, 2308 int retries, 2309 unsigned int retry_time_ms) 2310{ 2311 struct ipmi_smi_msg *smi_msg; 2312 struct ipmi_recv_msg *recv_msg; 2313 int rv = 0; 2314 2315 if (user) { 2316 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 2317 /* Decrement will happen at the end of the routine. */ 2318 rv = -EBUSY; 2319 goto out; 2320 } 2321 } 2322 2323 if (supplied_recv) 2324 recv_msg = supplied_recv; 2325 else { 2326 recv_msg = ipmi_alloc_recv_msg(); 2327 if (recv_msg == NULL) { 2328 rv = -ENOMEM; 2329 goto out; 2330 } 2331 } 2332 recv_msg->user_msg_data = user_msg_data; 2333 2334 if (supplied_smi) 2335 smi_msg = supplied_smi; 2336 else { 2337 smi_msg = ipmi_alloc_smi_msg(); 2338 if (smi_msg == NULL) { 2339 if (!supplied_recv) 2340 ipmi_free_recv_msg(recv_msg); 2341 rv = -ENOMEM; 2342 goto out; 2343 } 2344 } 2345 2346 rcu_read_lock(); 2347 if (intf->in_shutdown) { 2348 rv = -ENODEV; 2349 goto out_err; 2350 } 2351 2352 recv_msg->user = user; 2353 if (user) 2354 /* The put happens when the message is freed. */ 2355 kref_get(&user->refcount); 2356 recv_msg->msgid = msgid; 2357 /* 2358 * Store the message to send in the receive message so timeout 2359 * responses can get the proper response data. 2360 */ 2361 recv_msg->msg = *msg; 2362 2363 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2364 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2365 recv_msg, retries, retry_time_ms); 2366 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2367 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2368 source_address, source_lun, 2369 retries, retry_time_ms); 2370 } else if (is_ipmb_direct_addr(addr)) { 2371 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2372 recv_msg, source_lun); 2373 } else if (is_lan_addr(addr)) { 2374 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2375 source_lun, retries, retry_time_ms); 2376 } else { 2377 /* Unknown address type. 
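 *
 * Only the four address families dispatched above are valid.
 * For reference, a typical caller reaches this dispatch
 * through the exported wrappers below; a minimal sketch,
 * mirroring send_get_device_id_cmd() later in this file:
 *
 *      struct ipmi_system_interface_addr si = {
 *              .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *              .channel   = IPMI_BMC_CHANNEL,
 *      };
 *      struct kernel_ipmi_msg m = {
 *              .netfn = IPMI_NETFN_APP_REQUEST,
 *              .cmd   = IPMI_GET_DEVICE_ID_CMD,
 *      };
 *
 *      rv = ipmi_request_settime(user, (struct ipmi_addr *)&si,
 *                                0, &m, NULL, 0, -1, 0);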
*/ 2378 ipmi_inc_stat(intf, sent_invalid_commands); 2379 rv = -EINVAL; 2380 } 2381 2382 if (rv) { 2383out_err: 2384 ipmi_free_smi_msg(smi_msg); 2385 ipmi_free_recv_msg(recv_msg); 2386 } else { 2387 dev_dbg(intf->si_dev, "Send: %*ph\n", 2388 smi_msg->data_size, smi_msg->data); 2389 2390 smi_send(intf, intf->handlers, smi_msg, priority); 2391 } 2392 rcu_read_unlock(); 2393 2394out: 2395 if (rv && user) 2396 atomic_dec(&user->nr_msgs); 2397 return rv; 2398} 2399 2400static int check_addr(struct ipmi_smi *intf, 2401 struct ipmi_addr *addr, 2402 unsigned char *saddr, 2403 unsigned char *lun) 2404{ 2405 if (addr->channel >= IPMI_MAX_CHANNELS) 2406 return -EINVAL; 2407 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2408 *lun = intf->addrinfo[addr->channel].lun; 2409 *saddr = intf->addrinfo[addr->channel].address; 2410 return 0; 2411} 2412 2413int ipmi_request_settime(struct ipmi_user *user, 2414 struct ipmi_addr *addr, 2415 long msgid, 2416 struct kernel_ipmi_msg *msg, 2417 void *user_msg_data, 2418 int priority, 2419 int retries, 2420 unsigned int retry_time_ms) 2421{ 2422 unsigned char saddr = 0, lun = 0; 2423 int rv, index; 2424 2425 if (!user) 2426 return -EINVAL; 2427 2428 user = acquire_ipmi_user(user, &index); 2429 if (!user) 2430 return -ENODEV; 2431 2432 rv = check_addr(user->intf, addr, &saddr, &lun); 2433 if (!rv) 2434 rv = i_ipmi_request(user, 2435 user->intf, 2436 addr, 2437 msgid, 2438 msg, 2439 user_msg_data, 2440 NULL, NULL, 2441 priority, 2442 saddr, 2443 lun, 2444 retries, 2445 retry_time_ms); 2446 2447 release_ipmi_user(user, index); 2448 return rv; 2449} 2450EXPORT_SYMBOL(ipmi_request_settime); 2451 2452int ipmi_request_supply_msgs(struct ipmi_user *user, 2453 struct ipmi_addr *addr, 2454 long msgid, 2455 struct kernel_ipmi_msg *msg, 2456 void *user_msg_data, 2457 void *supplied_smi, 2458 struct ipmi_recv_msg *supplied_recv, 2459 int priority) 2460{ 2461 unsigned char saddr = 0, lun = 0; 2462 int rv, index; 2463 2464 if (!user) 2465 return -EINVAL; 2466 2467 user = acquire_ipmi_user(user, &index); 2468 if (!user) 2469 return -ENODEV; 2470 2471 rv = check_addr(user->intf, addr, &saddr, &lun); 2472 if (!rv) 2473 rv = i_ipmi_request(user, 2474 user->intf, 2475 addr, 2476 msgid, 2477 msg, 2478 user_msg_data, 2479 supplied_smi, 2480 supplied_recv, 2481 priority, 2482 saddr, 2483 lun, 2484 -1, 0); 2485 2486 release_ipmi_user(user, index); 2487 return rv; 2488} 2489EXPORT_SYMBOL(ipmi_request_supply_msgs); 2490 2491static void bmc_device_id_handler(struct ipmi_smi *intf, 2492 struct ipmi_recv_msg *msg) 2493{ 2494 int rv; 2495 2496 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2497 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2498 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2499 dev_warn(intf->si_dev, 2500 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2501 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2502 return; 2503 } 2504 2505 if (msg->msg.data[0]) { 2506 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2507 msg->msg.data[0]); 2508 intf->bmc->dyn_id_set = 0; 2509 goto out; 2510 } 2511 2512 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2513 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2514 if (rv) { 2515 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2516 /* record completion code when error */ 2517 intf->bmc->cc = msg->msg.data[0]; 2518 intf->bmc->dyn_id_set = 0; 2519 } else { 2520 /* 2521 * Make sure the id data is available before setting 2522 * dyn_id_set. 
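 *
 * This smp_wmb() pairs with the smp_rmb() in __get_device_id()
 * below:
 *
 *      writer (here)                reader (__get_device_id)
 *        write bmc->fetch_id          wait_event(dyn_id_set != 2)
 *        smp_wmb()                    smp_rmb()
 *        dyn_id_set = 1               read bmc->fetch_id
 *
 * so a reader that observes dyn_id_set == 1 also observes the
 * freshly demangled id data.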
2523 */ 2524 smp_wmb(); 2525 intf->bmc->dyn_id_set = 1; 2526 } 2527out: 2528 wake_up(&intf->waitq); 2529} 2530 2531static int 2532send_get_device_id_cmd(struct ipmi_smi *intf) 2533{ 2534 struct ipmi_system_interface_addr si; 2535 struct kernel_ipmi_msg msg; 2536 2537 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2538 si.channel = IPMI_BMC_CHANNEL; 2539 si.lun = 0; 2540 2541 msg.netfn = IPMI_NETFN_APP_REQUEST; 2542 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2543 msg.data = NULL; 2544 msg.data_len = 0; 2545 2546 return i_ipmi_request(NULL, 2547 intf, 2548 (struct ipmi_addr *) &si, 2549 0, 2550 &msg, 2551 intf, 2552 NULL, 2553 NULL, 2554 0, 2555 intf->addrinfo[0].address, 2556 intf->addrinfo[0].lun, 2557 -1, 0); 2558} 2559 2560static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2561{ 2562 int rv; 2563 unsigned int retry_count = 0; 2564 2565 intf->null_user_handler = bmc_device_id_handler; 2566 2567retry: 2568 bmc->cc = 0; 2569 bmc->dyn_id_set = 2; 2570 2571 rv = send_get_device_id_cmd(intf); 2572 if (rv) 2573 goto out_reset_handler; 2574 2575 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2576 2577 if (!bmc->dyn_id_set) { 2578 if (bmc->cc != IPMI_CC_NO_ERROR && 2579 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2580 msleep(500); 2581 dev_warn(intf->si_dev, 2582 "BMC returned 0x%2.2x, retry get bmc device id\n", 2583 bmc->cc); 2584 goto retry; 2585 } 2586 2587 rv = -EIO; /* Something went wrong in the fetch. */ 2588 } 2589 2590 /* dyn_id_set makes the id data available. */ 2591 smp_rmb(); 2592 2593out_reset_handler: 2594 intf->null_user_handler = NULL; 2595 2596 return rv; 2597} 2598 2599/* 2600 * Fetch the device id for the bmc/interface. You must pass in either 2601 * bmc or intf, this code will get the other one. If the data has 2602 * been recently fetched, this will just use the cached data. Otherwise 2603 * it will run a new fetch. 2604 * 2605 * Except for the first time this is called (in ipmi_add_smi()), 2606 * this will always return good data; 2607 */ 2608static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2609 struct ipmi_device_id *id, 2610 bool *guid_set, guid_t *guid, int intf_num) 2611{ 2612 int rv = 0; 2613 int prev_dyn_id_set, prev_guid_set; 2614 bool intf_set = intf != NULL; 2615 2616 if (!intf) { 2617 mutex_lock(&bmc->dyn_mutex); 2618retry_bmc_lock: 2619 if (list_empty(&bmc->intfs)) { 2620 mutex_unlock(&bmc->dyn_mutex); 2621 return -ENOENT; 2622 } 2623 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2624 bmc_link); 2625 kref_get(&intf->refcount); 2626 mutex_unlock(&bmc->dyn_mutex); 2627 mutex_lock(&intf->bmc_reg_mutex); 2628 mutex_lock(&bmc->dyn_mutex); 2629 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2630 bmc_link)) { 2631 mutex_unlock(&intf->bmc_reg_mutex); 2632 kref_put(&intf->refcount, intf_free); 2633 goto retry_bmc_lock; 2634 } 2635 } else { 2636 mutex_lock(&intf->bmc_reg_mutex); 2637 bmc = intf->bmc; 2638 mutex_lock(&bmc->dyn_mutex); 2639 kref_get(&intf->refcount); 2640 } 2641 2642 /* If we have a valid and current ID, just return that. */ 2643 if (intf->in_bmc_register || 2644 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2645 goto out_noprocessing; 2646 2647 prev_guid_set = bmc->dyn_guid_set; 2648 __get_guid(intf); 2649 2650 prev_dyn_id_set = bmc->dyn_id_set; 2651 rv = __get_device_id(intf, bmc); 2652 if (rv) 2653 goto out; 2654 2655 /* 2656 * The guid, device id, manufacturer id, and product id should 2657 * not change on a BMC. If it does we have to do some dancing. 
2658 */ 2659 if (!intf->bmc_registered 2660 || (!prev_guid_set && bmc->dyn_guid_set) 2661 || (!prev_dyn_id_set && bmc->dyn_id_set) 2662 || (prev_guid_set && bmc->dyn_guid_set 2663 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2664 || bmc->id.device_id != bmc->fetch_id.device_id 2665 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2666 || bmc->id.product_id != bmc->fetch_id.product_id) { 2667 struct ipmi_device_id id = bmc->fetch_id; 2668 int guid_set = bmc->dyn_guid_set; 2669 guid_t guid; 2670 2671 guid = bmc->fetch_guid; 2672 mutex_unlock(&bmc->dyn_mutex); 2673 2674 __ipmi_bmc_unregister(intf); 2675 /* Fill in the temporary BMC for good measure. */ 2676 intf->bmc->id = id; 2677 intf->bmc->dyn_guid_set = guid_set; 2678 intf->bmc->guid = guid; 2679 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2680 need_waiter(intf); /* Retry later on an error. */ 2681 else 2682 __scan_channels(intf, &id); 2683 2684 2685 if (!intf_set) { 2686 /* 2687 * We weren't given the interface in the call, 2688 * so restart the operation on the next 2689 * interface for the BMC. 2690 */ 2691 mutex_unlock(&intf->bmc_reg_mutex); 2692 mutex_lock(&bmc->dyn_mutex); 2693 goto retry_bmc_lock; 2694 } 2695 2696 /* We have a new BMC, set it up. */ 2697 bmc = intf->bmc; 2698 mutex_lock(&bmc->dyn_mutex); 2699 goto out_noprocessing; 2700 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2701 /* The version info changed, so scan the channels again. */ 2702 __scan_channels(intf, &bmc->fetch_id); 2703 2704 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2705 2706out: 2707 if (rv && prev_dyn_id_set) { 2708 rv = 0; /* Ignore failures if we have previous data. */ 2709 bmc->dyn_id_set = prev_dyn_id_set; 2710 } 2711 if (!rv) { 2712 bmc->id = bmc->fetch_id; 2713 if (bmc->dyn_guid_set) 2714 bmc->guid = bmc->fetch_guid; 2715 else if (prev_guid_set) 2716 /* 2717 * The guid used to be valid but this fetch failed, 2718 * so just use the cached value. 
2719 */ 2720 bmc->dyn_guid_set = prev_guid_set; 2721 } 2722out_noprocessing: 2723 if (!rv) { 2724 if (id) 2725 *id = bmc->id; 2726 2727 if (guid_set) 2728 *guid_set = bmc->dyn_guid_set; 2729 2730 if (guid && bmc->dyn_guid_set) 2731 *guid = bmc->guid; 2732 } 2733 2734 mutex_unlock(&bmc->dyn_mutex); 2735 mutex_unlock(&intf->bmc_reg_mutex); 2736 2737 kref_put(&intf->refcount, intf_free); 2738 return rv; 2739} 2740 2741static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2742 struct ipmi_device_id *id, 2743 bool *guid_set, guid_t *guid) 2744{ 2745 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2746} 2747 2748static ssize_t device_id_show(struct device *dev, 2749 struct device_attribute *attr, 2750 char *buf) 2751{ 2752 struct bmc_device *bmc = to_bmc_device(dev); 2753 struct ipmi_device_id id; 2754 int rv; 2755 2756 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2757 if (rv) 2758 return rv; 2759 2760 return sysfs_emit(buf, "%u\n", id.device_id); 2761} 2762static DEVICE_ATTR_RO(device_id); 2763 2764static ssize_t provides_device_sdrs_show(struct device *dev, 2765 struct device_attribute *attr, 2766 char *buf) 2767{ 2768 struct bmc_device *bmc = to_bmc_device(dev); 2769 struct ipmi_device_id id; 2770 int rv; 2771 2772 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2773 if (rv) 2774 return rv; 2775 2776 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2777} 2778static DEVICE_ATTR_RO(provides_device_sdrs); 2779 2780static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2781 char *buf) 2782{ 2783 struct bmc_device *bmc = to_bmc_device(dev); 2784 struct ipmi_device_id id; 2785 int rv; 2786 2787 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2788 if (rv) 2789 return rv; 2790 2791 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2792} 2793static DEVICE_ATTR_RO(revision); 2794 2795static ssize_t firmware_revision_show(struct device *dev, 2796 struct device_attribute *attr, 2797 char *buf) 2798{ 2799 struct bmc_device *bmc = to_bmc_device(dev); 2800 struct ipmi_device_id id; 2801 int rv; 2802 2803 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2804 if (rv) 2805 return rv; 2806 2807 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2808 id.firmware_revision_2); 2809} 2810static DEVICE_ATTR_RO(firmware_revision); 2811 2812static ssize_t ipmi_version_show(struct device *dev, 2813 struct device_attribute *attr, 2814 char *buf) 2815{ 2816 struct bmc_device *bmc = to_bmc_device(dev); 2817 struct ipmi_device_id id; 2818 int rv; 2819 2820 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2821 if (rv) 2822 return rv; 2823 2824 return sysfs_emit(buf, "%u.%u\n", 2825 ipmi_version_major(&id), 2826 ipmi_version_minor(&id)); 2827} 2828static DEVICE_ATTR_RO(ipmi_version); 2829 2830static ssize_t add_dev_support_show(struct device *dev, 2831 struct device_attribute *attr, 2832 char *buf) 2833{ 2834 struct bmc_device *bmc = to_bmc_device(dev); 2835 struct ipmi_device_id id; 2836 int rv; 2837 2838 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2839 if (rv) 2840 return rv; 2841 2842 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2843} 2844static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2845 NULL); 2846 2847static ssize_t manufacturer_id_show(struct device *dev, 2848 struct device_attribute *attr, 2849 char *buf) 2850{ 2851 struct bmc_device *bmc = to_bmc_device(dev); 2852 struct ipmi_device_id id; 2853 int rv; 2854 2855 rv = bmc_get_device_id(NULL, 
bmc, &id, NULL, NULL); 2856 if (rv) 2857 return rv; 2858 2859 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2860} 2861static DEVICE_ATTR_RO(manufacturer_id); 2862 2863static ssize_t product_id_show(struct device *dev, 2864 struct device_attribute *attr, 2865 char *buf) 2866{ 2867 struct bmc_device *bmc = to_bmc_device(dev); 2868 struct ipmi_device_id id; 2869 int rv; 2870 2871 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2872 if (rv) 2873 return rv; 2874 2875 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2876} 2877static DEVICE_ATTR_RO(product_id); 2878 2879static ssize_t aux_firmware_rev_show(struct device *dev, 2880 struct device_attribute *attr, 2881 char *buf) 2882{ 2883 struct bmc_device *bmc = to_bmc_device(dev); 2884 struct ipmi_device_id id; 2885 int rv; 2886 2887 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2888 if (rv) 2889 return rv; 2890 2891 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2892 id.aux_firmware_revision[3], 2893 id.aux_firmware_revision[2], 2894 id.aux_firmware_revision[1], 2895 id.aux_firmware_revision[0]); 2896} 2897static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2898 2899static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2900 char *buf) 2901{ 2902 struct bmc_device *bmc = to_bmc_device(dev); 2903 bool guid_set; 2904 guid_t guid; 2905 int rv; 2906 2907 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2908 if (rv) 2909 return rv; 2910 if (!guid_set) 2911 return -ENOENT; 2912 2913 return sysfs_emit(buf, "%pUl\n", &guid); 2914} 2915static DEVICE_ATTR_RO(guid); 2916 2917static struct attribute *bmc_dev_attrs[] = { 2918 &dev_attr_device_id.attr, 2919 &dev_attr_provides_device_sdrs.attr, 2920 &dev_attr_revision.attr, 2921 &dev_attr_firmware_revision.attr, 2922 &dev_attr_ipmi_version.attr, 2923 &dev_attr_additional_device_support.attr, 2924 &dev_attr_manufacturer_id.attr, 2925 &dev_attr_product_id.attr, 2926 &dev_attr_aux_firmware_revision.attr, 2927 &dev_attr_guid.attr, 2928 NULL 2929}; 2930 2931static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2932 struct attribute *attr, int idx) 2933{ 2934 struct device *dev = kobj_to_dev(kobj); 2935 struct bmc_device *bmc = to_bmc_device(dev); 2936 umode_t mode = attr->mode; 2937 int rv; 2938 2939 if (attr == &dev_attr_aux_firmware_revision.attr) { 2940 struct ipmi_device_id id; 2941 2942 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2943 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2944 } 2945 if (attr == &dev_attr_guid.attr) { 2946 bool guid_set; 2947 2948 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2949 return (!rv && guid_set) ? 
mode : 0; 2950 } 2951 return mode; 2952} 2953 2954static const struct attribute_group bmc_dev_attr_group = { 2955 .attrs = bmc_dev_attrs, 2956 .is_visible = bmc_dev_attr_is_visible, 2957}; 2958 2959static const struct attribute_group *bmc_dev_attr_groups[] = { 2960 &bmc_dev_attr_group, 2961 NULL 2962}; 2963 2964static const struct device_type bmc_device_type = { 2965 .groups = bmc_dev_attr_groups, 2966}; 2967 2968static int __find_bmc_guid(struct device *dev, const void *data) 2969{ 2970 const guid_t *guid = data; 2971 struct bmc_device *bmc; 2972 int rv; 2973 2974 if (dev->type != &bmc_device_type) 2975 return 0; 2976 2977 bmc = to_bmc_device(dev); 2978 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2979 if (rv) 2980 rv = kref_get_unless_zero(&bmc->usecount); 2981 return rv; 2982} 2983 2984/* 2985 * Returns with the bmc's usecount incremented, if it is non-NULL. 2986 */ 2987static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2988 guid_t *guid) 2989{ 2990 struct device *dev; 2991 struct bmc_device *bmc = NULL; 2992 2993 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2994 if (dev) { 2995 bmc = to_bmc_device(dev); 2996 put_device(dev); 2997 } 2998 return bmc; 2999} 3000 3001struct prod_dev_id { 3002 unsigned int product_id; 3003 unsigned char device_id; 3004}; 3005 3006static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 3007{ 3008 const struct prod_dev_id *cid = data; 3009 struct bmc_device *bmc; 3010 int rv; 3011 3012 if (dev->type != &bmc_device_type) 3013 return 0; 3014 3015 bmc = to_bmc_device(dev); 3016 rv = (bmc->id.product_id == cid->product_id 3017 && bmc->id.device_id == cid->device_id); 3018 if (rv) 3019 rv = kref_get_unless_zero(&bmc->usecount); 3020 return rv; 3021} 3022 3023/* 3024 * Returns with the bmc's usecount incremented, if it is non-NULL. 3025 */ 3026static struct bmc_device *ipmi_find_bmc_prod_dev_id( 3027 struct device_driver *drv, 3028 unsigned int product_id, unsigned char device_id) 3029{ 3030 struct prod_dev_id id = { 3031 .product_id = product_id, 3032 .device_id = device_id, 3033 }; 3034 struct device *dev; 3035 struct bmc_device *bmc = NULL; 3036 3037 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3038 if (dev) { 3039 bmc = to_bmc_device(dev); 3040 put_device(dev); 3041 } 3042 return bmc; 3043} 3044 3045static DEFINE_IDA(ipmi_bmc_ida); 3046 3047static void 3048release_bmc_device(struct device *dev) 3049{ 3050 kfree(to_bmc_device(dev)); 3051} 3052 3053static void cleanup_bmc_work(struct work_struct *work) 3054{ 3055 struct bmc_device *bmc = container_of(work, struct bmc_device, 3056 remove_work); 3057 int id = bmc->pdev.id; /* Unregister overwrites id */ 3058 3059 platform_device_unregister(&bmc->pdev); 3060 ida_simple_remove(&ipmi_bmc_ida, id); 3061} 3062 3063static void 3064cleanup_bmc_device(struct kref *ref) 3065{ 3066 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3067 3068 /* 3069 * Remove the platform device in a work queue to avoid issues 3070 * with removing the device attributes while reading a device 3071 * attribute. 3072 */ 3073 queue_work(remove_work_wq, &bmc->remove_work); 3074} 3075 3076/* 3077 * Must be called with intf->bmc_reg_mutex held. 
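 *
 * Lock ordering, as established in __bmc_get_device_id()
 * above:
 *
 *      intf->bmc_reg_mutex
 *          bmc->dyn_mutex
 *
 * This function takes only bmc->dyn_mutex itself; the caller
 * supplies bmc_reg_mutex.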
3078 */ 3079 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 3080{ 3081 struct bmc_device *bmc = intf->bmc; 3082 3083 if (!intf->bmc_registered) 3084 return; 3085 3086 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3087 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 3088 kfree(intf->my_dev_name); 3089 intf->my_dev_name = NULL; 3090 3091 mutex_lock(&bmc->dyn_mutex); 3092 list_del(&intf->bmc_link); 3093 mutex_unlock(&bmc->dyn_mutex); 3094 intf->bmc = &intf->tmp_bmc; 3095 kref_put(&bmc->usecount, cleanup_bmc_device); 3096 intf->bmc_registered = false; 3097} 3098 3099static void ipmi_bmc_unregister(struct ipmi_smi *intf) 3100{ 3101 mutex_lock(&intf->bmc_reg_mutex); 3102 __ipmi_bmc_unregister(intf); 3103 mutex_unlock(&intf->bmc_reg_mutex); 3104} 3105 3106/* 3107 * Must be called with intf->bmc_reg_mutex held. 3108 */ 3109static int __ipmi_bmc_register(struct ipmi_smi *intf, 3110 struct ipmi_device_id *id, 3111 bool guid_set, guid_t *guid, int intf_num) 3112{ 3113 int rv; 3114 struct bmc_device *bmc; 3115 struct bmc_device *old_bmc; 3116 3117 /* 3118 * platform_device_register() can cause bmc_reg_mutex to 3119 * be claimed because of the is_visible functions of 3120 * the attributes. Eliminate possible recursion and 3121 * release the lock. 3122 */ 3123 intf->in_bmc_register = true; 3124 mutex_unlock(&intf->bmc_reg_mutex); 3125 3126 /* 3127 * Try to find if there is a bmc_device struct 3128 * representing the interfaced BMC already. 3129 */ 3130 mutex_lock(&ipmidriver_mutex); 3131 if (guid_set) 3132 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 3133 else 3134 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 3135 id->product_id, 3136 id->device_id); 3137 3138 /* 3139 * If there is already a bmc_device, reuse it; otherwise 3140 * register the new BMC device. 3141 */ 3142 if (old_bmc) { 3143 bmc = old_bmc; 3144 /* 3145 * Note: old_bmc already has usecount incremented by 3146 * the BMC find functions. 
3147 */ 3148 intf->bmc = old_bmc; 3149 mutex_lock(&bmc->dyn_mutex); 3150 list_add_tail(&intf->bmc_link, &bmc->intfs); 3151 mutex_unlock(&bmc->dyn_mutex); 3152 3153 dev_info(intf->si_dev, 3154 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3155 bmc->id.manufacturer_id, 3156 bmc->id.product_id, 3157 bmc->id.device_id); 3158 } else { 3159 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3160 if (!bmc) { 3161 rv = -ENOMEM; 3162 goto out; 3163 } 3164 INIT_LIST_HEAD(&bmc->intfs); 3165 mutex_init(&bmc->dyn_mutex); 3166 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3167 3168 bmc->id = *id; 3169 bmc->dyn_id_set = 1; 3170 bmc->dyn_guid_set = guid_set; 3171 bmc->guid = *guid; 3172 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3173 3174 bmc->pdev.name = "ipmi_bmc"; 3175 3176 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3177 if (rv < 0) { 3178 kfree(bmc); 3179 goto out; 3180 } 3181 3182 bmc->pdev.dev.driver = &ipmidriver.driver; 3183 bmc->pdev.id = rv; 3184 bmc->pdev.dev.release = release_bmc_device; 3185 bmc->pdev.dev.type = &bmc_device_type; 3186 kref_init(&bmc->usecount); 3187 3188 intf->bmc = bmc; 3189 mutex_lock(&bmc->dyn_mutex); 3190 list_add_tail(&intf->bmc_link, &bmc->intfs); 3191 mutex_unlock(&bmc->dyn_mutex); 3192 3193 rv = platform_device_register(&bmc->pdev); 3194 if (rv) { 3195 dev_err(intf->si_dev, 3196 "Unable to register bmc device: %d\n", 3197 rv); 3198 goto out_list_del; 3199 } 3200 3201 dev_info(intf->si_dev, 3202 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3203 bmc->id.manufacturer_id, 3204 bmc->id.product_id, 3205 bmc->id.device_id); 3206 } 3207 3208 /* 3209 * create symlink from system interface device to bmc device 3210 * and back. 3211 */ 3212 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3213 if (rv) { 3214 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3215 goto out_put_bmc; 3216 } 3217 3218 if (intf_num == -1) 3219 intf_num = intf->intf_num; 3220 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3221 if (!intf->my_dev_name) { 3222 rv = -ENOMEM; 3223 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3224 rv); 3225 goto out_unlink1; 3226 } 3227 3228 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3229 intf->my_dev_name); 3230 if (rv) { 3231 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3232 rv); 3233 goto out_free_my_dev_name; 3234 } 3235 3236 intf->bmc_registered = true; 3237 3238out: 3239 mutex_unlock(&ipmidriver_mutex); 3240 mutex_lock(&intf->bmc_reg_mutex); 3241 intf->in_bmc_register = false; 3242 return rv; 3243 3244 3245out_free_my_dev_name: 3246 kfree(intf->my_dev_name); 3247 intf->my_dev_name = NULL; 3248 3249out_unlink1: 3250 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3251 3252out_put_bmc: 3253 mutex_lock(&bmc->dyn_mutex); 3254 list_del(&intf->bmc_link); 3255 mutex_unlock(&bmc->dyn_mutex); 3256 intf->bmc = &intf->tmp_bmc; 3257 kref_put(&bmc->usecount, cleanup_bmc_device); 3258 goto out; 3259 3260out_list_del: 3261 mutex_lock(&bmc->dyn_mutex); 3262 list_del(&intf->bmc_link); 3263 mutex_unlock(&bmc->dyn_mutex); 3264 intf->bmc = &intf->tmp_bmc; 3265 put_device(&bmc->pdev.dev); 3266 goto out; 3267} 3268 3269static int 3270send_guid_cmd(struct ipmi_smi *intf, int chan) 3271{ 3272 struct kernel_ipmi_msg msg; 3273 struct ipmi_system_interface_addr si; 3274 3275 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3276 si.channel = IPMI_BMC_CHANNEL; 3277 si.lun = 0; 3278 3279 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3280 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3281 msg.data = NULL; 3282 msg.data_len = 0; 3283 return i_ipmi_request(NULL, 3284 intf, 3285 (struct ipmi_addr *) &si, 3286 0, 3287 &msg, 3288 intf, 3289 NULL, 3290 NULL, 3291 0, 3292 intf->addrinfo[0].address, 3293 intf->addrinfo[0].lun, 3294 -1, 0); 3295} 3296 3297static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3298{ 3299 struct bmc_device *bmc = intf->bmc; 3300 3301 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3302 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3303 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3304 /* Not for me */ 3305 return; 3306 3307 if (msg->msg.data[0] != 0) { 3308 /* Error from getting the GUID, the BMC doesn't have one. */ 3309 bmc->dyn_guid_set = 0; 3310 goto out; 3311 } 3312 3313 if (msg->msg.data_len < UUID_SIZE + 1) { 3314 bmc->dyn_guid_set = 0; 3315 dev_warn(intf->si_dev, 3316 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3317 msg->msg.data_len, UUID_SIZE + 1); 3318 goto out; 3319 } 3320 3321 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3322 /* 3323 * Make sure the guid data is available before setting 3324 * dyn_guid_set. 3325 */ 3326 smp_wmb(); 3327 bmc->dyn_guid_set = 1; 3328 out: 3329 wake_up(&intf->waitq); 3330} 3331 3332static void __get_guid(struct ipmi_smi *intf) 3333{ 3334 int rv; 3335 struct bmc_device *bmc = intf->bmc; 3336 3337 bmc->dyn_guid_set = 2; 3338 intf->null_user_handler = guid_handler; 3339 rv = send_guid_cmd(intf, 0); 3340 if (rv) 3341 /* Send failed, no GUID available. */ 3342 bmc->dyn_guid_set = 0; 3343 else 3344 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3345 3346 /* dyn_guid_set makes the guid data available. */ 3347 smp_rmb(); 3348 3349 intf->null_user_handler = NULL; 3350} 3351 3352static int 3353send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3354{ 3355 struct kernel_ipmi_msg msg; 3356 unsigned char data[1]; 3357 struct ipmi_system_interface_addr si; 3358 3359 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3360 si.channel = IPMI_BMC_CHANNEL; 3361 si.lun = 0; 3362 3363 msg.netfn = IPMI_NETFN_APP_REQUEST; 3364 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3365 msg.data = data; 3366 msg.data_len = 1; 3367 data[0] = chan; 3368 return i_ipmi_request(NULL, 3369 intf, 3370 (struct ipmi_addr *) &si, 3371 0, 3372 &msg, 3373 intf, 3374 NULL, 3375 NULL, 3376 0, 3377 intf->addrinfo[0].address, 3378 intf->addrinfo[0].lun, 3379 -1, 0); 3380} 3381 3382static void 3383channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3384{ 3385 int rv = 0; 3386 int ch; 3387 unsigned int set = intf->curr_working_cset; 3388 struct ipmi_channel *chans; 3389 3390 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3391 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3392 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3393 /* It's the one we want */ 3394 if (msg->msg.data[0] != 0) { 3395 /* Got an error from the channel, just go on. */ 3396 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3397 /* 3398 * If the MC does not support this 3399 * command, that is legal. We just 3400 * assume it has one IPMB at channel 3401 * zero. 
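 *
 * (For reference, a successful Get Channel Info response is
 * parsed below as: data[0] completion code, data[2] bits 6:0
 * the channel medium, data[3] bits 4:0 the channel protocol.)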
3402 */ 3403 intf->wchannels[set].c[0].medium 3404 = IPMI_CHANNEL_MEDIUM_IPMB; 3405 intf->wchannels[set].c[0].protocol 3406 = IPMI_CHANNEL_PROTOCOL_IPMB; 3407 3408 intf->channel_list = intf->wchannels + set; 3409 intf->channels_ready = true; 3410 wake_up(&intf->waitq); 3411 goto out; 3412 } 3413 goto next_channel; 3414 } 3415 if (msg->msg.data_len < 4) { 3416 /* Message not big enough, just go on. */ 3417 goto next_channel; 3418 } 3419 ch = intf->curr_channel; 3420 chans = intf->wchannels[set].c; 3421 chans[ch].medium = msg->msg.data[2] & 0x7f; 3422 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3423 3424 next_channel: 3425 intf->curr_channel++; 3426 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3427 intf->channel_list = intf->wchannels + set; 3428 intf->channels_ready = true; 3429 wake_up(&intf->waitq); 3430 } else { 3431 intf->channel_list = intf->wchannels + set; 3432 intf->channels_ready = true; 3433 rv = send_channel_info_cmd(intf, intf->curr_channel); 3434 } 3435 3436 if (rv) { 3437 /* Got an error somehow, just give up. */ 3438 dev_warn(intf->si_dev, 3439 "Error sending channel information for channel %d: %d\n", 3440 intf->curr_channel, rv); 3441 3442 intf->channel_list = intf->wchannels + set; 3443 intf->channels_ready = true; 3444 wake_up(&intf->waitq); 3445 } 3446 } 3447 out: 3448 return; 3449} 3450 3451/* 3452 * Must be holding intf->bmc_reg_mutex to call this. 3453 */ 3454static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3455{ 3456 int rv; 3457 3458 if (ipmi_version_major(id) > 1 3459 || (ipmi_version_major(id) == 1 3460 && ipmi_version_minor(id) >= 5)) { 3461 unsigned int set; 3462 3463 /* 3464 * Start scanning the channels to see what is 3465 * available. 3466 */ 3467 set = !intf->curr_working_cset; 3468 intf->curr_working_cset = set; 3469 memset(&intf->wchannels[set], 0, 3470 sizeof(struct ipmi_channel_set)); 3471 3472 intf->null_user_handler = channel_handler; 3473 intf->curr_channel = 0; 3474 rv = send_channel_info_cmd(intf, 0); 3475 if (rv) { 3476 dev_warn(intf->si_dev, 3477 "Error sending channel information for channel 0, %d\n", 3478 rv); 3479 intf->null_user_handler = NULL; 3480 return -EIO; 3481 } 3482 3483 /* Wait for the channel info to be read. */ 3484 wait_event(intf->waitq, intf->channels_ready); 3485 intf->null_user_handler = NULL; 3486 } else { 3487 unsigned int set = intf->curr_working_cset; 3488 3489 /* Assume a single IPMB channel at zero. 
*/ 3490 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3491 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3492 intf->channel_list = intf->wchannels + set; 3493 intf->channels_ready = true; 3494 } 3495 3496 return 0; 3497} 3498 3499static void ipmi_poll(struct ipmi_smi *intf) 3500{ 3501 if (intf->handlers->poll) 3502 intf->handlers->poll(intf->send_info); 3503 /* In case something came in */ 3504 handle_new_recv_msgs(intf); 3505} 3506 3507void ipmi_poll_interface(struct ipmi_user *user) 3508{ 3509 ipmi_poll(user->intf); 3510} 3511EXPORT_SYMBOL(ipmi_poll_interface); 3512 3513static ssize_t nr_users_show(struct device *dev, 3514 struct device_attribute *attr, 3515 char *buf) 3516{ 3517 struct ipmi_smi *intf = container_of(attr, 3518 struct ipmi_smi, nr_users_devattr); 3519 3520 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3521} 3522static DEVICE_ATTR_RO(nr_users); 3523 3524static ssize_t nr_msgs_show(struct device *dev, 3525 struct device_attribute *attr, 3526 char *buf) 3527{ 3528 struct ipmi_smi *intf = container_of(attr, 3529 struct ipmi_smi, nr_msgs_devattr); 3530 struct ipmi_user *user; 3531 int index; 3532 unsigned int count = 0; 3533 3534 index = srcu_read_lock(&intf->users_srcu); 3535 list_for_each_entry_rcu(user, &intf->users, link) 3536 count += atomic_read(&user->nr_msgs); 3537 srcu_read_unlock(&intf->users_srcu, index); 3538 3539 return sysfs_emit(buf, "%u\n", count); 3540} 3541static DEVICE_ATTR_RO(nr_msgs); 3542 3543static void redo_bmc_reg(struct work_struct *work) 3544{ 3545 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3546 bmc_reg_work); 3547 3548 if (!intf->in_shutdown) 3549 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3550 3551 kref_put(&intf->refcount, intf_free); 3552} 3553 3554int ipmi_add_smi(struct module *owner, 3555 const struct ipmi_smi_handlers *handlers, 3556 void *send_info, 3557 struct device *si_dev, 3558 unsigned char slave_addr) 3559{ 3560 int i, j; 3561 int rv; 3562 struct ipmi_smi *intf, *tintf; 3563 struct list_head *link; 3564 struct ipmi_device_id id; 3565 3566 /* 3567 * Make sure the driver is actually initialized, this handles 3568 * problems with initialization order. 3569 */ 3570 rv = ipmi_init_msghandler(); 3571 if (rv) 3572 return rv; 3573 3574 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3575 if (!intf) 3576 return -ENOMEM; 3577 3578 rv = init_srcu_struct(&intf->users_srcu); 3579 if (rv) { 3580 kfree(intf); 3581 return rv; 3582 } 3583 3584 intf->owner = owner; 3585 intf->bmc = &intf->tmp_bmc; 3586 INIT_LIST_HEAD(&intf->bmc->intfs); 3587 mutex_init(&intf->bmc->dyn_mutex); 3588 INIT_LIST_HEAD(&intf->bmc_link); 3589 mutex_init(&intf->bmc_reg_mutex); 3590 intf->intf_num = -1; /* Mark it invalid for now. 
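 *
 * Readers treat intf_num == -1 as "not yet usable"; the real
 * number is only published near the end of this function,
 * after an smp_wmb(), so RCU readers never see a valid
 * intf_num before the rest of the structure is initialized.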
*/ 3591 kref_init(&intf->refcount); 3592 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3593 intf->si_dev = si_dev; 3594 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3595 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3596 intf->addrinfo[j].lun = 2; 3597 } 3598 if (slave_addr != 0) 3599 intf->addrinfo[0].address = slave_addr; 3600 INIT_LIST_HEAD(&intf->users); 3601 atomic_set(&intf->nr_users, 0); 3602 intf->handlers = handlers; 3603 intf->send_info = send_info; 3604 spin_lock_init(&intf->seq_lock); 3605 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3606 intf->seq_table[j].inuse = 0; 3607 intf->seq_table[j].seqid = 0; 3608 } 3609 intf->curr_seq = 0; 3610 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3611 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3612 tasklet_setup(&intf->recv_tasklet, 3613 smi_recv_tasklet); 3614 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3615 spin_lock_init(&intf->xmit_msgs_lock); 3616 INIT_LIST_HEAD(&intf->xmit_msgs); 3617 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3618 spin_lock_init(&intf->events_lock); 3619 spin_lock_init(&intf->watch_lock); 3620 atomic_set(&intf->event_waiters, 0); 3621 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3622 INIT_LIST_HEAD(&intf->waiting_events); 3623 intf->waiting_events_count = 0; 3624 mutex_init(&intf->cmd_rcvrs_mutex); 3625 spin_lock_init(&intf->maintenance_mode_lock); 3626 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3627 init_waitqueue_head(&intf->waitq); 3628 for (i = 0; i < IPMI_NUM_STATS; i++) 3629 atomic_set(&intf->stats[i], 0); 3630 3631 mutex_lock(&ipmi_interfaces_mutex); 3632 /* Look for a hole in the numbers. */ 3633 i = 0; 3634 link = &ipmi_interfaces; 3635 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3636 ipmi_interfaces_mutex_held()) { 3637 if (tintf->intf_num != i) { 3638 link = &tintf->link; 3639 break; 3640 } 3641 i++; 3642 } 3643 /* Add the new interface in numeric order. */ 3644 if (i == 0) 3645 list_add_rcu(&intf->link, &ipmi_interfaces); 3646 else 3647 list_add_tail_rcu(&intf->link, link); 3648 3649 rv = handlers->start_processing(send_info, intf); 3650 if (rv) 3651 goto out_err; 3652 3653 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3654 if (rv) { 3655 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3656 goto out_err_started; 3657 } 3658 3659 mutex_lock(&intf->bmc_reg_mutex); 3660 rv = __scan_channels(intf, &id); 3661 mutex_unlock(&intf->bmc_reg_mutex); 3662 if (rv) 3663 goto out_err_bmc_reg; 3664 3665 intf->nr_users_devattr = dev_attr_nr_users; 3666 sysfs_attr_init(&intf->nr_users_devattr.attr); 3667 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3668 if (rv) 3669 goto out_err_bmc_reg; 3670 3671 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3672 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3673 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3674 if (rv) { 3675 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3676 goto out_err_bmc_reg; 3677 } 3678 3679 /* 3680 * Keep memory order straight for RCU readers. Make 3681 * sure everything else is committed to memory before 3682 * setting intf_num to mark the interface valid. 3683 */ 3684 smp_wmb(); 3685 intf->intf_num = i; 3686 mutex_unlock(&ipmi_interfaces_mutex); 3687 3688 /* After this point the interface is legal to use. 
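 *
 * A minimal sketch of how a low-level driver gets here
 * (hypothetical hook names; only handlers actually used in
 * this file are shown):
 *
 *      static const struct ipmi_smi_handlers my_handlers = {
 *              .start_processing = my_start_processing,
 *              .sender           = my_sender,
 *              .shutdown         = my_shutdown,
 *      };
 *
 *      rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info,
 *                        my_dev, 0);
 *
 * Passing a slave address of zero keeps the default
 * IPMI_BMC_SLAVE_ADDR assigned above.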
*/ 3689 call_smi_watchers(i, intf->si_dev); 3690 3691 return 0; 3692 3693 out_err_bmc_reg: 3694 ipmi_bmc_unregister(intf); 3695 out_err_started: 3696 if (intf->handlers->shutdown) 3697 intf->handlers->shutdown(intf->send_info); 3698 out_err: 3699 list_del_rcu(&intf->link); 3700 mutex_unlock(&ipmi_interfaces_mutex); 3701 synchronize_srcu(&ipmi_interfaces_srcu); 3702 cleanup_srcu_struct(&intf->users_srcu); 3703 kref_put(&intf->refcount, intf_free); 3704 3705 return rv; 3706} 3707EXPORT_SYMBOL(ipmi_add_smi); 3708 3709static void deliver_smi_err_response(struct ipmi_smi *intf, 3710 struct ipmi_smi_msg *msg, 3711 unsigned char err) 3712{ 3713 msg->rsp[0] = msg->data[0] | 4; 3714 msg->rsp[1] = msg->data[1]; 3715 msg->rsp[2] = err; 3716 msg->rsp_size = 3; 3717 /* It's an error, so it will never requeue, no need to check return. */ 3718 handle_one_recv_msg(intf, msg); 3719} 3720 3721static void cleanup_smi_msgs(struct ipmi_smi *intf) 3722{ 3723 int i; 3724 struct seq_table *ent; 3725 struct ipmi_smi_msg *msg; 3726 struct list_head *entry; 3727 struct list_head tmplist; 3728 3729 /* Clear out our transmit queues and hold the messages. */ 3730 INIT_LIST_HEAD(&tmplist); 3731 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3732 list_splice_tail(&intf->xmit_msgs, &tmplist); 3733 3734 /* Current message first, to preserve order */ 3735 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3736 /* Wait for the message to clear out. */ 3737 schedule_timeout(1); 3738 } 3739 3740 /* No need for locks, the interface is down. */ 3741 3742 /* 3743 * Return errors for all pending messages in queue and in the 3744 * tables waiting for remote responses. 3745 */ 3746 while (!list_empty(&tmplist)) { 3747 entry = tmplist.next; 3748 list_del(entry); 3749 msg = list_entry(entry, struct ipmi_smi_msg, link); 3750 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3751 } 3752 3753 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3754 ent = &intf->seq_table[i]; 3755 if (!ent->inuse) 3756 continue; 3757 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3758 } 3759} 3760 3761void ipmi_unregister_smi(struct ipmi_smi *intf) 3762{ 3763 struct ipmi_smi_watcher *w; 3764 int intf_num, index; 3765 3766 if (!intf) 3767 return; 3768 intf_num = intf->intf_num; 3769 mutex_lock(&ipmi_interfaces_mutex); 3770 intf->intf_num = -1; 3771 intf->in_shutdown = true; 3772 list_del_rcu(&intf->link); 3773 mutex_unlock(&ipmi_interfaces_mutex); 3774 synchronize_srcu(&ipmi_interfaces_srcu); 3775 3776 /* At this point no users can be added to the interface. */ 3777 3778 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3779 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3780 3781 /* 3782 * Call all the watcher interfaces to tell them that 3783 * an interface is going away. 
3784 */ 3785 mutex_lock(&smi_watchers_mutex); 3786 list_for_each_entry(w, &smi_watchers, link) 3787 w->smi_gone(intf_num); 3788 mutex_unlock(&smi_watchers_mutex); 3789 3790 index = srcu_read_lock(&intf->users_srcu); 3791 while (!list_empty(&intf->users)) { 3792 struct ipmi_user *user = 3793 container_of(list_next_rcu(&intf->users), 3794 struct ipmi_user, link); 3795 3796 _ipmi_destroy_user(user); 3797 } 3798 srcu_read_unlock(&intf->users_srcu, index); 3799 3800 if (intf->handlers->shutdown) 3801 intf->handlers->shutdown(intf->send_info); 3802 3803 cleanup_smi_msgs(intf); 3804 3805 ipmi_bmc_unregister(intf); 3806 3807 cleanup_srcu_struct(&intf->users_srcu); 3808 kref_put(&intf->refcount, intf_free); 3809} 3810EXPORT_SYMBOL(ipmi_unregister_smi); 3811 3812static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3813 struct ipmi_smi_msg *msg) 3814{ 3815 struct ipmi_ipmb_addr ipmb_addr; 3816 struct ipmi_recv_msg *recv_msg; 3817 3818 /* 3819 * This is 11, not 10, because the response must contain a 3820 * completion code. 3821 */ 3822 if (msg->rsp_size < 11) { 3823 /* Message not big enough, just ignore it. */ 3824 ipmi_inc_stat(intf, invalid_ipmb_responses); 3825 return 0; 3826 } 3827 3828 if (msg->rsp[2] != 0) { 3829 /* An error getting the response, just ignore it. */ 3830 return 0; 3831 } 3832 3833 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3834 ipmb_addr.slave_addr = msg->rsp[6]; 3835 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3836 ipmb_addr.lun = msg->rsp[7] & 3; 3837 3838 /* 3839 * It's a response from a remote entity. Look up the sequence 3840 * number and handle the response. 3841 */ 3842 if (intf_find_seq(intf, 3843 msg->rsp[7] >> 2, 3844 msg->rsp[3] & 0x0f, 3845 msg->rsp[8], 3846 (msg->rsp[4] >> 2) & (~1), 3847 (struct ipmi_addr *) &ipmb_addr, 3848 &recv_msg)) { 3849 /* 3850 * We were unable to find the sequence number, 3851 * so just nuke the message. 3852 */ 3853 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3854 return 0; 3855 } 3856 3857 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3858 /* 3859 * The other fields matched, so no need to set them, except 3860 * for netfn, which needs to be the response that was 3861 * returned, not the request value. 3862 */ 3863 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3864 recv_msg->msg.data = recv_msg->msg_data; 3865 recv_msg->msg.data_len = msg->rsp_size - 10; 3866 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3867 if (deliver_response(intf, recv_msg)) 3868 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3869 else 3870 ipmi_inc_stat(intf, handled_ipmb_responses); 3871 3872 return 0; 3873} 3874 3875static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3876 struct ipmi_smi_msg *msg) 3877{ 3878 struct cmd_rcvr *rcvr; 3879 int rv = 0; 3880 unsigned char netfn; 3881 unsigned char cmd; 3882 unsigned char chan; 3883 struct ipmi_user *user = NULL; 3884 struct ipmi_ipmb_addr *ipmb_addr; 3885 struct ipmi_recv_msg *recv_msg; 3886 3887 if (msg->rsp_size < 10) { 3888 /* Message not big enough, just ignore it. */ 3889 ipmi_inc_stat(intf, invalid_commands); 3890 return 0; 3891 } 3892 3893 if (msg->rsp[2] != 0) { 3894 /* An error getting the response, just ignore it. 
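 *
 * (For reference, the Get Message response bytes used below
 * are: rsp[2] completion code, rsp[3] channel, rsp[4]
 * netfn << 2 | LUN, rsp[6] sender's slave address, rsp[7]
 * rqSeq << 2 | LUN, rsp[8] command, rsp[9..] payload, plus a
 * trailing checksum byte.)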
*/ 3895 return 0; 3896 } 3897 3898 netfn = msg->rsp[4] >> 2; 3899 cmd = msg->rsp[8]; 3900 chan = msg->rsp[3] & 0xf; 3901 3902 rcu_read_lock(); 3903 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3904 if (rcvr) { 3905 user = rcvr->user; 3906 kref_get(&user->refcount); 3907 } else 3908 user = NULL; 3909 rcu_read_unlock(); 3910 3911 if (user == NULL) { 3912 /* We didn't find a user, deliver an error response. */ 3913 ipmi_inc_stat(intf, unhandled_commands); 3914 3915 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3916 msg->data[1] = IPMI_SEND_MSG_CMD; 3917 msg->data[2] = msg->rsp[3]; 3918 msg->data[3] = msg->rsp[6]; 3919 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3920 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3921 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3922 /* rqseq/lun */ 3923 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3924 msg->data[8] = msg->rsp[8]; /* cmd */ 3925 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3926 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3927 msg->data_size = 11; 3928 3929 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3930 msg->data_size, msg->data); 3931 3932 rcu_read_lock(); 3933 if (!intf->in_shutdown) { 3934 smi_send(intf, intf->handlers, msg, 0); 3935 /* 3936 * We used the message, so return the value 3937 * that causes it to not be freed or 3938 * queued. 3939 */ 3940 rv = -1; 3941 } 3942 rcu_read_unlock(); 3943 } else { 3944 recv_msg = ipmi_alloc_recv_msg(); 3945 if (!recv_msg) { 3946 /* 3947 * We couldn't allocate memory for the 3948 * message, so requeue it for handling 3949 * later. 3950 */ 3951 rv = 1; 3952 kref_put(&user->refcount, free_user); 3953 } else { 3954 /* Extract the source address from the data. */ 3955 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3956 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3957 ipmb_addr->slave_addr = msg->rsp[6]; 3958 ipmb_addr->lun = msg->rsp[7] & 3; 3959 ipmb_addr->channel = msg->rsp[3] & 0xf; 3960 3961 /* 3962 * Extract the rest of the message information 3963 * from the IPMB header. 3964 */ 3965 recv_msg->user = user; 3966 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3967 recv_msg->msgid = msg->rsp[7] >> 2; 3968 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3969 recv_msg->msg.cmd = msg->rsp[8]; 3970 recv_msg->msg.data = recv_msg->msg_data; 3971 3972 /* 3973 * We chop off 10, not 9 bytes because the checksum 3974 * at the end also needs to be removed. 3975 */ 3976 recv_msg->msg.data_len = msg->rsp_size - 10; 3977 memcpy(recv_msg->msg_data, &msg->rsp[9], 3978 msg->rsp_size - 10); 3979 if (deliver_response(intf, recv_msg)) 3980 ipmi_inc_stat(intf, unhandled_commands); 3981 else 3982 ipmi_inc_stat(intf, handled_commands); 3983 } 3984 } 3985 3986 return rv; 3987} 3988 3989static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3990 struct ipmi_smi_msg *msg) 3991{ 3992 struct cmd_rcvr *rcvr; 3993 int rv = 0; 3994 struct ipmi_user *user = NULL; 3995 struct ipmi_ipmb_direct_addr *daddr; 3996 struct ipmi_recv_msg *recv_msg; 3997 unsigned char netfn = msg->rsp[0] >> 2; 3998 unsigned char cmd = msg->rsp[3]; 3999 4000 rcu_read_lock(); 4001 /* We always use channel 0 for direct messages. */ 4002 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 4003 if (rcvr) { 4004 user = rcvr->user; 4005 kref_get(&user->refcount); 4006 } else 4007 user = NULL; 4008 rcu_read_unlock(); 4009 4010 if (user == NULL) { 4011 /* We didn't find a user, deliver an error response. 
*/ 4012 ipmi_inc_stat(intf, unhandled_commands); 4013 4014 msg->data[0] = (netfn + 1) << 2; 4015 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 4016 msg->data[1] = msg->rsp[1]; /* Addr */ 4017 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 4018 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 4019 msg->data[3] = cmd; 4020 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 4021 msg->data_size = 5; 4022 4023 rcu_read_lock(); 4024 if (!intf->in_shutdown) { 4025 smi_send(intf, intf->handlers, msg, 0); 4026 /* 4027 * We used the message, so return the value 4028 * that causes it to not be freed or 4029 * queued. 4030 */ 4031 rv = -1; 4032 } 4033 rcu_read_unlock(); 4034 } else { 4035 recv_msg = ipmi_alloc_recv_msg(); 4036 if (!recv_msg) { 4037 /* 4038 * We couldn't allocate memory for the 4039 * message, so requeue it for handling 4040 * later. 4041 */ 4042 rv = 1; 4043 kref_put(&user->refcount, free_user); 4044 } else { 4045 /* Extract the source address from the data. */ 4046 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4047 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4048 daddr->channel = 0; 4049 daddr->slave_addr = msg->rsp[1]; 4050 daddr->rs_lun = msg->rsp[0] & 3; 4051 daddr->rq_lun = msg->rsp[2] & 3; 4052 4053 /* 4054 * Extract the rest of the message information 4055 * from the IPMB header. 4056 */ 4057 recv_msg->user = user; 4058 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4059 recv_msg->msgid = (msg->rsp[2] >> 2); 4060 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4061 recv_msg->msg.cmd = msg->rsp[3]; 4062 recv_msg->msg.data = recv_msg->msg_data; 4063 4064 recv_msg->msg.data_len = msg->rsp_size - 4; 4065 memcpy(recv_msg->msg_data, msg->rsp + 4, 4066 msg->rsp_size - 4); 4067 if (deliver_response(intf, recv_msg)) 4068 ipmi_inc_stat(intf, unhandled_commands); 4069 else 4070 ipmi_inc_stat(intf, handled_commands); 4071 } 4072 } 4073 4074 return rv; 4075} 4076 4077static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4078 struct ipmi_smi_msg *msg) 4079{ 4080 struct ipmi_recv_msg *recv_msg; 4081 struct ipmi_ipmb_direct_addr *daddr; 4082 4083 recv_msg = msg->user_data; 4084 if (recv_msg == NULL) { 4085 dev_warn(intf->si_dev, 4086 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4087 return 0; 4088 } 4089 4090 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4091 recv_msg->msgid = msg->msgid; 4092 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4093 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4094 daddr->channel = 0; 4095 daddr->slave_addr = msg->rsp[1]; 4096 daddr->rq_lun = msg->rsp[0] & 3; 4097 daddr->rs_lun = msg->rsp[2] & 3; 4098 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4099 recv_msg->msg.cmd = msg->rsp[3]; 4100 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4101 recv_msg->msg.data = recv_msg->msg_data; 4102 recv_msg->msg.data_len = msg->rsp_size - 4; 4103 deliver_local_response(intf, recv_msg); 4104 4105 return 0; 4106} 4107 4108static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4109 struct ipmi_smi_msg *msg) 4110{ 4111 struct ipmi_lan_addr lan_addr; 4112 struct ipmi_recv_msg *recv_msg; 4113 4114 4115 /* 4116 * This is 13, not 12, because the response must contain a 4117 * completion code. 4118 */ 4119 if (msg->rsp_size < 13) { 4120 /* Message not big enough, just ignore it. 
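 *
 * (For reference, the LAN response bytes used below are:
 * rsp[2] completion code, rsp[3] channel and privilege,
 * rsp[4] session handle, rsp[5] local SWID, rsp[6] netfn << 2,
 * rsp[8] remote SWID, rsp[9] sequence << 2 | LUN, rsp[10]
 * command, rsp[11..] payload, plus a trailing checksum byte.)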
*/ 4121 ipmi_inc_stat(intf, invalid_lan_responses); 4122 return 0; 4123 } 4124 4125 if (msg->rsp[2] != 0) { 4126 /* An error getting the response, just ignore it. */ 4127 return 0; 4128 } 4129 4130 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4131 lan_addr.session_handle = msg->rsp[4]; 4132 lan_addr.remote_SWID = msg->rsp[8]; 4133 lan_addr.local_SWID = msg->rsp[5]; 4134 lan_addr.channel = msg->rsp[3] & 0x0f; 4135 lan_addr.privilege = msg->rsp[3] >> 4; 4136 lan_addr.lun = msg->rsp[9] & 3; 4137 4138 /* 4139 * It's a response from a remote entity. Look up the sequence 4140 * number and handle the response. 4141 */ 4142 if (intf_find_seq(intf, 4143 msg->rsp[9] >> 2, 4144 msg->rsp[3] & 0x0f, 4145 msg->rsp[10], 4146 (msg->rsp[6] >> 2) & (~1), 4147 (struct ipmi_addr *) &lan_addr, 4148 &recv_msg)) { 4149 /* 4150 * We were unable to find the sequence number, 4151 * so just nuke the message. 4152 */ 4153 ipmi_inc_stat(intf, unhandled_lan_responses); 4154 return 0; 4155 } 4156 4157 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4158 /* 4159 * The other fields matched, so no need to set them, except 4160 * for netfn, which needs to be the response that was 4161 * returned, not the request value. 4162 */ 4163 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4164 recv_msg->msg.data = recv_msg->msg_data; 4165 recv_msg->msg.data_len = msg->rsp_size - 12; 4166 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4167 if (deliver_response(intf, recv_msg)) 4168 ipmi_inc_stat(intf, unhandled_lan_responses); 4169 else 4170 ipmi_inc_stat(intf, handled_lan_responses); 4171 4172 return 0; 4173} 4174 4175static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4176 struct ipmi_smi_msg *msg) 4177{ 4178 struct cmd_rcvr *rcvr; 4179 int rv = 0; 4180 unsigned char netfn; 4181 unsigned char cmd; 4182 unsigned char chan; 4183 struct ipmi_user *user = NULL; 4184 struct ipmi_lan_addr *lan_addr; 4185 struct ipmi_recv_msg *recv_msg; 4186 4187 if (msg->rsp_size < 12) { 4188 /* Message not big enough, just ignore it. */ 4189 ipmi_inc_stat(intf, invalid_commands); 4190 return 0; 4191 } 4192 4193 if (msg->rsp[2] != 0) { 4194 /* An error getting the response, just ignore it. */ 4195 return 0; 4196 } 4197 4198 netfn = msg->rsp[6] >> 2; 4199 cmd = msg->rsp[10]; 4200 chan = msg->rsp[3] & 0xf; 4201 4202 rcu_read_lock(); 4203 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4204 if (rcvr) { 4205 user = rcvr->user; 4206 kref_get(&user->refcount); 4207 } else 4208 user = NULL; 4209 rcu_read_unlock(); 4210 4211 if (user == NULL) { 4212 /* We didn't find a user, just give up. */ 4213 ipmi_inc_stat(intf, unhandled_commands); 4214 4215 /* 4216 * Don't do anything with these messages, just allow 4217 * them to be freed. 4218 */ 4219 rv = 0; 4220 } else { 4221 recv_msg = ipmi_alloc_recv_msg(); 4222 if (!recv_msg) { 4223 /* 4224 * We couldn't allocate memory for the 4225 * message, so requeue it for handling later. 4226 */ 4227 rv = 1; 4228 kref_put(&user->refcount, free_user); 4229 } else { 4230 /* Extract the source address from the data. */ 4231 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4232 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4233 lan_addr->session_handle = msg->rsp[4]; 4234 lan_addr->remote_SWID = msg->rsp[8]; 4235 lan_addr->local_SWID = msg->rsp[5]; 4236 lan_addr->lun = msg->rsp[9] & 3; 4237 lan_addr->channel = msg->rsp[3] & 0xf; 4238 lan_addr->privilege = msg->rsp[3] >> 4; 4239 4240 /* 4241 * Extract the rest of the message information 4242 * from the IPMB header. 
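			 * The sequence number is saved as the msgid; the
			 * user echoes it in its reply so the response can
			 * be routed back.  The trailing checksum is
			 * stripped from the data below.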
4243 			 */
4244 			recv_msg->user = user;
4245 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4246 			recv_msg->msgid = msg->rsp[9] >> 2;
4247 			recv_msg->msg.netfn = msg->rsp[6] >> 2;
4248 			recv_msg->msg.cmd = msg->rsp[10];
4249 			recv_msg->msg.data = recv_msg->msg_data;
4250 
4251 			/*
4252 			 * We chop off 12, not 11 bytes because the checksum
4253 			 * at the end also needs to be removed.
4254 			 */
4255 			recv_msg->msg.data_len = msg->rsp_size - 12;
4256 			memcpy(recv_msg->msg_data, &msg->rsp[11],
4257 			       msg->rsp_size - 12);
4258 			if (deliver_response(intf, recv_msg))
4259 				ipmi_inc_stat(intf, unhandled_commands);
4260 			else
4261 				ipmi_inc_stat(intf, handled_commands);
4262 		}
4263 	}
4264 
4265 	return rv;
4266}
4267 
4268/*
4269 * This routine will handle "Get Message" command responses with
4270 * channels that use an OEM Medium.  The message format belongs to
4271 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
4272 * Chapter 22, sections 22.6 and 22.24 for more details.
4273 */
4274static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4275 				  struct ipmi_smi_msg *msg)
4276{
4277 	struct cmd_rcvr *rcvr;
4278 	int rv = 0;
4279 	unsigned char netfn;
4280 	unsigned char cmd;
4281 	unsigned char chan;
4282 	struct ipmi_user *user = NULL;
4283 	struct ipmi_system_interface_addr *smi_addr;
4284 	struct ipmi_recv_msg *recv_msg;
4285 
4286 	/*
4287 	 * We expect the OEM SW to perform error checking,
4288 	 * so we just do some basic sanity checks here.
4289 	 */
4290 	if (msg->rsp_size < 4) {
4291 		/* Message not big enough, just ignore it. */
4292 		ipmi_inc_stat(intf, invalid_commands);
4293 		return 0;
4294 	}
4295 
4296 	if (msg->rsp[2] != 0) {
4297 		/* An error getting the response, just ignore it. */
4298 		return 0;
4299 	}
4300 
4301 	/*
4302 	 * This is an OEM message, so the OEM needs to know how
4303 	 * to handle the message.  We do no interpretation.
4304 	 */
4305 	netfn = msg->rsp[0] >> 2;
4306 	cmd = msg->rsp[1];
4307 	chan = msg->rsp[3] & 0xf;
4308 
4309 	rcu_read_lock();
4310 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4311 	if (rcvr) {
4312 		user = rcvr->user;
4313 		kref_get(&user->refcount);
4314 	} else
4315 		user = NULL;
4316 	rcu_read_unlock();
4317 
4318 	if (user == NULL) {
4319 		/* We didn't find a user, just give up. */
4320 		ipmi_inc_stat(intf, unhandled_commands);
4321 
4322 		/*
4323 		 * Don't do anything with these messages, just allow
4324 		 * them to be freed.
4325 		 */
4326 
4327 		rv = 0;
4328 	} else {
4329 		recv_msg = ipmi_alloc_recv_msg();
4330 		if (!recv_msg) {
4331 			/*
4332 			 * We couldn't allocate memory for the
4333 			 * message, so requeue it for handling
4334 			 * later.
4335 			 */
4336 			rv = 1;
4337 			kref_put(&user->refcount, free_user);
4338 		} else {
4339 			/*
4340 			 * OEM Messages are expected to be delivered via
4341 			 * the system interface to SMS software. 
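			 * (Hence the system-interface address with
			 * IPMI_BMC_CHANNEL that is filled in below.)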
We might
4342 			 * need to visit this again depending on OEM
4343 			 * requirements.
4344 			 */
4345 			smi_addr = ((struct ipmi_system_interface_addr *)
4346 				    &recv_msg->addr);
4347 			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4348 			smi_addr->channel = IPMI_BMC_CHANNEL;
4349 			smi_addr->lun = msg->rsp[0] & 3;
4350 
4351 			recv_msg->user = user;
4352 			recv_msg->user_msg_data = NULL;
4353 			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4354 			recv_msg->msg.netfn = msg->rsp[0] >> 2;
4355 			recv_msg->msg.cmd = msg->rsp[1];
4356 			recv_msg->msg.data = recv_msg->msg_data;
4357 
4358 			/*
4359 			 * The message starts at byte 4, which follows
4360 			 * the Channel Byte in the "Get Message" command.
4361 			 */
4362 			recv_msg->msg.data_len = msg->rsp_size - 4;
4363 			memcpy(recv_msg->msg_data, &msg->rsp[4],
4364 			       msg->rsp_size - 4);
4365 			if (deliver_response(intf, recv_msg))
4366 				ipmi_inc_stat(intf, unhandled_commands);
4367 			else
4368 				ipmi_inc_stat(intf, handled_commands);
4369 		}
4370 	}
4371 
4372 	return rv;
4373}
4374 
4375static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4376 				     struct ipmi_smi_msg *msg)
4377{
4378 	struct ipmi_system_interface_addr *smi_addr;
4379 
4380 	recv_msg->msgid = 0;
4381 	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4382 	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4383 	smi_addr->channel = IPMI_BMC_CHANNEL;
4384 	smi_addr->lun = msg->rsp[0] & 3;
4385 	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4386 	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4387 	recv_msg->msg.cmd = msg->rsp[1];
4388 	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4389 	recv_msg->msg.data = recv_msg->msg_data;
4390 	recv_msg->msg.data_len = msg->rsp_size - 3;
4391}
4392 
4393static int handle_read_event_rsp(struct ipmi_smi *intf,
4394 				 struct ipmi_smi_msg *msg)
4395{
4396 	struct ipmi_recv_msg *recv_msg, *recv_msg2;
4397 	struct list_head msgs;
4398 	struct ipmi_user *user;
4399 	int rv = 0, deliver_count = 0, index;
4400 	unsigned long flags;
4401 
4402 	if (msg->rsp_size < 19) {
4403 		/* Message is too small to be an IPMB event. */
4404 		ipmi_inc_stat(intf, invalid_events);
4405 		return 0;
4406 	}
4407 
4408 	if (msg->rsp[2] != 0) {
4409 		/* An error getting the event, just ignore it. */
4410 		return 0;
4411 	}
4412 
4413 	INIT_LIST_HEAD(&msgs);
4414 
4415 	spin_lock_irqsave(&intf->events_lock, flags);
4416 
4417 	ipmi_inc_stat(intf, events);
4418 
4419 	/*
4420 	 * Allocate and fill in one message for every user that is
4421 	 * getting events.
4422 	 */
4423 	index = srcu_read_lock(&intf->users_srcu);
4424 	list_for_each_entry_rcu(user, &intf->users, link) {
4425 		if (!user->gets_events)
4426 			continue;
4427 
4428 		recv_msg = ipmi_alloc_recv_msg();
4429 		if (!recv_msg) {
4430 			srcu_read_unlock(&intf->users_srcu, index);
4431 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4432 						 link) {
4433 				list_del(&recv_msg->link);
4434 				ipmi_free_recv_msg(recv_msg);
4435 			}
4436 			/*
4437 			 * We couldn't allocate memory for the
4438 			 * message, so requeue it for handling
4439 			 * later.
4440 			 */
4441 			rv = 1;
4442 			goto out;
4443 		}
4444 
4445 		deliver_count++;
4446 
4447 		copy_event_into_recv_msg(recv_msg, msg);
4448 		recv_msg->user = user;
4449 		kref_get(&user->refcount);
4450 		list_add_tail(&recv_msg->link, &msgs);
4451 	}
4452 	srcu_read_unlock(&intf->users_srcu, index);
4453 
4454 	if (deliver_count) {
4455 		/* Now deliver all the messages. 
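		 * Each user on the list got its own copy of the event and
		 * took its own reference on the user, so every recv_msg
		 * here is consumed by its delivery.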
*/
4456 		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4457 			list_del(&recv_msg->link);
4458 			deliver_local_response(intf, recv_msg);
4459 		}
4460 	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4461 		/*
4462 		 * No one is waiting for the message, so put it in the queue
4463 		 * if there are not already too many things in it.
4464 		 */
4465 		recv_msg = ipmi_alloc_recv_msg();
4466 		if (!recv_msg) {
4467 			/*
4468 			 * We couldn't allocate memory for the
4469 			 * message, so requeue it for handling
4470 			 * later.
4471 			 */
4472 			rv = 1;
4473 			goto out;
4474 		}
4475 
4476 		copy_event_into_recv_msg(recv_msg, msg);
4477 		list_add_tail(&recv_msg->link, &intf->waiting_events);
4478 		intf->waiting_events_count++;
4479 	} else if (!intf->event_msg_printed) {
4480 		/*
4481 		 * There are too many things in the queue, discard this
4482 		 * message.
4483 		 */
4484 		dev_warn(intf->si_dev,
4485 			 "Event queue full, discarding incoming events\n");
4486 		intf->event_msg_printed = 1;
4487 	}
4488 
4489  out:
4490 	spin_unlock_irqrestore(&intf->events_lock, flags);
4491 
4492 	return rv;
4493}
4494 
4495static int handle_bmc_rsp(struct ipmi_smi *intf,
4496 			  struct ipmi_smi_msg *msg)
4497{
4498 	struct ipmi_recv_msg *recv_msg;
4499 	struct ipmi_system_interface_addr *smi_addr;
4500 
4501 	recv_msg = msg->user_data;
4502 	if (recv_msg == NULL) {
4503 		dev_warn(intf->si_dev,
4504 			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4505 		return 0;
4506 	}
4507 
4508 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4509 	recv_msg->msgid = msg->msgid;
4510 	smi_addr = ((struct ipmi_system_interface_addr *)
4511 		    &recv_msg->addr);
4512 	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4513 	smi_addr->channel = IPMI_BMC_CHANNEL;
4514 	smi_addr->lun = msg->rsp[0] & 3;
4515 	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4516 	recv_msg->msg.cmd = msg->rsp[1];
4517 	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4518 	recv_msg->msg.data = recv_msg->msg_data;
4519 	recv_msg->msg.data_len = msg->rsp_size - 2;
4520 	deliver_local_response(intf, recv_msg);
4521 
4522 	return 0;
4523}
4524 
4525/*
4526 * Handle a received message.  Return 1 if the message should be requeued,
4527 * 0 if the message should be freed, or -1 if the message should not
4528 * be freed or requeued.
4529 */
4530static int handle_one_recv_msg(struct ipmi_smi *intf,
4531 			       struct ipmi_smi_msg *msg)
4532{
4533 	int requeue = 0;
4534 	int chan;
4535 	unsigned char cc;
4536 	bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4537 
4538 	dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4539 
4540 	if (msg->rsp_size < 2) {
4541 		/* Message is too small to be correct. */
4542 		dev_warn(intf->si_dev,
4543 			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4544 			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4545 
4546return_unspecified:
4547 		/* Generate an error response for the message. */
4548 		msg->rsp[0] = msg->data[0] | (1 << 2);
4549 		msg->rsp[1] = msg->data[1];
4550 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4551 		msg->rsp_size = 3;
4552 	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4553 		/* Commands must have at least 4 bytes, responses 5. */
4554 		if (is_cmd && (msg->rsp_size < 4)) {
4555 			ipmi_inc_stat(intf, invalid_commands);
4556 			goto out;
4557 		}
4558 		if (!is_cmd && (msg->rsp_size < 5)) {
4559 			ipmi_inc_stat(intf, invalid_ipmb_responses);
4560 			/* Construct a valid error response. 
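			 * The reply is synthesized from the original
			 * request in msg->data: the NetFN gets the
			 * response bit, the LUNs are swapped, and
			 * IPMI_ERR_UNSPECIFIED is the completion code.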
*/
4561 			msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4562 			msg->rsp[0] |= (1 << 2); /* Make it a response */
4563 			msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4564 			msg->rsp[1] = msg->data[1]; /* Addr */
4565 			msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4566 			msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4567 			msg->rsp[3] = msg->data[3]; /* Cmd */
4568 			msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4569 			msg->rsp_size = 5;
4570 		}
4571 	} else if ((msg->data_size >= 2)
4572 		   && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4573 		   && (msg->data[1] == IPMI_SEND_MSG_CMD)
4574 		   && (msg->user_data == NULL)) {
4575 
4576 		if (intf->in_shutdown)
4577 			goto out;
4578 
4579 		/*
4580 		 * This is the local response to a command send; start
4581 		 * the timer for these.  The user_data will not be
4582 		 * NULL if this is a response send, and we will let
4583 		 * response sends just go through.
4584 		 */
4585 
4586 		/*
4587 		 * Check for errors.  If we get certain errors (ones
4588 		 * that basically mean we can try again later), we
4589 		 * ignore them and start the timer.  Otherwise we
4590 		 * report the error immediately.
4591 		 */
4592 		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4593 		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4594 		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4595 		    && (msg->rsp[2] != IPMI_BUS_ERR)
4596 		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4597 			int ch = msg->rsp[3] & 0xf;
4598 			struct ipmi_channel *chans;
4599 
4600 			/* Got an error sending the message, handle it. */
4601 
4602 			chans = READ_ONCE(intf->channel_list)->c;
4603 			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4604 			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4605 				ipmi_inc_stat(intf, sent_lan_command_errs);
4606 			else
4607 				ipmi_inc_stat(intf, sent_ipmb_command_errs);
4608 			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4609 		} else
4610 			/* The message was sent, start the timer. */
4611 			intf_start_seq_timer(intf, msg->msgid);
4612 		requeue = 0;
4613 		goto out;
4614 	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4615 		   || (msg->rsp[1] != msg->data[1])) {
4616 		/*
4617 		 * The NetFN and Command in the response are not even
4618 		 * marginally correct.
4619 		 */
4620 		dev_warn(intf->si_dev,
4621 			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4622 			 (msg->data[0] >> 2) | 1, msg->data[1],
4623 			 msg->rsp[0] >> 2, msg->rsp[1]);
4624 
4625 		goto return_unspecified;
4626 	}
4627 
4628 	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4629 		if ((msg->data[0] >> 2) & 1) {
4630 			/* It's a response to a sent response. */
4631 			chan = 0;
4632 			cc = msg->rsp[4];
4633 			goto process_response_response;
4634 		}
4635 		if (is_cmd)
4636 			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4637 		else
4638 			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4639 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4640 		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4641 		   && (msg->user_data != NULL)) {
4642 		/*
4643 		 * It's a response to a response we sent.  For this we
4644 		 * deliver a send message response to the user. 
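		 * The user sees a recv_type of
		 * IPMI_RESPONSE_RESPONSE_TYPE whose single data byte
		 * holds the completion code of the send.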
4645 */ 4646 struct ipmi_recv_msg *recv_msg; 4647 4648 chan = msg->data[2] & 0x0f; 4649 if (chan >= IPMI_MAX_CHANNELS) 4650 /* Invalid channel number */ 4651 goto out; 4652 cc = msg->rsp[2]; 4653 4654process_response_response: 4655 recv_msg = msg->user_data; 4656 4657 requeue = 0; 4658 if (!recv_msg) 4659 goto out; 4660 4661 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4662 recv_msg->msg.data = recv_msg->msg_data; 4663 recv_msg->msg_data[0] = cc; 4664 recv_msg->msg.data_len = 1; 4665 deliver_local_response(intf, recv_msg); 4666 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4667 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4668 struct ipmi_channel *chans; 4669 4670 /* It's from the receive queue. */ 4671 chan = msg->rsp[3] & 0xf; 4672 if (chan >= IPMI_MAX_CHANNELS) { 4673 /* Invalid channel number */ 4674 requeue = 0; 4675 goto out; 4676 } 4677 4678 /* 4679 * We need to make sure the channels have been initialized. 4680 * The channel_handler routine will set the "curr_channel" 4681 * equal to or greater than IPMI_MAX_CHANNELS when all the 4682 * channels for this interface have been initialized. 4683 */ 4684 if (!intf->channels_ready) { 4685 requeue = 0; /* Throw the message away */ 4686 goto out; 4687 } 4688 4689 chans = READ_ONCE(intf->channel_list)->c; 4690 4691 switch (chans[chan].medium) { 4692 case IPMI_CHANNEL_MEDIUM_IPMB: 4693 if (msg->rsp[4] & 0x04) { 4694 /* 4695 * It's a response, so find the 4696 * requesting message and send it up. 4697 */ 4698 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4699 } else { 4700 /* 4701 * It's a command to the SMS from some other 4702 * entity. Handle that. 4703 */ 4704 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4705 } 4706 break; 4707 4708 case IPMI_CHANNEL_MEDIUM_8023LAN: 4709 case IPMI_CHANNEL_MEDIUM_ASYNC: 4710 if (msg->rsp[6] & 0x04) { 4711 /* 4712 * It's a response, so find the 4713 * requesting message and send it up. 4714 */ 4715 requeue = handle_lan_get_msg_rsp(intf, msg); 4716 } else { 4717 /* 4718 * It's a command to the SMS from some other 4719 * entity. Handle that. 4720 */ 4721 requeue = handle_lan_get_msg_cmd(intf, msg); 4722 } 4723 break; 4724 4725 default: 4726 /* Check for OEM Channels. Clients had better 4727 register for these commands. */ 4728 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4729 && (chans[chan].medium 4730 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4731 requeue = handle_oem_get_msg_cmd(intf, msg); 4732 } else { 4733 /* 4734 * We don't handle the channel type, so just 4735 * free the message. 4736 */ 4737 requeue = 0; 4738 } 4739 } 4740 4741 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4742 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4743 /* It's an asynchronous event. */ 4744 requeue = handle_read_event_rsp(intf, msg); 4745 } else { 4746 /* It's a response from the local BMC. */ 4747 requeue = handle_bmc_rsp(intf, msg); 4748 } 4749 4750 out: 4751 return requeue; 4752} 4753 4754/* 4755 * If there are messages in the queue or pretimeouts, handle them. 4756 */ 4757static void handle_new_recv_msgs(struct ipmi_smi *intf) 4758{ 4759 struct ipmi_smi_msg *smi_msg; 4760 unsigned long flags = 0; 4761 int rv; 4762 int run_to_completion = intf->run_to_completion; 4763 4764 /* See if any waiting messages need to be processed. 
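	 * When run_to_completion is set (as during a panic) we are
	 * single-threaded, so the queue lock is skipped.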
*/
4765 	if (!run_to_completion)
4766 		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4767 	while (!list_empty(&intf->waiting_rcv_msgs)) {
4768 		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4769 				     struct ipmi_smi_msg, link);
4770 		list_del(&smi_msg->link);
4771 		if (!run_to_completion)
4772 			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4773 					       flags);
4774 		rv = handle_one_recv_msg(intf, smi_msg);
4775 		if (!run_to_completion)
4776 			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4777 		if (rv > 0) {
4778 			/*
4779 			 * To preserve message order, quit if we
4780 			 * can't handle a message.  Add the message
4781 			 * back at the head; this is safe because this
4782 			 * tasklet is the only thing that pulls the
4783 			 * messages.
4784 			 */
4785 			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4786 			break;
4787 		} else {
4788 			if (rv == 0)
4789 				/* Message handled */
4790 				ipmi_free_smi_msg(smi_msg);
4791 			/* If rv < 0, fatal error, del but don't free. */
4792 		}
4793 	}
4794 	if (!run_to_completion)
4795 		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4796 
4797 	/*
4798 	 * If the pretimeout count is non-zero, decrement it by one and
4799 	 * deliver a pretimeout to all the users.
4800 	 */
4801 	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4802 		struct ipmi_user *user;
4803 		int index;
4804 
4805 		index = srcu_read_lock(&intf->users_srcu);
4806 		list_for_each_entry_rcu(user, &intf->users, link) {
4807 			if (user->handler->ipmi_watchdog_pretimeout)
4808 				user->handler->ipmi_watchdog_pretimeout(
4809 					user->handler_data);
4810 		}
4811 		srcu_read_unlock(&intf->users_srcu, index);
4812 	}
4813}
4814 
4815static void smi_recv_tasklet(struct tasklet_struct *t)
4816{
4817 	unsigned long flags = 0; /* keep us warning-free. */
4818 	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4819 	int run_to_completion = intf->run_to_completion;
4820 	struct ipmi_smi_msg *newmsg = NULL;
4821 
4822 	/*
4823 	 * Start the next message if available.
4824 	 *
4825 	 * Do this here, not in the actual receiver; the lower layer is
4826 	 * allowed to hold locks while calling message delivery, and
4827 	 * starting the next message there could deadlock.
4828 	 */
4829 
4830 	rcu_read_lock();
4831 
4832 	if (!run_to_completion)
4833 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4834 	if (intf->curr_msg == NULL && !intf->in_shutdown) {
4835 		struct list_head *entry = NULL;
4836 
4837 		/* Pick the high priority queue first. */
4838 		if (!list_empty(&intf->hp_xmit_msgs))
4839 			entry = intf->hp_xmit_msgs.next;
4840 		else if (!list_empty(&intf->xmit_msgs))
4841 			entry = intf->xmit_msgs.next;
4842 
4843 		if (entry) {
4844 			list_del(entry);
4845 			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4846 			intf->curr_msg = newmsg;
4847 		}
4848 	}
4849 
4850 	if (!run_to_completion)
4851 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4852 	if (newmsg)
4853 		intf->handlers->sender(intf->send_info, newmsg);
4854 
4855 	rcu_read_unlock();
4856 
4857 	handle_new_recv_msgs(intf);
4858}
4859 
4860/* Handle a new message from the lower layer. */
4861void ipmi_smi_msg_received(struct ipmi_smi *intf,
4862 			   struct ipmi_smi_msg *msg)
4863{
4864 	unsigned long flags = 0; /* keep us warning-free. */
4865 	int run_to_completion = intf->run_to_completion;
4866 
4867 	/*
4868 	 * To preserve message order, we keep a queue and deliver from
4869 	 * a tasklet. 
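	 * The lower layer may call this from interrupt context with
	 * msg->rsp and msg->rsp_size already filled in, which is one
	 * more reason no delivery work is done here.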
4870 */ 4871 if (!run_to_completion) 4872 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4873 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4874 if (!run_to_completion) 4875 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4876 flags); 4877 4878 if (!run_to_completion) 4879 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4880 /* 4881 * We can get an asynchronous event or receive message in addition 4882 * to commands we send. 4883 */ 4884 if (msg == intf->curr_msg) 4885 intf->curr_msg = NULL; 4886 if (!run_to_completion) 4887 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4888 4889 if (run_to_completion) 4890 smi_recv_tasklet(&intf->recv_tasklet); 4891 else 4892 tasklet_schedule(&intf->recv_tasklet); 4893} 4894EXPORT_SYMBOL(ipmi_smi_msg_received); 4895 4896void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4897{ 4898 if (intf->in_shutdown) 4899 return; 4900 4901 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4902 tasklet_schedule(&intf->recv_tasklet); 4903} 4904EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4905 4906static struct ipmi_smi_msg * 4907smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4908 unsigned char seq, long seqid) 4909{ 4910 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4911 if (!smi_msg) 4912 /* 4913 * If we can't allocate the message, then just return, we 4914 * get 4 retries, so this should be ok. 4915 */ 4916 return NULL; 4917 4918 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4919 smi_msg->data_size = recv_msg->msg.data_len; 4920 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4921 4922 dev_dbg(intf->si_dev, "Resend: %*ph\n", 4923 smi_msg->data_size, smi_msg->data); 4924 4925 return smi_msg; 4926} 4927 4928static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4929 struct list_head *timeouts, 4930 unsigned long timeout_period, 4931 int slot, unsigned long *flags, 4932 bool *need_timer) 4933{ 4934 struct ipmi_recv_msg *msg; 4935 4936 if (intf->in_shutdown) 4937 return; 4938 4939 if (!ent->inuse) 4940 return; 4941 4942 if (timeout_period < ent->timeout) { 4943 ent->timeout -= timeout_period; 4944 *need_timer = true; 4945 return; 4946 } 4947 4948 if (ent->retries_left == 0) { 4949 /* The message has used all its retries. */ 4950 ent->inuse = 0; 4951 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4952 msg = ent->recv_msg; 4953 list_add_tail(&msg->link, timeouts); 4954 if (ent->broadcast) 4955 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4956 else if (is_lan_addr(&ent->recv_msg->addr)) 4957 ipmi_inc_stat(intf, timed_out_lan_commands); 4958 else 4959 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4960 } else { 4961 struct ipmi_smi_msg *smi_msg; 4962 /* More retries, send again. */ 4963 4964 *need_timer = true; 4965 4966 /* 4967 * Start with the max timer, set to normal timer after 4968 * the message is sent. 4969 */ 4970 ent->timeout = MAX_MSG_TIMEOUT; 4971 ent->retries_left--; 4972 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4973 ent->seqid); 4974 if (!smi_msg) { 4975 if (is_lan_addr(&ent->recv_msg->addr)) 4976 ipmi_inc_stat(intf, 4977 dropped_rexmit_lan_commands); 4978 else 4979 ipmi_inc_stat(intf, 4980 dropped_rexmit_ipmb_commands); 4981 return; 4982 } 4983 4984 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4985 4986 /* 4987 * Send the new message. We send with a zero 4988 * priority. 
It timed out, I doubt time is that 4989 * critical now, and high priority messages are really 4990 * only for messages to the local MC, which don't get 4991 * resent. 4992 */ 4993 if (intf->handlers) { 4994 if (is_lan_addr(&ent->recv_msg->addr)) 4995 ipmi_inc_stat(intf, 4996 retransmitted_lan_commands); 4997 else 4998 ipmi_inc_stat(intf, 4999 retransmitted_ipmb_commands); 5000 5001 smi_send(intf, intf->handlers, smi_msg, 0); 5002 } else 5003 ipmi_free_smi_msg(smi_msg); 5004 5005 spin_lock_irqsave(&intf->seq_lock, *flags); 5006 } 5007} 5008 5009static bool ipmi_timeout_handler(struct ipmi_smi *intf, 5010 unsigned long timeout_period) 5011{ 5012 struct list_head timeouts; 5013 struct ipmi_recv_msg *msg, *msg2; 5014 unsigned long flags; 5015 int i; 5016 bool need_timer = false; 5017 5018 if (!intf->bmc_registered) { 5019 kref_get(&intf->refcount); 5020 if (!schedule_work(&intf->bmc_reg_work)) { 5021 kref_put(&intf->refcount, intf_free); 5022 need_timer = true; 5023 } 5024 } 5025 5026 /* 5027 * Go through the seq table and find any messages that 5028 * have timed out, putting them in the timeouts 5029 * list. 5030 */ 5031 INIT_LIST_HEAD(&timeouts); 5032 spin_lock_irqsave(&intf->seq_lock, flags); 5033 if (intf->ipmb_maintenance_mode_timeout) { 5034 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 5035 intf->ipmb_maintenance_mode_timeout = 0; 5036 else 5037 intf->ipmb_maintenance_mode_timeout -= timeout_period; 5038 } 5039 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 5040 check_msg_timeout(intf, &intf->seq_table[i], 5041 &timeouts, timeout_period, i, 5042 &flags, &need_timer); 5043 spin_unlock_irqrestore(&intf->seq_lock, flags); 5044 5045 list_for_each_entry_safe(msg, msg2, &timeouts, link) 5046 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 5047 5048 /* 5049 * Maintenance mode handling. Check the timeout 5050 * optimistically before we claim the lock. It may 5051 * mean a timeout gets missed occasionally, but that 5052 * only means the timeout gets extended by one period 5053 * in that case. No big deal, and it avoids the lock 5054 * most of the time. 5055 */ 5056 if (intf->auto_maintenance_timeout > 0) { 5057 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 5058 if (intf->auto_maintenance_timeout > 0) { 5059 intf->auto_maintenance_timeout 5060 -= timeout_period; 5061 if (!intf->maintenance_mode 5062 && (intf->auto_maintenance_timeout <= 0)) { 5063 intf->maintenance_mode_enable = false; 5064 maintenance_mode_update(intf); 5065 } 5066 } 5067 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 5068 flags); 5069 } 5070 5071 tasklet_schedule(&intf->recv_tasklet); 5072 5073 return need_timer; 5074} 5075 5076static void ipmi_request_event(struct ipmi_smi *intf) 5077{ 5078 /* No event requests when in maintenance mode. 
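	 * (The BMC may be in the middle of a maintenance operation,
	 * such as a firmware update, and should be left alone.)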
*/ 5079 if (intf->maintenance_mode_enable) 5080 return; 5081 5082 if (!intf->in_shutdown) 5083 intf->handlers->request_events(intf->send_info); 5084} 5085 5086static struct timer_list ipmi_timer; 5087 5088static atomic_t stop_operation; 5089 5090static void ipmi_timeout(struct timer_list *unused) 5091{ 5092 struct ipmi_smi *intf; 5093 bool need_timer = false; 5094 int index; 5095 5096 if (atomic_read(&stop_operation)) 5097 return; 5098 5099 index = srcu_read_lock(&ipmi_interfaces_srcu); 5100 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5101 if (atomic_read(&intf->event_waiters)) { 5102 intf->ticks_to_req_ev--; 5103 if (intf->ticks_to_req_ev == 0) { 5104 ipmi_request_event(intf); 5105 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 5106 } 5107 need_timer = true; 5108 } 5109 5110 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 5111 } 5112 srcu_read_unlock(&ipmi_interfaces_srcu, index); 5113 5114 if (need_timer) 5115 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5116} 5117 5118static void need_waiter(struct ipmi_smi *intf) 5119{ 5120 /* Racy, but worst case we start the timer twice. */ 5121 if (!timer_pending(&ipmi_timer)) 5122 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5123} 5124 5125static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 5126static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 5127 5128static void free_smi_msg(struct ipmi_smi_msg *msg) 5129{ 5130 atomic_dec(&smi_msg_inuse_count); 5131 /* Try to keep as much stuff out of the panic path as possible. */ 5132 if (!oops_in_progress) 5133 kfree(msg); 5134} 5135 5136struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5137{ 5138 struct ipmi_smi_msg *rv; 5139 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 5140 if (rv) { 5141 rv->done = free_smi_msg; 5142 rv->user_data = NULL; 5143 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5144 atomic_inc(&smi_msg_inuse_count); 5145 } 5146 return rv; 5147} 5148EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5149 5150static void free_recv_msg(struct ipmi_recv_msg *msg) 5151{ 5152 atomic_dec(&recv_msg_inuse_count); 5153 /* Try to keep as much stuff out of the panic path as possible. */ 5154 if (!oops_in_progress) 5155 kfree(msg); 5156} 5157 5158static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 5159{ 5160 struct ipmi_recv_msg *rv; 5161 5162 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 5163 if (rv) { 5164 rv->user = NULL; 5165 rv->done = free_recv_msg; 5166 atomic_inc(&recv_msg_inuse_count); 5167 } 5168 return rv; 5169} 5170 5171void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5172{ 5173 if (msg->user && !oops_in_progress) 5174 kref_put(&msg->user->refcount, free_user); 5175 msg->done(msg); 5176} 5177EXPORT_SYMBOL(ipmi_free_recv_msg); 5178 5179static atomic_t panic_done_count = ATOMIC_INIT(0); 5180 5181static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5182{ 5183 atomic_dec(&panic_done_count); 5184} 5185 5186static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5187{ 5188 atomic_dec(&panic_done_count); 5189} 5190 5191/* 5192 * Inside a panic, send a message and wait for a response. 
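 * Nothing here may sleep or allocate: the message structures live on
 * the caller's stack, completion is tracked with panic_done_count,
 * and we busy-wait by polling the interface.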
5193 */
5194static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
5195 					struct ipmi_addr *addr,
5196 					struct kernel_ipmi_msg *msg)
5197{
5198 	struct ipmi_smi_msg  smi_msg;
5199 	struct ipmi_recv_msg recv_msg;
5200 	int rv;
5201 
5202 	smi_msg.done = dummy_smi_done_handler;
5203 	recv_msg.done = dummy_recv_done_handler;
5204 	atomic_add(2, &panic_done_count);
5205 	rv = i_ipmi_request(NULL,
5206 			    intf,
5207 			    addr,
5208 			    0,
5209 			    msg,
5210 			    intf,
5211 			    &smi_msg,
5212 			    &recv_msg,
5213 			    0,
5214 			    intf->addrinfo[0].address,
5215 			    intf->addrinfo[0].lun,
5216 			    0, 1); /* Don't retry, and don't wait. */
5217 	if (rv)
5218 		atomic_sub(2, &panic_done_count);
5219 	else if (intf->handlers->flush_messages)
5220 		intf->handlers->flush_messages(intf->send_info);
5221 
5222 	while (atomic_read(&panic_done_count) != 0)
5223 		ipmi_poll(intf);
5224}
5225 
5226static void event_receiver_fetcher(struct ipmi_smi *intf,
5227 				   struct ipmi_recv_msg *msg)
5228{
5229 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5230 	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
5231 	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
5232 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5233 		/* A get event receiver command, save it. */
5234 		intf->event_receiver = msg->msg.data[1];
5235 		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
5236 	}
5237}
5238 
5239static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
5240{
5241 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5242 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
5243 	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
5244 	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5245 		/*
5246 		 * A get device id command, save if we are an event
5247 		 * receiver or generator.
5248 		 */
5249 		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
5250 		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
5251 	}
5252}
5253 
5254static void send_panic_events(struct ipmi_smi *intf, char *str)
5255{
5256 	struct kernel_ipmi_msg msg;
5257 	unsigned char data[16];
5258 	struct ipmi_system_interface_addr *si;
5259 	struct ipmi_addr addr;
5260 	char *p = str;
5261 	struct ipmi_ipmb_addr *ipmb;
5262 	int j;
5263 
5264 	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
5265 		return;
5266 
5267 	si = (struct ipmi_system_interface_addr *) &addr;
5268 	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5269 	si->channel = IPMI_BMC_CHANNEL;
5270 	si->lun = 0;
5271 
5272 	/* Fill in an event indicating that we have failed. */
5273 	msg.netfn = 0x04; /* Sensor or Event. */
5274 	msg.cmd = 2; /* Platform event command. */
5275 	msg.data = data;
5276 	msg.data_len = 8;
5277 	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
5278 	data[1] = 0x03; /* This is for IPMI 1.0. */
5279 	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
5280 	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
5281 	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
5282 
5283 	/*
5284 	 * Put a few breadcrumbs in.  Hopefully later we can add more things
5285 	 * to make the panic events more useful.
5286 	 */
5287 	if (str) {
5288 		data[3] = str[0];
5289 		data[6] = str[1];
5290 		data[7] = str[2];
5291 	}
5292 
5293 	/* Send the event announcing the panic. */
5294 	ipmi_panic_request_and_wait(intf, &addr, &msg);
5295 
5296 	/*
5297 	 * On every interface, dump a series of OEM events holding the
5298 	 * panic string.
5299 	 */
5300 	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
5301 		return;
5302 
5303 	/*
5304 	 * intf_num is used as a marker to tell if the
5305 	 * interface is valid. 
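	 * (It is only assigned once the interface is fully set up.)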
Thus we need a read barrier to
5306 	 * make sure data fetched before checking intf_num
5307 	 * won't be used.
5308 	 */
5309 	smp_rmb();
5310 
5311 	/*
5312 	 * First job here is to figure out where to send the
5313 	 * OEM events.  There's no way in IPMI to send OEM
5314 	 * events using an event send command, so we have to
5315 	 * find the SEL to put them in and stick them in
5316 	 * there.
5317 	 */
5318 
5319 	/* Get capabilities from the get device id. */
5320 	intf->local_sel_device = 0;
5321 	intf->local_event_generator = 0;
5322 	intf->event_receiver = 0;
5323 
5324 	/* Request the device info from the local MC. */
5325 	msg.netfn = IPMI_NETFN_APP_REQUEST;
5326 	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5327 	msg.data = NULL;
5328 	msg.data_len = 0;
5329 	intf->null_user_handler = device_id_fetcher;
5330 	ipmi_panic_request_and_wait(intf, &addr, &msg);
5331 
5332 	if (intf->local_event_generator) {
5333 		/* Request the event receiver from the local MC. */
5334 		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5335 		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5336 		msg.data = NULL;
5337 		msg.data_len = 0;
5338 		intf->null_user_handler = event_receiver_fetcher;
5339 		ipmi_panic_request_and_wait(intf, &addr, &msg);
5340 	}
5341 	intf->null_user_handler = NULL;
5342 
5343 	/*
5344 	 * Validate the event receiver.  The low bit must not
5345 	 * be 1 (it must be a valid IPMB address), it cannot
5346 	 * be zero, and it must not be my address.
5347 	 */
5348 	if (((intf->event_receiver & 1) == 0)
5349 	    && (intf->event_receiver != 0)
5350 	    && (intf->event_receiver != intf->addrinfo[0].address)) {
5351 		/*
5352 		 * The event receiver is valid, send an IPMB
5353 		 * message.
5354 		 */
5355 		ipmb = (struct ipmi_ipmb_addr *) &addr;
5356 		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5357 		ipmb->channel = 0; /* FIXME - is this right? */
5358 		ipmb->lun = intf->event_receiver_lun;
5359 		ipmb->slave_addr = intf->event_receiver;
5360 	} else if (intf->local_sel_device) {
5361 		/*
5362 		 * The event receiver was not valid (or was
5363 		 * me), but I am an SEL device; just dump it
5364 		 * in my SEL.
5365 		 */
5366 		si = (struct ipmi_system_interface_addr *) &addr;
5367 		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5368 		si->channel = IPMI_BMC_CHANNEL;
5369 		si->lun = 0;
5370 	} else
5371 		return; /* Nowhere to send the event. */
5372 
5373 	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5374 	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5375 	msg.data = data;
5376 	msg.data_len = 16;
5377 
5378 	j = 0;
5379 	while (*p) {
5380 		int size = strlen(p);
5381 
5382 		if (size > 11)
5383 			size = 11;
5384 		data[0] = 0;
5385 		data[1] = 0;
5386 		data[2] = 0xf0; /* OEM event without timestamp. */
5387 		data[3] = intf->addrinfo[0].address;
5388 		data[4] = j++; /* sequence # */
5389 		/*
5390 		 * Always give 11 bytes, so strncpy will fill
5391 		 * it with zeroes for me.
5392 		 */
5393 		strncpy(data+5, p, 11);
5394 		p += size;
5395 
5396 		ipmi_panic_request_and_wait(intf, &addr, &msg);
5397 	}
5398}
5399 
5400static int has_panicked;
5401 
5402static int panic_event(struct notifier_block *this,
5403 		       unsigned long event,
5404 		       void *ptr)
5405{
5406 	struct ipmi_smi *intf;
5407 	struct ipmi_user *user;
5408 
5409 	if (has_panicked)
5410 		return NOTIFY_DONE;
5411 	has_panicked = 1;
5412 
5413 	/* For every registered interface, set it to run to completion. */
5414 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5415 		if (!intf->handlers || intf->intf_num == -1)
5416 			/* Interface is not ready. 
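			 * (registration has not finished, or it is
			 * shutting down), so skip it.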
*/
5417 			continue;
5418 
5419 		if (!intf->handlers->poll)
5420 			continue;
5421 
5422 		/*
5423 		 * If we were interrupted while locking xmit_msgs_lock or
5424 		 * waiting_rcv_msgs_lock, the corresponding list may be
5425 		 * corrupted.  In this case, drop the items on the list
5426 		 * for safety.
5427 		 */
5428 		if (!spin_trylock(&intf->xmit_msgs_lock)) {
5429 			INIT_LIST_HEAD(&intf->xmit_msgs);
5430 			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5431 		} else
5432 			spin_unlock(&intf->xmit_msgs_lock);
5433 
5434 		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5435 			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5436 		else
5437 			spin_unlock(&intf->waiting_rcv_msgs_lock);
5438 
5439 		intf->run_to_completion = 1;
5440 		if (intf->handlers->set_run_to_completion)
5441 			intf->handlers->set_run_to_completion(intf->send_info,
5442 							      1);
5443 
5444 		list_for_each_entry_rcu(user, &intf->users, link) {
5445 			if (user->handler->ipmi_panic_handler)
5446 				user->handler->ipmi_panic_handler(
5447 					user->handler_data);
5448 		}
5449 
5450 		send_panic_events(intf, ptr);
5451 	}
5452 
5453 	return NOTIFY_DONE;
5454}
5455 
5456/* Must be called with ipmi_interfaces_mutex held. */
5457static int ipmi_register_driver(void)
5458{
5459 	int rv;
5460 
5461 	if (drvregistered)
5462 		return 0;
5463 
5464 	rv = driver_register(&ipmidriver.driver);
5465 	if (rv)
5466 		pr_err("Could not register IPMI driver\n");
5467 	else
5468 		drvregistered = true;
5469 	return rv;
5470}
5471 
5472static struct notifier_block panic_block = {
5473 	.notifier_call	= panic_event,
5474 	.next		= NULL,
5475 	.priority	= 200 /* priority: INT_MAX >= x >= 0 */
5476};
5477 
5478static int ipmi_init_msghandler(void)
5479{
5480 	int rv;
5481 
5482 	mutex_lock(&ipmi_interfaces_mutex);
5483 	rv = ipmi_register_driver();
5484 	if (rv)
5485 		goto out;
5486 	if (initialized)
5487 		goto out;
5488 
5489 	rv = init_srcu_struct(&ipmi_interfaces_srcu);
5490 	if (rv)
5491 		goto out;
5492 
5493 	remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5494 	if (!remove_work_wq) {
5495 		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
5496 		rv = -ENOMEM;
5497 		goto out_wq;
5498 	}
5499 
5500 	timer_setup(&ipmi_timer, ipmi_timeout, 0);
5501 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5502 
5503 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5504 
5505 	initialized = true;
5506 
5507out_wq:
5508 	if (rv)
5509 		cleanup_srcu_struct(&ipmi_interfaces_srcu);
5510out:
5511 	mutex_unlock(&ipmi_interfaces_mutex);
5512 	return rv;
5513}
5514 
5515static int __init ipmi_init_msghandler_mod(void)
5516{
5517 	int rv;
5518 
5519 	pr_info("version " IPMI_DRIVER_VERSION "\n");
5520 
5521 	mutex_lock(&ipmi_interfaces_mutex);
5522 	rv = ipmi_register_driver();
5523 	mutex_unlock(&ipmi_interfaces_mutex);
5524 
5525 	return rv;
5526}
5527 
5528static void __exit cleanup_ipmi(void)
5529{
5530 	int count;
5531 
5532 	if (initialized) {
5533 		destroy_workqueue(remove_work_wq);
5534 
5535 		atomic_notifier_chain_unregister(&panic_notifier_list,
5536 						 &panic_block);
5537 
5538 		/*
5539 		 * This can't be called if any interfaces exist, so no worry
5540 		 * about shutting down the interfaces.
5541 		 */
5542 
5543 		/*
5544 		 * Tell the timer to stop, then wait for it to stop.  This
5545 		 * avoids problems with race conditions removing the timer
5546 		 * here.
5547 		 */
5548 		atomic_set(&stop_operation, 1);
5549 		del_timer_sync(&ipmi_timer);
5550 
5551 		initialized = false;
5552 
5553 		/* Check for buffer leaks. 
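		 * Both counts should be zero by now: every ipmi_smi_msg
		 * and ipmi_recv_msg allocation bumps its inuse count,
		 * and the message's done handler drops it.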
*/ 5554 count = atomic_read(&smi_msg_inuse_count); 5555 if (count != 0) 5556 pr_warn("SMI message count %d at exit\n", count); 5557 count = atomic_read(&recv_msg_inuse_count); 5558 if (count != 0) 5559 pr_warn("recv message count %d at exit\n", count); 5560 5561 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5562 } 5563 if (drvregistered) 5564 driver_unregister(&ipmidriver.driver); 5565} 5566module_exit(cleanup_ipmi); 5567 5568module_init(ipmi_init_msghandler_mod); 5569MODULE_LICENSE("GPL"); 5570MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5571MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5572MODULE_VERSION(IPMI_DRIVER_VERSION); 5573MODULE_SOFTDEP("post: ipmi_devintf");
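
/*
 * For reference, a minimal sketch of an in-kernel user of this message
 * handler (hypothetical module code, error handling trimmed; the
 * receive handler runs from the delivery context and must not sleep,
 * and each message must be released with ipmi_free_recv_msg()):
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
 *	{
 *		// Inspect msg->recv_type, msg->msg.netfn, msg->msg.cmd...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *	};
 *	struct kernel_ipmi_msg kmsg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
 *	};
 *	int rv;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (!rv)
 *		rv = ipmi_request_settime(user, (struct ipmi_addr *)&addr,
 *					  0, &kmsg, NULL, 0, -1, 0);
 */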